| column | dtype | values |
|---|---|---|
| content | large_string | lengths 0–6.46M |
| path | large_string | lengths 3–331 |
| license_type | large_string | 2 classes |
| repo_name | large_string | lengths 5–125 |
| language | large_string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.46M |
| extension | large_string | 75 classes |
| text | string | lengths 0–6.46M |
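Each record below is one R source file followed by a line of its metadata. A minimal sketch of inspecting a locally exported Parquet shard of this dataset with the arrow package; the shard file name is a placeholder, not part of the dataset:

```r
# "shard-0000.parquet" is a hypothetical file name, for illustration only.
library(arrow)
library(dplyr)

shard <- read_parquet("shard-0000.parquet")
shard %>%
  select(repo_name, path, license_type, extension, length_bytes) %>%
  head()
```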
## qPCR boxplots
library(ggplot2)
library(data.table)
library(tidyr)
library(reshape2)
library(useful)
library(ggpubr)
library(dplyr)
setwd("/Users/breesheyroskams-hieter/Desktop/cfRNA/manuscript/revised_paper_with_validation/figures/qPCR")
metadata <- read.csv("../../tables/PP_metadata_keep_FINAL_updated.csv")
PP_qPCR <- read.table("../../tables/qPCR_data_pilot_cohort_filt.txt", header = T, stringsAsFactors = F, sep = "\t")
Targets <- read.table("../../tables/Target_guide.txt", stringsAsFactors = F, header = T)
counts_table_updated <- read.delim("../../tables/counts_table_updated.txt")
RPM_updated <- read.csv("../../tables/RPM_without_dates_updated.csv", stringsAsFactors = F, row.names = 1)
biomart_ensembl_geneid <- read.delim("../../../../biomart_ensembl_geneid.txt")
colors <- read.delim("../../tables/colors.txt", stringsAsFactors = FALSE)
## read in LVQ results
LVQ_HCCvsHD <- readRDS(file="../../tables/LVQ/HD_HCC_importance.rds")
LVQ_MMvsHD <- readRDS(file="../../tables/LVQ/HD_MM_importance.rds")
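## (the next two lines rank genes by LVQ variable importance for the HD class and keep the top 5 for each comparison)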
HCC.vs.HD_top5lvq <- rownames(LVQ_HCCvsHD$importance[order(LVQ_HCCvsHD$importance$HD, decreasing = TRUE),])[1:5]
MM.vs.HD_top5lvq <- rownames(LVQ_MMvsHD$importance[order(LVQ_MMvsHD$importance$HD, decreasing = TRUE),])[1:5]
## Assign a CT of 40 to all missing values
PP_qPCR[is.na(PP_qPCR$CT),3] <- 40
# Add group information
iv <- match(PP_qPCR$Sample, metadata$PP_ID)
PP_qPCR$Group <- metadata[iv,]$Status
iv <- match(PP_qPCR$Target_Name, Targets$Target_Name)
PP_qPCR$Target_type <- Targets[iv,]$Target_Type
# Rename all non-patient samples to a consistent naming structure
PP_qPCR[which(PP_qPCR$Sample %like% "NTC"),4] <- "NTC"
PP_qPCR[which(PP_qPCR$Sample =='(+)1'),4] <- "Positive Control"
PP_qPCR[which(PP_qPCR$Sample =='(+)2'),4] <- "Positive Control"
PP_qPCR[which(PP_qPCR$Sample =='(+)3'),4] <- "Positive Control"
PP_qPCR[which(PP_qPCR$Sample =='(+)4'),4] <- "Positive Control"
PP_qPCR[which(PP_qPCR$Sample =='(+)5'),4] <- "Positive Control"
PP_qPCR[is.na(PP_qPCR$Group),4] <- 'Control'
# Create separate dataframe for ACTB and B2M
PP_ACTB <- PP_qPCR[which(PP_qPCR$Target_Name == 'ACTB'),]
PP_B2M <- PP_qPCR[which(PP_qPCR$Target_Name == "B2M"),]
# Add in ACTB and B2M values to original dataframe
iv <- match(PP_qPCR$Sample, PP_ACTB$Sample)
PP_qPCR$ACTB <- PP_ACTB[iv,]$CT
iv <- match(PP_qPCR$Sample, PP_B2M$Sample)
PP_qPCR$B2M <- PP_B2M[iv,]$CT
# Calculate deltaCT values
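# (delta-Ct = target Ct minus housekeeping-gene Ct; lower values mean higher abundance relative to ACTB or B2M)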
PP_qPCR$delta_ACTB <- PP_qPCR$CT - PP_qPCR$ACTB
PP_qPCR$delta_B2M <- PP_qPCR$CT - PP_qPCR$B2M
## Create separate dataframes for each type
PP_HCC <- PP_qPCR[which(PP_qPCR$Target_type == "HCC-LVQ"),]
PP_MM <- PP_qPCR[which(PP_qPCR$Target_type == "MM-LVQ"),]
## plot boxplots for pairwise comparisons of HD-HCC and HD-MM
GenerateBoxplot <- function(data, target, baseline, genelist, colors) {
# Filter for types and targets
filt <- data[data$Group %in% c(target, baseline) & data$Target_Name %in% genelist,]
filt$Group <- factor(filt$Group, levels = c(baseline, target))
# Colors
colourBaseline <- colors[colors$Status==baseline,]$Colour
colourTarget <- colors[colors$Status==target,]$Colour
my_comparisons <- list(c(baseline,target))
p <- ggplot(filt, mapping = aes(x = Group, y = CT, color = Group)) +
geom_boxplot() +
theme_bw() +
theme(text = element_text(family = "Arial"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_text(size = 8),
axis.text.x = element_text(size = 8, angle = 90, hjust = 1),
axis.text.y = element_text(size = 5),
legend.position = "none") +
facet_wrap(~Target_Name, scales = 'free_y', ncol = 5) +
stat_compare_means(label = "p.signif", method = "t.test",
comparisons = my_comparisons,
show.legend = FALSE, size = 2, hide.ns = TRUE) +
scale_y_reverse(breaks = function(x) unique(floor(pretty(seq(0, (max(x) + 1) * 1.1)))),
expand = c(0.1,0)) +
scale_color_manual(breaks=c(baseline,target),
values=c(colourBaseline,colourTarget))
return(p)
}
HCC_boxplot <- GenerateBoxplot(data = PP_qPCR, target = "HCC", baseline = "NC", genelist = HCC.vs.HD_top5lvq, colors = colors)
MM_boxplot <- GenerateBoxplot(data = PP_qPCR, target = "MM", baseline = "NC", genelist = MM.vs.HD_top5lvq, colors = colors)
pdf("qRT_PCR_top5LVQ_boxplots.pdf", 4, 4.5, useDingbats = FALSE)
cowplot::plot_grid(MM_boxplot, HCC_boxplot, nrow = 2)
dev.off()
# to run the correlation plot we first need to create a matrix
PP_qPCR1 <- PP_qPCR[which(!PP_qPCR$Group %like% "NTC"),]
PP_qPCR1 <- PP_qPCR1[which(!PP_qPCR1$Group %like% "Positive"),]
PP_target_set <- unique(PP_qPCR1$Target_Name)
RPM_subset <- RPM_updated[which(RPM_updated$gene %in% PP_target_set),]
rownames(RPM_subset) <- RPM_subset[,1]
RPM_subset <- data.matrix(RPM_subset[,-1])
logRPM <- log2(RPM_subset + 1)
# making matrices for corrplot
PP_qpcr_raw <- PP_qPCR1[,1:3]
PP_qpcr_raw <- pivot_wider(PP_qpcr_raw, names_from = Target_Name, values_from = CT)
# Transpose and reformat
t_qpcr <- transpose(PP_qpcr_raw)
rownames(t_qpcr) <- colnames(PP_qpcr_raw)
colnames(t_qpcr) <- t_qpcr[1,]
t_qpcr <- t_qpcr[-1,]
t_qpcr$gene <- rownames(t_qpcr)
# Melt for plotting purposes
mlt_qpcr <- reshape2::melt(t_qpcr, id.vars = "gene", variable.name = "Sample", value.name = "CT")
logRPM <- data.frame(logRPM)
logRPM$gene <- rownames(logRPM)
mlt_RPM <- reshape2::melt(logRPM, id.vars = "gene", variable.name = "Sample", value.name = "log2RPM")
logRPM_qpcr <- merge(mlt_qpcr, mlt_RPM, by = c("Sample", "gene"))
logRPM_qpcr$CT <- as.numeric(logRPM_qpcr$CT)
logRPM_qpcr$log2RPM <- as.numeric(logRPM_qpcr$log2RPM)
## Remove log2RPM values that are zero or CT values that are 40
keep <- setdiff(1:nrow(logRPM_qpcr), rownames(logRPM_qpcr[logRPM_qpcr$CT==40 | logRPM_qpcr$log2RPM==0,]))
filt <- logRPM_qpcr[keep,]
## Remove CT values > 28
filt <- filt[filt$CT < 28,]
logRPM_qpcr_plot <- ggplot(filt, aes(x = log2RPM, y = CT)) +
stat_cor(method = "pearson", show.legend = FALSE, label.y = 30) +
geom_point(show.legend = FALSE, alpha = 0.7) +
xlab("RNA-Seq log2(RPM + 1)") +
ylab("RT-qPCR Ct")
pdf("qRT_PCR_RPM_corrPlot.pdf", 4, 4)
print(logRPM_qpcr_plot)
dev.off()
--- path: /scripts/figures/Figure_4.R | license_type: no_license | repo_name: ohsu-cedar-comp-hub/cfRNA-seq-pipeline-Ngo-manuscript-2019 | language: R | is_vendor: false | is_generated: false | length_bytes: 6,317 | extension: r ---
# chrisD_DonRecDeltaLinear.R
# -------------------------------------------------------------------
# Copyright 2011 Christiaan Klijn <c.klijn@nki.nl>
# Project: aCGH data Chris D - matched primary and metastasis
# Description: Code for the difference between the donors and
#              recipients.
#              Use linear regression normalization to make the arrays
#              comparable, then index the differences between the
#              paired samples.
# -------------------------------------------------------------------
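# Minimal illustration of the normalization idea on toy data (not part of the
# original analysis; the sourced deltaLinear()/deltaLinearSeg() functions do
# the real work below):
#   set.seed(1)
#   donor     <- rnorm(1000, sd = 0.3)                 # donor array log2 ratios
#   recipient <- 0.8 * donor + rnorm(1000, sd = 0.1)   # paired recipient array
#   fit   <- lm(recipient ~ donor)                     # regress one array on the other
#   delta <- recipient - predict(fit)                  # per-probe difference after rescaling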
# Run on medoid
# Working dir
setwd("/home/klijn/data/smallproj/chrisD/")
# Code
source('~/gitCodeChris/generalFunctionsR/chris_cghdata_analysis.R')
source('~/gitCodeChris/generalFunctionsR/chris_delta_functions.R')
library(KCsmart)
library(DNAcopy)
library(robustbase)
library(preprocessCore)
# Data
load('rawData_chrisD.Rda')
load('chrisD_segmentedKC.Rda')
data(mmMirrorLocs)
altMirrorLocs <- mmMirrorLocs[-21]
attributes(altMirrorLocs) <- attributes(mmMirrorLocs)
# Fix the segmented data, remove the appended X
colnames(allKCseg$data) <- gsub('X', '', colnames(allKCseg$data))
# Sort allKC on chromosome and maploc
allKC <- allKC[order(allKC$chrom, allKC$maploc),]
# Check if sampleinfo and the data are ordered the same
all.equal(colnames(allKC[,3:ncol(allKC)]), paste(sampleInfo$Slide,
sampleInfo$Spot, sep=''))
# Remove the negative control sample
negativeControl <- grep('NegativeControl', sampleInfo$SampleID)
if (length(negativeControl) > 0) {
allKC <- allKC[,-(negativeControl+2)]
sampleInfo <- sampleInfo[-negativeControl,]
}
# Assign tumor numbers to sample, NA warning for the negative control
sampleInfo$tumNum <- as.numeric(gsub('[A-Z|a-z]','', sampleInfo$DRSet))
# Assign CGHID
sampleInfo$CGHID <- paste(sampleInfo$Slide, sampleInfo$Spot, sep='')
# Deltas between donor tumors and recepient tumors
# Aggregate per tumor
# First define the donor hybs and name them
tumNums <- unique(sampleInfo$tumNum)
diffList <- vector(mode='list', length=length(tumNums))
names(diffList) <- paste('T', tumNums, sep='')
for (t in tumNums) {
tempSampInfo <- subset(sampleInfo, tumNum == t)
donorSample <- tempSampInfo$CGHID[grep('D', tempSampInfo$DRSet)]
recepientSamples <- tempSampInfo$CGHID[grepl('R', tempSampInfo$DRSet) &
tempSampInfo$Site == 'Primary']
names(recepientSamples) <- tempSampInfo$DRSet[grepl('R',
tempSampInfo$DRSet) & tempSampInfo$Site == 'Primary']
resultList <- vector(mode='list', length=length(recepientSamples))
names(resultList) <- names(recepientSamples)
for (r in 1:length(recepientSamples)) {
tempKC <- allKC[,c('chrom', 'maploc', recepientSamples[r],
donorSample)]
tempSeg <- subset(allKCseg,
samplelist=c(recepientSamples[r], donorSample))
resultList[[names(recepientSamples)[r]]] <-
deltaLinear(comb= c(recepientSamples[r], donorSample),
tempKC, tempSeg, thres=.2)
}
diffList[[paste('T', t, sep='')]] <-
cbind(allKC[, c('chrom', 'maploc')], resultList)
}
par(mfrow=c(3,1))
plotRawCghDotPlot(KCdataSet=allKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=24, chromosomes=6, setcex=2)
plotRawCghDotPlot(KCdataSet=allKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=25, chromosomes=6, setcex=2)
plotRawCghDotPlot(KCdataSet=diffList[['T1']], mirrorLocs=altMirrorLocs, doFilter=T, samples=1, chromosomes=6, setcex=2)
par(mfrow=c(3,1))
plotRawCghDotPlot(KCdataSet=allKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=24, setcex=2)
plotRawCghDotPlot(KCdataSet=allKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=25, setcex=2)
plotRawCghDotPlot(KCdataSet=diffList[['T1']], mirrorLocs=altMirrorLocs, doFilter=T, samples=1, setcex=2)
par(mfrow=c(3,1))
for (i in 1:(length(diffList[['T3']])-2)) {
plotRawCghDotPlot(KCdataSet=diffList[['T3']], mirrorLocs=altMirrorLocs, doFilter=T, samples=i, setcex=2)
}
par(mfrow=c(3,1))
for (i in 1:(length(diffList[['T3']])-2)) {
plotRawCghDotPlot(KCdataSet=diffList[['T3']], mirrorLocs=altMirrorLocs, doFilter=T, samples=i, setcex=2, chromosomes=13)
}
# Tests
# Linear models
comb <- c('437371A02', '437371A01')
smallKC <- allKC[,c('chrom', 'maploc', comb)]
smallSeg <- subset(allKCseg, samplelist=comb)
smallFreq <- glFrequency(xout=smallSeg, threshold=1)
ind <- smallFreq$gain == 1 | smallFreq$loss == -1
fitrob <- lmrob(smallKC[ind,4] ~ smallKC[ind,3])
fitlm <- lm(smallKC[ind,4] ~ smallKC[ind,3])
plot(smallKC[ind,3], smallKC[ind, 4], pch='.', cex=2,
col=smallKC$chrom[ind], main='fitted lms on selected probes')
abline(a=coef(fitrob)[1], b=coef(fitrob)[2], col='red')
abline(a=coef(fitlm)[1], b=coef(fitlm)[2], col='blue')
abline(a=0, b=1, col='black', lty='dotted')
legend('topleft', legend=c('lm', 'robust lm', 'y=x'),
col=c('red', 'blue', 'black'), lty=c('solid', 'solid', 'dotted'),
horiz=T)
# Quantile Normalization
dataMatrix <- as.matrix(smallKC[,3:ncol(smallKC)])
dataMatrix <- normalize.quantiles(dataMatrix)
qnormKC <- smallKC
qnormKC[,3:ncol(qnormKC)] <- dataMatrix
par(mfrow=c(1,2))
plot(smallKC[,3],smallKC[,4], pch='.', cex=2, col=smallKC$chrom,
main='Pre-qnorm')
abline(a=0, b=1, col='black', lty='dotted')
plot(qnormKC[,3],qnormKC[,4], pch='.', cex=2, col=qnormKC$chrom,
main='qnorm')
abline(a=0, b=1, col='black', lty='dotted')
# set probes to segment values
segKC <- smallKC
for (i in 1:nrow(smallSeg$output)) {
probesInSeg <- with(smallSeg$output, segKC$chrom == chrom[i] &
segKC$maploc >= loc.start[i] &
segKC$maploc < loc.end[i])
segKC[probesInSeg, smallSeg$output$ID[i]] <-
smallSeg$output$seg.mean[i]
}
# Fit on hard cutoff instead of the MAD based cutoff from
# glFrequency
ind2 <- abs(segKC[,3]) > .2 & abs(segKC[,4]) > .2
fitrob2 <- lmrob(smallKC[ind2,4] ~ smallKC[ind2,3])
fitlm2 <- lm(smallKC[ind2,4] ~ smallKC[ind2,3])
plot(smallKC[ind2,3], smallKC[ind2, 4], pch='.', cex=2,
col=smallKC$chrom[ind2], main='Selection on seg.mean, not MAD')
abline(a=coef(fitrob2)[1], b=coef(fitrob2)[2], col='red')
abline(a=coef(fitlm2)[1], b=coef(fitlm2)[2], col='blue')
abline(a=0, b=1, col='black', lty='dotted')
legend('topleft', legend=c('lm', 'robust lm', 'y=x'),
col=c('red', 'blue', 'black'), lty=c('solid', 'solid', 'dotted'),
horiz=T)
# Fit on probes set to their segmean. (so, many probes have equal values).
# This is quite probably not a good choice
fitrob3 <- lmrob(segKC[ind2,4] ~ segKC[ind2,3])
fitlm3 <- lm(segKC[ind2,4] ~ segKC[ind2,3])
plot(segKC[ind2,3], segKC[ind2, 4], pch='.', cex=2,
col=smallKC$chrom[ind2], main='Fitted on probes set to seg.mean')
abline(a=coef(fitrob3)[1], b=coef(fitrob3)[2], col='red')
abline(a=coef(fitlm3)[1], b=coef(fitlm3)[2], col='blue')
abline(a=0, b=1, col='black', lty='dotted')
legend('topleft', legend=c('lm', 'robust lm', 'y=x'),
col=c('red', 'blue', 'black'), lty=c('solid', 'solid', 'dotted'),
horiz=T)
segKC$diffSegNorm <- (segKC[,3] + coef(fitrob3)[[1]]) * coef(fitrob3)[[2]] - segKC[,4]
# Visualize the segmean correlation:
par(mfrow=c(3,1))
plotRawCghDotPlot(KCdataSet=segKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=1, setcex=2)
plotRawCghDotPlot(KCdataSet=segKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=2, setcex=2)
plotRawCghDotPlot(KCdataSet=segKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=3, setcex=2, setylim=c(-1,1))
par(mfrow=c(3,1))
plotRawCghDotPlot(KCdataSet=segKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=1, setcex=6, chromosomes=6)
plotRawCghDotPlot(KCdataSet=segKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=2, setcex=6, chromosomes=6)
plotRawCghDotPlot(KCdataSet=segKC, mirrorLocs=altMirrorLocs, doFilter=T, samples=3, setcex=6, setylim=c(-1,1), chromosomes=6)
allKCsegProbe <- setProbeToSeg(allKC, allKCseg)
for (t in tumNums) {
tempSampInfo <- subset(sampleInfo, tumNum == t)
donorSample <- tempSampInfo$CGHID[grep('D', tempSampInfo$DRSet)]
recepientSamples <- tempSampInfo$CGHID[grepl('R', tempSampInfo$DRSet) &
tempSampInfo$Site == 'Primary']
names(recepientSamples) <- tempSampInfo$DRSet[grepl('R',
tempSampInfo$DRSet) & tempSampInfo$Site == 'Primary']
resultList <- vector(mode='list', length=3)
names(resultList) <- c('donor', 'recepients', 'delta')
resultList$donor <- allKCsegProbe[,c('chrom', 'maploc', donorSample)]
resultList$recepients <-
allKCsegProbe[,c('chrom', 'maploc', recepientSamples)]
resultKC <- allKCsegProbe[, c('chrom', 'maploc')]
for (r in 1:length(recepientSamples)) {
tempKC <- allKC[,c('chrom', 'maploc', recepientSamples[r],
donorSample)]
tempSeg <- subset(allKCseg,
samplelist=c(recepientSamples[r], donorSample))
resultKC <- cbind(resultKC,
deltaLinearSeg(comb=c(recepientSamples[r], donorSample),
tempKC, tempSeg, thres=.2))
colnames(resultKC)[ncol(resultKC)] <- names(recepientSamples)[r]
}
resultList$delta <- resultKC
diffList[[paste('T', t, sep='')]] <- resultList
}
a <- diffList[['T2']]
par(mfrow=c(3,1))
plotRawCghDotPlot(KCdataSet=a$donor, mirrorLocs=altMirrorLocs, doFilter=T, samples=1, setcex=10)
plotRawCghDotPlot(KCdataSet=a$recepients, mirrorLocs=altMirrorLocs, doFilter=T, samples=2, setcex=10)
plotRawCghDotPlot(KCdataSet=a$delta, mirrorLocs=altMirrorLocs, doFilter=T, samples=2, setcex=10)
source("http://www.bioconductor.org/biocLite.R")
biocLite("CGHnormaliter", lib='~/lib/R')
--- path: /chrisD_DonRecDeltaLinear.R | license_type: no_license | repo_name: ChrisKlijn/chrisD | language: R | is_vendor: false | is_generated: false | length_bytes: 9,340 | extension: r ---
# csvFileUI --------------------------------------------------------------------
#' @importFrom shiny NS tagList selectInput
#' @keywords internal
csvFileUI <- function(id, path_database)
{
ns <- shiny::NS(id)
shiny::tagList(
shiny::selectInput(
inputId = ns("file"),
label = "Load saved paths from",
choices = c(
get_file_info_files(path_database),
get_available_database_entries()
)
)
)
}
# get_file_info_files ----------------------------------------------------------
#' @importFrom kwb.utils removeExtension multiSubstitute
#' @importFrom stats setNames
#' @keywords internal
get_file_info_files <- function(path_database)
{
files <- c(
dir_or_stop(extdata_file(), "^example_file_info.*\\.csv$"),
dir_or_stop(path_database, "\\.csv$")
)
# Give user friendly labels to the files to appear in the dropdown list
file_labels <- kwb.utils::removeExtension(basename(files))
replacements <- list(
"^path-info(-ps-1)?_" = "",
"(\\d{2})_\\d{4}" = "\\2"
)
stats::setNames(files, kwb.utils::multiSubstitute(file_labels, replacements))
}
# csvFile ----------------------------------------------------------------------
#' @importFrom shiny reactive
#' @importFrom kwb.utils selectColumns
#' @importFrom pathlist pathlist hide_server
#' @keywords internal
csvFile <- function(input, output, session, read_function)
{
db_split_pattern <- "\\s*\\|\\s*"
# Path to CSV file
csv_file <- shiny::reactive({
input$file
})
# Path to RDS file in the same folder
rds_file <- shiny::reactive({
if (grepl("^db", csv_file())) {
file.path(
get_global("path_database"),
paste0(gsub(db_split_pattern, "_", csv_file()), ".rds")
)
} else {
gsub("\\.csv$", ".rds", csv_file())
}
})
# Does the RDS file already exist?
rds_file_exists <- shiny::reactive({
file.exists(rds_file())
})
raw_content <- shiny::reactive({
if (rds_file_exists()) {
return(NULL)
}
x <- run_with_modal(
text = paste("Reading", basename(csv_file())),
expr = {
if (grepl("^db", csv_file())) {
date_key <- strsplit(csv_file(), db_split_pattern)[[1]][-1]
get_path_data_from_database(date_key[1], date_key[2])
} else {
read_file_paths(csv_file())
}
}
)
kwb.utils::selectColumns(
x = normalise_column_names(x),
columns = c("path", "type", "size", "modified")
)
})
rds_content <- shiny::reactive({
if (! rds_file_exists()) {
return(NULL)
}
run_with_modal(
text = paste("Loading", basename(rds_file())),
expr = readRDS(rds_file())
)
})
path_list <- shiny::reactive({
if (! is.null(rds_content())) {
return(rds_content()$path_list)
}
run_with_modal(
text = "Providing table data",
expr = pathlist::hide_server(pathlist::pathlist(
paths = raw_content()$path,
data = kwb.utils::selectColumns(
raw_content(), c("type", "size", "modified")
)
))
)
})
content <- shiny::reactive({
if (! is.null(rds_content())) {
return(rds_content()$content)
}
x <- prepare_full_path_table(x = raw_content(), pl = path_list())
content <- structure(x, root = path_list()@root)
rds_content <- list(content = content, path_list = path_list())
run_with_modal(
text = paste("Caching data in", basename(rds_file())),
expr = saveRDS(rds_content, file = rds_file())
)
content
})
list(file = csv_file, content = content, path_list = path_list)
}
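# Illustrative wiring of this module in an app (hypothetical code, assuming the
# pre-shiny-1.5 callModule() interface that matches csvFile()'s signature):
#   ui <- shiny::fluidPage(csvFileUI("paths", path_database = "path/to/db"))   # placeholder path
#   server <- function(input, output, session) {
#     csv <- shiny::callModule(csvFile, "paths", read_function = utils::read.csv)
#     # csv$file(), csv$content() and csv$path_list() are reactives for downstream use
#   }
#   shiny::shinyApp(ui, server)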
# prepare_full_path_table ------------------------------------------------------
#' @importFrom kwb.utils fileExtension moveColumnsToFront removeColumns
#' @importFrom kwb.utils selectColumns
#' @importFrom pathlist depth filename folder toplevel
#' @keywords internal
prepare_full_path_table <- function(x, pl)
{
# Convert column "modified" to Date (parsed via POSIXct)
timestamps <- kwb.utils::selectColumns(x, "modified")
x$modified <- as.Date(as.POSIXct(timestamps, "%Y-%m-%dT%H:%M:%S", tz = "UTC"))
# Provide/format columns "size", "toplevel", "folder", "filename"
#x$size <- round(x$size, 6)
x$toplevel <- factor(pathlist::toplevel(pl))
x$folder <- pathlist::folder(pl)
x$filename <- pathlist::filename(pl)
# Provide column "extension"
x$extension <- ""
is_file <- x$type == "file"
x$extension[is_file] <- kwb.utils::fileExtension(x$filename[is_file])
x$extension <- factor(x$extension)
# Provide column "depth"
x$depth <- pathlist::depth(pl)
# Remove column "path" and move main columns to the left
x <- kwb.utils::removeColumns(x, "path")
main_columns <- c("toplevel", "folder", "filename", "extension")
kwb.utils::moveColumnsToFront(x, main_columns)
}
--- path: /R/module_csv.R | license_type: permissive | repo_name: KWB-R/fakin.path.app | language: R | is_vendor: false | is_generated: false | length_bytes: 4,905 | extension: r ---
require(bio.lobster)
require(lubridate)
require(bio.utilities)
lobster.db( DS="observer41")
lobster.db( DS="logs41")
observer41$Mon = month(observer41$BOARD)
observer41$Yr = year(observer41$BOARD)
logs41$Mon = month(logs41$FV_FISHED)
logs41$Yr = year(logs41$FV_FISHED)
l41 = subset(logs41,Yr>2010 & Yr< 2018)
o41 = subset(observer41,Yr>2010 & Yr<2018)
l41 = makePBS(l41,polygon=F)
o41 = makePBS(o41,polygon=F)
o41$X = o41$X*-1
LobsterMap(41)
addPoints(na.omit(o41[,c('X','Y','EID')]))
outs = list()
yrs = unique(l41$Yr)
for(i in yrs){
g = subset(l41,Yr==i)
g = g[order(g$FV_FISHED_DATETIME),]
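# running totals of landed weight in kg, overall and by offshore area (ADJCATCH assumed to be in lb; /2.2 converts lb to kg)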
g$LKg = cumsum(g$ADJCATCH/2.2)
g$LKgGBAS = cumsum(g$ADJCATCH/2.2*ifelse(g$OFFAREA=='GBASIN',1,0))
g$LKgGBAN = cumsum(g$ADJCATCH/2.2*ifelse(g$OFFAREA=='GBANK',1,0))
g$LKgSEB = cumsum(g$ADJCATCH/2.2*ifelse(g$OFFAREA=='SEBROWNS',1,0))
g$LKgSWB = cumsum(g$ADJCATCH/2.2*ifelse(g$OFFAREA=='SWBROWNS',1,0))
g$LKgGBAS = g$LKg
outs[[as.character(i)]] = g # store the per-year results (assumed intent of the 'outs' list initialized above)
}
# OFFAREA categories: GBASIN UNKNOWN SWBROWNS SEBROWNS GBANK
--- path: /inst/IP/offshoreLandings2Obs.r | license_type: no_license | repo_name: LobsterScience/bio.lobster | language: R | is_vendor: false | is_generated: false | length_bytes: 1,027 | extension: r ---
###
# generate the native area mask based on intersection between points, ecoregions, and countries
# dan.carver@carverd.com
# 20200414
###
nat_area_shp <- function(species) {
# clause for seeing if the product already exists
if (file.exists(paste0(sp_dir, "/modeling/nativeArea/narea.shp"))){
nativeArea <<-readOGR(paste0(sp_dir, "/modeling/nativeArea/narea.shp"),verbose = FALSE)}
else{
# define CRS to be equal between points and ecoRegions
crs(cleanPoints) <- crs(ecoReg)
# test to see which ecoregions have points within them
ecoVal <- data.frame(over(x = cleanPoints, y = ecoReg))%>%
dplyr::select(ECO_ID_U )%>%
distinct()%>%
drop_na()
# Probably don't need this clause, as all occurrences should be land points,
# but it's an easy check
if(length(ecoVal$ECO_ID_U) == 0 ){
print(paste0("No ecoregions intersected with the occurrence data. Species cannot be modeled."))
}else{
# subset ecoRegions that have points within them
ecoAreas <- subset(ecoReg, ECO_ID_U %in% ecoVal$ECO_ID_U)
# clip ecoregions to countries with points present
clipArea <-rgeos::gIntersection(ecoAreas, naSHP)
nativeArea <<- SpatialPolygonsDataFrame(clipArea, data.frame(ID=1:length(clipArea)))
# write out spatial feature
# I was having issues with writeOGR and providing the full file path. This
# should be cleaned up, as setwd() could cause issues down the line
setwd(paste0(sp_dir, "/modeling/nativeArea"))
writeOGR(obj=nativeArea, dsn="narea.shp", layer="narea", driver="ESRI Shapefile") # this is in geographical projection
}
}
}
--- path: /dataPrep/nat_area_shp.r | license_type: no_license | repo_name: dcarver1/CWR-of-the-USA-Gap-Analysis | language: R | is_vendor: false | is_generated: false | length_bytes: 1,666 | extension: r ---
## makeCacheMatrix() creates a special "matrix" object that can cache its
## inverse; cacheSolve() returns the inverse of that object, computing it
## only if it has not already been cached.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL #set the m value to NULL
set <- function(y) { #set the value of the matrix
x <<- y
m <<- NULL
}
get <- function() x #get the matrix
setinv <- function(solve) m <<- solve #set the inverse
getinv <- function() m #get the inverse
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv() #obtain the inverse
if(!is.null(m)) { #determine if inverse matrix was calculated
message("getting cached data")
return(m)
}
data <- x$get() #if inverse matrix was not calculated obtain the inverse value
m <- solve(data, ...)
x$setinv(m)
m
}
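## Example usage (illustrative):
##   cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
##   cacheSolve(cm)   # computes the 2x2 inverse and caches it
##   cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse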
--- path: /cachematrix.R | license_type: no_license | repo_name: clucken/ProgrammingAssignment2 | language: R | is_vendor: false | is_generated: false | length_bytes: 835 | extension: r ---
### R CODE FOR REPRODUCING CONTENT OF FIGURES AND TABLES IN CHAPTER 6 ...
wind_speed <- scan("http://faculty.washington.edu/dbp/sauts/Data/wind_speed_128.txt")
ar2_1 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_1.txt")
ar2_2 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_2.txt")
ar2_3 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_3.txt")
ar2_4 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_4.txt")
ar4_1 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_1.txt")
ar4_2 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_2.txt")
ar4_3 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_3.txt")
ar4_4 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_4.txt")
earth_20 <- scan("http://faculty.washington.edu/dbp/sauts/Data/earth_20.txt")
ocean_wave <- scan("http://faculty.washington.edu/dbp/sauts/Data/ocean_wave.txt")
chaotic_beam <- scan("http://faculty.washington.edu/dbp/sauts/Data/chaotic_beam.txt")
ocean_noise <- scan("http://faculty.washington.edu/dbp/sauts/Data/ocean_noise_128.txt")
### functions used to compute content of figures in Chapter 6 ...
source("http://faculty.washington.edu/dbp/sauts/R-code/acvs.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ar_coeffs_to_acvs.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ar_coeffs_to_sdf.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/B_H.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/B_U.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/circular_shift.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/cosine_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/create_tapered_series.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/dft.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/dB.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/do_crisscross_dse.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/direct_sdf_est.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_DCTII.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_lag_window_sdf_estimator.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_shp.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_shp_squared.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/fejer_kernel.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/hanning_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/is_even.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/next_power_of_2.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/pgram.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/rectangular_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/sim_ar_process.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/slepian_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/spec_window.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/step_down_LD_recursions.R")
###
ar2_innov_var <- 1
ar2_coeffs <- c(0.75,-0.5)
ar4_innov_var <- 0.002
ar4_coeffs <- c(2.7607, -3.8106, 2.6535, -0.9238)
### BEGINNING OF CODE TO REPRODUCE CONTENT OF FIGURES/TABLES
### Figure 168 ###
fig_168_top_row <- function(the_acvs,tag)
{
N <- length(the_acvs)
taus <- 0:(N-1)
plot(taus,the_acvs,
xlim=c(0,N),xlab=expression(tau),
ylim=c(-2,2),ylab="ACVS",
typ="b",lwd=0.25,cex=0.5,axes=FALSE,
main="Figure 168")
axis(1,at=seq(0,60,20))
axis(1,at=seq(0,60,10),label=FALSE,tcl=-0.25)
axis(2,at=seq(-2,2,2),las=2)
axis(2,at=seq(-2,2,1),label=FALSE,tcl=-0.25)
text(60,1.8,tag,pos=2)
box(bty="l")
}
fig_168_bot_rows <- function(biased,unbiased,y_lab)
{
max_lag <- length(biased)-1
taus <- 0:max_lag
plot(taus,dB(unbiased),
xlim=c(0,max_lag+1),xlab=expression(tau),
ylim=c(-80,20),ylab=y_lab,
typ="l",lwd=0.5,col="gray40",axes=FALSE,
main="Figure 168")
lines(taus,dB(biased))
axis(1,at=seq(0,60,20))
axis(1,at=seq(0,60,10),label=FALSE,tcl=-0.25)
axis(2,at=seq(-80,40,20),las=2)
axis(2,at=seq(-80,40,10),label=FALSE,tcl=-0.25)
box(bty="l")
}
ar2_acvs <- ar_coeffs_to_acvs(ar2_coeffs,63,ar2_innov_var,FALSE)
ar4_acvs <- ar_coeffs_to_acvs(ar4_coeffs,63,ar4_innov_var,FALSE)
b_to_u <- 64/(64:1)
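### (b_to_u[tau+1] = N/(N-tau) with N = 64; it rescales the biased ACVS estimator at lag tau into the unbiased one)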
### NOTE: evaluation of the following R code is time consuming
### (particularly the two lines involving ev_shp_squared,
### each of which took 45 minutes to execute on a 2017-vintage
### MacBook Pro):
###
### ev_shp_ar2 <- sapply(0:63,ev_shp,64,ar2_acvs)
### ev_shp_squared_ar2 <- sapply(0:63,ev_shp_squared,64,ar2_acvs)
### ev_shp_ar4 <- sapply(0:63,ev_shp,64,ar4_acvs)
### ev_shp_squared_ar4 <- sapply(0:63,ev_shp_squared,64,ar4_acvs)
###
### Evaluation of the following four load forms alleviates
### having to recreate ev_shp_ar2 etc.
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_ar2.Rdata"))
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_squared_ar2.Rdata"))
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_ar4.Rdata"))
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_squared_ar4.Rdata"))
### Figure 168, plots in left-hand column from top to bottom
fig_168_top_row(ar2_acvs,"AR(2)")
fig_168_bot_rows((ev_shp_ar2-ar2_acvs)^2,(b_to_u*ev_shp_ar2-ar2_acvs)^2,"squared bias (dB)")
fig_168_bot_rows(ev_shp_squared_ar2-ev_shp_ar2^2,b_to_u^2*(ev_shp_squared_ar2-ev_shp_ar2^2),"variance (dB)")
fig_168_bot_rows(ev_shp_squared_ar2-ev_shp_ar2^2+(ev_shp_ar2-ar2_acvs)^2,b_to_u^2*(ev_shp_squared_ar2-ev_shp_ar2^2)+(b_to_u*ev_shp_ar2-ar2_acvs)^2,"MSE (dB)")
### Figure 168, plots in right-hand column from top to bottom
fig_168_top_row(ar4_acvs,"AR(4)")
fig_168_bot_rows((ev_shp_ar4-ar4_acvs)^2,(b_to_u*ev_shp_ar4-ar4_acvs)^2,"squared bias (dB)")
fig_168_bot_rows(ev_shp_squared_ar4-ev_shp_ar4^2,b_to_u^2*(ev_shp_squared_ar4-ev_shp_ar4^2),"variance (dB)")
fig_168_bot_rows(ev_shp_squared_ar4-ev_shp_ar4^2+(ev_shp_ar4-ar4_acvs)^2,b_to_u^2*(ev_shp_squared_ar4-ev_shp_ar4^2)+(b_to_u*ev_shp_ar4-ar4_acvs)^2,"MSE (dB)")
### Figure 169 ###
fig_169 <- function(ts)
{
temp <- acvs(ts)
taus <- temp$lags
acvs_biased <- temp$acvs
acvs_unbiased <- acvs(ts,unbiased=TRUE)$acvs
plot(taus,acvs_biased,
xlim=c(0,length(ts)),xlab=expression(paste(tau," (in 0.025 sec)")),
ylim=c(-4,4),ylab="ACVS",
typ="l",axes=FALSE,
main="Figure 169")
lines(taus,acvs_unbiased,lwd=0.5,col="gray40")
abline(h=0,lty="dashed")
axis(1,at=seq(0,128,32))
axis(2,at=seq(-4,4,2),las=2)
axis(2,at=seq(-4,4,1),label=FALSE,tcl=-0.25)
axis(4,at=seq(-4,4,2),label=FALSE)
axis(4,at=seq(-4,4,1),label=FALSE,tcl=-0.25)
box(bty="u")
}
### Figure 169
fig_169(wind_speed)
### Figure 172 ###
fig_172 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_pgram <- pgram(ts,center=FALSE)
plot(the_pgram$freqs,the_pgram$sdfe,
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(0,y_ats[length(y_ats)]),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 172",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,the_ar_spec$sdf)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=y_ats,las=2)
text(x=0.5,y=0.95*y_ats[length(y_ats)],tag,pos=2)
box(bty="l")
}
### Figure 172, top row of plots
fig_172(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_172(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 172, 2nd row of plots
fig_172(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_172(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 172, 3rd row of plots
fig_172(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_172(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 172, bottom row of plots
fig_172(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_172(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 173 ###
fig_173 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_pgram <- pgram(ts,center=FALSE)
plot(the_pgram$freqs,dB(the_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra (dB)",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 173",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
if(length(coeffs) == 4)
{
N <- length(ts)
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE))
lines(temp$freqs, dB(temp$sdf_ev), lwd=0.5)
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 173, top row of plots
fig_173(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_173(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 173, 2nd row of plots
fig_173(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_173(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 173, 3rd row of plots
fig_173(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_173(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 173, bottom row of plots
fig_173(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_173(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 176 ###
fig_176 <- function(N,right_p=FALSE,tag=NULL)
{
the_kernel <- fejer_kernel(N)
plot(the_kernel$freqs,if(!right_p) dB(the_kernel$kernel) else the_kernel$kernel,
xlim=c(-0.5,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=if(!right_p) c(-40,20) else c(0,N),yaxs="i",ylab="spectral window",
typ="l",lwd=0.25,axes=FALSE,
main="Figure 176")
axis(1,at=seq(-0.5,0.5,0.5))
axis(1,at=seq(-0.5,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(!right_p) seq(-40,20,20) else seq(0,N,N/2),las=2)
if(!right_p)
{
axis(2,at=seq(-40,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
}
box(bty="l")
}
### Figure 176, left-hand column of plots
fig_176(4,tag=expression(italic(N==4)))
fig_176(16,tag=expression(italic(N==16)))
fig_176(64,tag=expression(italic(N==64)))
### Figure 176, right-hand column of plots
fig_176(4,tag=expression(italic(N==4)),right_p=TRUE)
fig_176(16,tag=expression(italic(N==16)),right_p=TRUE)
fig_176(64,tag=expression(italic(N==64)),right_p=TRUE)
### Figure 177 ###
fig_177 <- function(N,coeffs,innov_var,tag_1,tag_2,tag_3=NULL)
{
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE),N_pad=1024)
plot(temp$freqs,dB(temp$sdf_ev),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-10,10),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 177",tag_1,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-10,10,10),las=2)
axis(2,at=seq(-10,10,2),label=FALSE,tcl=-0.25)
text(x=0.5,y=9,tag_1,pos=2)
text(x=0.25,y=-8,tag_2,pos=1)
text(x=0.25,y=9,tag_3,pos=1)
box(bty="l")
}
### Figure 177, left-hand and right-hand plots
fig_177(16,ar2_coeffs,ar2_innov_var,"(a)",expression(italic(N==16)),"AR(2)")
fig_177(64,ar2_coeffs,ar2_innov_var,"(b)",expression(italic(N==64)))
### Figure 178 ###
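### fig_178 is similar to fig_177, but plotted over a wider dB range and with
### optional dotted vertical lines (argument vlines) marking frequencies of
### interest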
fig_178 <- function(N,coeffs,innov_var,tag_1,tag_2,tag_3=NULL,vlines=NULL)
{
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE),N_pad=2048)
plot(temp$freqs,dB(temp$sdf_ev),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 178",tag_1,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
abline(v=vlines, lty="dotted")
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=15,tag_1,pos=2)
text(x=0.25,y=-50,tag_2,pos=1)
text(x=0.25,y=20,tag_3,pos=1)
box(bty="l")
}
### Figure 178, top row of plots
fig_178(16,ar4_coeffs,ar4_innov_var,"(a)",expression(italic(N==16)),"AR(4)")
fig_178(64,ar4_coeffs,ar4_innov_var,"(b)",expression(italic(N==64)),vlines=c(1/8,0.4))
### Figure 178, bottom row of plots
fig_178(256,ar4_coeffs,ar4_innov_var,"(c)",expression(italic(N==256)))
fig_178(1024,ar4_coeffs,ar4_innov_var,"(d)",expression(italic(N==1024)))
### Figure 180 ###
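### fig_180 plots a circularly shifted Fejer kernel along with the two-sided
### AR(4) SDF (mult_p=FALSE) or the pointwise product of the two (mult_p=TRUE);
### the trans argument (e.g., dB) sets the vertical scale, and v_line places a
### dotted vertical line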
fig_180 <- function(the_kernel,mult_p=FALSE,v_line=1/8,trans=function(x) x,big_y_ats=seq(-40,20,20),little_y_ats=seq(-50,30,10),tag="(a)",word="and",the_sdf=two_sided_ar4_sdf)
{
N_freqs <- length(the_kernel)
freqs <- seq(-0.5+1/N_freqs,0.5,length=N_freqs)
ys <- trans(if(mult_p) the_kernel*the_sdf else the_kernel)
plot(freqs,ys,
xlim=c(-0.5,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(little_y_ats[1],little_y_ats[length(little_y_ats)]),yaxs="i",ylab=paste("kernel",word,"AR(4) SDF"),
typ="l",lwd=0.25,axes=FALSE,
main=paste("Figure 180",tag,sep=""))
if(!mult_p) lines(freqs,trans(the_sdf))
abline(v=v_line,lty="dotted")
axis(1,at=seq(-0.5,0.5,0.5))
axis(1,at=seq(-0.5,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=big_y_ats,las=2)
axis(2,at=little_y_ats,label=FALSE,tcl=-0.25)
text(x=-0.35,y=0.88*diff(range(little_y_ats))+little_y_ats[1],tag,pos=2)
box(bty="l")
}
temp <- ar_coeffs_to_sdf(ar4_coeffs,ar4_innov_var,N_pad=2048)$sdf
two_sided_ar4_sdf <- c(rev(temp[c(-1,-length(temp))]),temp)
temp <- fejer_kernel(64)$kernel
fejer_shift_1 <- circular_shift(temp,256)
fejer_shift_2 <- circular_shift(temp,820)
### Figure 180, top row of plots
fig_180(fejer_shift_1,trans=dB)
fig_180(fejer_shift_1,big_y_ats=c(0,40,80),little_y_ats=c(0,40,80),tag="(b)")
### Figure 180, 2nd row of plots
fig_180(fejer_shift_1,trans=dB,mult_p=TRUE,tag="(c)",word="times")
fig_180(fejer_shift_1,big_y_ats=c(0,250,500),little_y_ats=c(0,250,500),mult_p=TRUE,tag="(d)",word="times")
### Figure 180, 3rd row of plots
fig_180(fejer_shift_2,trans=dB,v_line=0.4,tag="(e)")
fig_180(fejer_shift_2,big_y_ats=c(0,40,80),little_y_ats=c(0,40,80),v_line=0.4,tag="(f)")
### Figure 180, bottom row of plots
fig_180(fejer_shift_2,trans=dB,mult_p=TRUE,v_line=0.4,tag="(g)",word="times")
fig_180(fejer_shift_2,big_y_ats=c(0,1,2),little_y_ats=c(0,1,2),mult_p=TRUE,v_line=0.4,tag="(h)",word="times")
### Figure 182 ###
###
### NOTE: fig_182 is virtually the same as fig_173, the only
### difference being the addition of pad=2 in the call
### to pgram (fig_173 uses the default pad=1)
fig_182 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_pgram <- pgram(ts,center=FALSE,pad=2)
plot(the_pgram$freqs,dB(the_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra (dB)",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 182",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
if(length(coeffs) == 4)
{
N <- length(ts)
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE))
lines(temp$freqs, dB(temp$sdf_ev), lwd=0.5)
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 182, top row of plots
fig_182(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_182(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 182, 2nd row of plots
fig_182(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_182(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 182, 3rd row of plots
fig_182(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_182(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 182, bottom row of plots
fig_182(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_182(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 183a ###
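### fig_183a plots the last four and first four values of a length-1024 AR(4)
### series side by side, along with the one-step-ahead prediction (marked by a
### plus sign) of the value that would follow the end of the series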
fig_183a <- function(ts,tag,coeffs=ar4_coeffs)
{
N <- length(ts)
p <- length(coeffs)
plot(0:3,ts[(N-3):N],
xlim=c(1,6),xlab=expression(italic(t)),
ylim=c(-5,5),ylab="AR(4) series",
typ="o",axes=FALSE,
main=paste("Figure 183a",tag,sep=""))
pred <- as.vector(coeffs%*%ts[N:(N-p+1)])
lines(3:4,c(ts[N],pred), type="b", pch=" ", lty="dotted")
points(4,pred, pch=3)
lines(4:7,ts[1:4], type="o")
axis(1,at=1:6,labels=c(1021,NA,1023,0,1,2))
axis(2,at=seq(-5,5,5),las=2)
axis(2,at=seq(-5,5,1),label=FALSE,tcl=-0.25)
text(x=5.5,y=4.5,tag,pos=2)
box(bty="l")
}
### Figure 183a, top row
fig_183a(ar4_1,"(e)")
fig_183a(ar4_2,"(f)")
### Figure 183a, bottom row
fig_183a(ar4_3,"(g)")
fig_183a(ar4_4,"(h)")
### Figure 183b ###
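### fig_183b is a scatter plot of absolute one-step-ahead prediction error
### versus the average level (in dB) of the periodogram of the zero-padded
### series over roughly f in [0.4, 0.5], with a lowess smooth added; the
### simulation loop below generates the 100 points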
fig_183b <- function(x,y)
{
plot(x,y,
xlim=c(-0.15,5.5),xaxs="i",xlab="absolute prediction error",
ylim=c(-52,-25),yaxs="i",ylab="dB",
typ="p",cex=0.625,axes=FALSE,
main="Figure 183b")
lines(lowess(x,y))
abline(h=c(-47.20893,-30.3018),lty=c("dotted","dashed"))
axis(1,at=0:5)
axis(2,at=seq(-50,-30,10),las=2)
box(bty="l")
}
set.seed(1)
N_rep <- 100
x_results <- rep(0,N_rep)
y_results <- rep(0,N_rep)
LD_ar4 <- step_down_LD_recursions(ar4_coeffs,ar4_innov_var,proc=FALSE)
for(n in 1:N_rep)
{
ar_ts <- sim_ar_process(1024,LD=LD_ar4)
x_results[n] <- abs(as.numeric(ar_ts[1024:1021] %*% ar4_coeffs) - ar_ts[1])
y_results[n] <- dB(mean(pgram(c(ar_ts,rep(0,1024)),center=FALSE)$sdfe[821:1025]))
}
### Figure 183b
fig_183b(x_results,y_results)
### Figure 185 ###
fig_185 <- function(ys,big_y_ats=seq(-5,5,5),little_y_ats=NULL,y_lab="AR(4) series")
{
N <- length(ys)
plot(0:(N-1),ys,
xlim=c(0,N),xlab=expression(italic(t)),
ylim=c(big_y_ats[1],big_y_ats[length(big_y_ats)]),ylab=y_lab,
typ="l",lwd=0.25,axes=FALSE,
main="Figure 185")
axis(1,at=seq(0,1024,512))
axis(1,at=seq(0,1024,256),label=FALSE,tcl=-0.25)
axis(2,at=big_y_ats,las=2)
axis(2,at=little_y_ats,label=FALSE,tcl=-0.25)
box(bty="l")
}
the_taper <- hanning_taper(1024)
### Figure 185, top to bottom plots
fig_185(ar4_1,little_y_ats=seq(-5,5,1))
fig_185(the_taper,big_y_ats=seq(0,0.06,0.02),y_lab="Hanning taper")
fig_185(the_taper*ar4_1,big_y_ats=seq(-0.2,0.2,0.1),y_lab="tapered series")
### Figure 187 ###
fig_187 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_dse <- direct_sdf_est(ts,hanning_taper(length(ts)),center=FALSE,pad=2)
plot(the_dse$freqs,dB(the_dse$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra (dB)",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 187",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 187, top row of plots
fig_187(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_187(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 187, 2nd row of plots
fig_187(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_187(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 187, 3rd row of plots
fig_187(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_187(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 187, bottom row of plots
fig_187(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_187(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 190 ###
fig_190 <- function(the_taper,left_tag,right_tag)
{
N <- length(the_taper)
plot(0:(N-1),the_taper,
xlim=c(0,N),xlab=expression(italic(t)),
ylim=c(0,0.3),ylab="data taper",
typ="p",pch=20,cex=0.2,axes=FALSE,
main=paste("Figure 190",left_tag,sep=""))
axis(1,at=seq(0,64,32))
axis(1,at=seq(0,64,16),label=FALSE,tcl=-0.25)
axis(2,at=seq(0.0,0.3,0.1),las=2)
text(x=0,y=0.29,left_tag,pos=4)
text(x=64,y=0.29,right_tag,pos=2)
box(bty="l")
}
### Figure 190, left-hand column
fig_190(rectangular_taper(64),"(a)",expression(paste("rectangular (",italic(p==0),")",sep="")))
fig_190(cosine_taper(64,0.2),"(b)",expression(italic(p==0.2)))
fig_190(cosine_taper(64,0.5),"(c)",expression(italic(p==0.5)))
fig_190(hanning_taper(64),"(d)",expression(paste("Hanning (",italic(p==1),")",sep="")))
### Figure 190, right-hand column
fig_190(slepian_taper(64,1),"(e)",expression(italic(NW==1)))
fig_190(slepian_taper(64,2),"(f)",expression(italic(NW==2)))
fig_190(slepian_taper(64,4),"(g)",expression(italic(NW==4)))
fig_190(slepian_taper(64,8),"(h)",expression(italic(NW==8)))
### Figure 191 ###
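### fig_191 plots the spectral window of a data taper in decibels and marks
### three measures of its width with short horizontal lines: the 3 dB down
### width, the variance width (bw_v) and the autocorrelation width (B_H)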
fig_191 <- function(the_taper,left_tag,right_tag,v_line=NULL)
{
temp <- spec_window(the_taper,pad_factor=16,fix_nulls_p=TRUE,first_p=FALSE)
freqs <- temp$freqs
ys <- dB(temp$sw)
plot(freqs,ys,
xlim=c(-0.5,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-100,20),yaxs="i",ylab="spectral window (dB)",
typ="l",lwd=0.25,axes=FALSE,
main=paste("Figure 191",left_tag,sep=""))
abline(v=v_line*c(-1,1),lty="dotted")
## add 3 dB down width
i_max <- which.max(ys)
three_dB_down <- ys[i_max] - 3
i <- which(ys[i_max:length(ys)] <= three_dB_down)[1] + i_max - 1
lines(freqs[c(2*i_max-i,i)],c(three_dB_down,three_dB_down))
## add variance width
bw_v <- function(taper)
{
N <- length(taper)
Nm1 <- N - 1
autocor <- Re(fft(abs(fft(c(taper,rep(0,N)))^2)))/(2*N)
return(sqrt(1 + sum(((-1)^(1:Nm1))*autocor[2:N]/(1:Nm1)^2)*12/pi^2))
}
lines(bw_v(the_taper)*c(-0.5,0.5),c(three_dB_down-5,three_dB_down-5))
## add autocorrelation width
lines(B_H(the_taper)*c(-0.5,0.5),c(three_dB_down-10,three_dB_down-10))
axis(1,at=seq(-0.5,0.5,0.5))
axis(1,at=seq(-0.5,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-100,20,20),las=2)
axis(2,at=seq(-100,20,10),label=FALSE,tcl=-0.25)
text(x=-0.5,y=10,left_tag,pos=4)
text(x=0.5,y=10,right_tag,pos=2)
box(bty="l")
}
### Figure 191, left-hand column
fig_191(rectangular_taper(64),"(a)","rectangular")
fig_191(cosine_taper(64,0.2),"(b)",expression(italic(p==0.2)))
fig_191(cosine_taper(64,0.5),"(c)",expression(italic(p==0.5)))
fig_191(hanning_taper(64),"(d)","Hanning")
### Figure 191, right-hand column
fig_191(slepian_taper(64,1),"(e)",expression(italic(NW==1)),v_line=1/64)
fig_191(slepian_taper(64,2),"(f)",expression(italic(NW==2)),v_line=1/32)
fig_191(slepian_taper(64,4),"(g)",expression(italic(NW==4)),v_line=1/16)
fig_191(slepian_taper(64,8),"(h)",expression(italic(NW==8)),v_line=1/8)
### Figure 193 ###
fig_193 <- function(the_taper,tag,coeffs=ar4_coeffs,innov_var=ar4_innov_var)
{
ev_dse <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,length(the_taper)-1,innov_var,FALSE),the_taper,N_pad=1024)
plot(ev_dse$freqs, dB(ev_dse$sdf_ev),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main="Figure 193")
the_ar_spec <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=15,tag,pos=2)
box(bty="l")
}
### Figure 193, left-hand column
fig_193(rectangular_taper(64),expression(paste("rectangular (",italic(p==0),")",sep="")))
fig_193(cosine_taper(64,0.2),expression(italic(p==0.2)))
fig_193(cosine_taper(64,0.5),expression(italic(p==0.5)))
fig_193(hanning_taper(64),expression(paste("Hanning (",italic(p==1),")",sep="")))
### Figure 193, right-hand column
fig_193(slepian_taper(64,1),expression(italic(NW==1)))
fig_193(slepian_taper(64,2),expression(italic(NW==2)))
fig_193(slepian_taper(64,4),expression(italic(NW==4)))
fig_193(slepian_taper(64,8),expression(italic(NW==8)))
### Figure 199 ###
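### fig_199 prewhitens the AR(4) series ts with the filter pw_filter and plots
### either the periodogram of the filtered series (right_p=FALSE) or that
### periodogram divided by the filter's squared gain function (right_p=TRUE);
### the true AR SDF (multiplied by the squared gain when right_p=FALSE) is
### overlaid, and extra_p=TRUE adds an expected-value curve over part of the
### frequency range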
fig_199 <- function(pw_filter,tag,right_p=FALSE,extra_p=FALSE,ts=ar4_2,coeffs=ar4_coeffs,innov_var=ar4_innov_var)
{
pw_ts <- convolve(ts,pw_filter,type="filter")
N_pad <- 2048
pgram_pw_ts <- pgram(pw_ts,center=FALSE,pad=N_pad/length(pw_ts))
freqs <- pgram_pw_ts$freqs
squared_gain <- abs(fft(c(pw_filter,rep(0,N_pad-length(pw_filter))))[1:((N_pad/2)+1)])^2
plot(freqs,dB(if(right_p) pgram_pw_ts$sdfe/squared_gain else pgram_pw_ts$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="spectra (dB)",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 199",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=N_pad)$sdf
lines(freqs,dB(if(right_p) the_ar_spec else the_ar_spec * squared_gain))
if(extra_p)
{
N <- length(ts)
L <- length(pw_filter)
ar_acvs <- ar_coeffs_to_acvs(coeffs,N+2*L,innov_var,FALSE)
pre_acvs <- rep(0,N-L+1)
for(tau in 0:(N-L))
for(k in 1:L)
for(l in 1:L)
{
pre_acvs[tau+1] <- pre_acvs[tau+1] + pw_filter[k]*pw_filter[l]*ar_acvs[abs(tau+k-l)+1]
}
temp <- ev_lag_window_sdf_estimator(pre_acvs,rep(1/sqrt(N-L+1),N-L+1),N_pad=N_pad)
pc <- abs(fft(c(pw_filter,rep(0,N_pad-L)))[1:((N_pad/2)+1)])^2
lines(0.25+temp$freqs[1:410],dB(temp$sdf_ev[1:410]/pc[1:410]),lwd=0.25)
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=15,tag,pos=2)
box(bty="l")
}
LD_ar4 <- step_down_LD_recursions(ar4_coeffs,ar4_innov_var,FALSE)
### Figure 199, top row
fig_199(c(1,-ar4_coeffs),"(a)")
fig_199(c(1,-ar4_coeffs),"(b)",right_p=TRUE)
### Figure 199, 2nd row
fig_199(c(1,-0.99),"(c)")
fig_199(c(1,-0.99),"(d)",right_p=TRUE)
### Figure 199, 3rd row
fig_199(c(1,-LD_ar4$coeffs[[2]]),"(e)")
fig_199(c(1,-LD_ar4$coeffs[[2]]),"(f)",right_p=TRUE,extra_p=TRUE)
### Figure 199, bottom row
fig_199(c(1,-1.3,0.8),"(g)")
fig_199(c(1,-1.3,0.8),"(h)",right_p=TRUE,extra_p=TRUE)
### Figure 200 ###
fig_200 <- function(pwf_1,pwf_3,pwf_4)
{
N_pad <- 2048
squared_gain <- function(filter) abs(fft(c(filter,rep(0,N_pad-length(filter))))[1:((N_pad/2)+1)])^2
freqs <- seq(0.0,0.5,1/N_pad)
plot(freqs,dB(squared_gain(pwf_1)),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-50,30),yaxs="i",ylab="squared gain function (dB)",
typ="l",axes=FALSE,
main="Figure 200")
lines(freqs,dB(squared_gain(pwf_3)),lwd=0.25)
lines(freqs,dB(squared_gain(pwf_4)),lty="dotted")
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,30,10),label=FALSE,tcl=-0.25)
box(bty="l")
}
LD_ar4 <- step_down_LD_recursions(ar4_coeffs,ar4_innov_var,FALSE)
### Figure 200
fig_200(c(1,-ar4_coeffs),c(1,-LD_ar4$coeffs[[2]]),c(1,-1.3,0.8))
### Figure 206 ###
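### fig_206 plots the magnitude of the (zero-padded) Fourier transform of a
### Slepian taper, a copy of it shifted by B_H_multiplier times the
### autocorrelation width B_H, and their pointwise product (thick gray curve)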
fig_206 <- function(the_taper,B_H_multiplier,tag,N_pad=8192)
{
N_pad_half <- N_pad/2
freqs <- (-(N_pad_half-1):N_pad_half)/N_pad
N <- length(the_taper)
temp <- abs(fft(c(the_taper,rep(0,N_pad-N))))
H_abs <- c(temp[(N_pad_half+2):N_pad],temp[1:(N_pad_half+1)])
B_H_taper <- B_H(the_taper)
i <- round(N_pad*(1-B_H_taper*B_H_multiplier))
H_abs_shifted <- c(H_abs[i:N_pad],H_abs[1:(i-1)])
for_xlim <- 1/8 + 1/64
plot(freqs,H_abs_shifted,
     xlim=for_xlim*c(-1,1),xlab=expression(italic(v)),
ylim=c(0,30),ylab=" ",
typ="l",axes=FALSE,
main=paste("Figure 206",tag,sep=""))
lines(freqs,H_abs,lwd=0.5)
lines(freqs,H_abs*H_abs_shifted,col="gray",lwd=2)
abline(v=0,lty="dotted")
abline(v=B_H_taper,lty="dotted")
axis(1,at=seq(-1/8,1/8,1/8),labels=c("-1/8","0","1/8"))
axis(1,at=seq(-1/2,1/2,1/64),labels=FALSE,tcl=-0.25)
axis(2,at=seq(0,30,10),las=2)
text(1/8,28,tag,pos=2)
box(bty="l")
}
### Figure 206, first row, left to right
fig_206(slepian_taper(64,2),0.5,"(a)")
fig_206(slepian_taper(64,2),1,"(b)")
fig_206(slepian_taper(64,2),2,"(c)")
### Figure 206, second row, left to right
fig_206(slepian_taper(64,4),0.5,"(d)")
fig_206(slepian_taper(64,4),1,"(e)")
fig_206(slepian_taper(64,4),2,"(f)")
### Figure 207 ###
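### fig_207 plots the periodogram (in dB) of the first N values of a Gaussian
### white noise series, with a horizontal line at the true SDF level (0 dB) and
### a crisscross indicating the bandwidth and the height of a 95% confidence
### interval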
fig_207 <- function(ts,tag_1,tag_2)
{
N <- length(ts)
the_pgram <- pgram(ts,center=FALSE,pad=2^(11-round(log2(N))))
plot(the_pgram$freqs,dB(the_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-40,20),yaxs="i",ylab="periodogram (dB)",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 207",tag_1,sep=""))
abline(h=0)
x_cc <- 7/16
y_cc <- -30
lines(c(x_cc,x_cc),y_cc+c(dB(2/qchisq(0.975,2)),dB(2/qchisq(0.025,2))),lwd=0.5)
lines(x_cc+c(-0.5,0.5)*the_pgram$cc$width,c(y_cc,y_cc),lwd=0.5)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,1/N),label=FALSE,tcl=-0.25)
axis(2,at=seq(-40,20,20),las=2)
axis(2,at=seq(-40,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=18,tag_1,pos=2)
text(x=0.25,y=-35,tag_2,pos=1)
box(bty="l")
}
set.seed(42)
ts_128 <- rnorm(128)
### Figure 207, first row, left to right
fig_207(ts_128[1:16],"(a)",expression(N==16))
fig_207(ts_128[1:32],"(b)",expression(N==32))
### Figure 207, second row, left to right
fig_207(ts_128[1:64],"(c)",expression(N==64))
fig_207(ts_128,"(d)",expression(N==128))
### Figure 208 ###
fig_208 <- function(taper,tag_1,tag_2,ts=ar2_1[1:128],coeffs=ar2_coeffs,innov_var=ar2_innov_var)
{
N <- length(ts)
the_dse <- direct_sdf_est(ts,taper,center=FALSE,pad=2^(11-round(log2(N))))
plot(the_dse$freqs,dB(the_dse$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-40,20),yaxs="i",ylab="AR(2) spectra (dB)",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 208",tag_1,sep=""))
ar_sdf <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=2048)
lines(ar_sdf$freqs,dB(ar_sdf$sdf))
x_cc <- 0.4
y_cc <- -30
lines(c(x_cc,x_cc),y_cc+c(dB(2/qchisq(0.975,2)),dB(2/qchisq(0.025,2))),lwd=0.5)
lines(x_cc+c(-0.5,0.5)*the_dse$cc$width,c(y_cc,y_cc),lwd=0.5)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-40,20,20),las=2)
axis(2,at=seq(-40,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=18,tag_1,pos=2)
text(x=0.04,y=-36,tag_2,pos=4)
box(bty="l")
}
### Figure 208, first row, left to right
fig_208(default_taper(128),"(a)","periodogram")
fig_208(slepian_taper(128,2),"(b)",expression(italic(NW==2)))
### Figure 208, second row, left to right
fig_208(slepian_taper(128,4),"(c)",expression(italic(NW==4)))
fig_208(slepian_taper(128,8),"(d)",expression(italic(NW==8)))
### Figure 210a ###
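### fig_210a plots the PDF of a chi-square random variable with 2 degrees of
### freedom (left-hand plot, upper 5% tail shaded) and the PDF of its decibel
### transform (right-hand plot, lower 5% tail shaded)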
fig_210a <- function(right_p=FALSE)
{
if(!right_p)
{
xs <- seq(0,10,0.01)
ys <- exp(-xs/2)/2
plot(xs,ys,
xlim=c(0,10),xaxs="i",xlab=expression(italic(u)),
ylim=c(0,0.5),yaxs="i",ylab="PDF",
typ="l",lwd=0.5,axes=FALSE,
main="Figure 210a(a)")
xs_inner <- seq(5.9915,10,0.01)
ys_inner <- exp(-xs_inner/2)/2
polygon(c(5.9915,xs_inner,10),c(0,ys_inner,0),col="gray",border=NA)
abline(v=2,lty="dotted")
axis(1,at=seq(0,10,5))
axis(1,at=seq(0,10,1),label=FALSE,tcl=-0.25)
axis(2,at=seq(0,0.5,0.5),las=2)
axis(2,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
text(9.85,0.44,"(a)",pos=2)
}
else
{
xs <- seq(-20,20,0.04)
pdf_log_chi2 <- function(x)
{
temp <- 10^(x/10)
return(log(10) * temp * exp(-temp/2)/20)
}
ys <- pdf_log_chi2(xs)
plot(xs,ys,
xlim=c(-20,20),xaxs="i",xlab=expression(paste(italic(v)," (dB)")),
ylim=c(0,0.1),yaxs="i",ylab="PDF",
typ="l",lwd=0.5,axes=FALSE,
main="Figure 210a(b)")
xs_inner <- seq(-20,-9.8891,0.04)
ys_inner <- pdf_log_chi2(xs_inner)
polygon(c(-20,xs_inner,-9.8891),c(0,ys_inner,0),col="gray",border=NA)
abline(v=dB(2/exp(-digamma(1))),lty="dotted")
axis(1,at=seq(-20,20,10))
axis(2,at=seq(0,0.1,0.1),las=2)
axis(2,at=seq(0,0.1,0.01),label=FALSE,tcl=-0.25)
text(19.4,0.088,"(b)",pos=2)
}
box(bty="l")
}
### Figure 210a, left-hand plot
fig_210a()
### Figure 210a, right-hand plot
fig_210a(right_p=TRUE)
### Figure 210b ###
fig_210b <- function(ts,right_p=FALSE)
{
trans <- if(right_p) dB else function(x) x
N <- length(ts)
the_pgram <- pgram(ts,center=FALSE)
plot(the_pgram$freqs[-c(1,65)],trans(the_pgram$sdfe[-c(1,65)]),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=if(right_p) c(-40,20) else c(0,10),yaxs="i",ylab=paste("periodogram",if(right_p) " (dB)" else NULL,sep=""),
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 210b",if(right_p) "(b)" else "(a)",sep=""))
abline(h=trans(2))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(right_p) seq(-40,20,20) else seq(0,10,5),las=2)
axis(2,at=if(right_p) seq(-40,20,10) else seq(0,10,1),label=FALSE,tcl=-0.25)
text(x=0.5,y=if(right_p) 14 else 9,if(right_p) "(b)" else "(a)",pos=2)
box(bty="l")
}
set.seed(4)
ts_128 <- rnorm(128)*sqrt(2)
### Figure 210b, left-hand plot
fig_210b(ts_128)
### Figure 210b, right-hand plot
fig_210b(ts_128,right_p=TRUE)
### Figure 212 ###
fig_212 <- function(N=64,N_pad=2048)
{
taper_1 <- slepian_taper(N,1)
taper_2 <- slepian_taper(N,2)
taper_4 <- slepian_taper(N,4)
taper_8 <- slepian_taper(N,8)
R_1 <- abs(fft(c(taper_1^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
R_2 <- abs(fft(c(taper_2^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
R_4 <- abs(fft(c(taper_4^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
R_8 <- abs(fft(c(taper_8^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
freqs <- (0:(N_pad/8))/N_pad
plot(freqs,R_1,
xlim=c(0,0.13),xaxs="i",xlab=expression(paste(eta," (frequency lag)")),
ylim=c(0,1),yaxs="i",ylab="correlation",
typ="l",axes=FALSE,
main="Figure 212")
lines(freqs,R_2,lty="longdash")
lines(freqs,R_4,lty="dashed")
lines(freqs,R_8,lty="dotted")
abline(v=c(1/64,1/32,1/16,1/8),
lty=c("solid","longdash","dashed","dotted"))
lines(c(sum(taper_1^4),0),c(0.6,0.6))
lines(c(sum(taper_2^4),0),c(0.5,0.5),lty="longdash")
lines(c(sum(taper_4^4),0),c(0.4,0.4),lty="dashed")
lines(c(sum(taper_8^4),0),c(0.3,0.3),lty="dotted")
axis(1,at=c(0,1/64,1/32,1/16,1/8),labels=c("0","1/64","1/32","1/16","1/8"))
axis(1, at=seq(0,1/8,1/64), labels=FALSE, tcl=-0.25)
axis(1, at=c(5/64), labels=c("5/64"), tcl=-0.25)
axis(2, at=seq(0,1,0.5), las=2)
    axis(2, at=seq(0,1,0.1), labels=FALSE, tcl=-0.25)
box(bty="l")
}
### Figure 212
fig_212()
### Table 214 ###
N <- 64
the_tapers <- list(cosine_taper(N,0),
cosine_taper(N,0.2),
cosine_taper(N,0.5),
cosine_taper(N,1),
slepian_taper(N,1),
slepian_taper(N,2),
slepian_taper(N,4),
slepian_taper(N,8))
delta_f <- 1/N
### Table 214, first row (1.50 1.56 1.72 2.06 1.59 2.07 2.86 4.01)
round(unlist(lapply(the_tapers,B_H))/delta_f,2)
### Table 214, second row (1.00 1.11 1.35 1.93 1.40 1.99 2.81 3.97)
round(unlist(lapply(the_tapers,function(x) sum(x^4)))/delta_f,2)
### Table 214, third row (1.50 1.41 1.27 1.06 1.13 1.04 1.02 1.01)
round(unlist(lapply(the_tapers,B_H))/unlist(lapply(the_tapers,function(x) sum(x^4))),2)
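### The next few lines are not part of the original script: a small convenience
### sketch (assuming the_tapers, delta_f and B_H defined above are available)
### that gathers the three rows of Table 214 into a single data frame
taper_labels <- c("p=0","p=0.2","p=0.5","p=1","NW=1","NW=2","NW=4","NW=8")
bh_widths <- unlist(lapply(the_tapers,B_H))                   # autocorrelation widths
var_widths <- unlist(lapply(the_tapers,function(x) sum(x^4))) # sums of h_t^4
data.frame(taper=taper_labels,
           row_1=round(bh_widths/delta_f,2),
           row_2=round(var_widths/delta_f,2),
           row_3=round(bh_widths/var_widths,2))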
### Figure 216 ###
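### fig_216 plots the periodogram of a short AR(2) segment (left-hand plot) or,
### with right_p=TRUE, its normalized cumulative periodogram (right-hand plot);
### in the latter case the two sloping lines are Kolmogorov-Smirnov-type bounds
### at the (approximate) 5% level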
fig_216 <- function(ts,big_y_ats=c(0,4,8),delta_y=1,inc,right_p=FALSE)
{
y_upper_lim <- big_y_ats[length(big_y_ats)]
temp <- pgram(ts,center=FALSE)
N <- length(ts)
zap_me <- c(1,if(is_even(N)) N/2+1 else NULL)
freqs <- temp$freqs[-zap_me]
the_pgram <- temp$sdfe[-zap_me]
the_cumsum <- cumsum(the_pgram)
xs <- if(right_p) freqs[-length(freqs)] else freqs
ys <- if(right_p) the_cumsum[-length(the_cumsum)]/the_cumsum[length(the_cumsum)] else the_pgram
plot(xs,ys,
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=if(right_p) c(0,1) else c(0,y_upper_lim),yaxs="i",ylab=paste(if(right_p) "cumulative" else NULL,"periodogram"),
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 216",if(right_p) "(b)" else "(a)",sep=""))
points(xs,ys)
if(right_p)
{
M <- length(the_cumsum)
D_0p05 <- 1.358/(sqrt(M-1) + 0.12 + 0.11/sqrt(M-1))
L_u <- function(f) D_0p05 - 1/(M-1) + N*f/(M-1)
L_l <- function(f) -D_0p05 + N*f/(M-1)
lines(c(0,0.5),c(L_u(0),L_u(0.5)))
lines(c(0,0.5),c(L_l(0),L_l(0.5)))
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(right_p) c(0,1) else big_y_ats,las=2)
if(!right_p) axis(2,at=seq(0,y_upper_lim,delta_y),label=FALSE,tcl=-0.25)
text(x=0.5,y=if(right_p) 0.1 else 0.9*y_upper_lim,if(right_p) "(b)" else "(a)",pos=2)
box(bty="l")
}
### Figure 216, left-hand plot
fig_216(ar2_1[1:32])
### Figure 216, right-hand plot
fig_216(ar2_1[1:32],right_p=TRUE)
### Figure 217 ###
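### fig_217 computes a periodogram of the series concatenated with its time
### reverse (with the zero-frequency value halved), which presumably corresponds
### to the DCT-II-based estimator considered in fig_218, and compares it with
### the true AR SDF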
fig_217 <- function(ts,coeffs,innov_var,tag)
{
the_dct_pgram <- pgram(c(ts,rev(ts)),center=FALSE)
the_dct_pgram$sdfe[1] <- the_dct_pgram$sdfe[1]/2
plot(the_dct_pgram$freqs,dB(the_dct_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 217",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 217, top row of plots
fig_217(ar4_1,ar4_coeffs,ar4_innov_var,"(e)")
fig_217(ar4_2,ar4_coeffs,ar4_innov_var,"(f)")
### Figure 217, bottom row of plots
fig_217(ar4_3,ar4_coeffs,ar4_innov_var,"(g)")
fig_217(ar4_4,ar4_coeffs,ar4_innov_var,"(h)")
### Figure 218 ###
fig_218 <- function(N,coeffs,innov_var,tag_1,tag_2,N_pad_ar=1024)
{
    ar_acvs <- ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE)
    ev_pgram <- ev_lag_window_sdf_estimator(ar_acvs)
ev_DCT <- ev_DCTII(ar_acvs)
plot(ev_pgram$freqs,dB(ev_pgram$sdf),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 218",tag_2,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=N_pad_ar)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
lines(ev_DCT$freqs,dB(ev_DCT$sdf),lwd=2.0)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.25,y=-50,tag_1,pos=1)
text(x=0.5,y=10,tag_2,pos=2)
box(bty="l")
}
### Figure 218, top row of plots
fig_218(16,ar4_coeffs,ar4_innov_var,expression(italic(N==16)),"(a)")
fig_218(64,ar4_coeffs,ar4_innov_var,expression(italic(N==64)),"(b)")
### Figure 218, bottom row of plots
fig_218(256,ar4_coeffs,ar4_innov_var,expression(italic(N==256)),"(c)")
fig_218(1024,ar4_coeffs,ar4_innov_var,expression(italic(N==1024)),"(d)")
### Figures 223 and 224b ###
fig_223 <- function(ys,y_ats,x_lab,main="Figure 223")
{
N <- length(ys)
plot(0:(N-1),Re(ys),
xlim=c(0,N),xlab=x_lab,
ylim=c(y_ats[1],tail(y_ats,1)),ylab=" ",
typ="n",axes=FALSE,
main=main)
if(!(sum(Im(ys)) == 0))
{
lines(0:(N-1), Im(ys), lwd=0.5, col="gray40")
points(0:(N-1), Im(ys), pch=16, cex=0.5, col="gray40")
}
lines(0:(N-1), Re(ys), col="black")
points(0:(N-1), Re(ys), pch=16, cex=0.5)
axis(1,at=c(0,N/2,N))
axis(2,at=y_ats,las=2)
box(bty="l")
}
### Figure 223, top row, left-hand plot
(N <- length(earth_20)) # 20
(M <- next_power_of_2(2*N-1)) # 64
M-N # 44
tXt <- c(earth_20-mean(earth_20),rep(0,M-N))
fig_223(tXt,seq(-6,6,6),expression(italic(t)))
### Figure 223, top row, right-hand plot
tXt_dft <- dft(tXt)
fig_223(tXt_dft,seq(-32.5,32.5,32.5),expression(italic(k)))
### Figure 223, 2nd row, left-hand plot
tht <- c(hanning_taper(N),rep(0,M-N))
fig_223(tht,c(0,1),expression(italic(t)))
### Figure 223, 2nd row, right-hand plot
tht_dft <- dft(tht)
fig_223(tht_dft,seq(-4,4,4),expression(italic(k)))
### Figure 223, 3rd row, left-hand plot
thttXt <- tht*tXt
fig_223(thttXt,seq(-2,2,2),expression(italic(t)))
### Figure 223, 3rd row, right-hand plot
thttXt_dft <- dft(thttXt)
fig_223(thttXt_dft,seq(-6,6,6),expression(italic(k)))
### Figure 223, 4th row, left-hand plot
tSdk <- abs(thttXt_dft)^2
tsdtau <- Re(inverse_dft(tSdk))
fig_223(tsdtau,seq(-8,8,8),expression(tau))
### Figure 223, 4th row, right-hand plot
fig_223(tSdk,seq(0,60,30),expression(italic(k)))
### Figure 224b, top row, left-hand plot
fig_223(tXt,seq(-6,6,6),expression(italic(t)),main="Figure 224b")
### Figure 224b, top row, right-hand plot
fig_223(tXt_dft,seq(-32.5,32.5,32.5),expression(italic(k)),main="Figure 224b")
### Figure 224b, 2nd row, left-hand plot
tSpk <- abs(tXt_dft)^2/N
tsptau <- Re(inverse_dft(tSpk))
fig_223(tsptau,seq(-8,8,8),expression(tau),main="Figure 224b")
### Figure 224b, 2nd row, right-hand plot
fig_223(tSpk,seq(0,60,30),expression(italic(k)),main="Figure 224b")
### Figure 225 ###
fig_225 <- function(ts,delta_t=1/4)
{
N <- length(ts)
plot((0:(N-1))*delta_t,ts,
xlim=c(0,N*delta_t),xlab="time (sec)",
ylim=c(-1200,1700),ylab="relative height",
typ="l",axes=FALSE,
main="Figure 225")
axis(1,at=seq(0,256,64))
axis(2,at=seq(-1000,1000,1000),las=2)
axis(2,at=seq(-1000,1500,500),label=FALSE,tcl=-0.25)
box(bty="l")
}
### Figure 225
fig_225(ocean_wave)
### Figures 226 and 227 ###
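### fig_226 plots a direct spectral estimate of the ocean wave series (sampling
### interval 1/4 sec) for a given data taper, with a crisscross indicating the
### bandwidth and confidence-interval width; the same function with pad=2 and a
### different main title produces Figure 227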
fig_226 <- function(ts,taper,tag_1,tag_2,pad=1,h_line=0,v_line_p=FALSE,delta_t=1/4,main="Figure 226")
{
dse <- direct_sdf_est(ts,taper,center=TRUE,delta_t=delta_t,pad=pad)
plot(dse$freqs,dB(dse$sdfe),
xlim=c(0,2.0),xaxs="i",xlab=expression(paste(italic(f)," (Hz)")),
ylim=c(-40,80),yaxs="i",ylab="dB",
typ="l",axes=FALSE,
main=paste(main,tag_2,sep=""))
cc <- dse$cc
x_cc <- 0.16
y_cc <- 15
lines(c(x_cc,x_cc),y_cc+c(cc$up,-cc$down),lwd=0.5)
lines(x_cc+c(-cc$width/2,cc$width/2),c(y_cc,y_cc),lwd=0.5)
if(v_line_p) lines(c(0.16,0.16),c(68.5,80),lwd=0.5)
abline(h=h_line,lty="dashed",lwd=0.5)
axis(1,at=seq(0,2.0,0.5))
axis(1,at=seq(0,2.0,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-40,80,20),las=2)
axis(2,at=seq(-40,80,10),label=FALSE,tcl=-0.25)
text(1.0,80,tag_1,pos=1)
text(1.9,80,tag_2,pos=1)
box(bty="l")
}
### Figure 226, top plot
fig_226(ocean_wave,default_taper(1024),"periodogram","(a)",v_line_p=TRUE)
### Figure 226, 2nd plot
fig_226(ocean_wave,slepian_taper(1024,1),expression(paste("Slepian, ", italic(NW==1/Delta[t]))),"(b)")
### Figure 226, 3rd plot
fig_226(ocean_wave,slepian_taper(1024,2),expression(paste("Slepian, ", italic(NW==2/Delta[t]))),"(c)")
### Figure 226, bottom plot
fig_226(ocean_wave,slepian_taper(1024,4),expression(paste("Slepian, ", italic(NW==4/Delta[t]))),"(d)")
### Figure 227
fig_226(ocean_wave,default_taper(1024),"periodogram",NULL,pad=2,h_line=NULL,main="Figure 227")
### Figure 228 ###
fig_228 <- function(ts,delta_t=0.001)
{
N <- length(ts)
plot((0:(N-1))*delta_t,ts,
     xlab="time (sec)",
ylab="speed",
typ="l",axes=FALSE,
main="Figure 228")
axis(1,at=seq(0,2.0,0.5))
axis(2,at=seq(5,15,5),las=2)
axis(2,at=seq(0,20,1),label=FALSE,tcl=-0.25)
box(bty="l")
}
### Figure 228
fig_228(chaotic_beam)
### Figure 229 ###
fig_229 <- function(ts,taper,tag_1,tag_2,delta_t=0.001)
{
dse <- direct_sdf_est(ts,taper,center=TRUE,delta_t=delta_t)
plot(dse$freqs,dB(dse$sdfe),
xlim=c(0,500),xaxs="i",xlab=expression(paste(italic(f)," (Hz)")),
ylim=c(-120,0),yaxs="i",ylab="dB",
typ="l",axes=FALSE,
main=paste("Figure 229",tag_2,sep=""))
cc <- dse$cc
x_cc <- 425
y_cc <- -30
lines(c(x_cc,x_cc),y_cc+c(cc$up,-cc$down),lwd=0.5)
lines(x_cc+c(-cc$width/2,cc$width/2),c(y_cc,y_cc),lwd=0.5)
abline(h=-86,lty="dashed",lwd=0.5)
axis(1,at=seq(0,500,100))
axis(1,at=seq(0,500,10),label=FALSE,tcl=-0.25)
axis(2,at=seq(-120,0,20),las=2)
axis(2,at=seq(-120,0,10),label=FALSE,tcl=-0.25)
text(250,0,tag_1,pos=1)
text(475,0,tag_2,pos=1)
box(bty="l")
}
### Figure 229, top plot
fig_229(chaotic_beam,default_taper(2048),"periodogram","(a)")
### Figure 229, 2nd plot
fig_229(chaotic_beam,slepian_taper(2048,1),expression(paste("Slepian, ", italic(NW==1/Delta[t]))),"(b)")
### Figure 229, 3rd plot
fig_229(chaotic_beam,slepian_taper(2048,2),expression(paste("Slepian, ", italic(NW==2/Delta[t]))),"(c)")
### Figure 229, bottom plot
fig_229(chaotic_beam,hanning_taper(2048),"Hanning (100% cosine)","(d)")
### Figure 231 ###
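### fig_231 is similar to fig_216 but uses the ocean noise series; its
### right-hand plot shows Kolmogorov-Smirnov-type bounds at both the 5% level
### (solid lines) and the 10% level (dashed lines)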
fig_231 <- function(ts,right_p=FALSE)
{
temp <- pgram(ts,center=TRUE)
N <- length(ts)
zap_me <- c(1,N/2+2)
freqs <- temp$freqs[-zap_me]
the_pgram <- temp$sdfe[-zap_me]
the_cumsum <- cumsum(the_pgram)
xs <- if(right_p) freqs[-length(freqs)] else freqs
ys <- if(right_p) the_cumsum[-length(the_cumsum)]/the_cumsum[length(the_cumsum)] else the_pgram
plot(xs,ys,
xlim=c(0,0.5),xaxs="i",xlab=expression(paste(italic(f)," (Hz)")),
ylim=if(right_p) c(0,1) else c(0,36),yaxs="i",ylab=paste(if(right_p) "cumulative" else NULL,"periodogram"),
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 231",if(right_p) "(b)" else "(a)",sep=""))
if(right_p)
{
M <- length(the_cumsum)
D_0p05 <- 1.358/(sqrt(M-1) + 0.12 + 0.11/sqrt(M-1))
D_0p1 <- 1.224/(sqrt(M-1) + 0.12 + 0.11/sqrt(M-1))
L_u <- function(f,D) D - 1/(M-1) + N*f/(M-1)
L_l <- function(f,D) -D + N*f/(M-1)
lines(c(0,0.5),c(L_u(0,D_0p05),L_u(0.5,D_0p05)))
lines(c(0,0.5),c(L_l(0,D_0p05),L_l(0.5,D_0p05)))
lines(c(0,0.5),c(L_u(0,D_0p1),L_u(0.5,D_0p1)),lty="dashed")
lines(c(0,0.5),c(L_l(0,D_0p1),L_l(0.5,D_0p1)),lty="dashed")
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(right_p) c(0,1) else c(0,18,36),las=2)
if(!right_p) axis(2,at=seq(0,36,3),label=FALSE,tcl=-0.25)
text(x=0.5,y=if(right_p) 0.1 else 32.4,if(right_p) "(b)" else "(a)",pos=2)
box(bty="l")
}
### Figure 231, left-hand plot
fig_231(ocean_noise)
### Figure 231, right-hand plot
fig_231(ocean_noise,right_p=TRUE)
### NOTE: code to recreate Figure 239 is not provided - to do so
### would reveal the solution to Exercise [6.11]!
### R CODE FOR REPRODUCING CONTENT OF FIGURES AND TABLES IN CHAPTER 6 ...
wind_speed <- scan("http://faculty.washington.edu/dbp/sauts/Data/wind_speed_128.txt")
ar2_1 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_1.txt")
ar2_2 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_2.txt")
ar2_3 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_3.txt")
ar2_4 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar2_4.txt")
ar4_1 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_1.txt")
ar4_2 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_2.txt")
ar4_3 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_3.txt")
ar4_4 <- scan("http://faculty.washington.edu/dbp/sauts/Data/ar4_4.txt")
earth_20 <- scan("http://faculty.washington.edu/dbp/sauts/Data/earth_20.txt")
ocean_wave <- scan("http://faculty.washington.edu/dbp/sauts/Data/ocean_wave.txt")
chaotic_beam <- scan("http://faculty.washington.edu/dbp/sauts/Data/chaotic_beam.txt")
ocean_noise <- scan("http://faculty.washington.edu/dbp/sauts/Data/ocean_noise_128.txt")
### functions used to compute content of figures in Chapter 6 ...
source("http://faculty.washington.edu/dbp/sauts/R-code/acvs.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ar_coeffs_to_acvs.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ar_coeffs_to_sdf.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/B_H.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/B_U.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/circular_shift.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/cosine_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/create_tapered_series.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/dft.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/dB.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/do_crisscross_dse.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/direct_sdf_est.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_DCTII.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_lag_window_sdf_estimator.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_shp.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/ev_shp_squared.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/fejer_kernel.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/hanning_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/is_even.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/next_power_of_2.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/pgram.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/rectangular_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/sim_ar_process.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/slepian_taper.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/spec_window.R")
source("http://faculty.washington.edu/dbp/sauts/R-code/step_down_LD_recursions.R")
###
ar2_innov_var <- 1
ar2_coeffs <- c(0.75,-0.5)
ar4_innov_var <- 0.002
ar4_coeffs <- c(2.7607, -3.8106, 2.6535, -0.9238)
### BEGINNING OF CODE TO REPRODUCE CONTENT OF FIGURES/TABLES
### Figure 168 ###
fig_168_top_row <- function(the_acvs,tag)
{
N <- length(the_acvs)
taus <- 0:(N-1)
plot(taus,the_acvs,
xlim=c(0,N),xlab=expression(tau),
ylim=c(-2,2),ylab="ACVS",
typ="b",lwd=0.25,cex=0.5,axes=FALSE,
main="Figure 168")
axis(1,at=seq(0,60,20))
axis(1,at=seq(0,60,10),label=FALSE,tcl=-0.25)
axis(2,at=seq(-2,2,2),las=2)
axis(2,at=seq(-2,2,1),label=FALSE,tcl=-0.25)
text(60,1.8,tag,pos=2)
box(bty="l")
}
fig_168_bot_rows <- function(biased,unbiased,y_lab)
{
max_lag <- length(biased)-1
taus <- 0:max_lag
plot(taus,dB(unbiased),
xlim=c(0,max_lag+1),xlab=expression(tau),
ylim=c(-80,20),ylab=y_lab,
typ="l",lwd=0.5,col="gray40",axes=FALSE,
main="Figure 168")
lines(taus,dB(biased))
axis(1,at=seq(0,60,20))
axis(1,at=seq(0,60,10),label=FALSE,tcl=-0.25)
axis(2,at=seq(-80,40,20),las=2)
axis(2,at=seq(-80,40,10),label=FALSE,tcl=-0.25)
box(bty="l")
}
ar2_acvs <- ar_coeffs_to_acvs(ar2_coeffs,63,ar2_innov_var,FALSE)
ar4_acvs <- ar_coeffs_to_acvs(ar4_coeffs,63,ar4_innov_var,FALSE)
b_to_u <- 64/(64:1)
### NOTE: evaluation of the following R code is time consuming
### (particularly the two lines involving ev_shp_squared,
### each of which took 45 minutes to execute on a 2017-vintage
### MacBook Pro):
###
### ev_shp_ar2 <- sapply(0:63,ev_shp,64,ar2_acvs)
### ev_shp_squared_ar2 <- sapply(0:63,ev_shp_squared,64,ar2_acvs)
### ev_shp_ar4 <- sapply(0:63,ev_shp,64,ar4_acvs)
### ev_shp_squared_ar4 <- sapply(0:63,ev_shp_squared,64,ar4_acvs)
###
### Evaluation of the following four load forms alleviates
### having to recreate ev_shp_ar2 etc.
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_ar2.Rdata"))
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_squared_ar2.Rdata"))
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_ar4.Rdata"))
load(url("http://faculty.washington.edu/dbp/sauts/Rdata/ev_shp_squared_ar4.Rdata"))
### Figure 168, plots in left-hand column from top to bottom
fig_168_top_row(ar2_acvs,"AR(2)")
fig_168_bot_rows((ev_shp_ar2-ar2_acvs)^2,(b_to_u*ev_shp_ar2-ar2_acvs)^2,"squared bias (dB)")
fig_168_bot_rows(ev_shp_squared_ar2-ev_shp_ar2^2,b_to_u^2*(ev_shp_squared_ar2-ev_shp_ar2^2),"variance (dB)")
fig_168_bot_rows(ev_shp_squared_ar2-ev_shp_ar2^2+(ev_shp_ar2-ar2_acvs)^2,b_to_u^2*(ev_shp_squared_ar2-ev_shp_ar2^2)+(b_to_u*ev_shp_ar2-ar2_acvs)^2,"MSE (dB)")
### Figure 168, plots in right-hand column from top to bottom
fig_168_top_row(ar4_acvs,"AR(2)")
fig_168_bot_rows((ev_shp_ar4-ar4_acvs)^2,(b_to_u*ev_shp_ar4-ar4_acvs)^2,"squared bias (dB)")
fig_168_bot_rows(ev_shp_squared_ar4-ev_shp_ar4^2,b_to_u^2*(ev_shp_squared_ar4-ev_shp_ar4^2),"variance (dB)")
fig_168_bot_rows(ev_shp_squared_ar4-ev_shp_ar4^2+(ev_shp_ar4-ar4_acvs)^2,b_to_u^2*(ev_shp_squared_ar4-ev_shp_ar4^2)+(b_to_u*ev_shp_ar4-ar4_acvs)^2,"MSE (dB)")
### Figure 169 ###
fig_169 <- function(ts)
{
temp <- acvs(ts)
taus <- temp$lags
acvs_biased <- temp$acvs
acvs_unbiased <- acvs(ts,unbiased=TRUE)$acvs
plot(taus,acvs_biased,
xlim=c(0,length(ts)),xlab=expression(paste(tau," (in 0.025 sec)")),
ylim=c(-4,4),ylab="ACVS",
typ="l",axes=FALSE,
main="Figure 169")
lines(taus,acvs_unbiased,lwd=0.5,col="gray40")
abline(h=0,lty="dashed")
axis(1,at=seq(0,128,32))
axis(2,at=seq(-4,4,2),las=2)
axis(2,at=seq(-4,4,1),label=FALSE,tcl=-0.25)
axis(4,at=seq(-4,4,2),label=FALSE)
axis(4,at=seq(-4,4,1),label=FALSE,tcl=-0.25)
box(bty="u")
}
### Figure 169
fig_169(wind_speed)
### Figure 172 ###
fig_172 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_pgram <- pgram(ts,center=FALSE)
plot(the_pgram$freqs,the_pgram$sdfe,
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(0,y_ats[length(y_ats)]),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 172",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,the_ar_spec$sdf)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=y_ats,las=2)
text(x=0.5,y=0.95*y_ats[length(y_ats)],tag,pos=2)
box(bty="l")
}
### Figure 172, top row of plots
fig_172(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_172(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 172, 2nd row of plots
fig_172(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_172(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 172, 3rd row of plots
fig_172(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_172(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 172, bottom row of plots
fig_172(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_172(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 173 ###
fig_173 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_pgram <- pgram(ts,center=FALSE)
plot(the_pgram$freqs,dB(the_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra (dB)",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 173",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
if(length(coeffs) == 4)
{
N <- length(ts)
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE))
lines(temp$freqs, dB(temp$sdf_ev), lwd=0.5)
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 173, top row of plots
fig_173(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_173(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 173, 2nd row of plots
fig_173(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_173(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 173, 3rd row of plots
fig_173(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_173(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 173, bottom row of plots
fig_173(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_173(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 176 ###
fig_176 <- function(N,right_p=FALSE,tag=NULL)
{
the_kernel <- fejer_kernel(N)
plot(the_kernel$freqs,if(!right_p) dB(the_kernel$kernel) else the_kernel$kernel,
xlim=c(-0.5,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=if(!right_p) c(-40,20) else c(0,N),yaxs="i",ylab="spectral window",
typ="l",lwd=0.25,axes=FALSE,
main="Figure 176")
axis(1,at=seq(-0.5,0.5,0.5))
axis(1,at=seq(-0.5,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(!right_p) seq(-40,20,20) else seq(0,N,N/2),las=2)
if(!right_p)
{
axis(2,at=seq(-40,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
}
box(bty="l")
}
### Figure 176, left-hand column of plots
fig_176(4,tag=expression(italic(N==4)))
fig_176(16,tag=expression(italic(N==16)))
fig_176(64,tag=expression(italic(N==64)))
### Figure 176, right-hand column of plots
fig_176(4,tag=expression(italic(N==4)),right_p=TRUE)
fig_176(16,tag=expression(italic(N==16)),right_p=TRUE)
fig_176(64,tag=expression(italic(N==64)),right_p=TRUE)
### Figure 177 ###
fig_177 <- function(N,coeffs,innov_var,tag_1,tag_2,tag_3=NULL)
{
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE),N_pad=1024)
plot(temp$freqs,dB(temp$sdf_ev),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-10,10),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 177",tag_1,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-10,10,10),las=2)
axis(2,at=seq(-10,10,2),label=FALSE,tcl=-0.25)
text(x=0.5,y=9,tag_1,pos=2)
text(x=0.25,y=-8,tag_2,pos=1)
text(x=0.25,y=9,tag_3,pos=1)
box(bty="l")
}
### Figure 177, left-hand and right-hand plots
fig_177(16,ar2_coeffs,ar2_innov_var,"(a)",expression(italic(N==16)),"AR(2)")
fig_177(64,ar2_coeffs,ar2_innov_var,"(b)",expression(italic(N==64)))
### Figure 178 ###
fig_178 <- function(N,coeffs,innov_var,tag_1,tag_2,tag_3=NULL,vlines=NULL)
{
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE),N_pad=2048)
plot(temp$freqs,dB(temp$sdf_ev),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 178",tag_1,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
abline(v=vlines, lty="dotted")
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=15,tag_1,pos=2)
text(x=0.25,y=-50,tag_2,pos=1)
text(x=0.25,y=20,tag_3,pos=1)
box(bty="l")
}
### Figure 178, top row of plots
fig_178(16,ar4_coeffs,ar4_innov_var,"(a)",expression(italic(N==16)),"AR(4)")
fig_178(64,ar4_coeffs,ar4_innov_var,"(b)",expression(italic(N==64)),vlines=c(1/8,0.4))
### Figure 178, bottom row of plots
fig_178(256,ar4_coeffs,ar4_innov_var,"(c)",expression(italic(N==256)))
fig_178(1024,ar4_coeffs,ar4_innov_var,"(d)",expression(italic(N==1024)))
### Figure 180 ###
fig_180 <- function(the_kernel,mult_p=FALSE,v_line=1/8,trans=function(x) x,big_y_ats=seq(-40,20,20),little_y_ats=seq(-50,30,10),tag="(a)",word="and",the_sdf=two_sided_ar4_sdf)
{
N_freqs <- length(the_kernel)
freqs <- seq(-0.5+1/N_freqs,0.5,length=N_freqs)
ys <- trans(if(mult_p) the_kernel*the_sdf else the_kernel)
plot(freqs,ys,
xlim=c(-0.5,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(little_y_ats[1],little_y_ats[length(little_y_ats)]),yaxs="i",ylab=paste("kernel",word,"AR(4) SDF"),
typ="l",lwd=0.25,axes=FALSE,
main=paste("Figure 180",tag,sep=""))
if(!mult_p) lines(freqs,trans(the_sdf))
abline(v=v_line,lty="dotted")
axis(1,at=seq(-0.5,0.5,0.5))
axis(1,at=seq(-0.5,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=big_y_ats,las=2)
axis(2,at=little_y_ats,label=FALSE,tcl=-0.25)
text(x=-0.35,y=0.88*diff(range(little_y_ats))+little_y_ats[1],tag,pos=2)
box(bty="l")
}
temp <- ar_coeffs_to_sdf(ar4_coeffs,ar4_innov_var,N_pad=2048)$sdf
two_sided_ar4_sdf <- c(rev(temp[c(-1,-length(temp))]),temp)
temp <- fejer_kernel(64)$kernel
fejer_shift_1 <- circular_shift(temp,256)
fejer_shift_2 <- circular_shift(temp,820)
### Figure 180, top row of plots
fig_180(fejer_shift_1,trans=dB)
fig_180(fejer_shift_1,big=c(0,40,80),little=c(0,40,80),tag="(b)")
### Figure 180, 2nd row of plots
fig_180(fejer_shift_1,trans=dB,mult_p=TRUE,tag="(c)",word="times")
fig_180(fejer_shift_1,big=c(0,250,500),little=c(0,250,500),mult_p=TRUE,tag="(d)",word="times")
### Figure 180, 3rd row of plots
fig_180(fejer_shift_2,trans=dB,v_line=0.4,tag="(e)")
fig_180(fejer_shift_2,big=c(0,40,80),little=c(0,40,80),v_line=0.4,tag="(f)")
### Figure 180, bottom row of plots
fig_180(fejer_shift_2,trans=dB,mult_p=TRUE,v_line=0.4,tag="(g)",word="times")
fig_180(fejer_shift_2,big=c(0,1,2),little=c(0,1,2),mult_p=TRUE,v_line=0.4,tag="(h)",word="times")
### Figure 182 ###
###
### NOTE: fig_182 is virtually the same as fig_173, the only
### difference being the addition of pad=2 in the call
## to pgram (fig_173 uses the default pad=1)
fig_182 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_pgram <- pgram(ts,center=FALSE,pad=2)
plot(the_pgram$freqs,dB(the_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra (dB)",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 182",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
if(length(coeffs) == 4)
{
N <- length(ts)
temp <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE))
lines(temp$freqs, dB(temp$sdf_ev), lwd=0.5)
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 182, top row of plots
fig_182(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_182(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 182, 2nd row of plots
fig_182(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_182(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 182, 3rd row of plots
fig_182(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_182(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 182, bottom row of plots
fig_182(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_182(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 183a ###
fig_183a <- function(ts,tag,coeffs=ar4_coeffs)
{
N <- length(ts)
p <- length(coeffs)
plot(0:3,ts[(N-3):N],
xlim=c(1,6),xlab=expression(italic(t)),
ylim=c(-5,5),ylab="AR(4) series",
typ="o",axes=FALSE,
main=paste("Figure 183a",tag,sep=""))
pred <- as.vector(coeffs%*%ts[N:(N-p+1)])
lines(3:4,c(ts[N],pred), type="b", pch=" ", lty="dotted")
points(4,pred, pch=3)
lines(4:7,ts[1:4], type="o")
axis(1,at=1:6,labels=c(1021,NA,1023,0,1,2))
axis(2,at=seq(-5,5,5),las=2)
axis(2,at=seq(-5,5,1),label=FALSE,tcl=-0.25)
text(x=5.5,y=4.5,tag,pos=2)
box(bty="l")
}
### Figure 183a, top row
fig_183a(ar4_1,"(e)")
fig_183a(ar4_2,"(f)")
### Figure 183a, bottom row
fig_183a(ar4_3,"(g)")
fig_183a(ar4_4,"(h)")
### Figure 183b ###
fig_183b <- function(x,y)
{
plot(x,y,
xlim=c(-0.15,5.5),xaxs="i",xlab="absolute prediction error",
ylim=c(-52,-25),yaxs="i",ylab="dB",
typ="p",cex=0.625,axes=FALSE,
main="Figure 183b")
lines(lowess(x,y))
abline(h=c(-47.20893,-30.3018),lty=c("dotted","dashed"))
axis(1,at=0:5)
axis(2,at=seq(-50,-30,10),las=2)
box(bty="l")
}
set.seed(1)
N_rep <- 100
x_results <- rep(0,100)
y_results <- rep(0,100)
LD_ar4 <- step_down_LD_recursions(ar4_coeffs,ar4_innov_var,proc=FALSE)
for(n in 1:N_rep)
{
ar_ts <- sim_ar_process(1024,LD=LD_ar4)
x_results[n] <- abs(as.numeric(ar_ts[1024:1021] %*% ar4_coeffs) - ar_ts[1])
y_results[n] <- dB(mean(pgram(c(ar_ts,rep(0,1024)),center=FALSE)$sdfe[821:1025]))
}
### Figure 183b
fig_183b(x_results,y_results)
### Figure 185 ###
fig_185 <- function(ys,big_y_ats=seq(-5,5,5),little_y_ats=NULL,y_lab="AR(4) series")
{
N <- length(ys)
plot(0:(N-1),ys,
xlim=c(0,N),xlab=expression(italic(t)),
ylim=c(big_y_ats[1],big_y_ats[length(big_y_ats)]),ylab=y_lab,
typ="l",lwd=0.25,axes=FALSE,
main="Figure 185")
axis(1,at=seq(0,1024,512))
axis(1,at=seq(0,1024,256),label=FALSE,tcl=-0.25)
axis(2,at=big_y_ats,las=2)
axis(2,at=little_y_ats,label=FALSE,tcl=-0.25)
box(bty="l")
}
the_taper <- hanning_taper(1024)
### Figure 185, top to bottom plots
fig_185(ar4_1,little=seq(-5,5,1))
fig_185(the_taper,big=seq(0,0.06,0.02),y_lab="Hanning taper")
fig_185(the_taper*ar4_1,big=seq(-0.2,0.2,0.1),y_lab="tapered series")
### Figure 187 ###
fig_187 <- function(ts,coeffs,innov_var,y_ats,tag)
{
the_dse <- direct_sdf_est(ts,hanning_taper(length(ts)),center=FALSE,pad=2)
plot(the_dse$freqs,dB(the_dse$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab=paste("AR(",length(coeffs),") spectra (dB)",sep=""),
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 187",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 187, top row of plots
fig_187(ar2_1,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(a)")
fig_187(ar2_2,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(b)")
### Figure 187, 2nd row of plots
fig_187(ar2_3,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(c)")
fig_187(ar2_4,ar2_coeffs,ar2_innov_var,seq(0,25,5),"(d)")
### Figure 187, 3rd row of plots
fig_187(ar4_1,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(e)")
fig_187(ar4_2,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(f)")
### Figure 187, bottom row of plots
fig_187(ar4_3,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(g)")
fig_187(ar4_4,ar4_coeffs,ar4_innov_var,seq(0,150,50),"(h)")
### Figure 190 ###
fig_190 <- function(the_taper,left_tag,right_tag)
{
N <- length(the_taper)
plot(0:(N-1),the_taper,
xlim=c(0,N),xlab=expression(italic(t)),
ylim=c(0,0.3),ylab="data taper",
typ="p",pch=20,cex=0.2,axes=FALSE,
main=paste("Figure 190",left_tag,sep=""))
axis(1,at=seq(0,64,32))
axis(1,at=seq(0,64,16),label=FALSE,tcl=-0.25)
axis(2,at=seq(0.0,0.3,0.1),las=2)
text(x=0,y=0.29,left_tag,pos=4)
text(x=64,y=0.29,right_tag,pos=2)
box(bty="l")
}
### Figure 190, left-hand column
fig_190(rectangular_taper(64),"(a)",expression(paste("rectangular (",italic(p==0),")",sep="")))
fig_190(cosine_taper(64,0.2),"(b)",expression(italic(p==0.2)))
fig_190(cosine_taper(64,0.5),"(c)",expression(italic(p==0.5)))
fig_190(hanning_taper(64),"(d)",expression(paste("Hanning (",italic(p==1),")",sep="")))
### Figure 190, right-hand column
fig_190(slepian_taper(64,1),"(e)",expression(italic(NW==1)))
fig_190(slepian_taper(64,2),"(f)",expression(italic(NW==2)))
fig_190(slepian_taper(64,4),"(g)",expression(italic(NW==4)))
fig_190(slepian_taper(64,8),"(h)",expression(italic(NW==8)))
### Figure 191 ###
fig_191 <- function(the_taper,left_tag,right_tag,v_line=NULL)
{
temp <- spec_window(the_taper,pad_factor=16,fix_nulls_p=TRUE,first_p=FALSE)
freqs <- temp$freqs
ys <- dB(temp$sw)
plot(freqs,ys,
xlim=c(-0.5,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-100,20),yaxs="i",ylab="spectral window (dB)",
typ="l",lwd=0.25,axes=FALSE,
main=paste("Figure 191",left_tag,sep=""))
abline(v=v_line*c(-1,1),lty="dotted")
## add 3 dB down width
i_max <- which.max(ys)
three_dB_down <- ys[i_max] - 3
i <- which(ys[i_max:length(ys)] <= three_dB_down)[1] + i_max - 1
lines(freqs[c(2*i_max-i,i)],c(three_dB_down,three_dB_down))
## add variance width
bw_v <- function(taper)
{
N <- length(taper)
Nm1 <- N - 1
autocor <- Re(fft(abs(fft(c(taper,rep(0,N)))^2)))/(2*N)
return(sqrt(1 + sum(((-1)^(1:Nm1))*autocor[2:N]/(1:Nm1)^2)*12/pi^2))
}
lines(bw_v(the_taper)*c(-0.5,0.5),c(three_dB_down-5,three_dB_down-5))
## add autocorrelation width
lines(B_H(the_taper)*c(-0.5,0.5),c(three_dB_down-10,three_dB_down-10))
axis(1,at=seq(-0.5,0.5,0.5))
axis(1,at=seq(-0.5,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-100,20,20),las=2)
axis(2,at=seq(-100,20,10),label=FALSE,tcl=-0.25)
text(x=-0.5,y=10,left_tag,pos=4)
text(x=0.5,y=10,right_tag,pos=2)
box(bty="l")
}
### Figure 191, left-hand column
fig_191(rectangular_taper(64),"(a)","rectangular")
fig_191(cosine_taper(64,0.2),"(b)",expression(italic(p==0.2)))
fig_191(cosine_taper(64,0.5),"(c)",expression(italic(p==0.5)))
fig_191(hanning_taper(64),"(d)","Hanning")
### Figure 191, right-hand column
fig_191(slepian_taper(64,1),"(e)",expression(italic(NW==1)),v_line=1/64)
fig_191(slepian_taper(64,2),"(f)",expression(italic(NW==2)),v_line=1/32)
fig_191(slepian_taper(64,4),"(g)",expression(italic(NW==4)),v_line=1/16)
fig_191(slepian_taper(64,8),"(h)",expression(italic(NW==8)),v_line=1/8)
### Figure 193 ###
fig_193 <- function(the_taper,tag,coeffs=ar4_coeffs,innov_var=ar4_innov_var)
{
ev_dse <- ev_lag_window_sdf_estimator(ar_coeffs_to_acvs(coeffs,length(the_taper)-1,innov_var,FALSE),the_taper,N_pad=1024)
plot(ev_dse$freqs, dB(ev_dse$sdf_ev),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main="Figure 193")
the_ar_spec <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=15,tag,pos=2)
box(bty="l")
}
### Figure 193, left-hand column
fig_193(rectangular_taper(64),expression(paste("rectangular (",italic(p==0),")",sep="")))
fig_193(cosine_taper(64,0.2),expression(italic(p==0.2)))
fig_193(cosine_taper(64,0.5),expression(italic(p==0.5)))
fig_193(hanning_taper(64),expression(paste("Hanning (",italic(p==1),")",sep="")))
### Figure 193, right-hand column
fig_193(slepian_taper(64,1),expression(italic(NW==1)))
fig_193(slepian_taper(64,2),expression(italic(NW==2)))
fig_193(slepian_taper(64,4),expression(italic(NW==4)))
fig_193(slepian_taper(64,8),expression(italic(NW==8)))
### Figure 199 ###
fig_199 <- function(pw_filter,tag,right_p=FALSE,extra_p=FALSE,ts=ar4_2,coeffs=ar4_coeffs,innov_var=ar4_innov_var)
{
pw_ts <- convolve(ts,pw_filter,type="filter")
N_pad <- 2048
pgram_pw_ts <- pgram(pw_ts,center=FALSE,pad=N_pad/length(pw_ts))
freqs <- pgram_pw_ts$freqs
squared_gain <- abs(fft(c(pw_filter,rep(0,N_pad-length(pw_filter))))[1:((N_pad/2)+1)])^2
plot(freqs,dB(if(right_p) pgram_pw_ts$sdfe/squared_gain else pgram_pw_ts$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="spectra (dB)",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 199",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=N_pad)$sdf
lines(freqs,dB(if(right_p) the_ar_spec else the_ar_spec * squared_gain))
if(extra_p)
{
N <- length(ts)
L <- length(pw_filter)
ar_acvs <- ar_coeffs_to_acvs(coeffs,N+2*L,innov_var,FALSE)
pre_acvs <- rep(0,N-L+1)
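## ACVS of the prewhitened series: sum over k and l of h_k * h_l * s(tau+k-l),
## where h is the prewhitening filter and s is the AR process ACVS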
for(tau in 0:(N-L))
for(k in 1:L)
for(l in 1:L)
{
pre_acvs[tau+1] <- pre_acvs[tau+1] + pw_filter[k]*pw_filter[l]*ar_acvs[abs(tau+k-l)+1]
}
temp <- ev_lag_window_sdf_estimator(pre_acvs,rep(1/sqrt(N-L+1),N-L+1),N_pad=N_pad)
pc <- abs(fft(c(pw_filter,rep(0,N_pad-L)))[1:((N_pad/2)+1)])^2
lines(0.25+temp$freqs[1:410],dB(temp$sdf_ev[1:410]/pc[1:410]),lwd=0.25)
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=15,tag,pos=2)
box(bty="l")
}
LD_ar4 <- step_down_LD_recursions(ar4_coeffs,ar4_innov_var,FALSE)
### Figure 199, top row
fig_199(c(1,-ar4_coeffs),"(a)")
fig_199(c(1,-ar4_coeffs),"(b)",right_p=TRUE)
### Figure 199, 2nd row
fig_199(c(1,-0.99),"(c)")
fig_199(c(1,-0.99),"(d)",right_p=TRUE)
### Figure 199, 3rd row
fig_199(c(1,-LD_ar4$coeffs[[2]]),"(e)")
fig_199(c(1,-LD_ar4$coeffs[[2]]),"(f)",right_p=TRUE,extra_p=TRUE)
### Figure 199, bottom row
fig_199(c(1,-1.3,0.8),"(g)")
fig_199(c(1,-1.3,0.8),"(h)",right_p=TRUE,extra_p=TRUE)
### Figure 200 ###
fig_200 <- function(pwf_1,pwf_3,pwf_4)
{
N_pad <- 2048
squared_gain <- function(filter) abs(fft(c(filter,rep(0,N_pad-length(filter))))[1:((N_pad/2)+1)])^2
freqs <- seq(0.0,0.5,1/N_pad)
plot(freqs,dB(squared_gain(pwf_1)),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-50,30),yaxs="i",ylab="squared gain function (dB)",
typ="l",axes=FALSE,
main="Figure 200")
lines(freqs,dB(squared_gain(pwf_3)),lwd=0.25)
lines(freqs,dB(squared_gain(pwf_4)),lty="dotted")
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,30,10),label=FALSE,tcl=-0.25)
box(bty="l")
}
LD_ar4 <- step_down_LD_recursions(ar4_coeffs,ar4_innov_var,FALSE)
### Figure 200
fig_200(c(1,-ar4_coeffs),c(1,-LD_ar4$coeffs[[2]]),c(1,-1.3,0.8))
### Figure 206 ###
fig_206 <- function(the_taper,B_H_multiplier,tag,N_pad=8192)
{
N_pad_half <- N_pad/2
freqs <- (-(N_pad_half-1):N_pad_half)/N_pad
N <- length(the_taper)
temp <- abs(fft(c(the_taper,rep(0,N_pad-N))))
H_abs <- c(temp[(N_pad_half+2):N_pad],temp[1:(N_pad_half+1)])
B_H_taper <- B_H(the_taper)
i <- round(N_pad*(1-B_H_taper*B_H_multiplier))
H_abs_shifted <- c(H_abs[i:N_pad],H_abs[1:(i-1)])
for_xlim <- 1/8 + 1/64
plot(freqs,H_abs_shifted,
xlim=(1/8 + 1/64)*c(-1,1),xlab=expression(italic(v)),
ylim=c(0,30),ylab=" ",
typ="l",axes=FALSE,
main=paste("Figure 206",tag,sep=""))
lines(freqs,H_abs,lwd=0.5)
lines(freqs,H_abs*H_abs_shifted,col="gray",lwd=2)
abline(v=0,lty="dotted")
abline(v=B_H_taper,lty="dotted")
axis(1,at=seq(-1/8,1/8,1/8),labels=c("-1/8","0","1/8"))
axis(1,at=seq(-1/2,1/2,1/64),labels=FALSE,tcl=-0.25)
axis(2,at=seq(0,30,10),las=2)
text(1/8,28,tag,pos=2)
box(bty="l")
}
### Figure 206, first row, left to right
fig_206(slepian_taper(64,2),0.5,"(a)")
fig_206(slepian_taper(64,2),1,"(b)")
fig_206(slepian_taper(64,2),2,"(c)")
### Figure 206, second row, left to right
fig_206(slepian_taper(64,4),0.5,"(d)")
fig_206(slepian_taper(64,4),1,"(e)")
fig_206(slepian_taper(64,4),2,"(f)")
### Figure 207 ###
fig_207 <- function(ts,tag_1,tag_2)
{
N <- length(ts)
the_pgram <- pgram(ts,center=FALSE,pad=2^(11-round(log2(N))))
plot(the_pgram$freqs,dB(the_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-40,20),yaxs="i",ylab="periodogram (dB)",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 207",tag_1,sep=""))
abline(h=0)
x_cc <- 7/16
y_cc <- -30
lines(c(x_cc,x_cc),y_cc+c(dB(2/qchisq(0.975,2)),dB(2/qchisq(0.025,2))),lwd=0.5)
lines(x_cc+c(-0.5,0.5)*the_pgram$cc$width,c(y_cc,y_cc),lwd=0.5)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,1/N),label=FALSE,tcl=-0.25)
axis(2,at=seq(-40,20,20),las=2)
axis(2,at=seq(-40,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=18,tag_1,pos=2)
text(x=0.25,y=-35,tag_2,pos=1)
box(bty="l")
}
set.seed(42)
ts_128 <- rnorm(128)
### Figure 207, first row, left to right
fig_207(ts_128[1:16],"(a)",expression(N==16))
fig_207(ts_128[1:32],"(b)",expression(N==32))
### Figure 207, second row, left to right
fig_207(ts_128[1:64],"(c)",expression(N==64))
fig_207(ts_128,"(d)",expression(N==128))
### Figure 208 ###
fig_208 <- function(taper,tag_1,tag_2,ts=ar2_1[1:128],coeffs=ar2_coeffs,innov_var=ar2_innov_var)
{
N <- length(ts)
the_dse <- direct_sdf_est(ts,taper,center=FALSE,pad=2^(11-round(log2(N))))
plot(the_dse$freqs,dB(the_dse$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-40,20),yaxs="i",ylab="AR(2) spectra (dB)",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 208",tag_1,sep=""))
ar_sdf <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=2048)
lines(ar_sdf$freqs,dB(ar_sdf$sdf))
x_cc <- 0.4
y_cc <- -30
lines(c(x_cc,x_cc),y_cc+c(dB(2/qchisq(0.975,2)),dB(2/qchisq(0.025,2))),lwd=0.5)
lines(x_cc+c(-0.5,0.5)*the_dse$cc$width,c(y_cc,y_cc),lwd=0.5)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-40,20,20),las=2)
axis(2,at=seq(-40,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=18,tag_1,pos=2)
text(x=0.04,y=-36,tag_2,pos=4)
box(bty="l")
}
### Figure 208, first row, left to right
fig_208(default_taper(128),"(a)","periodogram")
fig_208(slepian_taper(128,2),"(b)",expression(italic(NW==2)))
### Figure 208, second row, left to right
fig_208(slepian_taper(128,4),"(c)",expression(italic(NW==4)))
fig_208(slepian_taper(128,8),"(d)",expression(italic(NW==8)))
### Figure 210a ###
fig_210a <- function(right_p=FALSE)
{
if(!right_p)
{
xs <- seq(0,10,0.01)
ys <- exp(-xs/2)/2
plot(xs,ys,
xlim=c(0,10),xaxs="i",xlab=expression(italic(u)),
ylim=c(0,0.5),yaxs="i",ylab="PDF",
typ="l",lwd=0.5,axes=FALSE,
main="Figure 210a(a)")
xs_inner <- seq(5.9915,10,0.01)
ys_inner <- exp(-xs_inner/2)/2
polygon(c(5.9915,xs_inner,10),c(0,ys_inner,0),col="gray",border=NA)
abline(v=2,lty="dotted")
axis(1,at=seq(0,10,5))
axis(1,at=seq(0,10,1),label=FALSE,tcl=-0.25)
axis(2,at=seq(0,0.5,0.5),las=2)
axis(2,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
text(9.85,0.44,"(a)",pos=2)
}
else
{
xs <- seq(-20,20,0.04)
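## PDF of V = 10*log10(U) for U chi-squared with 2 degrees of freedom, obtained
## by a change of variables from the density exp(-u/2)/2 used in panel (a)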
pdf_log_chi2 <- function(x)
{
temp <- 10^(x/10)
return(log(10) * temp * exp(-temp/2)/20)
}
ys <- pdf_log_chi2(xs)
plot(xs,ys,
xlim=c(-20,20),xaxs="i",xlab=expression(paste(italic(v)," (dB)")),
ylim=c(0,0.1),yaxs="i",ylab="PDF",
typ="l",lwd=0.5,axes=FALSE,
main="Figure 210a(b)")
xs_inner <- seq(-20,-9.8891,0.04)
ys_inner <- pdf_log_chi2(xs_inner)
polygon(c(-20,xs_inner,-9.8891),c(0,ys_inner,0),col="gray",border=NA)
abline(v=dB(2/exp(-digamma(1))),lty="dotted")
axis(1,at=seq(-20,20,10))
axis(2,at=seq(0,0.1,0.1),las=2)
axis(2,at=seq(0,0.1,0.01),label=FALSE,tcl=-0.25)
text(19.4,0.088,"(b)",pos=2)
}
box(bty="l")
}
### Figure 210a, left-hand plot
fig_210a()
### Figure 210a, right-hand plot
fig_210a(right_p=TRUE)
### Figure 210b ###
fig_210b <- function(ts,right_p=FALSE)
{
trans <- if(right_p) dB else function(x) x
N <- length(ts)
the_pgram <- pgram(ts,center=FALSE)
plot(the_pgram$freqs[-c(1,65)],trans(the_pgram$sdfe[-c(1,65)]),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=if(right_p) c(-40,20) else c(0,10),yaxs="i",ylab=paste("periodogram",if(right_p) " (dB)" else NULL,sep=""),
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 210b",if(right_p) "(b)" else "(a)",sep=""))
abline(h=trans(2))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(right_p) seq(-40,20,20) else seq(0,10,5),las=2)
axis(2,at=if(right_p) seq(-40,20,10) else seq(0,10,1),label=FALSE,tcl=-0.25)
text(x=0.5,y=if(right_p) 14 else 9,if(right_p) "(b)" else "(a)",pos=2)
box(bty="l")
}
set.seed(4)
ts_128 <- rnorm(128)*sqrt(2)
### Figure 210b, left-hand plot
fig_210b(ts_128)
### Figure 210b, right-hand plot
fig_210b(ts_128,right_p=TRUE)
### Figure 212 ###
fig_212 <- function(N=64,N_pad=2048)
{
taper_1 <- slepian_taper(N,1)
taper_2 <- slepian_taper(N,2)
taper_4 <- slepian_taper(N,4)
taper_8 <- slepian_taper(N,8)
R_1 <- abs(fft(c(taper_1^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
R_2 <- abs(fft(c(taper_2^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
R_4 <- abs(fft(c(taper_4^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
R_8 <- abs(fft(c(taper_8^2,rep(0,N_pad-N)))[1:((N_pad/8)+1)])^2
freqs <- (0:(N_pad/8))/N_pad
plot(freqs,R_1,
xlim=c(0,0.13),xaxs="i",xlab=expression(paste(eta," (frequency lag)")),
ylim=c(0,1),yaxs="i",ylab="correlation",
typ="l",axes=FALSE,
main="Figure 212")
lines(freqs,R_2,lty="longdash")
lines(freqs,R_4,lty="dashed")
lines(freqs,R_8,lty="dotted")
abline(v=c(1/64,1/32,1/16,1/8),
lty=c("solid","longdash","dashed","dotted"))
lines(c(sum(taper_1^4),0),c(0.6,0.6))
lines(c(sum(taper_2^4),0),c(0.5,0.5),lty="longdash")
lines(c(sum(taper_4^4),0),c(0.4,0.4),lty="dashed")
lines(c(sum(taper_8^4),0),c(0.3,0.3),lty="dotted")
axis(1,at=c(0,1/64,1/32,1/16,1/8),labels=c("0","1/64","1/32","1/16","1/8"))
axis(1, at=seq(0,1/8,1/64), labels=FALSE, tcl=-0.25)
axis(1, at=c(5/64), labels=c("5/64"), tcl=-0.25)
axis(2, at=seq(0,1,0.5), las=2)
axis(2, at=seq(0,1,0.1), labels=FALSE, tcl=-0.25)
box(bty="l")
}
### Figure 212
fig_212()
### Table 214 ###
N <- 64
the_tapers <- list(cosine_taper(N,0),
cosine_taper(N,0.2),
cosine_taper(N,0.5),
cosine_taper(N,1),
slepian_taper(N,1),
slepian_taper(N,2),
slepian_taper(N,4),
slepian_taper(N,8))
delta_f <- 1/N
### Table 214, first row (1.50 1.56 1.72 2.06 1.59 2.07 2.86 4.01)
round(unlist(lapply(the_tapers,B_H))/delta_f,2)
### Table 214, second row (1.00 1.11 1.35 1.93 1.40 1.99 2.81 3.97)
round(unlist(lapply(the_tapers,function(x) sum(x^4)))/delta_f,2)
### Table 214, third row (1.50 1.41 1.27 1.06 1.13 1.04 1.02 1.01)
round(unlist(lapply(the_tapers,B_H))/unlist(lapply(the_tapers,function(x) sum(x^4))),2)
### Figure 216 ###
fig_216 <- function(ts,big_y_ats=c(0,4,8),delta_y=1,inc,right_p=FALSE)
{
y_upper_lim <- big_y_ats[length(big_y_ats)]
temp <- pgram(ts,center=FALSE)
N <- length(ts)
zap_me <- c(1,if(is_even(N)) N/2+1 else NULL)
freqs <- temp$freqs[-zap_me]
the_pgram <- temp$sdfe[-zap_me]
the_cumsum <- cumsum(the_pgram)
xs <- if(right_p) freqs[-length(freqs)] else freqs
ys <- if(right_p) the_cumsum[-length(the_cumsum)]/the_cumsum[length(the_cumsum)] else the_pgram
plot(xs,ys,
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=if(right_p) c(0,1) else c(0,y_upper_lim),yaxs="i",ylab=paste(if(right_p) "cumulative" else NULL,"periodogram"),
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 216",if(right_p) "(b)" else "(a)",sep=""))
points(xs,ys)
if(right_p)
{
M <- length(the_cumsum)
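## D_0p05 below is a large-sample approximation to the 5% critical value of the
## Kolmogorov-Smirnov statistic; L_u and L_l give the corresponding upper and
## lower bounds for the normalized cumulative periodogram.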
D_0p05 <- 1.358/(sqrt(M-1) + 0.12 + 0.11/sqrt(M-1))
L_u <- function(f) D_0p05 - 1/(M-1) + N*f/(M-1)
L_l <- function(f) -D_0p05 + N*f/(M-1)
lines(c(0,0.5),c(L_u(0),L_u(0.5)))
lines(c(0,0.5),c(L_l(0),L_l(0.5)))
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(right_p) c(0,1) else big_y_ats,las=2)
if(!right_p) axis(2,at=seq(0,y_upper_lim,delta_y),label=FALSE,tcl=-0.25)
text(x=0.5,y=if(right_p) 0.1 else 0.9*y_upper_lim,if(right_p) "(b)" else "(a)",pos=2)
box(bty="l")
}
### Figure 216, left-hand plot
fig_216(ar2_1[1:32])
### Figure 216, right-hand plot
fig_216(ar2_1[1:32],right_p=TRUE)
### Figure 217 ###
fig_217 <- function(ts,coeffs,innov_var,tag)
{
the_dct_pgram <- pgram(c(ts,rev(ts)),center=FALSE)
the_dct_pgram$sdfe[1] <- the_dct_pgram$sdfe[1]/2
plot(the_dct_pgram$freqs,dB(the_dct_pgram$sdfe),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.25,col="gray40",axes=FALSE,
main=paste("Figure 217",tag,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs, innov_var, N_pad=1024)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.5,y=10,tag,pos=2)
box(bty="l")
}
### Figure 217, top row of plots
fig_217(ar4_1,ar4_coeffs,ar4_innov_var,"(e)")
fig_217(ar4_2,ar4_coeffs,ar4_innov_var,"(f)")
### Figure 217, bottom row of plots
fig_217(ar4_3,ar4_coeffs,ar4_innov_var,"(g)")
fig_217(ar4_4,ar4_coeffs,ar4_innov_var,"(h)")
### Figure 218 ###
fig_218 <- function(N,coeffs,innov_var,tag_1,tag_2,N_pad_ar=1024)
{
ar_acvs <- ar_coeffs_to_acvs(coeffs,N-1,innov_var,FALSE)
ev_pgram <- ev_lag_window_sdf_estimator(ar_acvs) #,N_pad=N_pad)
ev_DCT <- ev_DCTII(ar_acvs)
plot(ev_pgram$freqs,dB(ev_pgram$sdf),
xlim=c(0,0.5),xaxs="i",xlab=expression(italic(f)),
ylim=c(-60,20),yaxs="i",ylab="dB",
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 218",tag_2,sep=""))
the_ar_spec <- ar_coeffs_to_sdf(coeffs,innov_var,N_pad=N_pad_ar)
lines(the_ar_spec$freqs,dB(the_ar_spec$sdf))
lines(ev_DCT$freqs,dB(ev_DCT$sdf),lwd=2.0)
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-60,20,20),las=2)
axis(2,at=seq(-60,20,10),label=FALSE,tcl=-0.25)
text(x=0.25,y=-50,tag_1,pos=1)
text(x=0.5,y=10,tag_2,pos=2)
box(bty="l")
}
### Figure 218, top row of plots
fig_218(16,ar4_coeffs,ar4_innov_var,expression(italic(N==16)),"(a)")
fig_218(64,ar4_coeffs,ar4_innov_var,expression(italic(N==64)),"(b)")
### Figure 218, bottom row of plots
fig_218(256,ar4_coeffs,ar4_innov_var,expression(italic(N==256)),"(c)")
fig_218(1024,ar4_coeffs,ar4_innov_var,expression(italic(N==1024)),"(d)")
### Figures 223 and 224b ###
fig_223 <- function(ys,y_ats,x_lab,main="Figure 223")
{
N <- length(ys)
plot(0:(N-1),Re(ys),
xlim=c(0,N),xlab=x_lab,
ylim=c(y_ats[1],tail(y_ats,1)),ylab=" ",
typ="n",axes=FALSE,
main=main)
if(!(sum(Im(ys)) == 0))
{
lines(0:(N-1), Im(ys), lwd=0.5, col="gray40")
points(0:(N-1), Im(ys), pch=16, cex=0.5, col="gray40")
}
lines(0:(N-1), Re(ys), col="black")
points(0:(N-1), Re(ys), pch=16, cex=0.5)
axis(1,at=c(0,N/2,N))
axis(2,at=y_ats,las=2)
box(bty="l")
}
### Figure 223, top row, left-hand plot
(N <- length(earth_20)) # 20
(M <- next_power_of_2(2*N-1)) # 64
M-N # 44
tXt <- c(earth_20-mean(earth_20),rep(0,M-N))
fig_223(tXt,seq(-6,6,6),expression(italic(t)))
### Figure 223, top row, right-hand plot
tXt_dft <- dft(tXt)
fig_223(tXt_dft,seq(-32.5,32.5,32.5),expression(italic(k)))
### Figure 223, 2nd row, left-hand plot
tht <- c(hanning_taper(N),rep(0,M-N))
fig_223(tht,c(0,1),expression(italic(t)))
### Figure 223, 2nd row, right-hand plot
tht_dft <- dft(tht)
fig_223(tht_dft,seq(-4,4,4),expression(italic(k)))
### Figure 223, 3rd row, left-hand plot
thttXt <- tht*tXt
fig_223(thttXt,seq(-2,2,2),expression(italic(t)))
### Figure 223, 3rd row, right-hand plot
thttXt_dft <- dft(thttXt)
fig_223(thttXt_dft,seq(-6,6,6),expression(italic(k)))
### Figure 223, 4th row, left-hand plot
tSdk <- abs(thttXt_dft)^2
tsdtau <- Re(inverse_dft(tSdk))
fig_223(tsdtau,seq(-8,8,8),expression(tau))
### Figure 223, 4th row, right-hand plot
fig_223(tSdk,seq(0,60,30),expression(italic(k)))
### Figure 224b, top row, left-hand plot
fig_223(tXt,seq(-6,6,6),expression(italic(t)),main="Figure 224b")
### Figure 224b, top row, right-hand plot
fig_223(tXt_dft,seq(-32.5,32.5,32.5),expression(italic(k)),main="Figure 224b")
### Figure 224b, 2nd row, left-hand plot
tSpk <- abs(tXt_dft)^2/N
tsptau <- Re(inverse_dft(tSpk))
fig_223(tsptau,seq(-8,8,8),expression(tau),main="Figure 224b")
### Figure 224b, 2nd row, right-hand plot
fig_223(tSpk,seq(0,60,30),expression(italic(k)),main="Figure 224b")
### Figure 225 ###
fig_225 <- function(ts,delta_t=1/4)
{
N <- length(ts)
plot((0:(N-1))*delta_t,ts,
xlim=c(0,N*delta_t),xlab="time (sec)",
ylim=c(-1200,1700),ylab="relative height",
typ="l",axes=FALSE,
main="Figure 225")
axis(1,at=seq(0,256,64))
axis(2,at=seq(-1000,1000,1000),las=2)
axis(2,at=seq(-1000,1500,500),label=FALSE,tcl=-0.25)
box(bty="l")
}
### Figure 225
fig_225(ocean_wave)
### Figures 226 and 227 ###
fig_226 <- function(ts,taper,tag_1,tag_2,pad=1,h_line=0,v_line_p=FALSE,delta_t=1/4,main="Figure 226")
{
dse <- direct_sdf_est(ts,taper,center=TRUE,delta_t=delta_t,pad=pad)
plot(dse$freqs,dB(dse$sdfe),
xlim=c(0,2.0),xaxs="i",xlab=expression(paste(italic(f)," (Hz)")),
ylim=c(-40,80),yaxs="i",ylab="dB",
typ="l",axes=FALSE,
main=paste(main,tag_2,sep=""))
cc <- dse$cc
x_cc <- 0.16
y_cc <- 15
lines(c(x_cc,x_cc),y_cc+c(cc$up,-cc$down),lwd=0.5)
lines(x_cc+c(-cc$width/2,cc$width/2),c(y_cc,y_cc),lwd=0.5)
if(v_line_p) lines(c(0.16,0.16),c(68.5,80),lwd=0.5)
abline(h=h_line,lty="dashed",lwd=0.5)
axis(1,at=seq(0,2.0,0.5))
axis(1,at=seq(0,2.0,0.1),label=FALSE,tcl=-0.25)
axis(2,at=seq(-40,80,20),las=2)
axis(2,at=seq(-40,80,10),label=FALSE,tcl=-0.25)
text(1.0,80,tag_1,pos=1)
text(1.9,80,tag_2,pos=1)
box(bty="l")
}
### Figure 226, top plot
fig_226(ocean_wave,default_taper(1024),"periodogram","(a)",v_line_p=TRUE)
### Figure 226, 2nd plot
fig_226(ocean_wave,slepian_taper(1024,1),expression(paste("Slepian, ", italic(NW==1/Delta[t]))),"(b)")
### Figure 226, 3rd plot
fig_226(ocean_wave,slepian_taper(1024,2),expression(paste("Slepian, ", italic(NW==2/Delta[t]))),"(c)")
### Figure 226, bottom plot
fig_226(ocean_wave,slepian_taper(1024,4),expression(paste("Slepian, ", italic(NW==4/Delta[t]))),"(d)")
### Figure 227
fig_226(ocean_wave,default_taper(1024),"periodogram",NULL,pad=2,h_line=NULL,main="Figure 227")
### Figure 228 ###
fig_228 <- function(ts,delta_t=0.001)
{
N <- length(ts)
plot((0:(N-1))*delta_t,ts,
,xlab="time (sec)",
ylab="speed",
typ="l",axes=FALSE,
main="Figure 228")
axis(1,at=seq(0,2.0,0.5))
axis(2,at=seq(5,15,5),las=2)
axis(2,at=seq(0,20,1),label=FALSE,tcl=-0.25)
box(bty="l")
}
### Figure 228
fig_228(chaotic_beam)
### Figure 229 ###
fig_229 <- function(ts,taper,tag_1,tag_2,delta_t=0.001)
{
dse <- direct_sdf_est(ts,taper,center=TRUE,delta_t=delta_t)
plot(dse$freqs,dB(dse$sdfe),
xlim=c(0,500),xaxs="i",xlab=expression(paste(italic(f)," (Hz)")),
ylim=c(-120,0),yaxs="i",ylab="dB",
typ="l",axes=FALSE,
main=paste("Figure 229",tag_2,sep=""))
cc <- dse$cc
x_cc <- 425
y_cc <- -30
lines(c(x_cc,x_cc),y_cc+c(cc$up,-cc$down),lwd=0.5)
lines(x_cc+c(-cc$width/2,cc$width/2),c(y_cc,y_cc),lwd=0.5)
abline(h=-86,lty="dashed",lwd=0.5)
axis(1,at=seq(0,500,100))
axis(1,at=seq(0,500,10),label=FALSE,tcl=-0.25)
axis(2,at=seq(-120,0,20),las=2)
axis(2,at=seq(-120,0,10),label=FALSE,tcl=-0.25)
text(250,0,tag_1,pos=1)
text(475,0,tag_2,pos=1)
box(bty="l")
}
### Figure 229, top plot
fig_229(chaotic_beam,default_taper(2048),"periodogram","(a)")
### Figure 229, 2nd plot
fig_229(chaotic_beam,slepian_taper(2048,1),expression(paste("Slepian, ", italic(NW==1/Delta[t]))),"(b)")
### Figure 229, 3rd plot
fig_229(chaotic_beam,slepian_taper(2048,2),expression(paste("Slepian, ", italic(NW==2/Delta[t]))),"(c)")
### Figure 229, bottom plot
fig_229(chaotic_beam,hanning_taper(2048),"Hanning (100% cosine)","(d)")
### Figure 231 ###
fig_231 <- function(ts,right_p=FALSE)
{
temp <- pgram(ts,center=TRUE)
N <- length(ts)
zap_me <- c(1,N/2+2)
freqs <- temp$freqs[-zap_me]
the_pgram <- temp$sdfe[-zap_me]
the_cumsum <- cumsum(the_pgram)
xs <- if(right_p) freqs[-length(freqs)] else freqs
ys <- if(right_p) the_cumsum[-length(the_cumsum)]/the_cumsum[length(the_cumsum)] else the_pgram
plot(xs,ys,
xlim=c(0,0.5),xaxs="i",xlab=expression(paste(italic(f)," (Hz)")),
ylim=if(right_p) c(0,1) else c(0,36),yaxs="i",ylab=paste(if(right_p) "cumulative" else NULL,"periodogram"),
typ="l",lwd=0.5,axes=FALSE,
main=paste("Figure 231",if(right_p) "(b)" else "(a)",sep=""))
if(right_p)
{
M <- length(the_cumsum)
D_0p05 <- 1.358/(sqrt(M-1) + 0.12 + 0.11/sqrt(M-1))
D_0p1 <- 1.224/(sqrt(M-1) + 0.12 + 0.11/sqrt(M-1))
L_u <- function(f,D) D - 1/(M-1) + N*f/(M-1)
L_l <- function(f,D) -D + N*f/(M-1)
lines(c(0,0.5),c(L_u(0,D_0p05),L_u(0.5,D_0p05)))
lines(c(0,0.5),c(L_l(0,D_0p05),L_l(0.5,D_0p05)))
lines(c(0,0.5),c(L_u(0,D_0p1),L_u(0.5,D_0p1)),lty="dashed")
lines(c(0,0.5),c(L_l(0,D_0p1),L_l(0.5,D_0p1)),lty="dashed")
}
axis(1,at=seq(0,0.5,0.5))
axis(1,at=seq(0,0.5,0.1),label=FALSE,tcl=-0.25)
axis(2,at=if(right_p) c(0,1) else c(0,18,36),las=2)
if(!right_p) axis(2,at=seq(0,36,3),label=FALSE,tcl=-0.25)
text(x=0.5,y=if(right_p) 0.1 else 32.4,if(right_p) "(b)" else "(a)",pos=2)
box(bty="l")
}
### Figure 231, left-hand plot
fig_231(ocean_noise)
### Figure 231, right-hand plot
fig_231(ocean_noise,right_p=TRUE)
### NOTE: code to recreate Figure 239 is not provided - to do so
### would reveal the solution to Exercise [6.11]!
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{app}
\alias{app}
\title{Launch interactive weather analysis app}
\usage{
app(...)
}
\arguments{
\item{\dots}{Arguments passed to \code{\link[shiny:runApp]{shiny::runApp()}}}
}
\description{
Launch interactive analysis of weather period comparison
for different RDWD stations.
The R session is blocked during usage; close the app to re-enable
console usage.
}
\examples{
# app()
}
\seealso{
\code{\link[shiny:runApp]{shiny::runApp()}}, \link{rdwd}
}
\author{
Berry Boessenkool, \email{berry-b@gmx.de}, July 2018 + April 2023
}
\keyword{iplot}
/man/app.Rd
#' Plot of the number of COVID-19 cases in Brazil, using the Ministry of Health data
#'
#' This function plots the growth in the number of cases in Brazil over time. There are two plot types; see the `tipo` argument for details.
#'
#' @param df Data frame containing the result of a `get_corona_minsaude()` query
#' @param log Logical. Whether to keep a log scale on the y axis. Default log = TRUE. Used only when `tipo = "numero"`
#' @param tipo Character. Default `tipo = "numero"` plots the number of cases over time. Use `tipo = "aumento"` to plot the daily increase in the number of cases
#'
#' @export
#'
#' @importFrom rlang .data
#' @importFrom plyr count
#'
plot_corona_minsaude <- function(df,
log = TRUE,
tipo = "numero") {
# define data_max so that only complete updates are plotted
datas <- plyr::count(df$date[df$casosAcumulados > 0 & !is.na(df$estado)])
datas$lag <- datas$freq - dplyr::lag(datas$freq)
if (datas$lag[which.max(datas$x)] < 0) {
data_max <- max(datas$x, na.rm = TRUE) - 1
} else {
data_max <- max(datas$x, na.rm = TRUE)
}
# axis names
xlab <- "Data"
ylab <- "Casos confirmados"
legenda <- "fonte: https://covid.saude.gov.br"
df <- df %>%
dplyr::group_by(., .data$date) %>%
dplyr::summarise_at(dplyr::vars(.data$casosAcumulados, .data$obitosAcumulados),
.funs = sum, na.rm = TRUE) %>%
dplyr::filter(., .data$date <= data_max)
# tipo = numero
if (tipo == "numero") {
if (log == TRUE) {
df <- df %>% dplyr::mutate(casosAcumulados = log(.data$casosAcumulados))
ylab <- paste(ylab, "(log)")
}
p <- ggplot2::ggplot(df, ggplot2::aes(x = .data$date,
y = .data$casosAcumulados,
color = "red")) +
ggplot2::geom_line(alpha = .7) +
ggplot2::geom_point(size = 2) +
ggplot2::labs(x = xlab,
y = ylab,
title = "Casos confirmados de COVID-19 no Brasil",
caption = legenda) +
ggplot2::scale_x_date(date_breaks = "1 day",
date_labels = "%d/%m") +
ggplot2::theme_minimal() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90),
legend.position = "none")
}
if (tipo == "aumento") {
df$delta_cases <- df$casosAcumulados - dplyr::lag(df$casosAcumulados)
# O.o there are negative values! for now they are set to 0; does not match the Ministry of Health totals
df$delta_cases <- ifelse(df$delta_cases < 0 , 0, df$delta_cases)
#df$diff_perc <- round(df$delta_cases/df$confirmed, 3) * 100
#df$label <- paste(df$delta_cases, "%")
p <- ggplot2::ggplot(df, ggplot2::aes(x = .data$date,
y = .data$delta_cases,
color = "red")) +
#ggplot2::geom_bar(stat = "identity", alpha = .7, color = "red", fill = "red")
ggplot2::geom_line(alpha = .7) +
ggplot2::geom_point(size = 2) +
ggplot2::scale_x_date(date_breaks = "1 day",
date_labels = "%d/%m") +
# ggplot2::scale_y_continuous(limits = c(0, max(df$delta_cases, na.rm = TRUE) + 3),
# expand = c(0, 0)) +
# ggplot2::geom_text(ggplot2::aes(label = .data$label),
# size = 2.5,
# vjust = -0.5) +
ggplot2::labs(x = xlab,
y = "Casos novos por dia",
title = "Aumento nos casos de COVID-19 confirmados",
caption = legenda) +
ggplot2::theme_minimal() +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90),
legend.position = "none")
}
p
}
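## Usage sketch (not part of the original file): assumes get_corona_minsaude()
## from this package returns the Ministry of Health table described above.
# dados <- get_corona_minsaude()
# plot_corona_minsaude(dados, tipo = "numero")              # cumulative cases, log scale
# plot_corona_minsaude(dados, log = FALSE, tipo = "numero") # cumulative cases, linear scale
# plot_corona_minsaude(dados, tipo = "aumento")             # daily increase in cases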
/R/plot_corona_minsaude.R
library(IsoriX)
### Name: isopalette2
### Title: Colour palettes for plotting
### Aliases: isopalette2 isopalette1
### Keywords: color datasets
### ** Examples
## A comparison of some colour palettes
par(mfrow = c(2, 3))
pie(rep(1, length(isopalette1)), col = isopalette1,
border = NA, labels = NA, clockwise = TRUE, main = "isopalette1")
pie(rep(1, length(isopalette2)), col = isopalette2,
border = NA, labels = NA, clockwise = TRUE, main = "isopalette2")
pie(rep(1, 100), col = terrain.colors(100), border = NA, labels = NA,
clockwise = TRUE, main = "terrain.colors")
pie(rep(1, 100), col = rainbow(100), border = NA, labels = NA,
clockwise = TRUE, main = "rainbow")
pie(rep(1, 100), col = topo.colors(100), border = NA, labels = NA,
clockwise = TRUE, main = "topo.colors")
pie(rep(1, 100), col = heat.colors(100), border = NA, labels = NA,
clockwise = TRUE, main = "heat.colors")
## Creating your own colour palette
MyPalette <- colorRampPalette(c("blue", "green", "red"), bias = 0.7)
par(mfrow = c(1, 1))
pie(1:100, col = MyPalette(100), border = NA, labels = NA,
clockwise = TRUE, main = "a home-made palette")
## Turning palettes into functions for use in IsoriX
Isopalette1Fn <- colorRampPalette(isopalette1, bias = 0.5)
Isopalette2Fn <- colorRampPalette(isopalette2, bias = 0.5)
par(mfrow = c(1, 2))
pie(1:100, col = Isopalette1Fn(100), border = NA, labels = NA,
clockwise = TRUE, main = "isopalette1")
pie(1:100, col = Isopalette2Fn(100), border = NA, labels = NA,
clockwise = TRUE, main = "isopalette2")
/data/genthat_extracted_code/IsoriX/examples/isopalette2.Rd.R
## Get price data
#' Get securities prices
#'
#' @param symbols a character vector of securities tickers.
#' @param ... additional arguments to pass to quantmod's \code{getSymbols()}.
#'
#' @return a tbl_df (tibble) with a column for each symbol.
#'
#' @export
#'
#' @examples
#' get_prices(c("AAPL", "IBM"), from = "2010-01-01")
get_prices <- function(symbols, ...) {
## Default start date is 2007-01-01
## To change that, add an argument: from = "YYYY-MM-DD"
## Download OHLC data
prices_env <- new.env()
suppressWarnings(
quantmod::getSymbols(symbols, env = prices_env, ...))
## Get "Adjsuted Close" price from environment with price data
## to take into account splits and dividends
close_prices <- function(sym, envir) {
out <- get(sym, envir = envir)
out <- quantmod::Ad(out)
names(out) <- sym
out
}
## Combine into an xts object with one column per symbol
x <- do.call(
xts::cbind.xts,
lapply(symbols, close_prices, envir = prices_env)
)
## Return as tbl_df
xts2tbl(na.omit(x))
}
daily_returns <- function(prices) {
x <- tbl2xts(prices)
out <- vapply(x, quantmod::dailyReturn, numeric(nrow(x)))
out <- xts::xts(out, order.by = zoo::index(x))
xts2tbl(out)
}
## !! apply function not working
# monthly_returns <- function(prices) {
#
# x <- tbl2xts(prices)
# out <- sapply(x, quantmod::monthlyReturn)
# out <- xts::xts(out, order.by = zoo::index(x))
# tbl2xts(out)
# }
cumulative_returns <- function(returns) {
date <- returns[, 1]
x <- 1 + returns[, -1]
out <- vapply(x, cumprod, numeric(nrow(x)))
out <- as.data.frame(out)
tibble::as_tibble(cbind(date, out))
}
h_weights <- function(weights, returns) {
date <- returns[, 1]
n <- ncol(returns) - 1
## Set initial values to weights then apply daily returns thereafter
tmp <- 1 + returns[, -1]
tmp[1, ] <- weights
tmp <- vapply(tmp, cumprod, numeric(nrow(tmp)))
tmp <- data.frame(tmp)
## Calculate sum across rows
row_sum <- rowSums(tmp)
## Then recalculate weights for a historical series
out <- tmp
for (i in 1:n) {
out[, i] <- out[, i] / row_sum
}
tibble::as_tibble(cbind(date, out))
}
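## Usage sketch (not part of the original file): assumes an internet connection
## and the xts2tbl()/tbl2xts() helpers defined elsewhere in this package.
# prices <- get_prices(c("AAPL", "IBM"), from = "2010-01-01")
# rets   <- daily_returns(prices)
# cum    <- cumulative_returns(rets)     # growth of 1 unit invested per symbol
# wts    <- h_weights(c(0.6, 0.4), rets) # drifting weights of a 60/40 buy-and-hold mix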
/R/prices.R
#' Show the usage of a function
#'
#' Print the reformatted usage of a function. The arguments of the function are
#' searched by \code{\link{argsAnywhere}}, so the function can be either
#' exported or non-exported in a package. S3 methods will be marked.
#' @param FUN the function name
#' @param width the width of output (passed to \code{width.cutoff} in
#' \code{\link{tidy_source}})
#' @param tidy whether to reformat the usage code
#' @param output whether to write the output to the console (via
#' \code{\link{cat}})
#' @return The R code for the usage is returned as a character string
#' (invisibly).
#' @seealso \code{\link{tidy_source}}
#' @export
#' @examples library(formatR)
#' usage(var)
#'
#' usage(plot)
#'
#' usage(plot.default) # default method
#' usage(plot.lm) # on the 'lm' class
#'
#' usage(usage)
#'
#' usage(barplot.default, width = 60) # narrower output
usage = function(FUN, width = getOption('width'), tidy = TRUE, output = TRUE) {
fn = as.character(substitute(FUN))
res = capture.output(do.call(argsAnywhere, list(fn)))
if (identical(res, 'NULL')) return()
res[1] = substring(res[1], 9) # rm 'function ' in the beginning
isS3 = FALSE
if (grepl('.', fn, fixed = TRUE)) {
n = length(parts <- strsplit(fn, '.', fixed = TRUE)[[1]])
for (i in 2:n) {
gen = paste(parts[1L:(i - 1)], collapse = ".")
cl = paste(parts[i:n], collapse = ".")
if (gen == "" || cl == "") next
if (!is.null(f <- getS3method(gen, cl, TRUE)) && !is.null(environment(f))) {
res[1] = paste(gen, res[1])
header = if (cl == 'default')
'## Default S3 method:' else sprintf("## S3 method for class '%s'", cl)
res = c(header, res)
isS3 = TRUE
break
}
}
}
if (!isS3) res[1] = paste(fn, res[1])
if ((n <- length(res)) > 1 && res[n] == 'NULL') res = res[-n] # rm last element 'NULL'
if (!tidy) {
cat(res, sep = '\n')
return(invisible(res))
}
if (width <= 1) {
warning("'width' should no longer be specified as a proportion")
width = width * getOption("width")
}
tidy.res = tidy_source(text = res, output = FALSE, width.cutoff = width)
if (output) cat(tidy.res$text.tidy, sep = '\n')
invisible(tidy.res$text.tidy)
}
/R/usage.R
## RPT for common up and common down ###
setwd("/users/clairegreen/Documents/PhD/TDP-43/TDP-43_Code/Results/GeneExpression/noMedian/")
C9 <- read.csv("C9_unique.csv")
C9 <- C9[order(C9$P.Value),]
sals <- read.csv("sals_unique.csv")
sals <- sals[order(sals$P.Value),]
ftld <- read.csv("ftld_unique.csv")
ftld <- ftld[order(ftld$P.Value),]
vcp <- read.csv("vcp_unique.csv")
vcp <- vcp[order(vcp$P.Value),]
setwd("/users/clairegreen/Documents/PhD/TDP-43/TDP-43_Code/Results/GeneExpression/TDP-43_DEseq2/")
pet <- read.csv("PET_results_keepfiltering.csv")
rav <- read.csv("RAV_results_keepfiltering.csv")
m = 100000
r <- matrix(0, m, 3)
for (i in 1:m){
# Sample "up" genes from the full gene list, with the same size as in the experiment, so the overlap is proportional.
upC9 <- sample(C9$Gene.Symbol, size = 3788)
upC9 <- as.vector(upC9)
upSALS <- sample(sals$Gene.Symbol, size = 5905)
upSALS <- as.vector(upSALS)
upFTLD <- sample(ftld$Gene.Symbol, size = 4941)
upFTLD <- as.vector(upFTLD)
upVCP <- sample(vcp$Gene.Symbol, size = 8011)
upVCP <- as.vector(upVCP)
upPET <- sample(pet$hgnc_symbol, size = 9259)
upPET <- as.vector(upPET)
upRAV <- sample(rav$hgnc_symbol, size = 8028)
upRAV <- as.vector(upRAV)
INTUP <- Reduce(intersect, list(upC9, upSALS, upFTLD, upVCP, upPET, upRAV))
r[i,1] <- length(INTUP)
#### DOWN ####
thresh <- -1
downC9 <- subset(C9, !(C9$Gene.Symbol %in% upC9))
downC9 <- downC9$Gene.Symbol
downSALS <- subset(sals, !(sals$Gene.Symbol %in% upSALS))
downSALS <- downSALS$Gene.Symbol
downFTLD <- subset(ftld, !(ftld$Gene.Symbol %in% upFTLD))
downFTLD <- downFTLD$Gene.Symbol
downVCP <- subset(vcp, !(vcp$Gene.Symbol %in% upVCP))
downVCP <- downVCP$Gene.Symbol
downPET <- subset(pet, !(pet$hgnc_symbol %in% upPET))
downPET <- downPET$hgnc_symbol
downRAV <- subset(rav, !(rav$hgnc_symbol %in% upRAV))
downRAV <- downRAV$hgnc_symbol
INTDOWN <- Reduce(intersect, list(downC9, downSALS, downFTLD, downVCP, downPET, downRAV))
r[i,2] <- length(INTDOWN)
r[i,3] <- sum(length(INTUP) + length(INTDOWN))
}
setwd("/Users/clairegreen/Documents/PhD/TDP-43/TDP-43_Code/Results/GeneExpression/FoldChangeResults")
r <- read.csv("UpDownRPT.csv")
r <- read.csv("UpDownRPT.csv")
expup <- 328
expdown <- 69
exptotal <- 397
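## The empirical p-value below counts permutations whose overlap is at least as
## large as the observed value: p = (count + 1) / (m + 1). For example, with
## m = 100000 permutations and a count of 150, p = 151 / 100001, about 0.0015;
## the "+1" keeps the estimate strictly above zero.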
testup <- which(r$V1 >= expup)
resultup <- sum((length(testup)+1))/(m+1) # calculate P value
resultup
mean <- mean(r$V1)
mean
range <- range(r$V1)
range
hist(r$V1,
xlim = range(50:expup+30),
main = NULL,
xlab = "Number of Common Upregulated DEGs")
abline(v = expup, col = "red", lwd = 2)
testdown <- which(r$V2 >= expdown)
resultdown <- sum((length(testdown)+1))/(m+1) # calculate P value
resultdown
mean <- mean(r$V2)
mean
range <- range(r$V2)
range
hist(r$V2,
xlim = range(0:80),
main = NULL,
xlab = "Number of Common Downregulated DEGs")
abline(v = expdown, col = "red", lwd = 2)
testtotal <- which(r$V3 >= exptotal)
resulttotal <- sum((length(testtotal)+1))/(m+1) # calculate P value
resulttotal
mean <- mean(r$V3)
mean
range <- range(r$V3)
range
hist(r$V3,
xlim = range(80:exptotal+50),
main = NULL,
xlab = "Number of Common DEGs")
abline(v = exptotal, col = "red", lwd = 2)
table <- data.frame(NumOverTest = length(testtotal),
Pval = resulttotal,
mean = mean,
range = range)
/UpDownRPT.R
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ttt_qlearn.R
\name{ttt_qlearn}
\alias{ttt_qlearn}
\title{Q-Learning for Training Tic-Tac-Toe AI}
\usage{
ttt_qlearn(player, N = 1000L, epsilon = 0.1, alpha = 0.8, gamma = 0.99,
simulate = TRUE, sim_every = 250L, N_sim = 1000L, verbose = TRUE)
}
\arguments{
\item{player}{AI player to train}
\item{N}{number of episode, i.e. training games}
\item{epsilon}{fraction of random exploration move}
\item{alpha}{learning rate}
\item{gamma}{discount factor}
\item{simulate}{if true, conduct simulation during training}
\item{sim_every}{conduct simulation after this many training games}
\item{N_sim}{number of simulation games}
\item{verbose}{if true, progress report is shown}
}
\value{
\code{data.frame} of simulation outcomes, if any
}
\description{
Train a tic-tac-toe AI through Q-learning
}
\details{
This function implements Q-learning to train a tic-tac-toe AI player.
It is designed to train one AI player, which plays against itself to update its
value and policy functions.
The employed algorithm is Q-learning with epsilon greedy.
For each state \eqn{s}, the player updates its value evaluation by
\deqn{V(s) = (1-\alpha) V(s) + \alpha \gamma \max_{s'} V(s')}
if it is the first player's turn. If it is the other player's turn, replace
\eqn{max} by \eqn{min}.
Note that \eqn{s'} spans all possible states you can reach from \eqn{s}.
The policy function is also updated analogously, that is, the set of
actions to reach \eqn{s'} that maximizes \eqn{V(s')}.
The parameter \eqn{\alpha} controls the learning rate, and \eqn{\gamma} is
the discount factor (an earlier win is valued more than a later one).
Then the player chooses the next action by the \eqn{\epsilon}-greedy method:
follow its policy with probability \eqn{1-\epsilon}, and choose a random
action with probability \eqn{\epsilon}. \eqn{\epsilon} controls
the fraction of exploratory moves.
At the end of a game, the player sets the value of the final state either to
100 (if the first player wins), -100 (if the second player wins), or
0 (if draw).
This learning process is repeated for \code{N} training games.
When \code{simulate} is set true, simulation is conducted after
\code{sim_every} training games.
This would be usefule for observing the progress of training.
In general, as the AI gets smarter, the game tends to result in draw more.
See Sutton and Barto (1998) for more about the Q-learning.
}
\examples{
p <- ttt_ai()
o <- ttt_qlearn(p, N = 200)
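# Illustration of the value update described in Details (hypothetical numbers):
# with alpha = 0.8, gamma = 0.99, V(s) = 10 and a best next-state value of 100,
# the updated value is (1 - 0.8) * 10 + 0.8 * 0.99 * 100 = 81.2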
}
\references{
Sutton, Richard S and Barto, Andrew G. Reinforcement Learning: An Introduction. The MIT Press (1998)
}
/man/ttt_qlearn.Rd
# library(testthat); library(workflowHelper); library(remake)
context("short")
source("utils.R")
test_that("Short workflows without output stage can run.", {
testwd("short-ok")
sources = strings(code.R)
datasets = commands(poisson100 = poisson_dataset(n = 100))
plan_workflow(sources, datasets = datasets)
path = system.file("example", "code.R", package = "workflowHelper")
write(readLines(path), "code.R")
remake::make(verbose = F)
expect_equal(recallable(), "poisson100")
expect_equal(dim(recall("poisson100")), c(100, 2))
tmp = clean_example_workflowHelper(T)
analyses = commands(linear = linear_analysis(..dataset..))
plan_workflow(sources, datasets = datasets, analyses = analyses)
path = system.file("example", "code.R", package = "workflowHelper")
write(readLines(path), "code.R")
remake::make(verbose = F)
expect_equal(recallable(), c("poisson100", "poisson100_linear"))
expect_equal(dim(recall("poisson100")), c(100, 2))
expect_equal(class(recall("poisson100_linear")), "lm")
testrm()
})
/tests/testthat/test-short.R
subroutine tmove
implicit integer*4 (i-n)
#ccc version date: 02/04/86
#ccc author(s): Roger Clark & Jeff Hoover
#ccc language: Ratfor
#ccc
#ccc short description:
#ccc This subroutine moves the cursor to the absolute
#ccc position ix,iy on the hp graphics terminal.
#ccc algorithm description: none
#ccc system requirements: none
#ccc subroutines called:
#ccc convrt
#ccc argument list description: none
#ccc parameter description:
#ccc common description:
#ccc message files referenced:
#ccc internal variables:
#ccc file description:
#ccc user command lines:
#ccc update information:
#ccc NOTES:
#ccc
####################################################################
#
# this subroutine moves the cursor to the absolute position
# ix,iy on the hp graphics terminal.
#
# 0 <or= ix <or= 720, 0 <or= iy <or= 360
#
# out of range is not checked
#
# escape sequence: esc *d ix iy oz
####################################################################
include "../common/hptrm"
if (igrmod >= 99) return
ix= ixlast
iy=iylast
if (igrmod < 20) { # HP2623A
ihpout(1:4) = char(27) // '*d '
call convrt (ix, ihpout(5:10), nchars)
call convrt (iy, ihpout(11:16), nchars)
ihpout(17:18) = 'oZ'
iot = 18
ii = iwrite(1,iot,ihpout)
iot=0
} else if (igrmod >= 20 && igrmod <= 22) { # Tektronix Plot-10
iot=0
}
return
end
/src-local/specpr/src.specpr/hpgraph/tmove.r
#' General Interface for Multinomial Regression Models
#'
#' `multinom_reg()` is a way to generate a _specification_ of a model
#' before fitting and allows the model to be created using
#' different packages in R, keras, or Spark. The main arguments for the
#' model are:
#' \itemize{
#' \item \code{penalty}: The total amount of regularization
#' in the model. Note that this must be zero for some engines.
#' \item \code{mixture}: The mixture amounts of different types of
#' regularization (see below). Note that this will be ignored for some engines.
#' }
#' These arguments are converted to their specific names at the
#' time that the model is fit. Other options and arguments can be
#' set using `set_engine()`. If left to their defaults
#' here (`NULL`), the values are taken from the underlying model
#' functions. If parameters need to be modified, `update()` can be used
#' in lieu of recreating the object from scratch.
#' @inheritParams boost_tree
#' @param mode A single character string for the type of model.
#' The only possible value for this model is "classification".
#' @param penalty A non-negative number representing the total
#' amount of regularization (`glmnet`, `keras`, and `spark` only).
#' For `keras` models, this corresponds to purely L2 regularization
#' (aka weight decay) while the other models can be a combination
#' of L1 and L2 (depending on the value of `mixture`).
#' @param mixture A number between zero and one (inclusive) that is the
#' proportion of L1 regularization (i.e. lasso) in the model. When
#' `mixture = 1`, it is a pure lasso model while `mixture = 0` indicates that
#' ridge regression is being used. (`glmnet` and `spark` only).
#' @details
#' For `multinom_reg()`, the mode will always be "classification".
#'
#' The model can be created using the `fit()` function using the
#' following _engines_:
#' \itemize{
#' \item \pkg{R}: `"glmnet"` (the default), `"nnet"`
#' \item \pkg{Stan}: `"stan"`
#' \item \pkg{keras}: `"keras"`
#' }
#'
#' @includeRmd man/rmd/multinom-reg.Rmd details
#'
#' @note For models created using the spark engine, there are
#' several differences to consider. First, only the formula
#' interface via `fit()` is available; using `fit_xy()` will
#' generate an error. Second, the predictions will always be in a
#' spark table format. The names will be the same as documented but
#' without the dots. Third, there is no equivalent to factor
#' columns in spark tables so class predictions are returned as
#' character columns. Fourth, to retain the model object for a new
#' R session (via `save()`), the `model$fit` element of the `parsnip`
#' object should be serialized via `ml_save(object$fit)` and
#' separately saved to disk. In a new session, the object can be
#' reloaded and reattached to the `parsnip` object.
#'
#' @seealso [fit()]
#' @examples
#' show_engines("multinom_reg")
#'
#' multinom_reg()
#' # Parameters can be represented by a placeholder:
#' multinom_reg(penalty = varying())
#' @export
#' @importFrom purrr map_lgl
multinom_reg <-
function(mode = "classification",
penalty = NULL,
mixture = NULL) {
args <- list(
penalty = enquo(penalty),
mixture = enquo(mixture)
)
new_model_spec(
"multinom_reg",
args = args,
eng_args = NULL,
mode = mode,
method = NULL,
engine = NULL
)
}
#' @export
print.multinom_reg <- function(x, ...) {
cat("Multinomial Regression Model Specification (", x$mode, ")\n\n", sep = "")
model_printer(x, ...)
if (!is.null(x$method$fit$args)) {
cat("Model fit template:\n")
print(show_call(x))
}
invisible(x)
}
#' @export
translate.multinom_reg <- translate.linear_reg
# ------------------------------------------------------------------------------
#' @inheritParams update.boost_tree
#' @param object A multinomial regression model specification.
#' @examples
#' model <- multinom_reg(penalty = 10, mixture = 0.1)
#' model
#' update(model, penalty = 1)
#' update(model, penalty = 1, fresh = TRUE)
#' @method update multinom_reg
#' @rdname multinom_reg
#' @export
update.multinom_reg <-
function(object,
parameters = NULL,
penalty = NULL, mixture = NULL,
fresh = FALSE, ...) {
eng_args <- update_engine_parameters(object$eng_args, ...)
if (!is.null(parameters)) {
parameters <- check_final_param(parameters)
}
args <- list(
penalty = enquo(penalty),
mixture = enquo(mixture)
)
args <- update_main_parameters(args, parameters)
if (fresh) {
object$args <- args
object$eng_args <- eng_args
} else {
null_args <- map_lgl(args, null_value)
if (any(null_args))
args <- args[!null_args]
if (length(args) > 0)
object$args[names(args)] <- args
if (length(eng_args) > 0)
object$eng_args[names(eng_args)] <- eng_args
}
new_model_spec(
"multinom_reg",
args = object$args,
eng_args = object$eng_args,
mode = object$mode,
method = NULL,
engine = object$engine
)
}
# ------------------------------------------------------------------------------
check_args.multinom_reg <- function(object) {
args <- lapply(object$args, rlang::eval_tidy)
if (all(is.numeric(args$penalty)) && any(args$penalty < 0))
rlang::abort("The amount of regularization should be >= 0.")
if (is.numeric(args$mixture) && (args$mixture < 0 | args$mixture > 1))
rlang::abort("The mixture proportion should be within [0,1].")
invisible(object)
}
# ------------------------------------------------------------------------------
organize_multnet_class <- function(x, object) {
x[,1]
}
organize_multnet_prob <- function(x, object) {
x <- x[,,1]
as_tibble(x)
}
organize_nnet_prob <- function(x, object) {
format_classprobs(x)
}
# ------------------------------------------------------------------------------
# glmnet call stack for multinomial regression using `predict` when object has
# classes "_multnet" and "model_fit" (for class predictions):
#
# predict()
# predict._multnet(penalty = NULL) <-- checks and sets penalty
# predict.model_fit() <-- checks for extra vars in ...
# predict_class()
# predict_class._multnet()
# predict.multnet()
# glmnet call stack for multinomial regression using `multi_predict` when object has
# classes "_multnet" and "model_fit" (for class predictions):
#
# multi_predict()
# multi_predict._multnet(penalty = NULL)
# predict._multnet(multi = TRUE) <-- checks and sets penalty
# predict.model_fit() <-- checks for extra vars in ...
# predict_raw()
# predict_raw._multnet()
# predict_raw.model_fit(opts = list(s = penalty))
# predict.multnet()
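#
# A minimal usage sketch of these call paths (illustrative only; the objects
# below are hypothetical and not part of the package source):
#   spec   <- multinom_reg(penalty = 0.01) %>% set_engine("glmnet")
#   fitted <- fit(spec, Species ~ ., data = iris)
#   predict(fitted, new_data = iris[1:3, -5])                               # single penalty
#   multi_predict(fitted, new_data = iris[1:3, -5], penalty = c(0.01, 0.1)) # several penalties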
# ------------------------------------------------------------------------------
#' @export
predict._multnet <-
function(object, new_data, type = NULL, opts = list(), penalty = NULL, multi = FALSE, ...) {
# See discussion in https://github.com/tidymodels/parsnip/issues/195
if (is.null(penalty) & !is.null(object$spec$args$penalty)) {
penalty <- object$spec$args$penalty
}
object$spec$args$penalty <- check_penalty(penalty, object, multi)
object$spec <- eval_args(object$spec)
res <- predict.model_fit(
object = object,
new_data = new_data,
type = type,
opts = opts
)
res
}
#' @importFrom dplyr full_join as_tibble arrange
#' @importFrom tidyr gather
#' @export
#' @rdname multi_predict
multi_predict._multnet <-
function(object, new_data, type = NULL, penalty = NULL, ...) {
if (any(names(enquos(...)) == "newdata"))
rlang::abort("Did you mean to use `new_data` instead of `newdata`?")
if (is_quosure(penalty))
penalty <- eval_tidy(penalty)
dots <- list(...)
if (is.null(penalty)) {
# See discussion in https://github.com/tidymodels/parsnip/issues/195
if (!is.null(object$spec$args$penalty)) {
penalty <- object$spec$args$penalty
} else {
penalty <- object$fit$lambda
}
}
dots$s <- penalty
if (is.null(type))
type <- "class"
if (!(type %in% c("class", "prob", "link", "raw"))) {
rlang::abort("`type` should be either 'class', 'link', 'raw', or 'prob'.")
}
if (type == "prob")
dots$type <- "response"
else
dots$type <- type
object$spec <- eval_args(object$spec)
pred <- predict.model_fit(object, new_data = new_data, type = "raw", opts = dots)
format_probs <- function(x) {
x <- as_tibble(x)
names(x) <- paste0(".pred_", names(x))
nms <- names(x)
x$.row <- 1:nrow(x)
x[, c(".row", nms)]
}
if (type == "prob") {
pred <- apply(pred, 3, format_probs)
names(pred) <- NULL
pred <- map_dfr(pred, function(x) x)
pred$penalty <- rep(penalty, each = nrow(new_data))
} else {
pred <-
tibble(
.row = rep(1:nrow(new_data), length(penalty)),
.pred_class = factor(as.vector(pred), levels = object$lvl),
penalty = rep(penalty, each = nrow(new_data))
)
}
pred <- arrange(pred, .row, penalty)
.row <- pred$.row
pred$.row <- NULL
pred <- split(pred, .row)
names(pred) <- NULL
tibble(.pred = pred)
}
#' @export
predict_class._multnet <- function(object, new_data, ...) {
object$spec <- eval_args(object$spec)
predict_class.model_fit(object, new_data = new_data, ...)
}
#' @export
predict_classprob._multnet <- function(object, new_data, ...) {
object$spec <- eval_args(object$spec)
predict_classprob.model_fit(object, new_data = new_data, ...)
}
#' @export
predict_raw._multnet <- function(object, new_data, opts = list(), ...) {
object$spec <- eval_args(object$spec)
predict_raw.model_fit(object, new_data = new_data, opts = opts, ...)
}
# ------------------------------------------------------------------------------
# This checks as a pre-processor in the model data object
check_glmnet_lambda <- function(dat, object) {
if (length(object$fit$lambda) > 1)
rlang::abort(
glue::glue(
"`predict()` doesn't work with multiple penalties (i.e. lambdas). ",
"Please specify a single value using `penalty = some_value` or use ",
"`multi_predict()` to get multiple predictions per row of data."
)
)
dat
}
|
/R/multinom_reg.R
|
no_license
|
kwiscion/parsnip
|
R
| false | false | 10,469 |
r
|
#Download Data Files:
#spamdata.csv:
#spamnames.csv:
#Load the two files into R:
spamdata<- read.csv("spamdata.csv",header=FALSE,sep=";")
spamnames<- read.csv("spamnames.csv",header=FALSE,sep=";")
#Set the names of the dataset dataframe:
names(spamdata) <- sapply((1:nrow(spamnames)),function(i) toString(spamnames[i,1]))
#make column y a factor variable for binary classification (spam or non-spam)
spamdata$y <- factor(spamdata$y)
#get a sample of 1000 rows
sample <- spamdata[sample(nrow(spamdata), 1000),]
#Set up the packages:
install.packages("caret", dependencies = c("Depends", "Suggests"))
require(caret)
install.packages("kernlab", dependencies = c("Depends", "Suggests"))
require(kernlab)
install.packages("doMC", dependencies = c("Depends", "Suggests"))
require(doParallel)
#Split the data in trainData and testData
trainIndex <- createDataPartition(sample$y, p = .8, list = FALSE, times = 1)
trainData <- sample[ trainIndex,]
testData <- sample[-trainIndex,]
#set up multicore environment
registerDoParallel(cores=5)
#Create the SVM model:
### finding optimal value of a tuning parameter
sigDist <- sigest(y ~ ., data = trainData, frac = 1)
### create a grid over the two tuning parameters; .sigma comes from sigest() above and we search over the cost .C
svmTuneGrid <- data.frame(.sigma = sigDist[1], .C = 2^(-2:7))
x <- train(y ~ .,
data = trainData,
method = "",
preProc = c("center", "scale"),
tuneGrid = svmTuneGrid,
trControl = trainControl(method = "repeatedcv", repeats = 5, classProbs = FALSE))
#Evaluate the model
predict_spam <- predict(x,testData[,1:57])
acc <- confusionMatrix(predict_spam, testData$y)
write.csv(predict_spam, file = "Result.csv")
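#Optional check added for illustration: confusionMatrix() stores overall metrics,
#so accuracy can be printed directly (assumes 'acc' was computed above)
print(acc$overall["Accuracy"])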
|
/Spam.R
|
no_license
|
nil68657/SPAM-Filtering-using-R
|
R
| false | false | 1,830 |
r
|
library(plsdepot)
### Name: carsmissing
### Title: carsmissing data set
### Aliases: carsmissing
### Keywords: datasets
### ** Examples
data(carsmissing)
head(carsmissing)
|
/data/genthat_extracted_code/plsdepot/examples/carsmissing.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 179 |
r
|
library(tidyr)
library(ez)
library(ggplot2)
library(dplyr)
rm(list=ls())
#The eventual structure that the data will take after being run through the code
formatedData <- data.frame("SID" = double(),"Normal" = double(), "GCV" = double(), "imageType" = character(),"Group" = character())
#CHANGE REQUIRED BY CURRENT USER
#The number of radiologist subjects to analyze. Add additional subject numbers as data is collected
nsubjects <- c(102,103,104,105,106,107,108,109)
#Do analysis based on these conditions
targetPresence <- c(0,1)
imagetypes <- c("Radiograph","Perspective")
dependants <- c("clickResponse","CURRENT_SAC_AMPLITUDE","trialRT","IA_FIRST_FIXATION_TIME","dprime","DesTime")
#
#
#
#Saccadic Amplitude - Creates graphs comparing the saccadic amplitude of each population to each other
#
#
#
for (pres in targetPresence){
for (s in nsubjects){
for(imageset in imagetypes){
#import data and combine behavioral data into a single data frame
#CHANGE REQUIRED BY CURRENT USER
#Edit this to where your file is stored
setwd("C:/Users/Taren/Desktop/RadData")
rawBehav1 <- read.table(paste(s,"_naive_v1.txt", sep = ""), header = TRUE)
rawBehav2 <- read.table(paste(s,"_naive_v2.txt", sep = ""), header = TRUE)
rawSacAmp <- read.table("sacamplituderadsthru8.txt", header = TRUE)
rawBehav <- rbind(rawBehav1,rawBehav2)
      #convert to double. Will result in some warnings of NAs being introduced. That's okay as long as there are only a few.
rawSacAmp$CURRENT_SAC_AMPLITUDE <- as.double(rawSacAmp$CURRENT_SAC_AMPLITUDE)
#remove NA
rawSacAmp <- rawSacAmp[!is.na(rawSacAmp$CURRENT_SAC_AMPLITUDE),]
#filter out practice trials
rawSacAmp <- filter(rawSacAmp, practice != 1)
rawBehav <- filter(rawBehav, practice != 1)
#filter to only look at one subject at a time
rawSacAmp <- filter(rawSacAmp, snumber == s)
#filter target presence
rawSacAmp <- filter(rawSacAmp, targetPresent== pres)
#filter image type to look at art and chest images separately
if (imageset == "Radiograph"){rawSacAmp <- filter(rawSacAmp, imageType == "chest")}
if (imageset == "Perspective"){rawSacAmp <- filter(rawSacAmp, imageType == "art")}
#adds each new row to a formatted data frame
toAdd <- data.frame("SID" = s,
"Normal" = mean(rawSacAmp$CURRENT_SAC_AMPLITUDE[rawSacAmp$viewType == "Normal"]),
"GCV" = mean(rawSacAmp$CURRENT_SAC_AMPLITUDE[rawSacAmp$viewType == "gazeContingent"]),
"imageType"= imageset,
"Group" = "Radiologist")
#add new row to the accumulating data frame
formatedData <- rbind(formatedData, toAdd)
#resets the adding row after each row is added
toAdd <- NULL
}
}
#
#TODO
#eventually do ANOVA here in both if statements and output ANOVA results
#
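  #A possible sketch for that TODO (untested placeholder, not the final analysis;
  #uses the ez package loaded above and assumes the 'long' data frame built further down):
  #anova_res <- ezANOVA(data = long, dv = .(value), wid = .(SID),
  #                     within = .(viewType), between = .(Group), type = 3)
  #print(anova_res)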
if (pres == 1){
    #load in past target PRESENT data from naives and architects
artdata <-read.csv("C:/Users/Taren/Desktop/pvalueArt/SacAmpPres.csv")
chestdata <-read.csv("C:/Users/Taren/Desktop/pvalueRad/SacAmpPres.csv")
outputname <- "(Target Present)"
}
if (pres == 0){
#load in target ABSENT data
artdata <-read.csv("C:/Users/Taren/Desktop/pvalueArt/SacAmpAbs.csv")
chestdata <-read.csv("C:/Users/Taren/Desktop/pvalueRad/SacAmpAbs.csv")
outputname <- "(Target Absent)"
}
#format architect and naive data to work with the new radiologist data
artdata$imageType <- "Perspective"
chestdata$imageType <- "Radiograph"
artdata$X <- NULL
chestdata$X <- NULL
#add the radiologist data to the architect and naive data
formatedData <- rbind(formatedData, artdata)
formatedData <- rbind(formatedData, chestdata)
targetPresent <- formatedData
#convert to long format
long <- pivot_longer(targetPresent, c("Normal","GCV"), names_to = "viewType")
long$condition <- paste(long$imageType , long$viewType)
long$valueTypes <- paste(long$condition, long$Group)
#calculate means and SE
graph <- aggregate(long$value ~ long$valueTypes+long$Group+long$imageType+long$viewType, FUN= mean)
error <- aggregate(long$value ~ long$valueTypes+long$Group+long$imageType+long$viewType, FUN= sd)
colnames(error) <- c("condition","Group","ImageType","ViewType","sd")
error$sd <- error$sd / sqrt(length(nsubjects))
colnames(graph) <- c("condition","Group","ImageType","ViewType","value")
graph$sterror <- error$sd
#create new variables for the purpose of graphing
graph$axis <- paste(graph$ImageType, graph$ViewType)
graph$line <- paste(graph$Group, graph$ImageType)
#create and save graph
  #WARNING: every time you run this code the graphs will be overwritten without asking
setwd("C:/Users/Taren/Desktop/Output")
filename <- paste("Avg. Saccadic Amplitude", outputname, ".pdf", sep = "")
p <- ggplot(data = graph, aes(y = value, x =axis)) +
geom_line(aes( group = line, color = Group)) + geom_point(size = 3, aes(color = Group)) +
geom_errorbar(aes(ymin = (value - sterror), ymax = (value + sterror), width = .1, color = Group)) +
ylab("Visual Angle (deg.)") +
xlab("Image Type and Viewing Condition") +
ggtitle(paste("Avg. Saccadic Amplitude", outputname))
pdf(filename)
print(p)
dev.off()
#Reset formatedData for next loop iteration
formatedData <- NULL
}
|
/ExpertiseFormater.R
|
no_license
|
TarenRohovit/ArchitectExpertise
|
R
| false | false | 5,692 |
r
|
posterior_var = function(prior_var, likelihood_var) {
prior_var * likelihood_var / (prior_var + likelihood_var)
}
posterior_var_2 = function(prior_var, likelihood_var) {
1/ (1/prior_var + 1/likelihood_var)
}
posterior_var(0.5,1)
posterior_var_2(0.5,1)
posterior_var(0.5,10)
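# The two parameterizations are algebraically identical:
# prior_var * likelihood_var / (prior_var + likelihood_var) == 1 / (1/prior_var + 1/likelihood_var)
# Quick numerical check of the equivalence
all.equal(posterior_var(0.5, 10), posterior_var_2(0.5, 10))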
|
/Normal_posterior_exploration_2.R
|
no_license
|
christopher-gillies/BayesianDataAnalysis
|
R
| false | false | 283 |
r
|
## archivist package for R
##
#' @title Split tag column in database into two separate columns: tagKey and tagValue
#'
#' @description
#' \code{splitTagsLocal} and \code{splitTagsGithub} functions split \code{tag} column from
#' \emph{tag} table placed in \code{backpack.db} into two separate columns:
#' \code{tagKey} and \code{tagValue}.
#'
#' @details
#' \code{tag} column from \emph{tag} table normally has the following structure:
#' \code{TagKey:TagValue}. \code{splitTagsLocal} and \code{splitTagsGithub} functions
#' can be used to split \code{tag} column into two separate columns:
#' \code{tagKey} and \code{tagValue}. As a result functions from \code{dplyr} package
#' can be used to easily summarize, search, and extract artifacts' Tags.
#' See \code{examples}.
#'
#' @param repoDir While working with the local repository. A character denoting
#' an existing directory of the Repository. If it is set to \code{NULL} (by default),
#' it will use the \code{repoDir} specified in \link{setLocalRepo}.
#'
#' @param repo While working with the Github repository. A character containing
#' a name of the Github repository on which the Repository is stored.
#' By default set to \code{NULL} - see \code{Note}.
#'
#' @param user While working with the Github repository. A character containing
#' a name of the Github user on whose account the \code{repo} is created.
#' By default set to \code{NULL} - see \code{Note}.
#'
#' @param branch While working with the Github repository. A character containing
#' a name of the Github Repository's branch on which the Repository is stored.
#' Default \code{branch} is \code{master}.
#'
#' @param repoDirGit While working with the Github repository. A character containing
#' a name of a directory on the Github repository on which the Repository is stored.
#' If the Repository is stored in the main folder of the Github repository,
#' this should be set to \code{repoDirGit = FALSE} as default.
#'
#' @return
#' A \code{data.frame} with 4 columns: \code{artifact}, \code{tagKey},
#' \code{tagValue} and \code{createdDate}.
#'
#' @note
#' If \code{repo} and \code{user} are set to \code{NULL} (as default) in the Github mode
#' then global parameters set in \link{setGithubRepo} function are used.
#'
#' Sometimes we can use \code{addTags*} function or \code{userTags} parameter
#' in \code{saveToRepo} to specify a \code{Tag} which might not match
#' \code{TagKey:TagValue} structure. It is simply \code{Tag}. In this case
#' \code{tagKey = userTags} and \code{tagValue = Tag}. See \code{examples}.
#'
#' To learn more about \code{Tags} and \code{Repository} structure check
#' \link{Tags} and \link{Repository}.
#' @author
#' Witold Chodor , \email{witoldchodor@@gmail.com}
#'
#' @examples
#' \dontrun{
#' ## LOCAL VERSION
#'
#' # Creating example default repository
#' exampleRepoDir <- tempfile()
#' createEmptyRepo( exampleRepoDir, default = TRUE )
#'
#' # Adding new artifacts to repository
#' data(iris)
#' saveToRepo(iris, repoDir = exampleRepoDir )
#' library(datasets)
#' data(iris3)
#' saveToRepo(iris3)
#' data(longley)
#' saveToRepo(longley)
#'
#' # Let's see the difference in tag table in backpack.db
#' showLocalRepo( method = "tags" ) # a data frame with 3 columns
#' splitTagsLocal() # a data frame with 4 columns
#'
#' # Now we can sum up what kind of Tags we have in our repository.
#' library(dplyr)
#' splitTagsLocal() %>%
#' group_by(tagKey) %>%
#' summarise(count = n())
#'
#' # Deleting existing repository
#' deleteRepo(exampleRepoDir, deleteRoot = TRUE)
#' rm(exampleRepoDir)
#'
#' ## Example with Tag that does not match TagKey:TagValue structure
#'
#' # Creating example default repository
#' exampleRepoDir <- tempfile()
#' createEmptyRepo( exampleRepoDir, default = TRUE )
#' data(iris)
#' # adding special Tag "lengthOne" to iris artifact and saving to repository
#' saveToRepo(iris, repoDir = exampleRepoDir,
#' userTags = "lengthOne")
#'
#' # Let's see the difference in tag table in backpack.db
#' showLocalRepo(method = "tags")
#' splitTagsLocal()
#' # We can see that splitTagsLocal added tagKey = userTags to "lengthOne" Tag.
#'
#' # Deleting existing repository
#' deleteRepo(exampleRepoDir, deleteRoot = TRUE)
#' rm(exampleRepoDir)
#'
#' ## Github Version
#' # Let's check what the tag table looks like while we are using the
#' # Github repository.
#' # We will choose only special columns of data frames that show Tags
#' showGithubRepo( user = "pbiecek", repo = "archivist", method = "tags" )[,2]
#' splitTagsGithub( user = "pbiecek", repo = "archivist" )[,2:3]
#'
#' }
#' @family archivist
#' @rdname splitTags
#' @export
splitTagsLocal <- function( repoDir = NULL ){
splitTags( repoDir = repoDir )
}
#' @rdname splitTags
#' @export
splitTagsGithub <- function( repo = NULL, user = NULL, branch = "master",
repoDirGit = FALSE ){
splitTags( repo = repo, user = user, branch = branch, repoDirGit = repoDirGit,
local = FALSE )
}
splitTags <- function( repoDir = NULL, repo = NULL, user = NULL,
branch = "master", repoDirGit = FALSE,
local = TRUE ){
# We will expand tag table in backpack.db
if (local) {
showLocalRepo( repoDir = repoDir, method = "tags" ) -> tags_df
} else {
showGithubRepo( repo = repo, user = user, branch = branch,
repoDirGit = repoDirGit,
method = "tags" ) -> tags_df
}
if (nrow(tags_df) == 0 & local) {
stop("There were no Tags for this Repository. Try showLocalRepo(method='tags') to ensure there are any Tags.")
}
if (nrow(tags_df) == 0 & !local) {
stop("There were no Tags for this Repository. Try showGithubRepo(method='tags') to ensure there are any Tags.")
}
# We will split tag column into tagKey and tagValue columns
strsplit(tags_df$tag, ":") %>%
lapply( function(element){
if (length(element) > 2) {
# in case of Tags with TagKey = date
element[2] <- paste0(element[-1], collapse = ":")
element <- element[1:2]
} else if (length(element) == 1){
# when a user gives Tag which does not match "TagKey:TagValue" structure
element <- c("userTags", element)
} else if (length(element) == 0){
# when a user gives Tag which is a character of length 0 :)
element <- c("userTags", "")
}
element
}) %>%
simplify2array %>%
t %>%
cbind(tags_df) -> tags_df
tags_df <- tags_df[, c(3,1,2,5)]
names(tags_df)[2:3] <- c("tagKey", "tagValue")
tags_df
}
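# Illustration of the split logic above (comment only, not run):
# strsplit("date:2016-02-01 12:00:00", ":")[[1]] yields four pieces, so the
# length(element) > 2 branch re-joins everything after the key back into
# c("date", "2016-02-01 12:00:00"); a bare Tag such as "lengthOne" becomes
# c("userTags", "lengthOne").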
|
/R/splitTags.R
|
no_license
|
gitter-badger/archivist
|
R
| false | false | 6,640 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/http-fxns.R
\name{sensibo.pod.state}
\alias{sensibo.pod.state}
\title{Get info from a specific state of a given air conditioner (pod).}
\usage{
sensibo.pod.state(pod, state, key = getOption("sensibo.key"))
}
\arguments{
\item{pod}{(character) Pod unique id.}
\item{state}{(character) State id to be retrieved.}
\item{key}{(character) API key from https://home.sensibo.com/me/api.}
}
\value{
A list with the requested state details.
}
\description{
Get info from a specific state of a given air conditioner (pod).
}
\examples{
\dontrun{
# Assuming that a valid Sensibo Sky API Key was created on https://home.sensibo.com/me/api
# and added to a 'sensibo.key' global option.
#
# options("sensibo.key" = <Your Sensibo API Key>)
## Getting the list of pods available to the user
pods.id <- sensibo.pods()
## Getting the current state of the first pod
pod.current <- sensibo.pod.states(pods.id[1], n = 1)
## Get more details of the given state (if available)
pod.state.details <- sensibo.pod.state(pods.id[1], pod.current[1])
}
}
|
/man/sensibo.pod.state.Rd
|
no_license
|
cran/sensibo.sky
|
R
| false | true | 1,110 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cleanColNames.R
\name{cleanColNames}
\alias{cleanColNames}
\title{Tidy up column names}
\usage{
cleanColNames(df)
}
\arguments{
\item{df}{a data frame}
}
\value{
a (tidied) data frame
}
\description{
Removes redundant punctuation and whitespace from data frame
}
|
/man/cleanColNames.Rd
|
no_license
|
gtm19/gmcustomfun
|
R
| false | true | 341 |
rd
|
0c5f379a5aa9dbab3c8c93112a147c6b fpu-10Xh-correct04-uniform-depth-10.qdimacs 283647 756234
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Miller-Marin/fpu/fpu-10Xh-correct04-uniform-depth-10/fpu-10Xh-correct04-uniform-depth-10.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 90 |
r
|
cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
source(file.path("C:/Users/Nick/git/of-dollars-and-data/header.R"))
########################## Load in Libraries ########################## #
########################## Start Program Here ######################### #
library(dplyr)
library(ggplot2)
library(tidyr)
library(scales)
library(grid)
library(gridExtra)
library(gtable)
library(RColorBrewer)
library(stringr)
library(ggrepel)
library(quadprog)
library(lubridate)
library(fTrading)
# ############################ End ################################## #
# Load in BV returns
full_bv_returns <- readRDS(paste0(localdir, "06-bv-returns.Rds"))
# Get the number of years in case we subset later
n_years_full <- nrow(full_bv_returns)
# Define the number of simulations (this will be used later)
n_simulations <- 10000
# This seed allows us to have reproducible random sampling
set.seed(12345)
bv_returns <- full_bv_returns
min_year <- min(year(bv_returns$year))
max_year <- max(year(bv_returns$year))
# Define the number of years
n_years <- nrow(bv_returns)
# Drop the year and the risk free rate from the return to just have returns
returns <- bv_returns[, -which(names(bv_returns) %in% c("year", "tbill_3m"))]
n_assets <- ncol(returns)
avg_rf <- colMeans(bv_returns[, "tbill_3m"])
eff_frontier <- function (returns, short = "no", max_allocation = NULL, risk_premium_upper_limit = .5, risk_increment = .005){
  # returns argument should be an m x n matrix or data frame with one column per security
  # short argument is whether short-selling is allowed; default is "no" (short selling prohibited)
  # max_allocation is the maximum % allowed for any one security (reduces concentration)
  # risk_premium_upper_limit is the upper limit of the risk premium modeled (see for loop below)
  # risk_increment is the increment (by) value used in the for loop
# Create the covariance of returns
cov_matrix <- cov(returns)
n <- ncol(cov_matrix)
# Create initial Amat and bvec assuming only equality constraint is that weight >= 0
Amat <- matrix (1, nrow = n)
bvec <- 1
meq <- 1
# Then modify the Amat and bvec if short-selling is prohibited
if(short == "no"){
Amat <- cbind(1, diag(n))
bvec <- c(bvec, rep(0, n))
}
# And modify Amat and bvec if a max allocation (concentration) is specified
if(!is.null(max_allocation)){
if(max_allocation > 1 | max_allocation <0){
stop("max.allocation must be greater than 0 and less than 1")
}
if(max_allocation * n < 1){
stop("Need to set max_allocation higher; not enough assets to add to 1")
}
Amat <- cbind(Amat, -diag(n))
bvec <- c(bvec, rep(-max_allocation, n))
}
# Calculate the number of loops based on how high to vary the risk premium and by what increment
loops <- risk_premium_upper_limit / risk_increment + 1
loop <- 1
# Initialize a matrix to contain allocation and statistics
# This is not necessary, but speeds up processing and uses less memory
eff <- matrix(nrow=loops, ncol=n+3)
# Now I need to give the matrix column names
colnames(eff) <- c(colnames(returns), "sd", "exp_return", "sharpe")
# Loop through the quadratic program solver
for (i in seq(from = 0, to = risk_premium_upper_limit, by = risk_increment)){
dvec <- colMeans(returns) * i # This moves the solution up along the efficient frontier
sol <- solve.QP(cov_matrix, dvec = dvec, Amat = Amat, bvec = bvec, meq = meq)
eff[loop,"sd"] <- sqrt(sum(sol$solution * colSums((cov_matrix * sol$solution))))
eff[loop,"exp_return"] <- as.numeric(sol$solution %*% colMeans(returns))
eff[loop,"sharpe"] <- (eff[loop,"exp_return"] - avg_rf) / eff[loop,"sd"]
eff[loop,1:n] <- sol$solution
loop <- loop+1
}
return(as.data.frame(eff))
}
eff <- eff_frontier(returns=returns, short = "no", max_allocation = .33, risk_premium_upper_limit = .5, risk_increment = .001)
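# Optional sanity check (added for illustration): each row of the frontier should
# hold weights that sum to ~1 and respect the 33% concentration cap set above
stopifnot(all(abs(rowSums(eff[, 1:n_assets]) - 1) < 1e-6),
          all(eff[, 1:n_assets] <= 0.33 + 1e-6))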
# Plot the efficient frontier
eff_optimal_point <- eff[eff$sharpe == max(eff$sharpe),]
# Color Scheme
ealred <- "#7D110C"
ealtan <- "#CDC4B6"
eallighttan <- "#F7F6F0"
ealdark <- "#423C30"
plot <- ggplot(eff, aes(x = sd, y = exp_return)) + geom_point(alpha = .1, color = ealdark) +
geom_point(data = eff_optimal_point, aes(x = sd, y = exp_return), color = ealred, size=5) +
annotate(geom="text", x = eff_optimal_point$sd, y = eff_optimal_point$exp_return, family = "my_font",
label=paste("Risk: ", round(eff_optimal_point$sd * 100, digits = 2),"%\nReal Return: ",
round(eff_optimal_point$exp_return * 100, digits = 2),"%\nSharpe: ",
round(eff_optimal_point$sharpe * 100, digits = 2), "%", sep=""), hjust=0, vjust=1.2) +
ggtitle(paste0("Efficient Frontier and Optimal Portfolio\n")) + labs(x = "Risk (standard deviation of portfolio variance)", y ="Real Return") +
of_dollars_and_data_theme +
scale_x_continuous(label = percent) +
scale_y_continuous(label = percent)
# Set the file_path based on the function input
file_path = paste0(exportdir, "06-simulate-bv-returns/bv-efficient-frontier.jpeg")
# Add a source and note string for the plots
source_string <- paste0("Source: BullionVault U.S. Asset Class Performance Data, ", min_year, "-", max_year," (OfDollarsAndData.com)")
note_string <- paste0("Note: Assumes no asset can be >33% of the portfolio and shorting is not allowed.")
# Turn plot into a gtable for adding text grobs
my_gtable <- ggplot_gtable(ggplot_build(plot))
# Make the source and note text grobs
source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"),
gp =gpar(fontfamily = "my_font", fontsize = 8))
note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"),
gp =gpar(fontfamily = "my_font", fontsize = 8))
# Add the text grobs to the bottom of the gtable
my_gtable <- arrangeGrob(my_gtable, bottom = source_grob)
my_gtable <- arrangeGrob(my_gtable, bottom = note_grob)
# Save the gtable
ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm")
# Simulate the portfolio value
# Create a simulation vector
sim_vec <- seq(1, n_years, 1)
# Drop unneeded columns
optimal_weights <- as.data.frame((eff_optimal_point[1:n_assets]))
# Round any weights less than 0.05% to zero
optimal_weights <- t(apply(optimal_weights[,], 2, function(x) ifelse(x < 0.0005, 0, x)))
# Initialize all matrices used for returns and value paths
sampled_years_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_full)
sampled_returns <- matrix(NA, nrow = n_simulations, ncol = n_assets)
portfolio_return_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_full)
value_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_full)
# Setup a yearly cash addition into the portfolio.
# This cash addition happens at the beginning of each return year
yearly_cash_add <- 5000
returns_for_simulation <- full_bv_returns[, -which(names(full_bv_returns) %in% c("year", "tbill_3m"))]
# Do this in a for loop over each year
for (i in 1:n_years_full){
sampled_years_matrix[, i] <- sample(sim_vec, n_simulations, replace = TRUE)
for (j in 1:n_assets){
sampled_returns[, j] <- 1 + unlist(returns_for_simulation[sampled_years_matrix[,i], j])
}
portfolio_return_matrix[, i] <- rowSums(t(as.vector(optimal_weights) * t(sampled_returns)))
if (i == 1){
value_matrix[, i] <- yearly_cash_add * (portfolio_return_matrix[ , i])
} else {
value_matrix[, i] <- (value_matrix[, i - 1] + yearly_cash_add) * (portfolio_return_matrix[, i])
}
}
# Calculate some statistics
total_invested_capital <- n_years_full * yearly_cash_add
max_end_value <- max(value_matrix[, n_years_full])
min_end_value <- min(value_matrix[, n_years_full])
median_end_value <- quantile(value_matrix[, n_years_full], probs = 0.5)
# Calculate the maximum drawdown for each simulation
max_drawdown <- 0
max_drawdown_pct_matrix <- matrix(NA, nrow = n_simulations, ncol = 1)
max_drawdown_dollar_matrix <- matrix(NA, nrow = n_simulations, ncol = 1)
for (x in 1:n_simulations){
drawdown <- maxDrawDown(value_matrix[x,])$maxdrawdown
from <- maxDrawDown(value_matrix[x,])$from
to <- maxDrawDown(value_matrix[x,])$to
if (drawdown > 0){
max_drawdown_pct_matrix[x, 1] <- drawdown / value_matrix[x, from]
max_drawdown_dollar_matrix[x, 1] <- drawdown
} else{
max_drawdown_pct_matrix[x, 1] <- 0
max_drawdown_dollar_matrix[x, 1] <- drawdown
}
}
# Calculate summary statistics on the max, min, and median drawdowns
calculate_drawdown <- function(name){
type <- deparse(substitute(name))
matrix <- get(paste0("max_drawdown_", type, "_matrix"))
max <- max(matrix)
min <- min(matrix)
median <- quantile(matrix, probs = 0.5)
assign(paste0("max_drawdown_", type), max)
assign(paste0("min_drawdown_", type), min)
assign(paste0("median_drawdown_", type), median)
if (type == "pct"){
print(paste0("Maximum drawdown: ", max*100, "%"))
print(paste0("Minimum drawdown: ", min*100, "%"))
print(paste0("Median drawdown: ", median*100, "%"))
} else if (type == "dollar"){
print(paste0("Maximum drawdown: $", max))
print(paste0("Minimum drawdown: $", min))
print(paste0("Median drawdown: $", median))
}
}
calculate_drawdown(pct)
calculate_drawdown(dollar)
# Print other summary stats as well
print(paste0("Total invested capital: $", total_invested_capital))
print(paste0("Maximum Ending Value: $", max_end_value))
print(paste0("Minimum Ending Value: $", min_end_value))
print(paste0("Median Ending Value: $", median_end_value))
|
/analysis/06-simulate-bullion-vault-returns.R
|
no_license
|
joyeung/of-dollars-and-data
|
R
| false | false | 9,857 |
r
|
cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
source(file.path("C:/Users/Nick/git/of-dollars-and-data/header.R"))
########################## Load in Libraries ########################## #
########################## Start Program Here ######################### #
library(dplyr)
library(ggplot2)
library(tidyr)
library(scales)
library(grid)
library(gridExtra)
library(gtable)
library(RColorBrewer)
library(stringr)
library(ggrepel)
library(quadprog)
library(lubridate)
library(fTrading)
# ############################ End ################################## #
# Load in BV returns
full_bv_returns <- readRDS(paste0(localdir, "06-bv-returns.Rds"))
# Get the number of years in case we subset later
n_years_full <- nrow(full_bv_returns)
# Define the number of simulations (this will be used later)
n_simulations <- 10000
# This seed allows us to have reproducible random sampling
set.seed(12345)
bv_returns <- full_bv_returns
min_year <- min(year(bv_returns$year))
max_year <- max(year(bv_returns$year))
# Define the number of years
n_years <- nrow(bv_returns)
# Drop the year and the risk free rate from the return to just have returns
returns <- bv_returns[, -which(names(bv_returns) %in% c("year", "tbill_3m"))]
n_assets <- ncol(returns)
avg_rf <- colMeans(bv_returns[, "tbill_3m"])
eff_frontier <- function (returns, short = "no", max_allocation = NULL, risk_premium_upper_limit = .5, risk_increment = .005){
# return argument should be a m x n matrix with one column per security
# short argument is whether short-selling is allowed; default is no (short selling prohibited)
# max.allocation is the maximum % allowed for any one security (reduces concentration)
# risk.premium.up is the upper limit of the risk premium modeled (see for loop below)
# risk.increment is the increment (by) value used in the for loop
# Create the covariance of returns
cov_matrix <- cov(returns)
n <- ncol(cov_matrix)
# Create initial Amat and bvec assuming only equality constraint is that weight >= 0
Amat <- matrix (1, nrow = n)
bvec <- 1
meq <- 1
# Then modify the Amat and bvec if short-selling is prohibited
if(short == "no"){
Amat <- cbind(1, diag(n))
bvec <- c(bvec, rep(0, n))
}
# And modify Amat and bvec if a max allocation (concentration) is specified
if(!is.null(max_allocation)){
if(max_allocation > 1 | max_allocation <0){
stop("max.allocation must be greater than 0 and less than 1")
}
if(max_allocation * n < 1){
stop("Need to set max_allocation higher; not enough assets to add to 1")
}
Amat <- cbind(Amat, -diag(n))
bvec <- c(bvec, rep(-max_allocation, n))
}
# Calculate the number of loops based on how high to vary the risk premium and by what increment
loops <- risk_premium_upper_limit / risk_increment + 1
loop <- 1
# Initialize a matrix to contain allocation and statistics
# This is not necessary, but speeds up processing and uses less memory
eff <- matrix(nrow=loops, ncol=n+3)
# Now I need to give the matrix column names
colnames(eff) <- c(colnames(returns), "sd", "exp_return", "sharpe")
# Loop through the quadratic program solver
for (i in seq(from = 0, to = risk_premium_upper_limit, by = risk_increment)){
dvec <- colMeans(returns) * i # This moves the solution up along the efficient frontier
sol <- solve.QP(cov_matrix, dvec = dvec, Amat = Amat, bvec = bvec, meq = meq)
eff[loop,"sd"] <- sqrt(sum(sol$solution * colSums((cov_matrix * sol$solution))))
eff[loop,"exp_return"] <- as.numeric(sol$solution %*% colMeans(returns))
eff[loop,"sharpe"] <- (eff[loop,"exp_return"] - avg_rf) / eff[loop,"sd"]
eff[loop,1:n] <- sol$solution
loop <- loop+1
}
return(as.data.frame(eff))
}
eff <- eff_frontier(returns=returns, short = "no", max_allocation = .33, risk_premium_upper_limit = .5, risk_increment = .001)
# Plot the efficient frontier
eff_optimal_point <- eff[eff$sharpe == max(eff$sharpe),]
# Color Scheme
ealred <- "#7D110C"
ealtan <- "#CDC4B6"
eallighttan <- "#F7F6F0"
ealdark <- "#423C30"
plot <- ggplot(eff, aes(x = sd, y = exp_return)) + geom_point(alpha = .1, color = ealdark) +
geom_point(data = eff_optimal_point, aes(x = sd, y = exp_return), color = ealred, size=5) +
annotate(geom="text", x = eff_optimal_point$sd, y = eff_optimal_point$exp_return, family = "my_font",
label=paste("Risk: ", round(eff_optimal_point$sd * 100, digits = 2),"%\nReal Return: ",
round(eff_optimal_point$exp_return * 100, digits = 2),"%\nSharpe: ",
round(eff_optimal_point$sharpe * 100, digits = 2), "%", sep=""), hjust=0, vjust=1.2) +
ggtitle(paste0("Efficient Frontier and Optimal Portfolio\n")) + labs(x = "Risk (standard deviation of portfolio variance)", y ="Real Return") +
of_dollars_and_data_theme +
scale_x_continuous(label = percent) +
scale_y_continuous(label = percent)
# Set the file_path based on the function input
file_path = paste0(exportdir, "06-simulate-bv-returns/bv-efficient-frontier.jpeg")
# Add a source and note string for the plots
source_string <- paste0("Source: BullionVault U.S. Asset Class Performance Data, ", min_year, "-", max_year," (OfDollarsAndData.com)")
note_string <- paste0("Note: Assumes no asset can be >33% of the portfolio and shorting is not allowed.")
# Turn plot into a gtable for adding text grobs
my_gtable <- ggplot_gtable(ggplot_build(plot))
# Make the source and note text grobs
source_grob <- textGrob(source_string, x = (unit(0.5, "strwidth", source_string) + unit(0.2, "inches")), y = unit(0.1, "inches"),
gp =gpar(fontfamily = "my_font", fontsize = 8))
note_grob <- textGrob(note_string, x = (unit(0.5, "strwidth", note_string) + unit(0.2, "inches")), y = unit(0.15, "inches"),
gp =gpar(fontfamily = "my_font", fontsize = 8))
# Add the text grobs to the bototm of the gtable
my_gtable <- arrangeGrob(my_gtable, bottom = source_grob)
my_gtable <- arrangeGrob(my_gtable, bottom = note_grob)
# Save the gtable
ggsave(file_path, my_gtable, width = 15, height = 12, units = "cm")
# Simulate the portfolio value
# Create a simulation vector
sim_vec <- seq(1, n_years, 1)
# Drop unneeded columns
optimal_weights <- as.data.frame((eff_optimal_point[1:n_assets]))
# Round any weights less than 0.05% to zero
optimal_weights <- t(apply(optimal_weights[,], 2, function(x) ifelse(x < 0.0005, 0, x)))
# Initialize all matrices used for returns and value paths
sampled_years_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_full)
sampled_returns <- matrix(NA, nrow = n_simulations, ncol = n_assets)
portfolio_return_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_full)
value_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years_full)
# Setup a yearly cash addition into the portfolio.
# This cash addition happens at the beginning of each return year
yearly_cash_add <- 5000
returns_for_simulation <- full_bv_returns[, -which(names(full_bv_returns) %in% c("year", "tbill_3m"))]
# Do this in a for loop over each year
for (i in 1:n_years_full){
sampled_years_matrix[, i] <- sample(sim_vec, n_simulations, replace = TRUE)
for (j in 1:n_assets){
sampled_returns[, j] <- 1 + unlist(returns_for_simulation[sampled_years_matrix[,i], j])
}
portfolio_return_matrix[, i] <- rowSums(t(as.vector(optimal_weights) * t(sampled_returns)))
if (i == 1){
value_matrix[, i] <- yearly_cash_add * (portfolio_return_matrix[ , i])
} else {
value_matrix[, i] <- (value_matrix[, i - 1] + yearly_cash_add) * (portfolio_return_matrix[, i])
}
}
# Calculate some statistics
total_invested_capital <- n_years_full * yearly_cash_add
max_end_value <- max(value_matrix[, n_years_full])
min_end_value <- min(value_matrix[, n_years_full])
median_end_value <- quantile(value_matrix[, n_years_full], probs = 0.5)
# Caluclate the maximum drawdown for each simulation
max_drawdown <- 0
max_drawdown_pct_matrix <- matrix(NA, nrow = n_simulations, ncol = 1)
max_drawdown_dollar_matrix <- matrix(NA, nrow = n_simulations, ncol = 1)
for (x in 1:n_simulations){
drawdown <- maxDrawDown(value_matrix[x,])$maxdrawdown
from <- maxDrawDown(value_matrix[x,])$from
to <- maxDrawDown(value_matrix[x,])$to
if (drawdown > 0){
max_drawdown_pct_matrix[x, 1] <- drawdown / value_matrix[x, from]
max_drawdown_dollar_matrix[x, 1] <- drawdown
} else{
max_drawdown_pct_matrix[x, 1] <- 0
max_drawdown_dollar_matrix[x, 1] <- drawdown
}
}
# Calculate summary statistics on the max, min, and median drawdowns
calculate_drawdown <- function(name){
type <- deparse(substitute(name))
matrix <- get(paste0("max_drawdown_", type, "_matrix"))
max <- max(matrix)
min <- min(matrix)
median <- quantile(matrix, probs = 0.5)
assign(paste0("max_drawdown_", type), max)
assign(paste0("min_drawdown_", type), min)
assign(paste0("median_drawdown_", type), median)
if (type == "pct"){
print(paste0("Maximum drawdown: ", max*100, "%"))
print(paste0("Minimum drawdown: ", min*100, "%"))
print(paste0("Median drawdown: ", median*100, "%"))
} else if (type == "dollar"){
print(paste0("Maximum drawdown: $", max))
print(paste0("Minimum drawdown: $", min))
print(paste0("Median drawdown: $", median))
}
}
calculate_drawdown(pct)
calculate_drawdown(dollar)
# Print other summary stats as well
print(paste0("Total invested capital: $", total_invested_capital))
print(paste0("Maximum Ending Value: $", max_end_value))
print(paste0("Minimum Ending Value: $", min_end_value))
print(paste0("Median Ending Value: $", median_end_value))
|
#!/usr/bin/env Rscript
styler::style_dir(".",
recursive = FALSE,
filetype = c("R", "Rmd")
)
# styler::style_dir("templates",
# recursive = FALSE,
# filetype = c("R", "Rmd")
# )
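# Illustrative sketch, not from the original script: a single file can be
# styled the same way; the path below is hypothetical.
# styler::style_file("R/example.R")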
|
/src/rmd/main/styler.R
|
permissive
|
guillaumecharbonnier/mw-miallot2021
|
R
| false | false | 185 |
r
|
#Read the data
dataset<-read.table('./household_power_consumption.txt', header = T, sep = ';', na.strings = '?',
stringsAsFactors = F)
#Convert dates into date variables
dataset$Date<-strptime(dataset$Date, format = '%d/%m/%Y')
#Select only dates of interest
dataset<-dataset[(dataset$Date>='2007-02-01')&(dataset$Date<='2007-02-02'),]
#Create new variable that contains dates and time
datetime<-paste(dataset$Date, dataset$Time, sep = ' ')
datetime<-as.POSIXct(datetime)
#Create the plot and print it to png
png("plot3.png", width=480, height=480)
plot(datetime,dataset$Sub_metering_1, type = 'l',ylab = 'Energy sub metering', xlab = '')
lines(datetime,dataset$Sub_metering_2, col = 'red')
lines(datetime,dataset$Sub_metering_3, col = 'blue')
legend('topright', c('Sub_metering_1','Sub_metering_2','Sub_metering_3'), lty = 1, lwd = 2,
col = c('black', 'red','blue'))
dev.off()
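# Illustrative sketch, not from the original script: the same two-day filter
# can be written with Date objects instead of POSIXlt, assuming the same
# input file is present in the working directory.
d <- read.table('./household_power_consumption.txt', header = TRUE, sep = ';',
                na.strings = '?', stringsAsFactors = FALSE)
d$Date <- as.Date(d$Date, format = '%d/%m/%Y')
d <- d[d$Date >= as.Date('2007-02-01') & d$Date <= as.Date('2007-02-02'), ]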
|
/Plot3.R
|
no_license
|
alberto-gallotti/ExData_Plotting1
|
R
| false | false | 909 |
r
|
"%,%" <- function(x,y)paste(x,y,sep="")
"print.TableMonster" <-
function (x, special = NULL, simple = FALSE, dbg = FALSE, ...)
{
spcl <- FALSE
spcl.val <- NULL
if (!missing(special)) {
spcl.val <- special
spcl <- TRUE
}
m <- match.call()
m$simple <- m$dbg <- NULL
ddd <- list()
nmsddd <- names(m)[-(1:2)]
n.ddd <- length(nmsddd)
if (n.ddd > 0)
for (k in 1:n.ddd) ddd[[nmsddd[k]]] <- m[[2 + k]]
x.df <- as.data.frame(x)
nr <- nrow(x.df)
nc <- ncol(x.df)
headings <- attr(x, "headings")
ctypes <- attr(x, "ctypes")
digits <- attr(x, "digits")
displ <- attr(x, "display")
rowcolor <- attr(x, "rowcolor")
caption <- attr(x, "caption")
totals <- attr(x, "totals")
rc.idx <- grep("rowcolor", nmsddd)
is.rc <- (length(rc.idx) > 0)
if (is.rc)
{
rowcolor <- ddd[[rc.idx]]
ddd <- ddd[-rc.idx]
n.ddd <- n.ddd - 1
nmsddd <- names(ddd)
}
if(is.rc)
{
is.clr <- !is.null(rowcolor$color)
is.clr.rnm <- !is.null(rowcolor$rownum)
sum.is <- is.clr + is.clr.rnm
if(sum.is > 0 && (sum.is < 2))
stop("Specification of row color requires components 'color' and 'rownum' to be set")
if(is.clr) clr <- rowcolor$color
if(is.clr.rnm) clr.rnm <- eval(rowcolor$rownum, sys.parent())
}
is.tot <- !is.null(totals)
if (is.tot)
if (!is.logical(totals))
stop("Attribute 'totals' must be logical")
n.h <- length(headings)
depth <- rep(1, n.h)
lngths <- NULL
for (k in 1:n.h) {
ptr1 <- ptr0 <- headings[[k]]
if (!is.null(names(ptr1))) {
ptr0 <- ptr1
depth[k] <- depth[k] + 1
ptr1 <- ptr0[[1]]
}
lnptr <- length(ptr0)
lngths <- c(lngths, lnptr)
}
mxdpth <- max(depth)
atmxdpth <- which(depth == mxdpth)
for (k in 1:n.h) {
j <- mxdpth - depth[k]
out <- headings[[k]]
while (j > 0) {
out <- list(` ` = out)
names(out) <- names(headings)[k]
j <- j - 1
}
headings[[k]] <- out
}
hdr <- list()
hdr[[1]] <- names(headings[atmxdpth])
n.hdr.1 <- length(hdr[[1]])
if (mxdpth > 1)
{
nms.ul.hdngs <- names(unlist(headings))
nchr <- nchar(nms.ul.hdngs)
nchr.hlf <- (nchr-1)/2
frst <- substring(nms.ul.hdngs, 1, nchr.hlf)
scnd <- substring(nms.ul.hdngs, nchr.hlf+2, nchr)
idx.rpts <- which(frst==scnd)
nms.ul.hdngs[idx.rpts] <- frst[idx.rpts]
for(k in 1:n.hdr.1)
{
grp.hdr1.k <- grep(hdr[[1]][k], nms.ul.hdngs)
nms.ul.hdngs[grp.hdr1.k] <- substring(nms.ul.hdngs[grp.hdr1.k], nchar(hdr[[1]][k])+2, nchar(nms.ul.hdngs[grp.hdr1.k]))
}
hdr[[mxdpth]] <- nms.ul.hdngs
}
h1 <- h1a <- NULL
dpth2 <- any(depth > 1)
if (dpth2)
simple <- FALSE
if (dpth2) {
h1 <- h1a <- NULL
h1[atmxdpth] <- "\\multicolumn{" %,% lngths[atmxdpth[1]] %,%
"}{c}{" %,% hdr[[1]] %,% "}"
h1[setdiff(1:n.h, atmxdpth)] <- ""
h1 <- paste(h1, collapse = "&") %,% "\\\\\n"
nc1 <- length(hdr[[1]])
tt <- cumsum(lngths)
i0 <- tt[atmxdpth - 1] + 1
i1 <- tt[atmxdpth]
ni <- length(i0)
prfx <- "\\cmidrule(r){" %,% i0[1] %,% "-" %,% i1[1] %,% "}"
bdy <- NULL
sfx <- "\n"
if(ni>1)
{
k.k <- apply(cbind(i0, i1)[2:(ni - 1), , drop = FALSE],
1, FUN = function(x) x[1] %,% "-" %,% x[2])
bdy <- paste("\\cmidrule(lr){" %,% k.k, collapse = "}")
sfx <- "}\\cmidrule(l){" %,% i0[ni] %,% "-" %,% i1[ni] %,% "}\n"
}
h1a <- prfx %,% bdy %,% sfx
}
h2 <- paste(hdr[[mxdpth]], collapse = "&") %,% "\\\\\n"
nc2 <- length(hdr[[mxdpth]])
prfx <- "\\cmidrule(r){" %,% 1 %,% "-" %,% 1 %,% "}"
k.k <- sapply(2:(nc2 - 1), FUN = function(x) x %,% "-" %,%
x)
bdy <- paste("\\cmidrule(lr){" %,% k.k, collapse = "}")
sfx <- "}\\cmidrule(l){" %,% nc2 %,% "-" %,% nc2 %,% "}\n"
h2a <- ftr <- prfx %,% bdy %,% sfx
xtbl.call <-
as.call(expression(xtable, as.data.frame(x),
digits = c(0, digits), align = "ll" %,% paste(rep("r",
nc - 1), collapse = "")))
if (!is.null(displ))
xtbl.call$display <- c("s", displ)
pr.xtbl.call <-
as.call(expression(print, xtbl, hline.after = NULL,
include.rownames = FALSE, include.colnames = FALSE, type = "latex"))
is.lbl <- is.algn <- FALSE
if (n.ddd > 0) {
lbl.idx <- grep("label", nmsddd)
is.lbl <- (length(lbl.idx) > 0)
if (is.lbl) {
lbl.val <- ddd[[lbl.idx]]
ddd <- ddd[-lbl.idx]
n.ddd <- n.ddd - 1
nmsddd <- names(ddd)
}
algn.idx <- grep("align", nmsddd)
is.algn <- (length(algn.idx)>0)
if (is.algn) {
algn.val <- eval(ddd[[algn.idx]], sys.parent())
ddd <- ddd[-algn.idx]
n.ddd <- n.ddd - 1
nmsddd <- names(ddd)
}
is.ddd <- (n.ddd > 0)
if (is.ddd)
for (k in 1:n.ddd) pr.xtbl.call[[nmsddd[k]]] <- ddd[[nmsddd[k]]]
}
if (!spcl) {
xtbl.call[["caption"]] <- as.name("caption")
if(is.lbl) xtbl.call[["label"]] <- lbl.val
if(is.algn) xtbl.call$align <- c("l", algn.val)
atr <- c("\\toprule\n", h1, h1a, h2, h2a)
# \rowcolor{lightgray}
# or \rowcolors{1}{}{lightgray}
if(is.rc)
atr <- c(atr, "\\rowcolor{" %,% clr %,% "}")
if (is.tot)
atr <- c(atr, ftr)
atr <- c(atr, "\\bottomrule\n")
add.to.row <- list()
add.to.row[["command"]] <- atr
add.to.row[["pos"]] <- list()
add.to.row[["pos"]][1:2] <- -1
add.to.row[["pos"]][3:(3 + dpth2*2)] <- 0
if (is.rc)
add.to.row[["pos"]][3 + dpth2*2 + 1] <- clr.rnm-1
if (is.tot)
add.to.row[["pos"]][3 + dpth2*2 + is.rc + 1] <- nr - 1
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 1] <- nr
}
if (spcl) {
if (spcl.val == "jrss-b") {
btbl <- "\\begin{table}\n"
cpn <- "\\caption{" %,% caption %,% "}\n"
if (is.lbl)
cpn <- "\\caption{\\label{" %,% lbl.val %,% "}" %,%
caption %,% "}\n"
ctr <- NULL
fb <- "\\fbox{%\n"
btblr <- "l" %,% paste(rep("r", nc2 - 1), collapse = "")
if(is.algn) btblr <- paste(algn.val, collapse="")
btblr <- "\\begin{tabular}{" %,% btblr %,% "}\n"
etblr <- "\\end{tabular}}\n"
etbl <- "\\end{table}\n"
tp <- btbl %,% cpn %,% ctr %,% fb %,% btblr %,% "\\toprule\n"
atr <- c(tp, h1, h1a, h2, h2a)
if(is.rc)
atr <- c(atr, "\\rowcolor{" %,% clr %,% "}")
if (is.tot)
atr <- c(atr, ftr)
atr <- c(atr, "\\bottomrule\n", etblr, etbl)
add.to.row <- list()
add.to.row[["command"]] <- atr
add.to.row[["pos"]] <- list()
add.to.row[["pos"]][1:(3 + dpth2*2)] <- 0
if (is.rc)
add.to.row[["pos"]][3 + dpth2*2 + 1] <- clr.rnm - 1
if (is.tot)
add.to.row[["pos"]][3 + dpth2*2 + is.rc + 1] <- nr - 1
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 1] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 2] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 3] <- nr
pr.xtbl.call$only.contents <- TRUE
}
if (spcl.val == "aos") {
btbl <- "\\begin{table}\n"
cpn <- "\\caption{" %,% caption %,% "}\n"
if (is.lbl)
cpn <- "\\caption{\\label{" %,% lbl.val %,% "}" %,%
caption %,% "}\n"
ctr <- NULL
btblr <- "l" %,% paste(rep("r", nc2 - 1), collapse = "")
if(is.algn) btblr <- paste(algn.val, collapse="")
btblr <- "\\begin{tabular}{" %,% btblr %,% "}\n"
etblr <- "\\end{tabular}\n"
etbl <- "\\end{table}\n"
tp <- btbl %,% cpn %,% ctr %,% btblr %,% "\\toprule\n"
atr <- c(tp, h1, h1a, h2, h2a)
if(is.rc)
atr <- c(atr, "\\rowcolor{" %,% clr %,% "}")
if (is.tot)
atr <- c(atr, ftr)
atr <- c(atr, "\\bottomrule\n", etblr, etbl)
add.to.row <- list()
add.to.row[["command"]] <- atr
add.to.row[["pos"]] <- list()
add.to.row[["pos"]][1:(3 + dpth2*2)] <- 0
if (is.rc)
add.to.row[["pos"]][3 + dpth2*2 + 1] <- clr.rnm - 1
if (is.tot)
add.to.row[["pos"]][3 + dpth2*2 + is.rc + 1] <- nr - 1
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 1] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 2] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 3] <- nr
pr.xtbl.call$only.contents <- TRUE
}
}
if (dbg)
save(list = "add.to.row", file = "debug.rda")
pr.xtbl.call$add.to.row <- as.name("add.to.row")
if(is.rc)
{
cat(sprintf("%s\n", "%% Don't forget to \\usepackage{xcolor} and include 'table' in your documentclass options, "))
cat(sprintf("%s\n", "%% e.g. \\documentclass[table]{beamer}, and remember to define the color, " %,% clr %,% ", in your preamble"))
}
xtbl <- eval(xtbl.call)
eval(pr.xtbl.call)
}
"as.data.frame.TableMonster" <-
function(x, row.names = NULL, optional = FALSE, ...)
{
attr(x, "headings") <- NULL
attr(x, "ctypes") <- NULL
attr(x, "digits") <- NULL
attr(x, "caption") <- NULL
attr(x, "totals") <- NULL
class(x) <- "data.frame"
x
}
"tmHeadings" <-
function(x)
{
attr(x, "headings")
}
"tmCtypes" <-
function(x)
{
attr(x, "ctypes")
}
"tmDigits" <-
function(x)
{
attr(x, "digits")
}
"tmTotals" <-
function(x)
{
attr(x, "totals")
}
"tmCaption" <-
function(x)
{
attr(x, "caption")
}
"tmHeadings<-" <-
function(x, value)
{
attr(x, "headings") <- value
x
}
"tmCtypes<-" <-
function(x, value)
{
attr(x, "ctypes") <- value
x
}
"tmDigits<-" <-
function(x, value)
{
attr(x, "digits") <- value
x
}
"tmTotals<-" <-
function(x, value)
{
attr(x, "totals") <- value
x
}
"tmCaption<-" <-
function(x, value)
{
attr(x, "caption") <- value
x
}
.onAttach <- function(libname, pkgname)
{
options(stringsAsFactors=FALSE)
ver <- read.dcf(file=system.file("DESCRIPTION", package=pkgname),
fields="Version")
msg <- paste(pkgname, ver) %,% "\n\n" %,%
"Type ?print.TableMonster"
packageStartupMessage(msg)
}
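# Illustrative sketch, not from the package source: a rough idea of how the
# pieces above fit together. The exact structure expected for the "headings"
# attribute is an assumption here (see the package documentation), so the
# example is left commented out rather than presented as working code.
# library(xtable)
# tm <- data.frame(group = c("A", "B"), n = c(10L, 12L), mean = c(1.23, 4.56))
# class(tm) <- c("TableMonster", "data.frame")
# tmHeadings(tm) <- list(Group = "", N = "", Mean = "")
# tmDigits(tm) <- c(0, 0, 2)
# tmCaption(tm) <- "A toy table"
# print(tm)   # emits LaTeX via xtable, with booktabs-style rules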
|
/R/TableMonster.R
|
no_license
|
cran/TableMonster
|
R
| false | false | 11,024 |
r
|
"%,%" <- function(x,y)paste(x,y,sep="")
"print.TableMonster" <-
function (x, special = NULL, simple = FALSE, dbg = FALSE, ...)
{
spcl <- FALSE
spcl.val <- NULL
if (!missing(special)) {
spcl.val <- special
spcl <- TRUE
}
m <- match.call()
m$simple <- m$dbg <- NULL
ddd <- list()
nmsddd <- names(m)[-(1:2)]
n.ddd <- length(nmsddd)
if (n.ddd > 0)
for (k in 1:n.ddd) ddd[[nmsddd[k]]] <- m[[2 + k]]
x.df <- as.data.frame(x)
nr <- nrow(x.df)
nc <- ncol(x.df)
headings <- attr(x, "headings")
ctypes <- attr(x, "ctypes")
digits <- attr(x, "digits")
displ <- attr(x, "display")
rowcolor <- attr(x, "rowcolor")
caption <- attr(x, "caption")
totals <- attr(x, "totals")
rc.idx <- grep("rowcolor", nmsddd)
is.rc <- (length(rc.idx) > 0)
if (is.rc)
{
rowcolor <- ddd[[rc.idx]]
ddd <- ddd[-rc.idx]
n.ddd <- n.ddd - 1
nmsddd <- names(ddd)
}
if(is.rc)
{
is.clr <- !is.null(rowcolor$color)
is.clr.rnm <- !is.null(rowcolor$rownum)
sum.is <- is.clr + is.clr.rnm
if(sum.is > 0 && (sum.is < 2))
stop("Specification of row color requires components 'color' and 'rownum' to be set")
if(is.clr) clr <- rowcolor$color
if(is.clr.rnm) clr.rnm <- eval(rowcolor$rownum, sys.parent())
}
is.tot <- !is.null(totals)
if (is.tot)
if (!is.logical(totals))
stop("Attribute 'totals' must be logical")
n.h <- length(headings)
depth <- rep(1, n.h)
lngths <- NULL
for (k in 1:n.h) {
ptr1 <- ptr0 <- headings[[k]]
if (!is.null(names(ptr1))) {
ptr0 <- ptr1
depth[k] <- depth[k] + 1
ptr1 <- ptr0[[1]]
}
lnptr <- length(ptr0)
lngths <- c(lngths, lnptr)
}
mxdpth <- max(depth)
atmxdpth <- which(depth == mxdpth)
for (k in 1:n.h) {
j <- mxdpth - depth[k]
out <- headings[[k]]
while (j > 0) {
out <- list(` ` = out)
names(out) <- names(headings)[k]
j <- j - 1
}
headings[[k]] <- out
}
hdr <- list()
hdr[[1]] <- names(headings[atmxdpth])
n.hdr.1 <- length(hdr[[1]])
if (mxdpth > 1)
{
nms.ul.hdngs <- names(unlist(headings))
nchr <- nchar(nms.ul.hdngs)
nchr.hlf <- (nchr-1)/2
frst <- substring(nms.ul.hdngs, 1, nchr.hlf)
scnd <- substring(nms.ul.hdngs, nchr.hlf+2, nchr)
idx.rpts <- which(frst==scnd)
nms.ul.hdngs[idx.rpts] <- frst[idx.rpts]
for(k in 1:n.hdr.1)
{
grp.hdr1.k <- grep(hdr[[1]][k], nms.ul.hdngs)
nms.ul.hdngs[grp.hdr1.k] <- substring(nms.ul.hdngs[grp.hdr1.k], nchar(hdr[[1]][k])+2, nchar(nms.ul.hdngs[grp.hdr1.k]))
}
hdr[[mxdpth]] <- nms.ul.hdngs
}
h1 <- h1a <- NULL
dpth2 <- any(depth > 1)
if (dpth2)
simple <- FALSE
if (dpth2) {
h1 <- h1a <- NULL
h1[atmxdpth] <- "\\multicolumn{" %,% lngths[atmxdpth[1]] %,%
"}{c}{" %,% hdr[[1]] %,% "}"
h1[setdiff(1:n.h, atmxdpth)] <- ""
h1 <- paste(h1, collapse = "&") %,% "\\\\\n"
nc1 <- length(hdr[[1]])
tt <- cumsum(lngths)
i0 <- tt[atmxdpth - 1] + 1
i1 <- tt[atmxdpth]
ni <- length(i0)
prfx <- "\\cmidrule(r){" %,% i0[1] %,% "-" %,% i1[1] %,% "}"
bdy <- NULL
sfx <- "\n"
if(ni>1)
{
k.k <- apply(cbind(i0, i1)[2:(ni - 1), , drop = FALSE],
1, FUN = function(x) x[1] %,% "-" %,% x[2])
bdy <- paste("\\cmidrule(lr){" %,% k.k, collapse = "}")
sfx <- "}\\cmidrule(l){" %,% i0[ni] %,% "-" %,% i1[ni] %,% "}\n"
}
h1a <- prfx %,% bdy %,% sfx
}
h2 <- paste(hdr[[mxdpth]], collapse = "&") %,% "\\\\\n"
nc2 <- length(hdr[[mxdpth]])
prfx <- "\\cmidrule(r){" %,% 1 %,% "-" %,% 1 %,% "}"
k.k <- sapply(2:(nc2 - 1), FUN = function(x) x %,% "-" %,%
x)
bdy <- paste("\\cmidrule(lr){" %,% k.k, collapse = "}")
sfx <- "}\\cmidrule(l){" %,% nc2 %,% "-" %,% nc2 %,% "}\n"
h2a <- ftr <- prfx %,% bdy %,% sfx
xtbl.call <-
as.call(expression(xtable, as.data.frame(x),
digits = c(0, digits), align = "ll" %,% paste(rep("r",
nc - 1), collapse = "")))
if (!is.null(displ))
xtbl.call$display <- c("s", displ)
pr.xtbl.call <-
as.call(expression(print, xtbl, hline.after = NULL,
include.rownames = FALSE, include.colnames = FALSE, type = "latex"))
is.lbl <- is.algn <- FALSE
if (n.ddd > 0) {
lbl.idx <- grep("label", nmsddd)
is.lbl <- (length(lbl.idx) > 0)
if (is.lbl) {
lbl.val <- ddd[[lbl.idx]]
ddd <- ddd[-lbl.idx]
n.ddd <- n.ddd - 1
nmsddd <- names(ddd)
}
algn.idx <- grep("align", nmsddd)
is.algn <- (length(algn.idx)>0)
if (is.algn) {
algn.val <- eval(ddd[[algn.idx]], sys.parent())
ddd <- ddd[-algn.idx]
n.ddd <- n.ddd - 1
nmsddd <- names(ddd)
}
is.ddd <- (n.ddd > 0)
if (is.ddd)
for (k in 1:n.ddd) pr.xtbl.call[[nmsddd[k]]] <- ddd[[nmsddd[k]]]
}
if (!spcl) {
xtbl.call[["caption"]] <- as.name("caption")
if(is.lbl) xtbl.call[["label"]] <- lbl.val
if(is.algn) xtbl.call$align <- c("l", algn.val)
atr <- c("\\toprule\n", h1, h1a, h2, h2a)
# \rowcolor{lightgray}
# or \rowcolors{1}{}{lightgray}
if(is.rc)
atr <- c(atr, "\\rowcolor{" %,% clr %,% "}")
if (is.tot)
atr <- c(atr, ftr)
atr <- c(atr, "\\bottomrule\n")
add.to.row <- list()
add.to.row[["command"]] <- atr
add.to.row[["pos"]] <- list()
add.to.row[["pos"]][1:2] <- -1
add.to.row[["pos"]][3:(3 + dpth2*2)] <- 0
if (is.rc)
add.to.row[["pos"]][3 + dpth2*2 + 1] <- clr.rnm-1
if (is.tot)
add.to.row[["pos"]][3 + dpth2*2 + is.rc + 1] <- nr - 1
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 1] <- nr
}
if (spcl) {
if (spcl.val == "jrss-b") {
btbl <- "\\begin{table}\n"
cpn <- "\\caption{" %,% caption %,% "}\n"
if (is.lbl)
cpn <- "\\caption{\\label{" %,% lbl.val %,% "}" %,%
caption %,% "}\n"
ctr <- NULL
fb <- "\\fbox{%\n"
btblr <- "l" %,% paste(rep("r", nc2 - 1), collapse = "")
if(is.algn) btblr <- paste(algn.val, collapse="")
btblr <- "\\begin{tabular}{" %,% btblr %,% "}\n"
etblr <- "\\end{tabular}}\n"
etbl <- "\\end{table}\n"
tp <- btbl %,% cpn %,% ctr %,% fb %,% btblr %,% "\\toprule\n"
atr <- c(tp, h1, h1a, h2, h2a)
if(is.rc)
atr <- c(atr, "\\rowcolor{" %,% clr %,% "}")
if (is.tot)
atr <- c(atr, ftr)
atr <- c(atr, "\\bottomrule\n", etblr, etbl)
add.to.row <- list()
add.to.row[["command"]] <- atr
add.to.row[["pos"]] <- list()
add.to.row[["pos"]][1:(3 + dpth2*2)] <- 0
if (is.rc)
add.to.row[["pos"]][3 + dpth2*2 + 1] <- clr.rnm - 1
if (is.tot)
add.to.row[["pos"]][3 + dpth2*2 + is.rc + 1] <- nr - 1
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 1] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 2] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 3] <- nr
pr.xtbl.call$only.contents <- TRUE
}
if (spcl.val == "aos") {
btbl <- "\\begin{table}\n"
cpn <- "\\caption{" %,% caption %,% "}\n"
if (is.lbl)
cpn <- "\\caption{\\label{" %,% lbl.val %,% "}" %,%
caption %,% "}\n"
ctr <- NULL
btblr <- "l" %,% paste(rep("r", nc2 - 1), collapse = "")
if(is.algn) btblr <- paste(algn.val, collapse="")
btblr <- "\\begin{tabular}{" %,% btblr %,% "}\n"
etblr <- "\\end{tabular}\n"
etbl <- "\\end{table}\n"
tp <- btbl %,% cpn %,% ctr %,% btblr %,% "\\toprule\n"
atr <- c(tp, h1, h1a, h2, h2a)
if(is.rc)
atr <- c(atr, "\\rowcolor{" %,% clr %,% "}")
if (is.tot)
atr <- c(atr, ftr)
atr <- c(atr, "\\bottomrule\n", etblr, etbl)
add.to.row <- list()
add.to.row[["command"]] <- atr
add.to.row[["pos"]] <- list()
add.to.row[["pos"]][1:(3 + dpth2*2)] <- 0
if (is.rc)
add.to.row[["pos"]][3 + dpth2*2 + 1] <- clr.rnm - 1
if (is.tot)
add.to.row[["pos"]][3 + dpth2*2 + is.rc + 1] <- nr - 1
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 1] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 2] <- nr
add.to.row[["pos"]][3 + dpth2*2 + is.rc + is.tot + 3] <- nr
pr.xtbl.call$only.contents <- TRUE
}
}
if (dbg)
save(list = "add.to.row", file = "debug.rda")
pr.xtbl.call$add.to.row <- as.name("add.to.row")
if(is.rc)
{
cat(sprintf("%s\n", "%% Don't forget to \\usepackage{xcolor} and include 'table' in your documentclass options, "))
cat(sprintf("%s\n", "%% e.g. \\documentclass[table]{beamer}, and remember to define the color, " %,% clr %,% ", in your preamble"))
}
xtbl <- eval(xtbl.call)
eval(pr.xtbl.call)
}
"as.data.frame.TableMonster" <-
function(x, row.names = NULL, optional = FALSE, ...)
{
attr(x, "headings") <- NULL
attr(x, "ctypes") <- NULL
attr(x, "digits") <- NULL
attr(x, "caption") <- NULL
attr(x, "totals") <- NULL
class(x) <- "data.frame"
x
}
"tmHeadings" <-
function(x)
{
attr(x, "headings")
}
"tmCtypes" <-
function(x)
{
attr(x, "ctypes")
}
"tmDigits" <-
function(x)
{
attr(x, "digits")
}
"tmTotals" <-
function(x)
{
attr(x, "totals")
}
"tmCaption" <-
function(x)
{
attr(x, "caption")
}
"tmHeadings<-" <-
function(x, value)
{
attr(x, "headings") <- value
x
}
"tmCtypes<-" <-
function(x, value)
{
attr(x, "ctypes") <- value
x
}
"tmDigits<-" <-
function(x, value)
{
attr(x, "digits") <- value
x
}
"tmTotals<-" <-
function(x, value)
{
attr(x, "totals") <- value
x
}
"tmCaption<-" <-
function(x, value)
{
attr(x, "caption") <- value
x
}
.onAttach <- function(libname, pkgname)
{
options(stringsAsFactors=FALSE)
ver <- read.dcf(file=system.file("DESCRIPTION", package=pkgname),
fields="Version")
msg <- paste(pkgname, ver) %,% "\n\n" %,%
"Type ?print.TableMonster"
packageStartupMessage(msg)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotWeek.R
\name{plotWeek}
\alias{plotWeek}
\title{plotWeek plots your net score each week in the past 4 weeks}
\usage{
plotWeek(sq_summary)
}
\arguments{
\item{sq_summary}{the summarised dataset created using the calcSummary() function}
}
\description{
This function looks at variation in your net score for each metric over the last 4 weeks
}
\examples{
plotWeek(sq_summary = sq_data)
}
\keyword{quantified}
\keyword{self}
|
/man/plotWeek.Rd
|
no_license
|
maczokni/selfquant
|
R
| false | true | 512 |
rd
|
#' d.to.r
#'
#' Calculates r from d and then translates r to r2 to calculate
#' the non-central confidence interval for r2 using the F distribution.
#'
#' @param d effect size statistic
#' @param n1 sample size group one
#' @param n2 sample size group two
#' @param a significance level
#' @keywords effect size, correlation
#' @export
#' @examples
#' d.to.r(d = .5, n1 = 50, n2 = 50, a = .05)
d.to.r <- function (d, n1, n2, a = .05) {
  # This function displays the transformation from r to r2 to calculate
# the non-central confidence interval for r2.
#
# Args:
# d : effect size statistic
# n1 : sample size group one
# n2 : sample size group two
# a : significance level
#
# Returns:
# List of r, r2, and sample size statistics
library(MBESS)
correct = (n1 + n2)^2 / (n1*n2)
n = n1 + n2
r <- d / sqrt(d^2 + correct)
rsq <- (r) ^ 2
se <- sqrt(4 * rsq * ((1 - rsq) ^ 2) * ((n - 3) ^ 2) / ((n ^ 2 - 1) * (3 + n)))
t <- r / sqrt((1 - rsq) / (n - 2))
Fvalue <- t ^ 2
dfm <- 1
dfe <- n - 2
#ncpboth <- conf.limits.ncf(Fvalue, df.1 = dfm, df.2 = dfe, conf.level = (1 - a))
#rsqlow <- ncpboth$Lower.Limit / (ncpboth$Lower.Limit + dfm + dfe + 1)
#rsqhigh <- ncpboth$Upper.Limit / (ncpboth$Upper.Limit + dfm + dfe + 1)
limits <- ci.R2(R2 = rsq, df.1 = dfm, df.2 = dfe, conf.level = (1-a))
ciforr <- ci.R(R = abs(r), df.1 = dfm, df.2 = dfe, conf.level = (1 - a))
p <- pf(Fvalue, dfm, dfe, lower.tail = F)
#deal with negative r / d values
if (r < 0) {
rlow = 0 - ciforr$Lower.Conf.Limit.R
rhigh = 0 - ciforr$Upper.Conf.Limit.R
} else {
rlow = ciforr$Lower.Conf.Limit.R
rhigh = ciforr$Upper.Conf.Limit.R
}
output = list("r" = r, #r stats
"rlow" = rlow,
"rhigh" = rhigh,
"R2" = rsq, #R squared stats
"R2low" = limits$Lower.Conf.Limit.R2,
"R2high" = limits$Upper.Conf.Limit.R2,
"se" = se,
"n" = n, #sample stats
"dfm" = 1, #sig stats
"dfe" = (n - 2),
"t" = t,
"F" = Fvalue,
"p" = p)
return(output)
}
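# Illustrative sketch, not from the package source: hand computation of the
# point estimate for the documented example (d = .5, n1 = n2 = 50), showing
# what the conversion above does before the confidence limits are added.
# correct = (50 + 50)^2 / (50 * 50) = 4, so r = .5 / sqrt(.25 + 4) ~= 0.243
d_demo <- 0.5; n1_demo <- 50; n2_demo <- 50
d_demo / sqrt(d_demo^2 + (n1_demo + n2_demo)^2 / (n1_demo * n2_demo))  # ~0.2425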
|
/R/d.to.r.R
|
no_license
|
adamcohen3/MOTE
|
R
| false | false | 2,205 |
r
|
#' R interface to the National Hydrography Dataset
#' @name nhdR-package
#' @aliases nhdR
#' @docType package
#' @importFrom httr GET write_disk progress
#' @importFrom ggplot2 map_data
#' @importFrom sf st_drivers
#' @title R interface to the National Hydrography Dataset
#' @author \email{stachel2@msu.edu}
NULL
#' gull
#'
#' @title List of simple features lake polygons and flowlines within a buffer
#' around Gull Lake, Michigan.
#' @description Data from NHD Plus
#' @docType data
#' @keywords datasets
#' @name gull
NULL
#' vpu_shp
#'
#' @title Low-res simple features data frame of the NHDPlus vector processing
#' units
#'
#' @docType data
#' @keywords datasets
#' @name vpu_shp
NULL
#' gull_flow
#'
#' @title Flowlines within a buffer around Gull Lake, Michigan, including flow information.
#' @description Data from NHD Plus
#' @docType data
#' @keywords datasets
#' @name gull_flow
NULL
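# Illustrative sketch, not from the package source: the data sets documented
# above can be loaded in the usual way once the package is installed.
# library(nhdR)
# data(gull)       # lake polygons and flowlines around Gull Lake
# data(gull_flow)  # flowlines with flow information
# data(vpu_shp)    # NHDPlus vector processing units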
|
/R/nhdR-package.R
|
no_license
|
bbreaker/nhdR
|
R
| false | false | 899 |
r
|
library(readr)
library(psych)
library(readxl)   # read_excel() comes from readxl, not readr
#read in the dataset into the object called "complete"
complete <- read_excel("train_semi_clean.xlsx")
#first, I need to factorize some predictors since they are not on a continuous scale. I basically started by reading in
#each predictor name into a vector called "names"
names <- c("Biodata_01","Biodata_02","Biodata_03","Biodata_04","Biodata_05","Biodata_06","Biodata_07","Biodata_08","Biodata_09",
"Biodata_10","Biodata_11","Biodata_12","Biodata_13","Biodata_14","Biodata_15","Biodata_16","Biodata_17","Biodata_18",
"Biodata_19","Biodata_20",
"Scenario1_1","Scenario1_2","Scenario1_3","Scenario1_4","Scenario1_5","Scenario1_6","Scenario1_7","Scenario1_8",
"Scenario2_1","Scenario2_2","Scenario2_3","Scenario2_4","Scenario2_5","Scenario2_6","Scenario2_7","Scenario2_8",
"SJ_Most_1","SJ_Least_1",
"SJ_Most_2","SJ_Least_2",
"SJ_Most_3","SJ_Least_3",
"SJ_Most_4","SJ_Least_4",
"SJ_Most_5","SJ_Least_5",
"SJ_Most_6","SJ_Least_6",
"SJ_Most_7","SJ_Least_7",
"SJ_Most_8","SJ_Least_8",
"SJ_Most_9","SJ_Least_9"
)
#next, I use the lapply function to factorize multiple predictor variables. For the first argument,
# I take the "names" vector and denote that it grabs from the "complete" object. For the second argument, I denote what
# I want the lapply function to do; in this case, to factor the variables that match the variable names from the "names" vector
complete[,names] <- lapply(complete[,names] , factor)
#let's start modeling
#first we need to split data between train and test. The "complete" object is entirely the training dataset provided to us,
#however, I decided to further split the training dataset into train and test because a significant portion of the cases within
#the training dataset were missing criterion variables. Therefore, I decided to divide the "complete" dataset between cases that are
#complete and cases that are missing criterion variables. I used the criterion variable "High_Performer" as a way to easily split
#the data, but other criterion variables will also work.
#cases with a non-missing criterion value went into the "test" object.
test <- subset(complete, High_Performer=="1" | High_Performer=="0")
#cases with a missing criterion value went into the "train" object.
train <- complete[is.na(complete$High_Performer),]
#let's look at the summaries of each object
summary(test)
summary(train)
#next, let's specify the predictor variables. Note that this step is not necessary to run the model and there are more "efficient"
# ways of indicating the predictor variables; however, we specify each predictor variable name here in adherence to good data hygiene
xVars <- c("Biodata_01","Biodata_02","Biodata_03","Biodata_04","Biodata_05","Biodata_06","Biodata_07","Biodata_08","Biodata_09",
"Biodata_10","Biodata_11","Biodata_12","Biodata_13","Biodata_14","Biodata_15","Biodata_16","Biodata_17","Biodata_18",
"Biodata_19","Biodata_20",
"PScale01_Q1","PScale01_Q2","PScale01_Q3","PScale01_Q4",
"PScale02_Q1","PScale02_Q2","PScale02_Q3","PScale02_Q4",
"PScale03_Q1","PScale03_Q2","PScale03_Q3","PScale03_Q4",
"PScale04_Q1","PScale04_Q2","PScale04_Q3","PScale04_Q4",
"PScale05_Q1","PScale05_Q2","PScale05_Q3","PScale05_Q4",
"PScale06_Q1","PScale06_Q2","PScale06_Q3","PScale06_Q4","PScale06_Q5","PScale06_Q6",
"PScale07_Q1","PScale07_Q2","PScale07_Q3","PScale07_Q4",
"PScale08_Q1","PScale08_Q2","PScale08_Q3","PScale08_Q4",
"PScale09_Q1","PScale09_Q2","PScale09_Q3","PScale09_Q4",
"PScale10_Q1","PScale10_Q2","PScale10_Q3","PScale10_Q4",
"PScale11_Q1","PScale11_Q2","PScale11_Q3","PScale11_Q4",
"PScale12_Q1","PScale12_Q2","PScale12_Q3","PScale12_Q4",
"PScale13_Q1","PScale13_Q2","PScale13_Q3","PScale13_Q4",
"Scenario1_1","Scenario1_2","Scenario1_3","Scenario1_4","Scenario1_5","Scenario1_6","Scenario1_7","Scenario1_8",
"Scenario2_1","Scenario2_2","Scenario2_3","Scenario2_4","Scenario2_5","Scenario2_6","Scenario2_7","Scenario2_8",
"Scenario1_Time",
"Scenario2_Time",
"SJ_Most_1","SJ_Least_1","SJ_Time_1",
"SJ_Most_2","SJ_Least_2","SJ_Time_2",
"SJ_Most_3","SJ_Least_3","SJ_Time_3",
"SJ_Most_4","SJ_Least_4","SJ_Time_4",
"SJ_Most_5","SJ_Least_5","SJ_Time_5",
"SJ_Most_6","SJ_Least_6","SJ_Time_6",
"SJ_Most_7","SJ_Least_7","SJ_Time_7",
"SJ_Most_8","SJ_Least_8","SJ_Time_8",
"SJ_Most_9","SJ_Least_9","SJ_Time_9"
)
#specify the target variable. right now it is set to the "Retained" variable, but you can also switch to the "High_Performer" variable
targetVar<-c("Retained")
#now we subset using the vector called "xVars".
x <- train[,xVars]
#let's turn this into a data frame
train<-as.data.frame(train)
#let's subset again using the vector "targetVar" that contains the variable name "Retained". We also tell R to factorize this variable.
y <- as.factor(train[, targetVar])
#getting data ready for caret... Do not modify the cleanNames() helper or the recoding steps below.
cleanNames <- function(x){
feature.names=names(x)
for (f in feature.names) {
if (class(x[[f]])=="factor") {
levels <- unique(c(x[[f]]))
x[[f]] <- factor(x[[f]],
labels=make.names(levels))
}
}
return(x)}
xOld <-x
x <- cleanNames(x)
str(xOld)
str(x)
y = make.names(y)
test <- cleanNames(test)
str(test)
# str(Prediction)  # Prediction is created further down; inspect it after the predict() call below
levels(test$Retained) <- c("X0","X1")
str(test)
Actual <-test$Retained
Actual<-as.factor(Actual)
levels(Actual) <- c("X0","X1")
#load in the caret library. We won't use the caret package just yet.
library(caret)
#load in the random forest package
library(randomForest)
yRf <-as.factor(y)
#let's fit the model. Specify how many trees you want; you can experiment with this.
fit <- randomForest(x = x, y = yRf
, data=train,
importance=TRUE,
                    # ntree = 1 is only a quick test run; increase it
                    # (e.g. ntree = 2000) for a real fit
                    ntree=1)
#set your fit
fit
#plot the variable importance as measured by the RF (Random Forest)
varImpPlot(fit)
#let's test our model
Prediction <- predict(fit, test)
confusionMatrix(reference = Actual, data = Prediction)
#alright, now we can use the caret package to tune the model. First we need a training control for cross-validation.
trctrl <- trainControl(method = "repeatedcv"
, number = 10, repeats = 3
, classProbs = TRUE
, summaryFunction = twoClassSummary
)
set.seed(3875)
# we can adjust the parameters tuneLength and tuneGrid; you can reference the site below for information regarding those parameters
# https://bookdown.org/mpfoley1973/data-sci/classification-tree.html
# ?train   # see the caret help page for details on the train() arguments
fit2<- train(x = x
            , y = y
            , method = "rf",
            # method = "rf" tunes mtry (cp belongs to rpart, not random forests),
            # so let caret build the mtry grid from tuneLength
            tuneLength=20,
            metric="ROC",
            trControl = trctrl)
fit2
plot(fit2)
Prediction2 <- predict(fit2, test, type = "raw")
confusionMatrix(reference = Actual, data = Prediction2)
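# Illustrative sketch, not from the original script: a few follow-up checks
# on the tuned model. Assumes the fit2, test, and Actual objects created
# above, and that the pROC package is installed.
fit2$bestTune                                  # mtry value chosen by cross-validation
prob2 <- predict(fit2, test, type = "prob")    # class probabilities (columns X0 and X1)
library(pROC)
roc_obj <- roc(response = Actual, predictor = prob2$X1)
auc(roc_obj)                                   # area under the ROC curve on the hold-out set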
|
/RFModelingCode (1).R
|
no_license
|
dennistran9/Practice
|
R
| false | false | 7,386 |
r
|
# let's plot the ccf function for intact / surrogate
plotfl = paste('figures/figure-4.pdf',sep='')
pdf(file=plotfl,height=3.5,width=10)
par(mfrow=c(1,3),mar=c(4,4,2,2))
intact = aggregate(ccf~lag,
data=ccfres[ccfres$cond=='obs',],
function(x) { c(m=mean(x),se=sd(x)/sqrt(35))})
plot(intact$lag*.033,intact$ccf[,1],type='l', # 33ms sample rate
xlab='Relative lag (s)',main='Dyad cross-correlation function',
ylab='Correlation coefficient (r)',
ylim=c(-.03,.08),lwd=3,col='green')
points(intact$lag*.033,intact$ccf[,1]+intact$ccf[,2],type='l',col='green')
points(intact$lag*.033,intact$ccf[,1]-intact$ccf[,2],type='l',col='green')
surrogate = aggregate(ccf~lag,
data=ccfres[ccfres$cond=='vrt',],
function(x) { c(m=mean(x),se=sd(x)/sqrt(35))})
points(surrogate$lag*.033,surrogate$ccf[,1],type='l',col='red',lwd=3)
points(surrogate$lag*.033,surrogate$ccf[,1]+surrogate$ccf[,2],type='l',col='red')
points(surrogate$lag*.033,surrogate$ccf[,1]-surrogate$ccf[,2],type='l',col='red')
lagLocs = wccres[wccres$cond=='obs',]
hist(lagLocs$max.loc*.033,5,main='Maximum lag location distribution',
xlab='Maximum lag location (s)',xlim=c(-6,6),
ylab='Number of dyads')
plot(intactTriad$lag,intactTriad$r[,1],type='l',
xlab='Lag (10s window)',main='Triad cross-correlation function',
ylab='Correlation coefficient (r)',
ylim=c(-.04,.2),lwd=3,col='green')
points(intactTriad$lag,intactTriad$r[,1]+intactTriad$r[,2],type='l',col='green')
points(intactTriad$lag,intactTriad$r[,1]-intactTriad$r[,2],type='l',col='green')
points(surrogateTriad$lag,surrogateTriad$r[,1],type='l',col='red',lwd=3)
points(surrogateTriad$lag,surrogateTriad$r[,1]+surrogateTriad$r[,2],type='l',col='red')
points(surrogateTriad$lag,surrogateTriad$r[,1]-surrogateTriad$r[,2],type='l',col='red')
dev.off()
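# Illustrative sketch, not from the original script: ccfres (built elsewhere)
# is assumed to hold one correlation per dyad, lag, and condition. For a
# single dyad with movement series x and y sampled every ~33 ms, the per-lag
# values could be computed like this:
x <- rnorm(3000); y <- rnorm(3000)            # stand-in movement series
cc <- ccf(x, y, lag.max = 180, plot = FALSE)  # +/- 180 samples ~ +/- 6 s
dyad_ccf <- data.frame(lag = cc$lag[, 1, 1], ccf = cc$acf[, 1, 1])
head(dyad_ccf)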
|
/plotCCF.R
|
no_license
|
racdale/triadic-bodily-synchrony
|
R
| false | false | 1,891 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcmc2plot.R
\name{mcmc2densitree}
\alias{mcmc2densitree}
\title{Plot a densi-tree from an MCMC sample}
\usage{
mcmc2densitree(
tree,
mcmc,
time.name,
thin,
col = "blue",
alpha = 1,
y.offset = 0,
pfract = 0.1,
plot.labels = TRUE,
axis = TRUE,
add = FALSE,
tip.ages = NULL
)
}
\arguments{
\item{tree}{an object of class phylo.}
\item{mcmc}{data frame with an MCMC sample from MCMCTree or a BPP A00
analysis.}
\item{time.name}{character vector of length one.}
\item{thin}{numeric, the fraction of MCMC samples to keep.}
\item{col}{character, the color for branches.}
\item{alpha}{numeric, between 0 and 1, the branch color transparency.}
\item{y.offset}{numeric, the vertical offset for plotting the tree.}
\item{pfract}{numeric, how much of the plotting space to used for plotting
the tip labels. If \code{pfrac = 1}, the same amount of space is used for
the tree and the labels. Use large values if your tip labels are long.}
\item{plot.labels}{logical, whether to plot the tip labels. Ignored if
\code{add = TRUE}.}
\item{axis}{logical, whether to plot the x axis.}
\item{add}{logical, if TRUE add the trees to an existing plot, otherwise
create a new plot.}
\item{tip.ages}{numeric, the ages of the tips, with the most recent tip
having age zero, and the oldest tip having the largest age. If \code{NULL},
tips are assumed to have all age zero.}
}
\description{
Plot a densi-tree from an MCMC sample from a BPP or MCMCTree analysis
}
\details{
The function will reduce the MCMC sample to \code{dim(mcmc)[1] *
thin} observations. Then the node ages in each observation are used to plot
each tree in the sample. For a tree with \code{s} species, the y
coordinates of the tips are given by \code{0:(s - 1) + y.offset}.
The \code{tree} must be rooted, strictly bifurcating, and be the same tree
used to generate the BPP (A00) or MCMCTree MCMC samples.
}
\examples{
data(microcebus)
mcmc2densitree(microcebus$tree, microcebus$mcmc, time.name="tau_", thin=0.05,
alpha=0.01, col="blue")
title(xlab="Distance (substitutions per site)")
data(hominids)
# Calibrate the hominid phylogeny with a uniform fossil calibration of
# between 6.5 to 10 Ma for the human-chimp divergence, and plot the
# calibrated sample
calmsc <- msc2time.t(mcmc=hominids$mcmc, node="7humanchimp", calf=runif,
min=6.5, max=10)
mcmc2densitree(hominids$tree, calmsc, "t_", thin=0.05, alpha=0.01)
title(xlab="Divergence time (Ma)")
}
\author{
Mario dos Reis
}
|
/man/mcmc2densitree.Rd
|
permissive
|
dosreislab/bppr
|
R
| false | true | 2,553 |
rd
|
## function for conditional likelihoods at nodes
## written by Liam J. Revell 2015, 2016, 2019, 2020, 2021
## with input from (& structural similarity to) function ace by E. Paradis et al. 2013
fitMk<-function(tree,x,model="SYM",fixedQ=NULL,...){
if(hasArg(output.liks)) output.liks<-list(...)$output.liks
else output.liks<-FALSE
if(hasArg(q.init)) q.init<-list(...)$q.init
else q.init<-length(unique(x))/sum(tree$edge.length)
if(hasArg(opt.method)) opt.method<-list(...)$opt.method
else opt.method<-"nlminb"
if(hasArg(min.q)) min.q<-list(...)$min.q
else min.q<-1e-12
if(hasArg(max.q)) max.q<-list(...)$max.q
else max.q<-max(nodeHeights(tree))*100
if(hasArg(logscale)) logscale<-list(...)$logscale
else logscale<-FALSE
N<-Ntip(tree)
M<-tree$Nnode
if(is.matrix(x)){
x<-x[tree$tip.label,]
m<-ncol(x)
states<-colnames(x)
} else {
x<-to.matrix(x,sort(unique(x)))
x<-x[tree$tip.label,]
m<-ncol(x)
states<-colnames(x)
}
if(hasArg(pi)) pi<-list(...)$pi
else pi<-"equal"
if(is.numeric(pi)) root.prior<-"given"
if(pi[1]=="equal"){
pi<-setNames(rep(1/m,m),states)
root.prior<-"flat"
} else if(pi[1]=="estimated"){
pi<-if(!is.null(fixedQ)) statdist(fixedQ) else
statdist(summary(fitMk(tree,x,model),quiet=TRUE)$Q)
cat(paste("Using pi estimated from the stationary",
"distribution of Q assuming a flat prior.\npi =\n"))
print(round(pi,6))
cat("\n")
root.prior<-"stationary"
} else if(pi[1]=="fitzjohn") root.prior<-"nuisance"
if(is.numeric(pi)){
pi<-pi/sum(pi)
if(is.null(names(pi))) pi<-setNames(pi,states)
pi<-pi[states]
}
if(is.null(fixedQ)){
if(is.character(model)){
rate<-matrix(NA,m,m)
if(model=="ER"){
k<-rate[]<-1
diag(rate)<-NA
} else if(model=="ARD"){
k<-m*(m-1)
rate[col(rate)!=row(rate)]<-1:k
} else if(model=="SYM"){
k<-m*(m-1)/2
ii<-col(rate)<row(rate)
rate[ii]<-1:k
rate<-t(rate)
rate[ii]<-1:k
}
} else {
if(ncol(model)!=nrow(model))
stop("model is not a square matrix")
if(ncol(model)!=ncol(x))
stop("model does not have the right number of columns")
rate<-model
k<-max(rate)
}
Q<-matrix(0,m,m)
} else {
rate<-matrix(NA,m,m)
k<-m*(m-1)
rate[col(rate)!=row(rate)]<-1:k
Q<-fixedQ
}
index.matrix<-rate
tmp<-cbind(1:m,1:m)
rate[tmp]<-0
rate[rate==0]<-k+1
liks<-rbind(x,matrix(0,M,m,dimnames=list(1:M+N,states)))
pw<-reorder(tree,"pruningwise")
lik<-function(Q,output.liks=FALSE,pi,...){
if(hasArg(output.pi)) output.pi<-list(...)$output.pi
else output.pi<-FALSE
if(is.Qmatrix(Q)) Q<-unclass(Q)
if(any(is.nan(Q))||any(is.infinite(Q))) return(1e50)
comp<-vector(length=N+M,mode="numeric")
parents<-unique(pw$edge[,1])
root<-min(parents)
for(i in 1:length(parents)){
anc<-parents[i]
ii<-which(pw$edge[,1]==parents[i])
desc<-pw$edge[ii,2]
el<-pw$edge.length[ii]
v<-vector(length=length(desc),mode="list")
for(j in 1:length(v)){
v[[j]]<-EXPM(Q*el[j])%*%liks[desc[j],]
}
if(anc==root){
if(is.numeric(pi)) vv<-Reduce('*',v)[,1]*pi
else if(pi[1]=="fitzjohn"){
D<-Reduce('*',v)[,1]
pi<-D/sum(D)
vv<-D*D/sum(D)
}
} else vv<-Reduce('*',v)[,1]
## vv<-if(anc==root) Reduce('*',v)[,1]*pi else Reduce('*',v)[,1]
comp[anc]<-sum(vv)
liks[anc,]<-vv/comp[anc]
}
if(output.liks) return(liks[1:M+N,,drop=FALSE])
else if(output.pi) return(pi)
else {
logL<--sum(log(comp[1:M+N]))
if(is.na(logL)) logL<-Inf
return(logL)
}
}
if(is.null(fixedQ)){
if(length(q.init)!=k) q.init<-rep(q.init[1],k)
q.init<-if(logscale) log(q.init) else q.init
if(opt.method=="optim"){
fit<-if(logscale)
optim(q.init,function(p) lik(makeQ(m,exp(p),index.matrix),pi=pi),
method="L-BFGS-B",lower=rep(log(min.q),k),upper=rep(log(max.q),k)) else
optim(q.init,function(p) lik(makeQ(m,p,index.matrix),pi=pi),
method="L-BFGS-B",lower=rep(min.q,k),upper=rep(max.q,k))
} else if(opt.method=="none"){
fit<-list(objective=lik(makeQ(m,q.init,index.matrix),pi=pi),
par=q.init)
} else {
fit<-if(logscale)
nlminb(q.init,function(p) lik(makeQ(m,exp(p),index.matrix),pi=pi),
lower=rep(log(min.q),k),upper=rep(log(max.q),k))
else nlminb(q.init,function(p) lik(makeQ(m,p,index.matrix),
pi=pi),lower=rep(0,k),upper=rep(max.q,k))
}
if(logscale) fit$par<-exp(fit$par)
if(pi[1]=="fitzjohn") pi<-setNames(
lik(makeQ(m,fit$par,index.matrix),FALSE,pi=pi,output.pi=TRUE),
states)
obj<-list(logLik=
if(opt.method=="optim") -fit$value else -fit$objective,
rates=fit$par,
index.matrix=index.matrix,
states=states,
pi=pi,
method=opt.method,
root.prior=root.prior)
if(output.liks) obj$lik.anc<-lik(makeQ(m,obj$rates,index.matrix),TRUE,
pi=pi)
} else {
fit<-lik(Q,pi=pi)
if(pi[1]=="fitzjohn") pi<-setNames(lik(Q,FALSE,pi=pi,output.pi=TRUE),states)
obj<-list(logLik=-fit,
rates=Q[sapply(1:k,function(x,y) which(x==y),index.matrix)],
index.matrix=index.matrix,
states=states,
pi=pi,
root.prior=root.prior)
if(output.liks) obj$lik.anc<-lik(makeQ(m,obj$rates,index.matrix),TRUE,
pi=pi)
}
lik.f<-function(q) -lik(q,output.liks=FALSE,
pi=if(root.prior=="nuisance") "fitzjohn" else pi)
obj$lik<-lik.f
class(obj)<-"fitMk"
return(obj)
}
makeQ<-function(m,q,index.matrix){
Q<-matrix(0,m,m)
Q[]<-c(0,q)[index.matrix+1]
diag(Q)<-0
diag(Q)<--rowSums(Q)
Q
}
## print method for objects of class "fitMk"
print.fitMk<-function(x,digits=6,...){
cat("Object of class \"fitMk\".\n\n")
cat("Fitted (or set) value of Q:\n")
Q<-matrix(NA,length(x$states),length(x$states))
Q[]<-c(0,x$rates)[x$index.matrix+1]
diag(Q)<-0
diag(Q)<--rowSums(Q)
colnames(Q)<-rownames(Q)<-x$states
print(round(Q,digits))
cat("\nFitted (or set) value of pi:\n")
print(round(x$pi,digits))
cat(paste("due to treating the root prior as (a) ",x$root.prior,".\n",
sep=""))
cat(paste("\nLog-likelihood:",round(x$logLik,digits),"\n"))
cat(paste("\nOptimization method used was \"",x$method,"\"\n\n",
sep=""))
}
## summary method for objects of class "fitMk"
summary.fitMk<-function(object,...){
if(hasArg(digits)) digits<-list(...)$digits
else digits<-6
if(hasArg(quiet)) quiet<-list(...)$quiet
else quiet<-FALSE
if(!quiet) cat("Fitted (or set) value of Q:\n")
Q<-matrix(NA,length(object$states),length(object$states))
Q[]<-c(0,object$rates)[object$index.matrix+1]
diag(Q)<-0
diag(Q)<--rowSums(Q)
colnames(Q)<-rownames(Q)<-object$states
if(!quiet) print(round(Q,digits))
if(!quiet) cat(paste("\nLog-likelihood:",round(object$logLik,digits),"\n\n"))
invisible(list(Q=Q,logLik=object$logLik))
}
## logLik method for objects of class "fitMk"
logLik.fitMk<-function(object,...){
lik<-object$logLik
attr(lik,"df")<-length(object$rates)
lik
}
## S3 plot method for objects of class "fitMk"
plot.fitMk<-function(x,...){
Q<-as.Qmatrix(x)
plot(Q,...)
}
## S3 plot method for "gfit" object from geiger::fitDiscrete
plot.gfit<-function(x,...){
if("mkn"%in%class(x$lik)==FALSE){
stop("Sorry. No plot method presently available for objects of this type.")
object<-NULL
} else {
chk<-.check.pkg("geiger")
if(chk) object<-plot(as.Qmatrix(x),...)
else {
obj<-list()
QQ<-.Qmatrix.from.gfit(x)
obj$states<-colnames(QQ)
m<-length(obj$states)
obj$index.matrix<-matrix(NA,m,m)
k<-m*(m-1)
obj$index.matrix[col(obj$index.matrix)!=row(obj$index.matrix)]<-1:k
obj$rates<-QQ[sapply(1:k,function(x,y) which(x==y),obj$index.matrix)]
class(obj)<-"fitMk"
object<-plot(obj,...)
}
}
invisible(object)
}
MIN<-function(x,...) min(x[is.finite(x)],...)
MAX<-function(x,...) max(x[is.finite(x)],...)
RANGE<-function(x,...) range(x[is.finite(x)],...)
## S3 method for "Qmatrix" object class
plot.Qmatrix<-function(x,...){
Q<-unclass(x)
if(hasArg(signif)) signif<-list(...)$signif
else signif<-3
if(hasArg(main)) main<-list(...)$main
else main<-NULL
if(hasArg(cex.main)) cex.main<-list(...)$cex.main
else cex.main<-1.2
if(hasArg(cex.traits)) cex.traits<-list(...)$cex.traits
else cex.traits<-1
if(hasArg(cex.rates)) cex.rates<-list(...)$cex.rates
else cex.rates<-0.6
if(hasArg(show.zeros)) show.zeros<-list(...)$show.zeros
else show.zeros<-TRUE
if(hasArg(tol)) tol<-list(...)$tol
else tol<-1e-6
if(hasArg(mar)) mar<-list(...)$mar
else mar<-c(1.1,1.1,3.1,1.1)
if(hasArg(lwd)) lwd<-list(...)$lwd
else lwd<-1
if(hasArg(umbral)) umbral<-list(...)$umbral
else umbral<-FALSE
if(hasArg(ncat)) ncat<-list(...)$ncat
else ncat<-NULL
if(hasArg(spacer)) spacer<-list(...)$spacer
else spacer<-0.1
if(hasArg(color)) color<-list(...)$color
else color<-FALSE
plot.new()
par(mar=mar)
xylim<-c(-1.2,1.2)
if(!color) plot.window(xlim=xylim,ylim=xylim,asp=1) else
plot.window(xlim=c(-1.4,xylim[2]-0.2),ylim=xylim,asp=1)
if(!is.null(main)) title(main=main,cex.main=cex.main)
nstates<-nrow(Q)
if(color){
col_pal<-function(qq) if(is.na(qq)) NA else
if(is.infinite(qq)) make.transparent("grey",0.4) else
rgb(colorRamp(c("blue","purple","red"))(qq),maxColorValue=255)
qq<-Q
diag(qq)<-NA
qq<-log(qq)
qq<-(qq-MIN(qq,na.rm=TRUE))/diff(RANGE(qq,na.rm=TRUE))
cols<-apply(qq,c(1,2),col_pal)
} else cols<-matrix(par("fg"),nstates,nstates)
if(!umbral||is.null(ncat)){
step<-360/nstates
angles<-seq(0,360-step,by=step)/180*pi
if(nstates==2) angles<-angles+pi/2
v.x<-cos(angles)
v.y<-sin(angles)
} else {
v.x<-v.y<-vector()
for(i in 1:length(ncat)){
Q<-Q[sort(rownames(Q)),sort(colnames(Q))]
xp<--1+2*(i-1)/(length(ncat)-1)
v.x<-c(v.x,rep(xp,ncat[i]))
yp<-seq(1,-1,length.out=max(ncat))[1:ncat[i]]
v.y<-c(v.y,yp)
}
}
for(i in 1:nstates) for(j in 1:nstates)
if(if(!isSymmetric(Q)) i!=j else i>j){
dx<-v.x[j]-v.x[i]
dy<-v.y[j]-v.y[i]
slope<-abs(dy/dx)
shift.x<-0.02*sin(atan(dy/dx))*sign(j-i)*if(dy/dx>0) 1 else -1
shift.y<-0.02*cos(atan(dy/dx))*sign(j-i)*if(dy/dx>0) -1 else 1
s<-c(v.x[i]+spacer*cos(atan(slope))*sign(dx)+
if(isSymmetric(Q)) 0 else shift.x,
v.y[i]+spacer*sin(atan(slope))*sign(dy)+
if(isSymmetric(Q)) 0 else shift.y)
e<-c(v.x[j]+spacer*cos(atan(slope))*sign(-dx)+
if(isSymmetric(Q)) 0 else shift.x,
v.y[j]+spacer*sin(atan(slope))*sign(-dy)+
if(isSymmetric(Q)) 0 else shift.y)
if(show.zeros||Q[i,j]>tol){
if(abs(diff(c(i,j)))==1||abs(diff(c(i,j)))==(nstates-1))
text(mean(c(s[1],e[1]))+1.5*shift.x,
mean(c(s[2],e[2]))+1.5*shift.y,
round(Q[i,j],signif),cex=cex.rates,
srt=atan(dy/dx)*180/pi)
else
text(mean(c(s[1],e[1]))+0.3*diff(c(s[1],e[1]))+
1.5*shift.x,
mean(c(s[2],e[2]))+0.3*diff(c(s[2],e[2]))+
1.5*shift.y,
round(Q[i,j],signif),cex=cex.rates,
srt=atan(dy/dx)*180/pi)
arrows(s[1],s[2],e[1],e[2],length=0.05,
code=if(isSymmetric(Q)) 3 else 2,lwd=lwd,
col=cols[i,j])
}
}
text(v.x,v.y,rownames(Q),cex=cex.traits,
col=make.transparent(par("fg"),0.9))
if(color){
h<-1.5
LWD<-diff(par()$usr[1:2])/dev.size("px")[1]
lines(x=rep(-1.3+LWD*15/2,2),y=c(-h/2,h/2))
nticks<-6
Y<-cbind(seq(-h/2,h/2,length.out=nticks),
seq(-h/2,h/2,length.out=nticks))
X<-cbind(rep(-1.3+LWD*15/2,nticks),
rep(-1.3+LWD*15/2+0.02*h,nticks))
for(i in 1:nrow(Y)) lines(X[i,],Y[i,])
add.color.bar(h,sapply(seq(0,1,length.out=100),col_pal),
title="evolutionary rate (q)",
lims=NULL,digits=3,
direction="upwards",
subtitle="",lwd=15,
x=-1.3,y=-h/2,prompt=FALSE)
QQ<-Q
diag(QQ)<-0
text(x=X[,2],y=Y[,2],signif(exp(seq(MIN(log(QQ),na.rm=TRUE),
MAX(log(QQ),na.rm=TRUE),length.out=6)),signif),pos=4,cex=0.7)
}
object<-data.frame(states=rownames(Q),x=v.x,y=v.y)
invisible(object)
}
## wraps around expm
## written by Liam Revell 2011, 2017
EXPM<-function(x,...){
e_x<-if(isSymmetric(x)) matexpo(x) else expm(x,...)
dimnames(e_x)<-dimnames(x)
e_x
}
## function to simulate multiple-rate Mk multiMk
## written by Liam J. Revell 2018
sim.multiMk<-function(tree,Q,anc=NULL,nsim=1,...){
if(hasArg(as.list)) as.list<-list(...)$as.list
else as.list<-FALSE
ss<-rownames(Q[[1]])
tt<-map.to.singleton(reorder(tree))
P<-vector(mode="list",length=nrow(tt$edge))
for(i in 1:nrow(tt$edge))
P[[i]]<-expm(Q[[names(tt$edge.length)[i]]]*tt$edge.length[i])
if(nsim>1) X<- if(as.list) vector(mode="list",length=nsim) else
data.frame(row.names=tt$tip.label)
for(i in 1:nsim){
a<-if(is.null(anc)) sample(ss,1) else anc
STATES<-matrix(NA,nrow(tt$edge),2)
root<-Ntip(tt)+1
STATES[which(tt$edge[,1]==root),1]<-a
for(j in 1:nrow(tt$edge)){
new<-ss[which(rmultinom(1,1,P[[j]][STATES[j,1],])[,1]==1)]
STATES[j,2]<-new
ii<-which(tt$edge[,1]==tt$edge[j,2])
if(length(ii)>0) STATES[ii,1]<-new
}
x<-as.factor(
setNames(sapply(1:Ntip(tt),function(n,S,E) S[which(E==n)],
S=STATES[,2],E=tt$edge[,2]),tt$tip.label))
if(nsim>1) X[,i]<-x else X<-x
}
X
}
## constant-rate Mk model simulator
## written by Liam J. Revell 2018
sim.Mk<-function(tree,Q,anc=NULL,nsim=1,...){
if(hasArg(as.list)) as.list<-list(...)$as.list
else as.list<-FALSE
ss<-rownames(Q)
tt<-reorder(tree)
P<-vector(mode="list",length=nrow(tt$edge))
for(i in 1:nrow(tt$edge))
P[[i]]<-expm(Q*tt$edge.length[i])
if(nsim>1) X<- if(as.list) vector(mode="list",length=nsim) else
data.frame(row.names=tt$tip.label)
for(i in 1:nsim){
a<-if(is.null(anc)) sample(ss,1) else anc
STATES<-matrix(NA,nrow(tt$edge),2)
root<-Ntip(tt)+1
STATES[which(tt$edge[,1]==root),1]<-a
for(j in 1:nrow(tt$edge)){
new<-ss[which(rmultinom(1,1,P[[j]][STATES[j,1],])[,1]==1)]
STATES[j,2]<-new
ii<-which(tt$edge[,1]==tt$edge[j,2])
if(length(ii)>0) STATES[ii,1]<-new
}
x<-as.factor(
setNames(sapply(1:Ntip(tt),function(n,S,E) S[which(E==n)],
S=STATES[,2],E=tt$edge[,2]),tt$tip.label))
if(nsim>1) X[[i]]<-x else X<-x
}
X
}
## as.Qmatrix method
as.Qmatrix<-function(x,...){
if(identical(class(x),"Qmatrix")) return(x)
UseMethod("as.Qmatrix")
}
as.Qmatrix.default<-function(x, ...){
warning(paste(
"as.Qmatrix does not know how to handle objects of class ",
class(x),"."))
}
as.Qmatrix.fitMk<-function(x,...){
Q<-matrix(NA,length(x$states),length(x$states))
Q[]<-c(0,x$rates)[x$index.matrix+1]
rownames(Q)<-colnames(Q)<-x$states
diag(Q)<--rowSums(Q,na.rm=TRUE)
class(Q)<-"Qmatrix"
Q
}
as.Qmatrix.ace<-function(x, ...){
if("index.matrix"%in%names(x)){
k<-nrow(x$index.matrix)
Q<-matrix(NA,k,k)
Q[]<-c(0,x$rates)[x$index.matrix+1]
rownames(Q)<-colnames(Q)<-colnames(x$lik.anc)
diag(Q)<--rowSums(Q,na.rm=TRUE)
class(Q)<-"Qmatrix"
return(Q)
} else cat("\"ace\" object does not appear to contain a Q matrix.\n")
}
print.Qmatrix<-function(x,...){
cat("Estimated Q matrix:\n")
print(unclass(x),...)
}
is.Qmatrix<-function(x) "Qmatrix" %in% class(x)
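## ------------------------------------------------------------------------
## Illustrative usage sketch (editor's addition, not part of the phytools
## sources). It assumes ape and phytools are attached so that rtree(),
## to.matrix(), nodeHeights() and expm() are available, and uses sim.Mk()
## from this file to simulate a binary trait before fitting the Mk model.
# library(ape); library(phytools)
# set.seed(1)
# tree <- rtree(100)                                   # random 100-tip tree
# Q <- matrix(c(-1, 1, 1, -1), 2, 2,
#             dimnames = list(c("a", "b"), c("a", "b")))  # 2-state rate matrix
# x <- sim.Mk(tree, Q)                                 # simulate the trait
# fit <- fitMk(tree, x, model = "ER")                  # equal-rates Mk fit
# print(fit)                                           # fitted Q, pi, logLik
# plot(fit)                                            # diagram of the rates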
|
/R/fitMk.R
|
no_license
|
Phyo-Khine/phytools
|
R
| false | false | 15,224 |
r
|
library(ape)
testtree <- read.tree("9602_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9602_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/9602_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
values <- reactiveValues()
observe({
if(input$generateSeuratFile)
{
withProgress(message = "Generating Seurat Object, please wait",{
print("Saving Seurat Object")
js$addStatusIcon("finishTab","loading")
pbmc <- tsneReactive()$pbmc
filename = paste0(input$projectname,"_seuratObj_",session$token,"_", format(Sys.time(), "%y-%m-%d_%H-%M-%S"), '.Robj')
filepath = file.path(tempdir(), filename)
cat(filepath)
shiny::setProgress(value = 0.3, detail = "might take some time for large datasets ...")
save(pbmc, file = filepath)
values$filepath <- filepath
#logs$Download <- logs$Download + 1
#cat(logs$Download, file="logs\\Download.txt", append=FALSE)
js$addStatusIcon("finishTab","done")
})
}
})
output$seuratFileExists <-
reactive({
return(!is.null(values$filepath))
})
outputOptions(output, 'seuratFileExists', suspendWhenHidden=FALSE)
output$downloadRObj <- downloadHandler(
filename = function() {
paste(input$projectname,"_seuratObj_", format(Sys.time(), "%y-%m-%d_%H-%M-%S"), '.Robj', sep='')
},
content = function(file) {
file.copy(values$filepath, file)
js$addStatusIcon("finishTab","done")
}
)
|
/wizard/server-download.R
|
no_license
|
goodhen2/single_cell_visual_analytics
|
R
| false | false | 1,309 |
r
|
A<-read.table("household_power_consumption.txt",header=T, sep=";",na.strings="?")
head(A)
B<-A[A$Date%in% c("1/2/2007","2/2/2007"),]
head(B)
paste()
C<-paste(B$Date,B$Time,sep=" ")
C
D<-strptime(C,"%d/%m/%Y %H:%M:%S") # space between date and time matches paste(..., sep=" ")
D
merg<-cbind(D,B)
merg
hist(merg$Global_active_power,col='red',main="Global Active Power",xlab= "Global Active Power (kilowatts)")
png("plot1.png",width= 480,height= 480)
hist(merg$Global_active_power,col='red',main="Global Active Power",xlab= "Global Active Power (kilowatts)")
dev.off()
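# Note (editor's addition): the histogram is intentionally drawn twice --
# once on the default device for interactive inspection, and once inside the
# png device so that plot1.png (480 x 480 pixels) contains the same figure.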
|
/plot1.R
|
no_license
|
srichandana7/ExData_Plotting1
|
R
| false | false | 515 |
r
|
#' tapplysum.R
#'
#' Faster replacement for tapply(..., FUN=sum)
#'
#' Adrian Baddeley and Tilman Davies
#'
#' $Revision: 1.11 $ $Date: 2016/12/12 09:07:06 $
tapplysum <- function(x, flist, do.names=FALSE, na.rm=TRUE) {
stopifnot(is.numeric(x))
stopifnot(is.list(flist))
stopifnot(all(lengths(flist) == length(x)))
stopifnot(all(sapply(flist, is.factor)))
nfac <- length(flist)
goodx <- is.finite(x)
if(na.rm) goodx <- goodx | is.na(x)
if(!(nfac %in% 2:3) || !all(goodx)) {
y <- tapply(x, flist, sum)
y[is.na(y)] <- 0
return(y)
}
ifac <- flist[[1L]]
jfac <- flist[[2L]]
mi <- length(levels(ifac))
mj <- length(levels(jfac))
ii <- as.integer(ifac)
jj <- as.integer(jfac)
if(nfac == 3) {
kfac <- flist[[3L]]
mk <- length(levels(kfac))
kk <- as.integer(kfac)
}
#' remove NA's
if(nfac == 2) {
if(anyNA(x) || anyNA(ii) || anyNA(jj)) {
ok <- !(is.na(x) | is.na(ii) | is.na(jj))
ii <- ii[ok]
jj <- jj[ok]
x <- x[ok]
}
} else {
if(anyNA(x) || anyNA(ii) || anyNA(jj) || anyNA(kk)) {
ok <- !(is.na(x) | is.na(ii) | is.na(jj) | is.na(kk))
ii <- ii[ok]
jj <- jj[ok]
kk <- kk[ok]
x <- x[ok]
}
}
n <- length(ii)
#'
if(nfac == 2) {
result <- matrix(0, mi, mj)
if(n > 0) {
oo <- order(ii, jj)
zz <- .C("ply2sum",
nin = as.integer(n),
xin = as.double(x[oo]),
iin = as.integer(ii[oo]),
jin = as.integer(jj[oo]),
nout = as.integer(integer(1L)),
xout = as.double(numeric(n)),
iout = as.integer(integer(n)),
jout = as.integer(integer(n)))
nout <- zz$nout
if(nout > 0) {
ijout <- cbind(zz$iout, zz$jout)[1:nout,,drop=FALSE]
xout <- zz$xout[1:nout]
result[ijout] <- xout
}
}
} else {
result <- array(0, dim=c(mi, mj, mk))
if(n > 0) {
oo <- order(ii, jj, kk)
zz <- .C("ply3sum",
nin = as.integer(n),
xin = as.double(x[oo]),
iin = as.integer(ii[oo]),
jin = as.integer(jj[oo]),
kin = as.integer(kk[oo]),
nout = as.integer(integer(1L)),
xout = as.double(numeric(n)),
iout = as.integer(integer(n)),
jout = as.integer(integer(n)),
kout = as.integer(integer(n)))
nout <- zz$nout
if(nout > 0) {
ijkout <- cbind(zz$iout, zz$jout, zz$kout)[1:nout,,drop=FALSE]
xout <- zz$xout[1:nout]
result[ijkout] <- xout
}
}
}
if(do.names)
dimnames(result) <- lapply(flist, levels)
return(result)
}
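## Illustrative example (editor's sketch, not part of spatstat). With the
## package's compiled code ("ply2sum") loaded, tapplysum() should agree with
## base tapply(..., sum), except that empty cells are 0 rather than NA:
# f <- factor(c("a", "a", "b"), levels = c("a", "b"))
# g <- factor(c("x", "y", "y"), levels = c("x", "y"))
# tapplysum(c(1, 2, 3), list(f, g), do.names = TRUE)
# tapply(c(1, 2, 3), list(f, g), sum)   # same sums, but NA in the empty cell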
|
/R/tapplysum.R
|
no_license
|
jalilian/spatstat
|
R
| false | false | 2,757 |
r
|
## Verify Inverse Matrix Existence Before Performing It
## set and get value of the inverse of Matrix.
makeCacheMatrix <- function(x = matrix()) {
inv<-NULL
set<-function(y){
x<<-y
inv<<-NULL
}
get<-function()x
setinverse<-function(inverse)inv<<-inverse
getinverse<-function()inv
list(set=set,get=get,setinverse=setinverse,getinverse=getinverse)
}
## In this function, the inverse is resolved from the cached matrix object
## created by the function above. It first checks whether the inverse has
## already been calculated; otherwise it computes the inverse and stores it
## with the setinverse function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv<-x$getinverse()
if(!is.null(inv)){
message("getting cached data.")
return(inv)
}
data<-x$get()
inv<-solve(data)
  x$setinverse(inv) # store the freshly computed inverse in the cache
inv
}
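## Illustrative usage (editor's addition, not in the original assignment):
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
# cacheSolve(m)   # computes the inverse and caches it
# cacheSolve(m)   # prints "getting cached data." and returns the cached inverse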
|
/cachematrix.R
|
no_license
|
SergioCavaleiroCosta/MyRepo
|
R
| false | false | 828 |
r
|
library(seqLogo) # provides makePWM() and seqLogo()
a <- c(3,2,25,0,25,3,4)
u <- c(11,5,0,25,0,15,13)
c <- c(3,14,0,0,0,3,5)
g <- c(8,4,0,0,0,4,3)
df <- data.frame(a,c,g,u)
df
#define function that divides the frequency by the row sum i.e. proportions
proportion <- function(x){
rs <- sum(x);
return(x / rs);
}
#create position weight matrix
mef2 <- apply(df, 1, proportion)
mef2 <- makePWM(mef2)
seqLogo(mef2)
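# Quick sanity check (editor's addition): each column of the transposed
# matrix returned by apply(df, 1, proportion) should sum to 1, i.e. one
# probability distribution over a/c/g/u per sequence position.
# colSums(apply(df, 1, proportion))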
|
/seqLogo.R
|
no_license
|
viv3kanand/My-works
|
R
| false | false | 366 |
r
|
% Generated by roxygen2 (4.1.0.9000): do not edit by hand
% Please edit documentation in R/entities-methods.R
\name{density.SpatialEntities}
\alias{density}
\alias{density.SpatialEntities}
\title{SpatialEntities density method}
\usage{
\method{density}{SpatialEntities}(x, bandwidth, newdata, ncells = 5000, ...)
}
\arguments{
\item{x}{object of class \link{SpatialEntities-class}}
\item{bandwidth}{bandwidth parameter (see \link[MASS]{kde2d})}
\item{newdata}{target grid; if omitted, a grid over the window is created}
\item{ncells}{in case no newdata is provided and window is a polygon, the approximate number of grid cells for the grid created}
\item{...}{ignored}
}
\value{
object of class \link{SpatialField-class}
}
\description{
density estimate for SpatialEntities data
}
|
/man/density.Rd
|
permissive
|
cynsky/mss
|
R
| false | false | 786 |
rd
|
library(susieR)
set.seed(1)
args = commandArgs(trailingOnly = TRUE)
# [1] is summary statistics,[2] is LD matrix, [3] is the true causal file, [4] is number of causal snps,
# [5] is output subsets file name, [6] is output set file name, [7] is accuracy (sensitivity) file name,
# [8] is set size file name, [9] is the number of credible sets file name.
# z reports line 1 did not have 10 elements
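# Example invocation (hypothetical file names, added for illustration):
#   Rscript susie2.R zscores.txt ld_matrix.txt causal_snps.txt 2 \
#     subsets_out.txt set_out.txt sensitivity_out.txt set_size_out.txt num_cs_out.txt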
z <- read.table(args[1], header = FALSE)
R <- read.table(args[2], header = FALSE)
causal <- read.table(args[3], header = FALSE)
num_causal <- as.numeric(args[4])
R <- data.matrix(R)
snp_list <- z[["V1"]]
fitted <- susie_rss(z[,2], R,
L = 10,
estimate_residual_variance = TRUE,
estimate_prior_variance = TRUE,
verbose = TRUE, check_R = FALSE)
# number of credible sets (CS)
num_cs <- length(fitted$sets$cs)
if (num_cs == 0) {
    # no credible sets found (e.g. SuSiE did not identify any signals)
write.table(0, args[6], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
write.table(0, args[7], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
write.table(0, args[8], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
} else {
for (each in 1:num_cs) {
        subset <- fitted$sets$cs[[each]] # the n-th credible set among all credible sets
subset <- c(subset)
snp_subset <- c()
for (i in 1:length(subset)) {
snp_subset <-c(snp_subset, as.character(snp_list[subset[i]]))
}
print(snp_subset)
write.table(paste("cs", each, sep=""), args[5], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
write.table(snp_subset, args[5], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
}
set <- do.call(c, fitted$sets$cs)
count <- length(set)
snp_set <- c()
for (i in 1:count) {
snp_set <-c(snp_set, as.character(snp_list[set[i]]))
}
write.table(snp_set, args[6], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
# sensitivity
write.table(length(which(causal$V1 %in% snp_set))/length(causal$V1), args[7], append = TRUE,
col.names = FALSE, row.names = FALSE, quote = FALSE)
# set size
write.table(count, args[8], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
}
write.table(num_cs, args[9], append = TRUE, col.names = FALSE, row.names = FALSE, quote = FALSE)
|
/simulation_scripts/Automation/susie2.R
|
no_license
|
nlapier2/mscaviar_replication
|
R
| false | false | 2,380 |
r
|
library(ape)
testtree <- read.tree("6057_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6057_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/6057_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 135 |
r
|
library(dplyr)
library(tibble)
library(ggplot2)
library(RColorBrewer)
library(factoextra)
library(cluster)
library(NbClust)
library(mclust)
library(rgl)
# Set parameters
run = 1
g = 800
for (s in 17:24){
# Read in data and set up for k-means analysis
df = read.csv(paste(getwd(),'/para_set_',s,'/model_run_',run,'/paraset_',s,'_offspring_map_',g,'.csv',sep=""))
neutral.df = df %>% select(.,FLday,X_pos,Y_pos,mapA,mapB,mapC,loc1a:loc5b,neut1a:neut24b)
neutral.df[] = lapply(neutral.df, as.character)
neutral.df[neutral.df == 'D'] = 1;
neutral.df[neutral.df[,] == 'd'] = 0
neutral.df[] = lapply(neutral.df, as.numeric)
neutral.df = neutral.df %>%
mutate(.,F1 = loc1a+loc1b) %>%
mutate(.,F2 = loc2a+loc2b) %>%
mutate(.,F3 = loc3a+loc3b) %>%
mutate(.,F4 = loc4a+loc4b) %>%
mutate(.,F5 = loc5a+loc5b) %>%
mutate(.,map1 = neut1a+neut1b) %>%
mutate(.,map2 = neut2a+neut2b) %>%
mutate(.,map3 = neut3a+neut3b) %>%
mutate(.,map4 = neut4a+neut4b) %>%
mutate(.,map5 = neut5a+neut5b) %>%
mutate(.,map6 = neut6a+neut6b) %>%
mutate(.,map7 = neut7a+neut7b) %>%
mutate(.,map8 = neut8a+neut8b) %>%
mutate(.,map9 = neut9a+neut9b) %>%
mutate(.,map10 = neut10a+neut10b) %>%
mutate(.,map11 = neut11a+neut11b) %>%
mutate(.,map12 = neut12a+neut12b) %>%
mutate(.,map13 = neut13a+neut13b) %>%
mutate(.,map14 = neut14a+neut14b) %>%
mutate(.,map15 = neut15a+neut15b) %>%
mutate(.,map16 = neut16a+neut16b) %>%
mutate(.,map17 = neut17a+neut17b) %>%
mutate(.,map18 = neut18a+neut18b) %>%
mutate(.,map19 = neut19a+neut19b) %>%
mutate(.,map20 = neut20a+neut20b) %>%
mutate(.,map21 = neut21a+neut21b) %>%
mutate(.,map22 = neut22a+neut22b) %>%
mutate(.,map23 = neut23a+neut23b) %>%
mutate(.,map24 = neut24a+neut24b)
ind.neutral.df = neutral.df %>% select(.,FLday,X_pos,Y_pos,mapA:mapC,map1:map24)
neutral.df = neutral.df %>% select(.,mapA:mapC,map1:map24)
df.scaled = scale(neutral.df)
scaled.matrix = as.matrix(df.scaled)
k.means = 2#d_clust$G
km.res = kmeans(df.scaled, k.means, iter.max = 20,nstart = 25)
hist.df = df %>% bind_cols(.,as.data.frame(km.res$cluster))
names(hist.df)[ncol(hist.df)] = 'Cluster'
hist.df$paraset = s
if (s == 17){hist.joint = hist.df} else {hist.joint = bind_rows(hist.joint,hist.df)}
}
hist.joint$grouping = 'Selfing'
hist.joint$grouping[(1+nrow(hist.joint)/2):nrow(hist.joint)] = 'No selfing'
hist.joint$paraset[(hist.joint$paraset == 17)|(hist.joint$paraset == 21)] = 'Random'
hist.joint$paraset[(hist.joint$paraset == 18)|(hist.joint$paraset == 22)] = 'IBT'
hist.joint$paraset[(hist.joint$paraset == 19)|(hist.joint$paraset == 23)] = 'IBD'
hist.joint$paraset[(hist.joint$paraset == 20)|(hist.joint$paraset == 24)] = 'IBDxIBT'
names(hist.joint)[ncol(hist.joint)-1] = 'Isolation'
ggplot(data=hist.joint,aes(FLday)) + geom_histogram(aes(fill=factor(Cluster)),position='dodge')+guides(fill=guide_legend(title="Neutral cluster"))+theme_classic()+ylab('Count')+facet_grid(Isolation~grouping)
|
/NeutralCluster_Hist.R
|
no_license
|
madelineapeters/IBDxIBT_updated
|
R
| false | false | 3,090 |
r
|
#--------------------#
# Loading Packages #
#--------------------#
library(circular)
#----------------#
# Data Set #
#----------------#
data <-
read.csv("Assessoria 03 - Mauricio/dados mortalidade.csv",
h = T,
sep = ",")
head(data)
data$group = as.factor(data$group) # converting to factor
taxa <- levels(data[, 1])
# Groups
Mysticeti <- data[data$group == "Mysticeti", ]
Odontoceti <- data[data$group == "Odontoceti", ]
Pinipedia <- data[data$group == "Pinipedia", ]
Procellariiformes <- data[data$group == "Procellariiformes", ]
SeaTurtles <- data[data$group == "Sea Turtles", ]
Sphenisciformes <- data[data$group == "Sphenisciformes", ]
#---------------#
# #
# Analyses ! #
# #
#---------------#
#---------#
# Overall #
#---------#
## Exploring
geral <- rep(data$angle, data$abundance)
geral_rad <- rad(geral)
geral_circ <- as.circular(geral_rad)
plot.circular(geral_circ, rotation = "clock", units = "rads")
# Mean (a)
mean.circular(geral_circ)
# Mean vector length (r)
rho.circular(geral_circ)
# Variance from the circular package
var.circular(geral_circ)
# circular variance
rho.circular(geral_circ)
# angular variance
2 * (1 - rho.circular(geral_circ))
# angular standard deviation (0 to infinity)
sd.circular(geral_circ)
# confidence interval (bootstrap)
mle.vonmises.bootstrap.ci(geral_circ)
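# Note (editor's addition): these summaries all derive from the mean resultant
# length r returned by rho.circular(); var.circular() reports 1 - r and the
# angular variance is 2 * (1 - r), so r near 1 means the stranding dates are
# tightly clustered within the year and r near 0 means they are spread out.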
## Hypothesis Tests
rao.spacing.test(geral_circ)
rayleigh.test(geral_circ)
watson.test(geral_circ, dist = "uniform")
## Plots
# frequency
rose.diag(
geral_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "white",
col = "lightsalmon",
bin = 12*6,
ticks = T,
prop = 3#,
# main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(geral_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 1.75
)
#-----------#
# Mysticeti #
#-----------#
## Exploring
mysti <- rep(Mysticeti$angle, Mysticeti$abundance)
mysti_rad <- rad(mysti)
mysti_circ <- as.circular(mysti_rad)
plot.circular(mysti_circ, rotation = "clock", units = "rads")
# Mean (a)
mean.circular(mysti_circ)
# Mean vector length (r)
rho.circular(mysti_circ)
# Median
median.circular(mysti_circ)
# Mode (and frequency of the data)
table(mysti_circ)
# Variance from the circular package
var.circular(mysti_circ)
# circular variance
rho.circular(mysti_circ)
# angular variance
2 * (1 - rho.circular(mysti_circ))
# Angular deviation (or angular standard deviation, which ranges from 0 to 81.03?)
sqrt(2 * (1 - rho.circular(mysti_circ)))
# angular standard deviation (0 to infinity)
sd.circular(mysti_circ)
# confidence interval (bootstrap)
mle.vonmises.bootstrap.ci(mysti_circ)
summary(mysti_circ)
## Plots
# frequency
rose.diag(
mysti_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "black",
ticks = T,
prop = 3,
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(mysti_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 1.75
)
## Hypothesis tests
rao.spacing.test(mysti_circ)
rayleigh.test(mysti_circ)
watson.test(mysti_circ, dist = "uniform")
#------------#
# Odontoceti #
#------------#
## Exploring
odonto <- rep(Odontoceti$angle, Odontoceti$abundance)
odonto_rad <- rad(odonto)
odonto_circ <- as.circular(odonto_rad)
plot.circular(odonto_circ, rotation = "clock", units = "rads")
#Mean direction (a)
mean.circular(odonto_circ)
#Mean resultant vector length (r)
rho.circular(odonto_circ)
#Variance as reported by the circular package
var.circular(odonto_circ)
#circular variance (1 - r)
1 - rho.circular(odonto_circ)
#angular variance
2 * (1 - rho.circular(odonto_circ))
#angular standard deviation (0 to infinity)
sd.circular(odonto_circ)
#confidence interval (bootstrap)
mle.vonmises.bootstrap.ci(odonto_circ)
## Plots
# frequency
rose.diag(
odonto_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "black",
ticks = T,
prop = 3,
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(odonto_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 1.75
)
## Hypothesis tests
rao.spacing.test(odonto_circ)
rayleigh.test(odonto_circ)
watson.test(odonto_circ, dist = "uniform")
#-----------#
# Pinipedia #
#-----------#
## Exploring
pini <- rep(Pinipedia$angle, Pinipedia$abundance)
pini_rad <- rad(pini)
pini_circ <- as.circular(pini_rad)
plot.circular(pini_circ, rotation = "clock", units = "rads")
#Mean direction (a)
mean.circular(pini_circ)
#Mean resultant vector length (r)
rho.circular(pini_circ)
#Variance as reported by the circular package
var.circular(pini_circ)
#circular variance (1 - r)
1 - rho.circular(pini_circ)
#angular variance
2 * (1 - rho.circular(pini_circ))
#angular standard deviation (0 to infinity)
sd.circular(pini_circ)
#confidence interval (bootstrap)
mle.vonmises.bootstrap.ci(pini_circ)
## Plots
# frequency
rose.diag(
pini_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "black",
ticks = T,
prop = 3,
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(pini_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 1.75
)
## Hypothesis tests
rao.spacing.test(pini_circ)
rayleigh.test(pini_circ)
watson.test(pini_circ, dist = "uniform")
#-------------------#
# Procellariiformes #
#-------------------#
## Exploring
proce <- rep(Procellariiformes$angle, Procellariiformes$abundance)
proce_rad <- rad(proce)
proce_circ <- as.circular(proce_rad)
plot.circular(proce_circ, rotation = "clock", units = "rads")
#Mean direction (a)
mean.circular(proce_circ)
#Mean resultant vector length (r)
rho.circular(proce_circ)
#Variance as reported by the circular package
var.circular(proce_circ)
#circular variance (1 - r)
1 - rho.circular(proce_circ)
#angular variance
2 * (1 - rho.circular(proce_circ))
#angular standard deviation (0 to infinity)
sd.circular(proce_circ)
#confidence interval (bootstrap)
mle.vonmises.bootstrap.ci(proce_circ)
## Plots
# frequency
rose.diag(
proce_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "black",
ticks = T,
prop = 3,
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(proce_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 1.75
)
## Hypothesis tests
rao.spacing.test(proce_circ)
rayleigh.test(proce_circ)
watson.test(proce_circ, dist = "uniform")
#------------#
# SeaTurtles #
#------------#
## Exploring
turtles <- rep(SeaTurtles$angle, SeaTurtles$abundance)
turtles_rad <- rad(turtles)
turtles_circ <- as.circular(turtles_rad)
plot.circular(turtles_circ, rotation = "clock", units = "rads")
#Mean direction (a)
mean.circular(turtles_circ)
#Mean resultant vector length (r)
rho.circular(turtles_circ)
#Variance as reported by the circular package
var.circular(turtles_circ)
#circular variance (1 - r)
1 - rho.circular(turtles_circ)
#angular variance
2 * (1 - rho.circular(turtles_circ))
#angular standard deviation (0 to infinity)
sd.circular(turtles_circ)
#confidence interval (bootstrap)
mle.vonmises.bootstrap.ci(turtles_circ)
## Plots
# frequency
rose.diag(
turtles_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "black",
ticks = T,
prop = 3,
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(turtles_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 1.75
)
## Hypothesis tests
rao.spacing.test(turtles_circ)
rayleigh.test(turtles_circ)
watson.test(turtles_circ, dist = "uniform")
#-----------------#
# Sphenisciformes #
#-----------------#
## Exploring
spheni <- rep(Sphenisciformes$angle, Sphenisciformes$abundance)
spheni_rad <- rad(spheni)
spheni_circ <- as.circular(spheni_rad)
plot.circular(spheni_circ, rotation = "clock", units = "rads")
#Mean direction (a)
mean.circular(spheni_circ)
#Mean resultant vector length (r)
rho.circular(spheni_circ)
#Variance as reported by the circular package
var.circular(spheni_circ)
#circular variance (1 - r)
1 - rho.circular(spheni_circ)
#angular variance
2 * (1 - rho.circular(spheni_circ))
#angular standard deviation (0 to infinity)
sd.circular(spheni_circ)
#confidence interval (bootstrap)
mle.vonmises.bootstrap.ci(spheni_circ)
## Plots
# frequency
rose.diag(
spheni_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "white",
ticks = T,
prop = 3,
bins = 12*6,
col = "lightsalmon",
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)),
c(month.abb[c(5:12, 1:4)]))
lines(
density(spheni_circ, bw = 20),
col = "lightsalmon",
rotation = "clock",
zero = pi / 2,
shrink = 1.75
)
## Hypothesis tests
rao.spacing.test(spheni_circ)
rayleigh.test(spheni_circ)
watson.test(spheni_circ, dist = "uniform")
#--------
par(mfrow = c(1, 2))
rose.diag(
turtles_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "black",
ticks = T,
prop = 3,
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(turtles_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 4
)
rose.diag(
spheni_circ,
rotation = "clock",
zero = pi / 2,
units = "rads",
axes = F,
border = "black",
ticks = T,
prop = 3,
main = "bla bla"
)
axis.circular(at = circular(sort(seq(0, 11 / 6 * pi, pi / 6), decreasing = T)), c(labels = c(
"M", "J",
"J", "A", "S", "O", "N", "D", "J", "F", "M", "A"
)))
lines(
density(spheni_circ, bw = 20),
col = "red",
rotation = "clock",
zero = pi / 2,
shrink = 4
)
|
/Assessoria 03 - Mauricio/estatisticas circulares (NAE).R
|
permissive
|
victorfrankg/lab_est_1_mat02031
|
R
| false | false | 10,515 |
r
|
|
## Data Visualization Principles
library(dplyr)
library(ggplot2)
library(dslabs)
# Contagious diseases and murder rates
dat <- us_contagious_diseases %>%
filter(year == 1967 & disease=="Measles" & !is.na(population)) %>% mutate(rate = count / population * 10000 * 52 / weeks_reporting)
state <- dat$state
rate <- dat$count/(dat$population/10000)*(52/dat$weeks_reporting)
# reordering by rate of infection
state <- reorder(state, rate)
levels(state)
# adding rate to the data set and reordering
data(us_contagious_diseases)
dat <- us_contagious_diseases %>% filter(year == 1967 & disease=="Measles" & count>0 & !is.na(population)) %>%
mutate(rate = count / population * 10000 * 52 / weeks_reporting) %>% mutate(state = reorder(state, rate))
dat %>% ggplot(aes(state, rate)) +
geom_bar(stat="identity") +
coord_flip()
# bar graphs can be misleading as they hide data
murders %>% mutate(rate = total/population*100000) %>%
group_by(region) %>%
summarize(avg = mean(rate)) %>%
mutate(region = factor(region)) %>%
ggplot(aes(region, avg)) +
geom_bar(stat="identity") +
ylab("Murder Rate Average")
# boxplots are more informative
murders %>% mutate(rate = total/population*100000) %>%
mutate(region = reorder(region, rate, FUN = median)) %>%
ggplot(aes(region, rate)) +
geom_boxplot() +
geom_point()
## Vaccines
library(dplyr)
library(ggplot2)
library(RColorBrewer)
library(dslabs)
data(us_contagious_diseases)
# filtering by disease to plot trends over time
the_disease = "Smallpox"
dat <- us_contagious_diseases %>%
filter(!state%in%c("Hawaii","Alaska") & disease == the_disease & weeks_reporting >= 10) %>%
mutate(rate = count / population * 10000) %>%
mutate(state = reorder(state, rate))
# geom_tile to show intensity plots
dat %>% ggplot(aes(year, state, fill = rate)) +
geom_tile(color = "grey50") +
scale_x_continuous(expand=c(0,0)) +
scale_fill_gradientn(colors = brewer.pal(9, "Reds"), trans = "sqrt") +
theme_minimal() +
theme(panel.grid = element_blank()) +
ggtitle(the_disease) +
ylab("") +
xlab("")
# plotting time series data
avg <- us_contagious_diseases %>%
filter(disease==the_disease) %>% group_by(year) %>%
summarize(us_rate = sum(count, na.rm=TRUE)/sum(population, na.rm=TRUE)*10000)
dat %>% ggplot() +
geom_line(aes(year, rate, group = state), color = "grey50",
show.legend = FALSE, alpha = 0.2, size = 1) +
geom_line(mapping = aes(year, us_rate), data = avg, size = 1, color = "black") +
scale_y_continuous(trans = "sqrt", breaks = c(5,25,125,300)) +
ggtitle("Cases per 10,000 by state") +
xlab("") +
ylab("") +
geom_text(data = data.frame(x=1940, y=30), mapping = aes(x, y, label="US average"), color="black")
# all diseases in California
us_contagious_diseases %>% filter(state=="California" & weeks_reporting >= 10) %>%
group_by(year, disease) %>%
summarize(rate = sum(count)/sum(population)*10000) %>%
ggplot(aes(year, rate, color = disease)) +
geom_line()
# diseases in the US
us_contagious_diseases %>% filter(!is.na(population)) %>%
group_by(year, disease) %>%
summarize(rate = sum(count)/sum(population)*10000) %>%
ggplot(aes(year, rate, color = disease)) +
geom_line()
|
/Visualization/VisualizationPrinciples.R
|
no_license
|
AndrewS622/Data-Science
|
R
| false | false | 3,321 |
r
|
|
test_that("API", {
expect_identical(
color_quos_to_display(
flights = "blue",
airlines = ,
airports = "orange",
planes = "green_nb"
) %>%
nest(data = -new_display) %>%
deframe() %>%
map(pull),
list(accent1 = "flights", accent2 = c("airlines", "airports"), accent4nb = "planes")
)
})
test_that("last", {
expect_cdm_error(
color_quos_to_display(
flights = "blue",
airlines =
),
class = "last_col_missing"
)
})
test_that("bad color", {
expect_cdm_error(
color_quos_to_display(
flights = "mauve"
),
class = "wrong_color"
)
})
test_that("getter", {
expect_equal(
cdm_get_colors(cdm_nycflights13()),
tibble::tribble(
~table, ~color,
"airlines", "orange",
"airports", "orange",
"flights", "blue",
"planes", "orange",
"weather", "green"
)
)
})
|
/tests/testthat/test-draw-dm.R
|
permissive
|
jasonyum/dm
|
R
| false | false | 914 |
r
|
test_that("API", {
expect_identical(
color_quos_to_display(
flights = "blue",
airlines = ,
airports = "orange",
planes = "green_nb"
) %>%
nest(data = -new_display) %>%
deframe() %>%
map(pull),
list(accent1 = "flights", accent2 = c("airlines", "airports"), accent4nb = "planes")
)
})
test_that("last", {
expect_cdm_error(
color_quos_to_display(
flights = "blue",
airlines =
),
class = "last_col_missing"
)
})
test_that("bad color", {
expect_cdm_error(
color_quos_to_display(
flights = "mauve"
),
class = "wrong_color"
)
})
test_that("getter", {
expect_equal(
cdm_get_colors(cdm_nycflights13()),
tibble::tribble(
~table, ~color,
"airlines", "orange",
"airports", "orange",
"flights", "blue",
"planes", "orange",
"weather", "green"
)
)
})
|
/rabbit/reheatmapLgFC.R
|
no_license
|
x-nm/lab_code
|
R
| false | false | 1,411 |
r
| ||
#apply PCA clustering to biospecimen and imaging data
# Developing Notes for May 7th -------------------------------------------
#Added individual PCA analysis and clustering for both the DatSPECT data and the CSF data
# Developing Notes for May 4th 2014 --------------------------------------
#Think in terms of a basis function for Parkinson's disease
#This code shows that Parkinson's patients can be mostly described by the asymmetry of their striatal region and the values of their Abeta 42
#and alpha-synuclein. In particular the basis function looks like this:
#(asymmetry, Abeta 42/alpha-synuclein), (asymmetry, -Abeta 42/alpha-synuclein)
#in other words, based on PC2 and PC3, there are 4 kinds of patient biospecimen profiles
#From PC2
#Those with high asymmetry, low Total.tau and CSF.Alpha.synuclein (Abeta 42 is not that important)
#Those with low asymmetry, high Total.tau and CSF.Alpha.synuclein (Abeta 42 is not that important)
#From PC3
#Those with low asymmetry, low Total.tau and CSF.Alpha.synuclein (Abeta 42 is not that important)
#Those with high asymmetry, high Total.tau and CSF.Alpha.synuclein (Abeta 42 is not that important)
#Another interpretation is that any vector that isn't in the first few PCs can be ignored, as those PCs represent a small portion of the variation in the data
#For the biospecimen data we keep 5 PCs, because from the 6th PC onward each PC holds less than the average amount of information (its eigenvalue is less than 1)
rm(list = ls())
library(ggplot2)
library(gplots)
library(rgl)
library(lattice)
library(sjPlot)
library(fpc)
library(mclust)
library(cluster)
setwd("~/Dropbox//ORIE 4740 - Final Project/PCA_Clustering/Biospecimen_Imaging_Clustering/")
ppmi.raw.data.csv = read.csv("NMIB_AverageValues.csv") #use average values
ppmi.raw.data = ppmi.raw.data.csv
#adding ratio data
t.tau.Abeta.42.ratio = ppmi.raw.data$Total.tau/ppmi.raw.data$Abeta.42
p.tau.Abeta.42.ratio = ppmi.raw.data$p.Tau181P/ppmi.raw.data$Abeta.42
p.tau.t.tau.ratio = ppmi.raw.data$p.Tau181P/ppmi.raw.data$Total.tau
ppmi.raw.data = data.frame(ppmi.raw.data,
t.tau.Abeta.42.ratio,
p.tau.Abeta.42.ratio,
p.tau.t.tau.ratio)
#selecting PD patients
ppmi.raw.data = ppmi.raw.data[ppmi.raw.data$RECRUITMENT_CAT == 'PD',]
ppmi.biospecimen.imaging.data = subset.data.frame(ppmi.raw.data, select = c(CAUDATE_R,
CAUDATE_L,
PUTAMEN_R,
PUTAMEN_L,
CAUDATE_ASYMMETRY,
PUTAMEN_ASYMMETRY,
Abeta.42,
p.Tau181P,
Total.tau,
CSF.Alpha.synuclein,
t.tau.Abeta.42.ratio,
p.tau.Abeta.42.ratio,
p.tau.t.tau.ratio))
ppmi.CSF.data = subset.data.frame(ppmi.raw.data, select = c(Abeta.42,
p.Tau181P,
Total.tau,
CSF.Alpha.synuclein,
t.tau.Abeta.42.ratio,
p.tau.Abeta.42.ratio,
p.tau.t.tau.ratio))
ppmi.imaging.data = subset.data.frame(ppmi.raw.data, select = c(CAUDATE_R,
CAUDATE_L,
PUTAMEN_R,
PUTAMEN_L,
CAUDATE_ASYMMETRY,
PUTAMEN_ASYMMETRY))
#perform PCA on the whole biological dataset
ppmi.biospec.imaging.PCA = prcomp(ppmi.biospecimen.imaging.data,scale=TRUE)
plot(ppmi.biospec.imaging.PCA,
type = "line",
main = "Variances of each PCA loading")
sjp.pca(ppmi.biospec.imaging.PCA,
plotEigenvalues = TRUE,
type = "tile")
foo = sjp.pca(ppmi.biospecimen.imaging.data,
plotEigenvalues = TRUE,
hideLegend = FALSE,
type = "tile")
# Model Based Clustering based on the first 5 Principal Components --------------------------------------------------
mydata = ppmi.biospec.imaging.PCA$x[,c(1:5)]
model_fit = Mclust(mydata)
plot(model_fit)
summary(model_fit)
# K-Means Clustering on the first 5 Principal Components ------------------------------------------------------
mydata = ppmi.biospec.imaging.PCA$x[,c(1:5)]
wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(mydata,
centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
# Perform PCA on the CSF Data ---------------------------------------------
ppmi.CSF.pca = prcomp(ppmi.CSF.data,scale = TRUE)
sjp.pca(ppmi.CSF.pca,
plotEigenvalues = TRUE,
type = "circle")
# Model Based Clustering Based on the first 3 PCs -------------------------
mydata = ppmi.CSF.pca$x[,c(1:3)]
model_fit = Mclust(mydata)
plot(model_fit)
summary(model_fit)
# Perform PCA on DatSPECT data --------------------------------------------
ppmi.imaging.pca = prcomp(ppmi.imaging.data,scale = TRUE)
sjp.pca(ppmi.imaging.pca,
plotEigenvalues = TRUE,
type = "circle")
# Model Based Clustering Based on the first 2 or 3 PCs --------------------
mydata = ppmi.imaging.pca$x[,c(1:2)]
model_fit = Mclust(mydata)
plot(model_fit)
summary(model_fit)
# plotting in 3d ----------------------------------------------------------
plot3d(ppmi.biospec.imaging.PCA$x[,c(1:3)],
xlim = c(-10,10),
ylim = c(-10,10),
zlim = c(-10,10))
# plot3d(ppmi.biospec.imaging.PCA$x[,c(1:3)],
# xlim = c(-10,10),
# ylim = c(-10,10),
# xlim = c(-10,10))
|
/ORIE 4740 - Final Project/PCA_Clustering/Biospecimen_Imaging_Clustering/BiospecimenPCA.R
|
no_license
|
vtn6/PPMI-Data-Mining
|
R
| false | false | 6,493 |
r
|
|
##=================================
# The createBMLGrid function creates a basic two-dimensional grid/matrix
# The user supplies the following non-negative integers:
# r: row number, c: column number
# c(red, blue): numbers of red and blue cars; they don't have to be equal
# Assign the S3 class to the grid and return the grid
##=================================
createBMLGrid =
function(r = 100, c = 100, ncars = c(red = 1500, blue = 1500) )
{
if (r>0 & c>0) {
if (ncars[1] >= 0 && ncars[2] >= 0 && (ncars[1]+ncars[2])<= r*c) {
dims = c(r, c)
grid = matrix("", r, c)
pos = sample(1:prod(dims), sum(ncars))
grid[pos] = sample(rep(c("red", "blue"), ncars))
# S3 class
class(grid) = append("BMLGrid", class(grid))
grid
} else {
stop ("Number of cars has to be positive and no more than the number of cells")
}
} else stop ("Dimensions of the grid has to be positive")
}
# Plot the S3 class grid with red block and blue block represent red cars and blue cars
plot.BMLGrid =
function(x,...)
{
z = matrix(match(x, c("", "red", "blue")), nrow(x), ncol(x))
image(t(z), col = c("white", "red", "blue"), axes = FALSE, xlab = "", ylab = "", ...)
box()
}
# Move Cars
# Since the grid is basically a big matrix,
# we can get the location (coordinates) of the current red/blue car
# Find out the neighbourhood situation
# Then determine whether the car can move
getCarLocations =
function(g) # g is the grid we pass to the function
{
rowIndex = row(g)[g!=""] # where it is not blank
colIndex = col(g)[g!=""]
# put all the index in to dataframe
# Matrix subsetting thanks to Duncan and Piazza
data.frame(i = rowIndex, j = colIndex, colors = g[cbind(rowIndex, colIndex)])
}
## Method 1 (faster)
moveCars =
function(g, color = "red")
# g: the grid we want to pass to the function
# color: color of the cars that move from t to t+1
{
RedBlue = getCarLocations(g)
# find the location of the colored car
full = which(RedBlue$colors == color)
rowsIndex = RedBlue[full, 1]
colsIndex = RedBlue[full, 2]
# If ask to move the red car, then move it to the right
if(color == "red") {
# Stay the same row
nextRowIndex = rowsIndex
nextColIndex = colsIndex + 1L
# Wrap around/reset if move out of the grid
nextColIndex[nextColIndex > ncol(g)] = 1L
} else {
# if the parameter specifies "blue"
# Stay in the same column but move up one row
nextRowIndex = rowsIndex + 1L
nextColIndex = colsIndex
# Wrap around/reset if moved past the last row (compare against nrow, not ncol)
nextRowIndex[nextRowIndex > nrow(g)] = 1L
}
# Check whether the next location for red/blue cars is actually available
# subset the grid matrix using the next location matrix, yay piazza!
nextLoca = cbind(nextRowIndex, nextColIndex)
move = g[nextLoca] == ""
g[nextLoca[move,,drop = FALSE]] = color #only those ones count
# The ones that moved should leave a blank space
g[cbind(rowsIndex, colsIndex)[move,, drop = FALSE]] = ""
g
}
# run Blue car then Red car
# a function to compute the number of cars that moved,
# that were blocked, and the average velocity
summary.BMLGrid=
function(g,gPlus1)
{
if(nrow(g)!=nrow(gPlus1)|ncol(g)!=ncol(gPlus1)) {
stop ("Error: Two arguments need to have the same dimensions")
} else {
rows = nrow(g)
cols = ncol(g)
blueCars = sum(g=="blue")
redCars = sum(g=="red")
if (blueCars==sum(gPlus1=="blue") && redCars==sum(gPlus1=="red")) {
locaT = getCarLocations(g)
locaPlus1 = getCarLocations(gPlus1)
total = rbind(locaT, locaPlus1)
blockedCars = sum(duplicated(total))
movedCars = blueCars + redCars - blockedCars
density = (blueCars + redCars)/(rows*cols)
# locations with the cars that moved
# (including index of origin and index after moved)
moved = total[!(duplicated(total) | duplicated(total, fromLast = TRUE)), ]
# determine the color of the moved car
colour = unique(moved$colors)
if (length(colour) == 0) {
velocity = 0
colour = "No car moves"
} else {
if (colour == "red") {
velocity = movedCars/redCars
} else velocity = movedCars/blueCars
}
summaryMoves = list(rows, cols, blueCars, redCars, density,
blockedCars, movedCars,
colour, velocity)
names(summaryMoves) = c("row numbers", "column numbers",
"number of blueCars",
"number of redCars", "Density",
"number of blockedCars",
"number of movedCars", "movedCar color",
"velocity of movedCar")
summaryMoves
} else stop ("Error: Two grids need have the same number of cars")
}
}
# runBMLGrid() allow user to input the steps of the moving car
runBMLGrid =
function(g, numSteps = 10000, saveAll = FALSE, plotAll = FALSE)
# g: the initial grid before any car moves
# numSteps: a positive integer that specifies the number of time steps cars move
# blue cars move at time periods t = 1, 3, 5.. (Odd times)
# red cars move at time periods t = 2, 4, 6.. (Even times)
# saveAll gives the user a choice to save the grid at every single step
{
density = summary.BMLGrid(g, moveCars(g))$Density
if (saveAll == TRUE) {
# Save all the grids OMG!!
AllGrids = lapply(rep(c("blue", "red"), numSteps%/%2),
function(i){g <<- moveCars(g, i)})
if (numSteps %% 2 ==0) {
FinalGrid = AllGrids
} else {
# numSteps is odd, so one extra blue move is needed at the end
lastGrid = moveCars(AllGrids[[length(AllGrids)]], "blue")
FinalGrid = AllGrids
FinalGrid[[numSteps]] = lastGrid
}
} else {
for (i in 1:(numSteps%/%2)) {
g = moveCars(g, "blue")
g = moveCars(g, "red")
}
if (numSteps %% 2 ==0) {
FinalGrid = g
} else {
# last one shall be an odd number and will be blue car to move
FinalGrid = moveCars(g, "blue")
}
}
if (saveAll && plotAll) {
for (i in 1:numSteps) {
plot.BMLGrid(FinalGrid[[i]], main = paste0("Step", i,
"; Density = ", density))
}
} else if (saveAll && !plotAll) {
plot.BMLGrid(FinalGrid[[numSteps]], main = paste0("Step", numSteps,
"; Density = ", density))
} else plot.BMLGrid(FinalGrid, main = paste0("Step", numSteps,
"; Density = ", density))
FinalGrid
}
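# Minimal usage sketch (not part of the original file; the argument values are
# illustrative only). Wrapped in interactive() so sourcing the file stays side-effect free.
if (interactive()) {
  g0 <- createBMLGrid(r = 50, c = 50, ncars = c(red = 400, blue = 400))
  plot(g0)
  # one blue update, then summarise how many cars moved vs. were blocked
  print(summary.BMLGrid(g0, moveCars(g0, "blue")))
  # run the full simulation for 100 steps and plot the final state
  gFinal <- runBMLGrid(g0, numSteps = 100)
}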
|
/R/moveCars.R
|
no_license
|
zning1994/BMLSimulations
|
R
| false | false | 6,882 |
r
|
|
#Data Analytics Assignment 12.1 - Session 12
# Perform the below given activities:
# a. Take Apple Stock Prices from Yahoo Finance for last 90 days
# b. Predict the Stock closing prices for next 15 days.
# c. Submit your accuracy
# d. After 15 days again collect the data and compare with your forecast
# import Apple stock price data
df <- read.csv("AAPL.csv")
head(df)
str(df)
View(df)
df$Date <- as.Date(df$Date)
data = ts(df$Close)
test = data[62:73]
data = data[1:61]
plot(data, main= "Daily Close Price")
data = ts(df$Close, frequency = 365)
plot(data, main = "Daily Close Price")
decompose(data)
decompose(data, type = "multi")
par(mfrow=c(1,2))
plot(decompose(data, type = "multi"))
# creating seasonal forecast
library(forecast)
par(mfrow=c(1,1))
seasonplot(data)
# lags
lag(data,10)
lag.plot(data)
# Partial auto correlation
pac <- pacf(data)
pac$acf
# Auto correlation
ac <- acf(data)
ac$acf
# looking at ACF and PACF graph it is clear that the time series is not stationary
#------------------------------------------
model <- lm(data ~ c(1:length(data)))
summary(model)
plot(resid(model), type = 'l')
accuracy(model)
#----------------------------------------------
# deseasonlise the time series
tbl <- stl(data, 'periodic')
stab <- seasadj(tbl)
seasonplot(stab, 12)
# unit root test for stationarity
# The Augmented Dickey-Fuller test
library(tseries)
adf.test(data)
# P-value is greater than 0.05, hence we fail to reject the null hypothesis:
# there is a unit root in the time series, hence the time series is not stationary
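# Sketch: the same conclusion can be checked programmatically; adf.test() returns
# an htest object whose p.value element can be compared against 0.05.
adf_p <- adf.test(data)$p.value
if (adf_p > 0.05) {
  message("Fail to reject H0: unit root present, series treated as non-stationary")
} else {
  message("Reject H0 at the 5% level: no evidence of a unit root")
}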
#----------------------------------------------
# Automatic ARIMA Model
model2 <- auto.arima(data)
model2
plot(forecast(model2, h=12))
accuracy(model2)
#----------------------------------------------
# running model on diff data
# difference method to smoothen the data with lag = 5
adf.test(diff(data, lag = 5))
plot(diff(data, lag = 5))
model3 <- auto.arima(diff(data, lag = 5))
accuracy(model3)
acf(diff(data, lag = 5))
pacf(diff(data, lag = 5))
#-------------------------------------------------
# taking random order
model4 <- Arima(diff(data, lag = 5), order = c(4,0,5))
model4
accuracy(model4)
plot(forecast(model4, h=12))
#---------------------------------------------------
# taking random order
model5 <- Arima(diff(data, lag = 5), order = c(4,0,4))
model5
accuracy(model5)
plot(forecast(model5, h=12))
#---------------------------------------------------
# taking random order
model6 <- Arima(diff(data, lag = 5), order = c(3,0,5))
model6
accuracy(model6)
plot(forecast(model6, h=12))
#---------------------------------------------------
# taking random order
model7 <- Arima(diff(data, lag = 5), order = c(0,0,1))
model7
accuracy(model7)
plot(forecast(model7, h=12))
#---------------------------------------------------
# taking random order
model8 <- Arima(diff(data, lag = 5), order = c(1,0,0))
model8
accuracy(model8)
plot(forecast(model8, h=12))
#---------------------------------------------------
# Holt Winters Exponential Smoothing Model
model9 <- HoltWinters(data, gamma = F)
summary(model9)
plot(forecast(model9, h=12))
accuracy(forecast(model9, h=12))
#-----------------------------------------------------
# ETS
model10 <- ets(data)
summary(model10)
plot(forecast(model10, h=12))
accuracy(forecast(model10, h=12))
#---------------------------------------------------------------
# model2 ( Automatic ARIMA) is most accurate with MAPE 1.15
#---------------------------------------------------------------
# Making predictions for next 15 days
predicted <- forecast(model2, 15)
# comparing the held-out closing prices (observations 62-73) with the forecast
data.frame(actual = test, forecast = as.numeric(predicted$mean)[seq_along(test)])
#-------------------------------------------------------------------
|
/12.1.R
|
no_license
|
sheetalnishad/Assignment-12.1
|
R
| false | false | 3,850 |
r
|
|
command <- paste(path_to_course,"ODBC_Setup.pdf",sep='/')
command <- gsub("/","\\\\",command)
system("cmd.exe", input = paste("\"",command,"\"",sep=""))
|
/R_102 - Getting_and_Cleaning_Data/ODBC/ODBC_Setup.R
|
no_license
|
ImprovementPathSystems/IPS_swirl_beta
|
R
| false | false | 157 |
r
|
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{levellog}
\alias{debug}
\alias{error}
\alias{fatal}
\alias{info}
\alias{levellog}
\alias{warn}
\title{Write messages to logs at a given priority level.}
\usage{
levellog(logger, level, message)
debug(logger, message)
info(logger, message)
warn(logger, message)
error(logger, message)
fatal(logger, message)
}
\arguments{
\item{logger}{An object of class 'logger'.}
\item{level}{The desired priority level: a number, a character, or an object
of class 'loglevel'. Will be coerced using \code{\link{as.loglevel}}.}
\item{message}{A string to be printed to the log with the corresponding priority level.}
}
\description{
Write messages to logs at a given priority level.
}
\examples{
library('log4r')
logger <- create.logger(logfile = 'debugging.log', level = "WARN")
levellog(logger, 'WARN', 'First warning from our code')
debug(logger, 'Debugging our code')
info(logger, 'Information about our code')
warn(logger, 'Another warning from our code')
error(logger, 'An error from our code')
fatal(logger, "I'm outta here")
}
\seealso{
\code{\link{loglevel}}
}
|
/man/levellog.Rd
|
no_license
|
ktaranov/log4r
|
R
| false | false | 1,125 |
rd
|
|
#Problem 1
#A
tapply(RcmdrTestDrive$salary, RcmdrTestDrive$gender, mean)
#Female Male
#698.0911 743.3915
tapply(RcmdrTestDrive$salary, RcmdrTestDrive$smoking, mean)
#Nonsmoker Smoker
#719.3792 746.3494
#B
# As per Problem 1 solution A, the highest mean salary is for males
#C
mean(RcmdrTestDrive$salary)
#[1] 724.5164
#Overall average of the salary is 724.5164
#D
tapply(RcmdrTestDrive$salary, RcmdrTestDrive$gender, sd)
#Female Male
#130.7053 158.5423
boxplot(salary~gender,data= RcmdrTestDrive,main="salary versus gender",xlab="gender",ylab="salary",col=topo.colors(2))
|
/EXPLORATORY_DATA_ANALYTICS_Assignment_1.R
|
no_license
|
vimalprnr/EXPLORATORY_DATA_ANALYTICS_Assignment_1
|
R
| false | false | 618 |
r
|
##### Sampling from moisture regimes by species #####
# sorting df based on gwc into three moisture regimes
# sampling 3 quadrats from each of these regimes
library(tidyverse)
setwd("C:/Users/alexa/Dropbox (Yale_FES)/Macrosystems Biol Bradford Wieder Wood 2019-2024/")
site_data <- read_csv("metadata/sample_IDs.csv")
soil_GWC <- read_csv("calculated-data/field-experiment/prelim/soilGWC_prelim-1_Fall-2019.csv")
SoilGWC <- soil_GWC$moisturePercent
masterdata <- cbind(site_data, SoilGWC)
##### quadrats chosen for lab microcosm experiment 1 #####
set.seed(19)
str(site_data)
{
# RO
scbiRO <- masterdata[masterdata$site == "scbi" & masterdata$species == "RO",]
scbiRO_ord <- scbiRO[order(scbiRO$SoilGWC),]
microcosm_scbiRO <- c(sample(scbiRO_ord$unique.id[1:5], 3),
sample(scbiRO_ord$unique.id[6:11], 3),
sample(scbiRO_ord$unique.id[12:16], 3))
#HI
scbiHI <- masterdata[masterdata$site == "scbi" & masterdata$species == "HI",]
scbiHI_ord <- scbiHI[order(scbiHI$SoilGWC),]
microcosm_scbiHI <- c(sample(scbiHI_ord$unique.id[1:5], 3),
sample(scbiHI_ord$unique.id[6:11], 3),
sample(scbiHI_ord$unique.id[12:16], 3))
# TP
scbiTP <- masterdata[masterdata$site == "scbi" & masterdata$species == "TP",]
scbiTP_ord <- scbiTP[order(scbiTP$SoilGWC),]
microcosm_scbiTP <- c(sample(scbiTP_ord$unique.id[1:5], 3),
sample(scbiTP_ord$unique.id[6:11], 3),
sample(scbiTP_ord$unique.id[12:16], 3))
#harv
# RO
harvRO <- masterdata[masterdata$site == "harv" & masterdata$species == "RO",]
harvRO_ord <- harvRO[order(harvRO$SoilGWC),]
microcosm_harvRO <- c(sample(harvRO_ord$unique.id[1:5], 3),
sample(harvRO_ord$unique.id[6:11], 3),
sample(harvRO_ord$unique.id[12:16], 3))
#WP
harvWP <- masterdata[masterdata$site == "harv" & masterdata$species == "WP",]
harvWP_ord <- harvWP[order(harvWP$SoilGWC),]
microcosm_harvWP <- c(sample(harvWP_ord$unique.id[1:5], 3),
sample(harvWP_ord$unique.id[6:11], 3),
sample(harvWP_ord$unique.id[12:16], 3))
#RM
harvRM <- masterdata[masterdata$site == "harv" & masterdata$species == "RM",]
harvRM_ord <- harvRM[order(harvRM$SoilGWC),]
microcosm_harvRM <- c(sample(harvRM_ord$unique.id[1:5], 3),
sample(harvRM_ord$unique.id[6:11], 3),
sample(harvRM_ord$unique.id[12:16], 3))
microcosm_quadrat_number <- c(microcosm_scbiRO, microcosm_scbiHI, microcosm_scbiTP,
microcosm_harvRO, microcosm_harvWP, microcosm_harvRM)
microcosm_quadrats <- masterdata[masterdata$unique.id %in% microcosm_quadrat_number,]
masterdata$microcosm_select <- masterdata$unique.id %in% microcosm_quadrat_number
micocosm_selection <- data.frame(unique.id = masterdata$unique.id, microcosm_select = masterdata$microcosm_select)
}
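# Hedged refactor sketch (not used above): the six site/species blocks repeat the same
# order-then-sample pattern, which a helper like this hypothetical sample_regimes() could
# express once; the regime boundaries (1:5, 6:11, 12:16) are assumed to match the
# 16-quadrat layout sampled above.
sample_regimes <- function(df, site, species, n = 3) {
  sub <- df[df$site == site & df$species == species, ]  # one site/species combination
  sub <- sub[order(sub$SoilGWC), ]                       # order quadrats by moisture
  c(sample(sub$unique.id[1:5], n),                       # low-moisture regime
    sample(sub$unique.id[6:11], n),                      # mid-moisture regime
    sample(sub$unique.id[12:16], n))                     # high-moisture regime
}
# e.g. sample_regimes(masterdata, "scbi", "RO") draws the same kind of sample as
# microcosm_scbiRO above (results differ because the random seed state has advanced).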
setwd("metadata/")
write.csv(micocosm_selection, "exp-1_site_selection.csv")
##### high replication quadrat selection #####
# sites selected here will be subset and replicated 8 times to characterise the distribution of variation.
# one high and one low moisture from each q. rubra plot at scbi and harv sites
{
scbiRO2 <- masterdata[masterdata$site == "scbi" & masterdata$species == "RO" & masterdata$microcosm_select == "TRUE",]
scbiRO_ord2 <- scbiRO2[order(scbiRO2$SoilGWC),]
microcosm_scbiRO2 <- c(sample(scbiRO_ord2$unique.id[1:3], 1),
sample(scbiRO_ord2$unique.id[7:9], 1))
harvRO2 <- masterdata[masterdata$site == "harv" & masterdata$species == "RO" & masterdata$microcosm_select == "TRUE",]
harvRO_ord2 <- harvRO2[order(harvRO2$SoilGWC),]
microcosm_harvRO2 <- c(sample(harvRO_ord2$unique.id[1:3], 1),
sample(harvRO_ord2$unique.id[7:9], 1))
microcosm_quadrat_number2 <- c(microcosm_scbiRO2, microcosm_harvRO2)
microcosm_quadrats2 <- masterdata[masterdata$unique.id %in% microcosm_quadrat_number2,]
masterdata$microcosm_select2 <- masterdata$unique.id %in% microcosm_quadrat_number2
high.rep.selection <- data.frame(unique.id = masterdata$unique.id, microcosm_select2 = masterdata$microcosm_select2)
}
write.csv(high.rep.selection, "exp-1_highrep_site_selection.csv")
write.csv(masterdata[masterdata$microcosm_select == "TRUE", c("site", "Plot", "species", "microcosm_select2")], "Selected_soils.csv")
###### Visualize chosen plots #####
# Soil GWC
ggplot(masterdata, aes(x = SoilGWC, fill = site)) + geom_histogram(binwidth = 2.5) + xlim(25,95) +
geom_histogram(data = microcosm_quadrats, aes(color = I("black")), fill = "white", alpha = 0.6, linetype="dashed", binwidth = 2.5) +
  facet_grid(species~.) + labs(title = "Subset of quadrats for microcosm experiment",
                               subtitle = "Dashed are the subset used for the microcosm. In red oak, at 40%, both chosen quadrats are from harv, not one from each site; the visualization is misleading there")
# graph2ppt(file = "Chosen_microsites_vs_allGWC.ppt", width = 7, height = 7)
# subset chosen RO
# Soil GWC
ggplot(masterdata, aes(x = SoilGWC, fill = site)) + geom_histogram(binwidth = 2.5) + xlim(25,95) +
geom_histogram(data = microcosm_quadrats2, aes(color = I("black")), fill = "white", alpha = 0.6, linetype="dashed", binwidth = 2.5) +
  facet_grid(species~.) + labs(title = "Subset of quadrats for microcosm experiment",
                               subtitle = "Dashed are the subset used for the microcosm. In red oak, at 40%, both chosen quadrats are from harv, not one from each site; the visualization is misleading there")
|
/code/sampling-design/quadrat_selection_exp-1.R
|
no_license
|
swood-ecology/nsf-macrosystems
|
R
| false | false | 5,840 |
r
|
run_analysis<-function(){
##Load the Features information
columnFeature<-read.table("UCI HAR Dataset/features.txt",sep=" ")
##Initialise the fixed-width spec for read.fwf: -16 means skip that column
allColumns<-rep(-16,each=nrow(columnFeature))
##flag only the standard deviation and mean columns (width 16 = read)
allColumns[grep("std|(mean[^F])",columnFeature$V2)]<-16
##Extract the Activity Label
activityInfo<-read.table("UCI HAR Dataset/activity_labels.txt"
,col.names = c("activityLabel","activityDesc"),colClasses = "character")
activityInfo$activityLabel<-as.integer(activityInfo$activityLabel)
##Test Dataset
testData<-read.fwf("UCI HAR Dataset/test/X_test.txt",widths = allColumns)
colNameFeature<-columnFeature[grep("std|(mean[^F])",columnFeature$V2),2]
colNameFeature<-gsub("\\()","",colNameFeature)
colNameFeature<-gsub("-","",colNameFeature)
colNameFeature<-gsub("mean","Mean",colNameFeature)
colNameFeature<-gsub("std","Std",colNameFeature)
colnames(testData)<-colNameFeature
##insert test activity Label
testLabel<-read.table("UCI HAR Dataset/test/y_test.txt",col.names = "activityLabel")
testData$activityLabel<-testLabel$activityLabel
##insert test Subject
testSubject<-read.table("UCI HAR Dataset/test/subject_test.txt",col.names = "subject" ,colClasses = "character")
testData$subject<-as.integer(testSubject$subject)
##Train Dataset
trainData<-read.fwf("UCI HAR Dataset/train/X_train.txt",widths = allColumns)
colnames(trainData)<-colNameFeature
##insert train activity Label
trainLabel<-read.table("UCI HAR Dataset/train/y_train.txt",col.names = "activityLabel")
trainData$activityLabel<-trainLabel$activityLabel
##insert train Subject
trainSubject<-read.table("UCI HAR Dataset/train/subject_train.txt",col.names = "subject",colClasses = "character")
trainData$subject<-as.integer(trainSubject$subject)
totalDataset<-rbind(testData,trainData)
aggData<-aggregate(. ~ activityLabel + subject, data = totalDataset, FUN = mean)
actMerge<-merge(aggData,activityInfo,by.x="activityLabel",by.y="activityLabel",all = TRUE)
aggData<-actMerge[,c(colnames(aggData))]
aggData$activityLabel<-actMerge$activityDesc
write.table(aggData,row.names = FALSE,file = "tidy_Dat_Course3_4.csv",sep = ",")
}
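# Hypothetical usage (assumes the unzipped "UCI HAR Dataset" folder sits in the working
# directory); uncomment to build and inspect the tidy summary:
# run_analysis()
# tidy <- read.csv("tidy_Dat_Course3_4.csv")
# str(tidy)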
|
/run_analysis.R
|
no_license
|
learnsharenp/CleanDataAssignment
|
R
| false | false | 2,295 |
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536281264e+146, 1.25233108607105e-280, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615785047-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 329 |
r
|
sq.pe <-
function(r1,r2,v){
S<-seq(r1,r2,1)                                   # sequence of values spanned by r1:r2
L<-length(S)                                      # its length (not used below)
b<-lapply(1:v, function(i) c(make(r1,r2,i,v)))    # make() is supplied elsewhere in seqPERM
bb<-do.call(cbind,b)                              # bind the v results as columns
return(bb)}
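# Usage sketch (an assumption, not taken from the package documentation): sq.pe() simply
# column-binds the output of make() for each index 1..v, so make() from seqPERM must be
# available before calling it, e.g.
# perms <- sq.pe(1, 4, 24)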
|
/seqPERM/R/sq.pe.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 137 |
r
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Generated by using data-raw/docgen.R -> do not edit by hand
#' Functions available in Arrow dplyr queries
#'
#' The `arrow` package contains methods for 37 `dplyr` table functions, many of
#' which are "verbs" that do transformations to one or more tables.
#' The package also has mappings of 211 R functions to the corresponding
#' functions in the Arrow compute library. These allow you to write code inside
#' of `dplyr` methods that call R functions, including many in packages like
#' `stringr` and `lubridate`, and they will get translated to Arrow and run
#' on the Arrow query engine (Acero). This document lists all of the mapped
#' functions.
#'
#' # `dplyr` verbs
#'
#' Most verb functions return an `arrow_dplyr_query` object, similar in spirit
#' to a `dbplyr::tbl_lazy`. This means that the verbs do not eagerly evaluate
#' the query on the data. To run the query, call either `compute()`,
#' which returns an `arrow` [Table], or `collect()`, which pulls the resulting
#' Table into an R `data.frame`.
#'
#' * [`anti_join()`][dplyr::anti_join()]: the `copy` and `na_matches` arguments are ignored
#' * [`arrange()`][dplyr::arrange()]
#' * [`collapse()`][dplyr::collapse()]
#' * [`collect()`][dplyr::collect()]
#' * [`compute()`][dplyr::compute()]
#' * [`count()`][dplyr::count()]
#' * [`distinct()`][dplyr::distinct()]: `.keep_all = TRUE` not supported
#' * [`explain()`][dplyr::explain()]
#' * [`filter()`][dplyr::filter()]
#' * [`full_join()`][dplyr::full_join()]: the `copy` and `na_matches` arguments are ignored
#' * [`glimpse()`][dplyr::glimpse()]
#' * [`group_by()`][dplyr::group_by()]
#' * [`group_by_drop_default()`][dplyr::group_by_drop_default()]
#' * [`group_vars()`][dplyr::group_vars()]
#' * [`groups()`][dplyr::groups()]
#' * [`inner_join()`][dplyr::inner_join()]: the `copy` and `na_matches` arguments are ignored
#' * [`left_join()`][dplyr::left_join()]: the `copy` and `na_matches` arguments are ignored
#' * [`mutate()`][dplyr::mutate()]: window functions (e.g. things that require aggregation within groups) not currently supported
#' * [`pull()`][dplyr::pull()]: the `name` argument is not supported; returns an R vector by default but this behavior is deprecated and will return an Arrow [ChunkedArray] in a future release. Provide `as_vector = TRUE/FALSE` to control this behavior, or set `options(arrow.pull_as_vector)` globally.
#' * [`relocate()`][dplyr::relocate()]
#' * [`rename()`][dplyr::rename()]
#' * [`rename_with()`][dplyr::rename_with()]
#' * [`right_join()`][dplyr::right_join()]: the `copy` and `na_matches` arguments are ignored
#' * [`select()`][dplyr::select()]
#' * [`semi_join()`][dplyr::semi_join()]: the `copy` and `na_matches` arguments are ignored
#' * [`show_query()`][dplyr::show_query()]
#' * [`slice_head()`][dplyr::slice_head()]: slicing within groups not supported; Arrow datasets do not have row order, so head is non-deterministic; `prop` only supported on queries where `nrow()` is knowable without evaluating
#' * [`slice_max()`][dplyr::slice_max()]: slicing within groups not supported; `with_ties = TRUE` (dplyr default) is not supported; `prop` only supported on queries where `nrow()` is knowable without evaluating
#' * [`slice_min()`][dplyr::slice_min()]: slicing within groups not supported; `with_ties = TRUE` (dplyr default) is not supported; `prop` only supported on queries where `nrow()` is knowable without evaluating
#' * [`slice_sample()`][dplyr::slice_sample()]: slicing within groups not supported; `replace = TRUE` and the `weight_by` argument not supported; `n` only supported on queries where `nrow()` is knowable without evaluating
#' * [`slice_tail()`][dplyr::slice_tail()]: slicing within groups not supported; Arrow datasets do not have row order, so tail is non-deterministic; `prop` only supported on queries where `nrow()` is knowable without evaluating
#' * [`summarise()`][dplyr::summarise()]: window functions not currently supported; arguments `.drop = FALSE` and `.groups = "rowwise"` not supported
#' * [`tally()`][dplyr::tally()]
#' * [`transmute()`][dplyr::transmute()]
#' * [`ungroup()`][dplyr::ungroup()]
#' * [`union()`][dplyr::union()]
#' * [`union_all()`][dplyr::union_all()]
#'
#' # Function mappings
#'
#' In the list below, any differences in behavior or support between Acero and
#' the R function are listed. If no notes follow the function name, then you
#' can assume that the function works in Acero just as it does in R.
#'
#' Functions can be called either as `pkg::fun()` or just `fun()`, i.e. both
#' `str_sub()` and `stringr::str_sub()` work.
#'
#' In addition to these functions, you can call any of Arrow's 254 compute
#' functions directly. Arrow has many functions that don't map to an existing R
#' function. In other cases where there is an R function mapping, you can still
#' call the Arrow function directly if you don't want the adaptations that the R
#' mapping has that make Acero behave like R. These functions are listed in the
#' [C++ documentation](https://arrow.apache.org/docs/cpp/compute.html), and
#' in the function registry in R, they are named with an `arrow_` prefix, such
#' as `arrow_ascii_is_decimal`.
#'
#' ## arrow
#'
#' * [`add_filename()`][arrow::add_filename()]
#' * [`cast()`][arrow::cast()]
#'
#' ## base
#'
#' * [`!`][!()]
#' * [`!=`][!=()]
#' * [`%%`][%%()]
#' * [`%/%`][%/%()]
#' * [`%in%`][%in%()]
#' * [`&`][&()]
#' * [`*`][*()]
#' * [`+`][+()]
#' * [`-`][-()]
#' * [`/`][/()]
#' * [`<`][<()]
#' * [`<=`][<=()]
#' * [`==`][==()]
#' * [`>`][>()]
#' * [`>=`][>=()]
#' * [`ISOdate()`][base::ISOdate()]
#' * [`ISOdatetime()`][base::ISOdatetime()]
#' * [`^`][^()]
#' * [`abs()`][base::abs()]
#' * [`acos()`][base::acos()]
#' * [`all()`][base::all()]
#' * [`any()`][base::any()]
#' * [`as.Date()`][base::as.Date()]: Multiple `tryFormats` not supported in Arrow.
#' Consider using the lubridate specialised parsing functions `ymd()`, `ymd_hms()`, etc.
#' * [`as.character()`][base::as.character()]
#' * [`as.difftime()`][base::as.difftime()]: only supports `units = "secs"` (the default)
#' * [`as.double()`][base::as.double()]
#' * [`as.integer()`][base::as.integer()]
#' * [`as.logical()`][base::as.logical()]
#' * [`as.numeric()`][base::as.numeric()]
#' * [`asin()`][base::asin()]
#' * [`ceiling()`][base::ceiling()]
#' * [`cos()`][base::cos()]
#' * [`data.frame()`][base::data.frame()]: `row.names` and `check.rows` arguments not supported;
#' `stringsAsFactors` must be `FALSE`
#' * [`difftime()`][base::difftime()]: only supports `units = "secs"` (the default);
#' `tz` argument not supported
#' * [`endsWith()`][base::endsWith()]
#' * [`exp()`][base::exp()]
#' * [`floor()`][base::floor()]
#' * [`format()`][base::format()]
#' * [`grepl()`][base::grepl()]
#' * [`gsub()`][base::gsub()]
#' * [`ifelse()`][base::ifelse()]
#' * [`is.character()`][base::is.character()]
#' * [`is.double()`][base::is.double()]
#' * [`is.factor()`][base::is.factor()]
#' * [`is.finite()`][base::is.finite()]
#' * [`is.infinite()`][base::is.infinite()]
#' * [`is.integer()`][base::is.integer()]
#' * [`is.list()`][base::is.list()]
#' * [`is.logical()`][base::is.logical()]
#' * [`is.na()`][base::is.na()]
#' * [`is.nan()`][base::is.nan()]
#' * [`is.numeric()`][base::is.numeric()]
#' * [`log()`][base::log()]
#' * [`log10()`][base::log10()]
#' * [`log1p()`][base::log1p()]
#' * [`log2()`][base::log2()]
#' * [`logb()`][base::logb()]
#' * [`max()`][base::max()]
#' * [`mean()`][base::mean()]
#' * [`min()`][base::min()]
#' * [`nchar()`][base::nchar()]: `allowNA = TRUE` and `keepNA = TRUE` not supported
#' * [`paste()`][base::paste()]: the `collapse` argument is not yet supported
#' * [`paste0()`][base::paste0()]: the `collapse` argument is not yet supported
#' * [`pmax()`][base::pmax()]
#' * [`pmin()`][base::pmin()]
#' * [`round()`][base::round()]
#' * [`sign()`][base::sign()]
#' * [`sin()`][base::sin()]
#' * [`sqrt()`][base::sqrt()]
#' * [`startsWith()`][base::startsWith()]
#' * [`strftime()`][base::strftime()]
#' * [`strptime()`][base::strptime()]: accepts a `unit` argument not present in the `base` function.
#' Valid values are "s", "ms" (default), "us", "ns".
#' * [`strrep()`][base::strrep()]
#' * [`strsplit()`][base::strsplit()]
#' * [`sub()`][base::sub()]
#' * [`substr()`][base::substr()]: `start` and `stop` must be length 1
#' * [`substring()`][base::substring()]
#' * [`sum()`][base::sum()]
#' * [`tan()`][base::tan()]
#' * [`tolower()`][base::tolower()]
#' * [`toupper()`][base::toupper()]
#' * [`trunc()`][base::trunc()]
#' * [`|`][|()]
#'
#' ## bit64
#'
#' * [`as.integer64()`][bit64::as.integer64()]
#' * [`is.integer64()`][bit64::is.integer64()]
#'
#' ## dplyr
#'
#' * [`across()`][dplyr::across()]
#' * [`between()`][dplyr::between()]
#' * [`case_when()`][dplyr::case_when()]: `.ptype` and `.size` arguments not supported
#' * [`coalesce()`][dplyr::coalesce()]
#' * [`desc()`][dplyr::desc()]
#' * [`if_all()`][dplyr::if_all()]
#' * [`if_any()`][dplyr::if_any()]
#' * [`if_else()`][dplyr::if_else()]
#' * [`n()`][dplyr::n()]
#' * [`n_distinct()`][dplyr::n_distinct()]
#'
#' ## lubridate
#'
#' * [`am()`][lubridate::am()]
#' * [`as_date()`][lubridate::as_date()]
#' * [`as_datetime()`][lubridate::as_datetime()]
#' * [`ceiling_date()`][lubridate::ceiling_date()]
#' * [`date()`][lubridate::date()]
#' * [`date_decimal()`][lubridate::date_decimal()]
#' * [`day()`][lubridate::day()]
#' * [`ddays()`][lubridate::ddays()]
#' * [`decimal_date()`][lubridate::decimal_date()]
#' * [`dhours()`][lubridate::dhours()]
#' * [`dmicroseconds()`][lubridate::dmicroseconds()]
#' * [`dmilliseconds()`][lubridate::dmilliseconds()]
#' * [`dminutes()`][lubridate::dminutes()]
#' * [`dmonths()`][lubridate::dmonths()]
#' * [`dmy()`][lubridate::dmy()]: `locale` argument not supported
#' * [`dmy_h()`][lubridate::dmy_h()]: `locale` argument not supported
#' * [`dmy_hm()`][lubridate::dmy_hm()]: `locale` argument not supported
#' * [`dmy_hms()`][lubridate::dmy_hms()]: `locale` argument not supported
#' * [`dnanoseconds()`][lubridate::dnanoseconds()]
#' * [`dpicoseconds()`][lubridate::dpicoseconds()]: not supported
#' * [`dseconds()`][lubridate::dseconds()]
#' * [`dst()`][lubridate::dst()]
#' * [`dweeks()`][lubridate::dweeks()]
#' * [`dyears()`][lubridate::dyears()]
#' * [`dym()`][lubridate::dym()]: `locale` argument not supported
#' * [`epiweek()`][lubridate::epiweek()]
#' * [`epiyear()`][lubridate::epiyear()]
#' * [`fast_strptime()`][lubridate::fast_strptime()]: non-default values of `lt` and `cutoff_2000` not supported
#' * [`floor_date()`][lubridate::floor_date()]
#' * [`force_tz()`][lubridate::force_tz()]: Timezone conversion from non-UTC timezone not supported;
#' `roll_dst` values of 'error' and 'boundary' are supported for nonexistent times,
#' `roll_dst` values of 'error', 'pre', and 'post' are supported for ambiguous times.
#' * [`format_ISO8601()`][lubridate::format_ISO8601()]
#' * [`hour()`][lubridate::hour()]
#' * [`is.Date()`][lubridate::is.Date()]
#' * [`is.POSIXct()`][lubridate::is.POSIXct()]
#' * [`is.instant()`][lubridate::is.instant()]
#' * [`is.timepoint()`][lubridate::is.timepoint()]
#' * [`isoweek()`][lubridate::isoweek()]
#' * [`isoyear()`][lubridate::isoyear()]
#' * [`leap_year()`][lubridate::leap_year()]
#' * [`make_date()`][lubridate::make_date()]
#' * [`make_datetime()`][lubridate::make_datetime()]: only supports UTC (default) timezone
#' * [`make_difftime()`][lubridate::make_difftime()]: only supports `units = "secs"` (the default);
#' providing both `num` and `...` is not supported
#' * [`mday()`][lubridate::mday()]
#' * [`mdy()`][lubridate::mdy()]: `locale` argument not supported
#' * [`mdy_h()`][lubridate::mdy_h()]: `locale` argument not supported
#' * [`mdy_hm()`][lubridate::mdy_hm()]: `locale` argument not supported
#' * [`mdy_hms()`][lubridate::mdy_hms()]: `locale` argument not supported
#' * [`minute()`][lubridate::minute()]
#' * [`month()`][lubridate::month()]
#' * [`my()`][lubridate::my()]: `locale` argument not supported
#' * [`myd()`][lubridate::myd()]: `locale` argument not supported
#' * [`parse_date_time()`][lubridate::parse_date_time()]: `quiet = FALSE` is not supported.
#' Available formats are H, I, j, M, S, U, w, W, y, Y, R, T.
#' On Linux and OS X additionally a, A, b, B, Om, p, r are available.
#' * [`pm()`][lubridate::pm()]
#' * [`qday()`][lubridate::qday()]
#' * [`quarter()`][lubridate::quarter()]
#' * [`round_date()`][lubridate::round_date()]
#' * [`second()`][lubridate::second()]
#' * [`semester()`][lubridate::semester()]
#' * [`tz()`][lubridate::tz()]
#' * [`wday()`][lubridate::wday()]
#' * [`week()`][lubridate::week()]
#' * [`with_tz()`][lubridate::with_tz()]
#' * [`yday()`][lubridate::yday()]
#' * [`ydm()`][lubridate::ydm()]: `locale` argument not supported
#' * [`ydm_h()`][lubridate::ydm_h()]: `locale` argument not supported
#' * [`ydm_hm()`][lubridate::ydm_hm()]: `locale` argument not supported
#' * [`ydm_hms()`][lubridate::ydm_hms()]: `locale` argument not supported
#' * [`year()`][lubridate::year()]
#' * [`ym()`][lubridate::ym()]: `locale` argument not supported
#' * [`ymd()`][lubridate::ymd()]: `locale` argument not supported
#' * [`ymd_h()`][lubridate::ymd_h()]: `locale` argument not supported
#' * [`ymd_hm()`][lubridate::ymd_hm()]: `locale` argument not supported
#' * [`ymd_hms()`][lubridate::ymd_hms()]: `locale` argument not supported
#' * [`yq()`][lubridate::yq()]: `locale` argument not supported
#'
#' ## methods
#'
#' * [`is()`][methods::is()]
#'
#' ## rlang
#'
#' * [`is_character()`][rlang::is_character()]
#' * [`is_double()`][rlang::is_double()]
#' * [`is_integer()`][rlang::is_integer()]
#' * [`is_list()`][rlang::is_list()]
#' * [`is_logical()`][rlang::is_logical()]
#'
#' ## stats
#'
#' * [`median()`][stats::median()]: approximate median (t-digest) is computed
#' * [`quantile()`][stats::quantile()]: `probs` must be length 1;
#' approximate quantile (t-digest) is computed
#' * [`sd()`][stats::sd()]
#' * [`var()`][stats::var()]
#'
#' ## stringi
#'
#' * [`stri_reverse()`][stringi::stri_reverse()]
#'
#' ## stringr
#'
#' Pattern modifiers `coll()` and `boundary()` are not supported in any functions.
#'
#' * [`str_c()`][stringr::str_c()]: the `collapse` argument is not yet supported
#' * [`str_count()`][stringr::str_count()]: `pattern` must be a length 1 character vector
#' * [`str_detect()`][stringr::str_detect()]
#' * [`str_dup()`][stringr::str_dup()]
#' * [`str_ends()`][stringr::str_ends()]
#' * [`str_length()`][stringr::str_length()]
#' * [`str_like()`][stringr::str_like()]
#' * [`str_pad()`][stringr::str_pad()]
#' * [`str_remove()`][stringr::str_remove()]
#' * [`str_remove_all()`][stringr::str_remove_all()]
#' * [`str_replace()`][stringr::str_replace()]
#' * [`str_replace_all()`][stringr::str_replace_all()]
#' * [`str_split()`][stringr::str_split()]: Case-insensitive string splitting and splitting into 0 parts not supported
#' * [`str_starts()`][stringr::str_starts()]
#' * [`str_sub()`][stringr::str_sub()]: `start` and `end` must be length 1
#' * [`str_to_lower()`][stringr::str_to_lower()]
#' * [`str_to_title()`][stringr::str_to_title()]
#' * [`str_to_upper()`][stringr::str_to_upper()]
#' * [`str_trim()`][stringr::str_trim()]
#'
#' ## tibble
#'
#' * [`tibble()`][tibble::tibble()]
#'
#' ## tidyselect
#'
#' * [`all_of()`][tidyselect::all_of()]
#' * [`contains()`][tidyselect::contains()]
#' * [`ends_with()`][tidyselect::ends_with()]
#' * [`everything()`][tidyselect::everything()]
#' * [`last_col()`][tidyselect::last_col()]
#' * [`matches()`][tidyselect::matches()]
#' * [`num_range()`][tidyselect::num_range()]
#' * [`one_of()`][tidyselect::one_of()]
#' * [`starts_with()`][tidyselect::starts_with()]
#'
#' @name acero
#'
#' @aliases arrow-functions arrow-verbs arrow-dplyr
NULL
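# Illustrative sketch (not part of the generated documentation): the verbs and function
# mappings listed above compose into a lazy Acero query, for example:
# library(arrow)
# library(dplyr)
# arrow_table(mtcars) %>%
#   filter(cyl > 4) %>%
#   group_by(gear) %>%
#   summarise(avg_mpg = mean(mpg)) %>%
#   collect()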
|
/r/R/dplyr-funcs-doc.R
|
permissive
|
avmi/arrow
|
R
| false | false | 16,478 |
r
|
NEI <- readRDS("summarySCC_PM25.rds")
total_by_year <- aggregate(NEI$Emissions, by=list(year=NEI$year), sum)
png(filename = "plot1.png")
barplot(total_by_year$x/1000, names.arg=total_by_year$year, xlab="Year", ylab="Total Emissions (Kilotons)")
title("Total PM2.5 emissions : United States")
dev.off()
|
/plot1.R
|
no_license
|
nickcotter/exploratory-data-analysis-project
|
R
| false | false | 303 |
r
|
# load file into R as elecP
elecP<-read.table("household_power_consumption.txt",sep=";", header=TRUE, na.strings="?", stringsAsFactors = FALSE)
# change Date variables into Date class, and subset data from the dates 2007-02-01 and 2007-02-02
library(lubridate)
elecP$Date<-dmy(elecP$Date)
elecP_sub <- subset(elecP, Date >= ymd("2007-02-01") & Date <= ymd("2007-02-02") )
# parse Time with lubridate, combine Date and Time into a new Datetime variable, and add it to the data frame with dplyr::mutate
elecP_sub$Time<-hms(elecP_sub$Time)
Datetime<- paste(elecP_sub$Date+elecP_sub$Time)
library(dplyr)
elecP_sub<-mutate(elecP_sub,Datetime)
elecP_sub$Datetime <- as.POSIXct(elecP_sub$Datetime)
# plot3, print on screen (legend box is very big)
plot(elecP_sub$Sub_metering_1~elecP_sub$Datetime, type="l",ylab="Energy Submetering", xlab="")
lines(elecP_sub$Sub_metering_2~elecP_sub$Datetime, type="l",col="red")
lines(elecP_sub$Sub_metering_3~elecP_sub$Datetime, type="l",col="blue")
legend('topright', lty=1, c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2.5, col=c("black", "red", "blue"))
# generate the "plot3.png" file (copying the screen device to png caused problems with the legend, so redraw directly on the png device)
png(file="plot3.png", width=480, height=480)
plot(elecP_sub$Sub_metering_1~elecP_sub$Datetime, type="l",ylab="Energy Submetering", xlab="")
lines(elecP_sub$Sub_metering_2~elecP_sub$Datetime, type="l",col="red")
lines(elecP_sub$Sub_metering_3~elecP_sub$Datetime, type="l",col="blue")
legend('topright', lty=1, c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2.5, col=c("black", "red", "blue"))
dev.off()
|
/plot3.R
|
no_license
|
tzzhangjuan1/ExData_Plotting1
|
R
| false | false | 1,676 |
r
|
Not a single God-given day goes by without seeing hatchet jobs in the newspapers.
Some of them, however, must be read between the lines or even backwards.
For instance, Ziua has for a while now been hammering away at Nicusor Nastase (owner of the Vox Maris discotheque) and at Liviu Mihaiu (deputy editor-in-chief at Academia Catavencu).
Indeed, in its Internet polls, the newspaper run by Sorin Rosca Stanescu tried to learn its readers' answer to the question: Is Liviu Mihaiu a journalist sold out to crooks?
You need plenty of nerve in today's Romania to ignore, like a boulder, everything said about you, to miss the beam in your own eye and, with the nonchalance of the oblivious, to trample all over anyone.
Perhaps we would have learned equally interesting answers if, in that poll, the name of Liviu Mihaiu had been replaced with that of the director of the newspaper Ziua.
Let us go back in time a little.
Before 1996, Ziua was the mortal enemy of President Ion Iliescu and of the PDSR.
The story about Iliescu and the KGB and the phantom-like Igor Botnariciuc amused some and made others believe it was the plain naked truth.
After the 2000 elections, Ziua changed course where Ion Iliescu is concerned.
Perfect love!
Last year, Ziua had advertising contracts worth a million dollars from Societatea Nationala Tutunul Romanesc.
Obviously, in the dispute over the company's privatization it was plain that the newspaper played a peculiar game.
Also in years past, Radu Berceanu was a sort of subscriber-client as a positive character.
How many hundreds of thousands of advertising dollars did state-owned companies pay on contracts with Ziua?
Only the Court of Accounts could tell us!
As soon as the gravy train from the Ministry of Industries dried up, Ziua began an attack on Gheorghe Olteanu, head of the Control Body of the Ministry of Industry and Resources.
Multiple arguments were used, but one thing went unsaid.
That Olteanu did not agree to allocating advertising funds for Ziua.
If we comb through other hatchet jobs in Ziua, we find plenty of non-journalism and can understand revenues that are outsized relative to its circulation.
We also notice that, from a certain point on, the newspaper's attacks stop and advertising appears in their place.
Can everyone around town who talks about the journalistic pressure put on companies to buy advertising really be crazy?
Not to mention the newspaper's critical articles aimed at billboard companies, when almost everyone knows the interests some Ziua shareholders hold in a firm with the same profile, called Beta Cons.
And through Beta Cons, the lint the newspaper picks almost weekly off the lapels of Traian Basescu, the mayor on whose territory the firm operates, is likewise a questionable link between the press and vested interests.
Even if it tries to whitewash Irinel Columbeanu (one of the financial bad apples of the transition), Ziua is a champion of the market, while risking failing the test of professional ethics.
The case in question is known to politicians, to the police, and to journalists, having become familiar even to the neighborhood cats.
But if we look at Curierul National and other publications, we easily see that Ziua is not an isolated case.
There are also television stations that ask, and there are media groups that receive.
However sad it may seem, the Romanian press has its own sins of the transition.
In places, the pen, the microphone, and the television camera look like tools for highway robbery.
But with a tie on, and in the name of freedom of information.
|
/data/Newspapers/2001.08.17.editorial.64578.0694.r
|
no_license
|
narcis96/decrypting-alpha
|
R
| false | false | 3,651 |
r
|
f.emo <- function(dt){
emotion_dad <- NULL
for(i in 1:dim(dt)[1]){
sentences <- syuzhet::get_sentences(dt$stemmedwords[i])
emotions <- matrix(emotion(sentences)$emotion,
nrow = length(sentences),
byrow = T)
colnames(emotions) <- emotion(sentences[1])$emotion_type
emotions <- data.frame(emotions)
emotions <- select(emotions,
anticipation,
joy,
surprise,
trust,
anger,
disgust,
fear,
sadness)
emotion_dad <- rbind(emotion_dad, emotions)
}
return(emotion_dad)
}
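## Example usage of f.emo (an added sketch, not part of the original file):
## the function expects a data frame with a character column 'stemmedwords' and returns
## one row of eight emotion scores per sentence. It assumes get_sentences() from 'syuzhet',
## select() from 'dplyr' and an emotion() function (presumably from 'sentimentr') are loaded.
# library(dplyr); library(syuzhet); library(sentimentr)
# toy <- data.frame(stemmedwords = c("we laugh and sing with joy",
#                                    "alone i cry in the dark"),
#                   stringsAsFactors = FALSE)
# emo_scores <- f.emo(toy)
# head(emo_scores)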
|
/lib/f.emo.R
|
no_license
|
Raymond-601/Sentiment-Analysis-on-Lyrics
|
R
| false | false | 728 |
r
|
## Now load 'inline' to compile C++ code on the fly
library(inline)
code = "
arma::mat coef = Rcpp::as<arma::mat>(a);
arma::mat errors = Rcpp::as<arma::mat>(u);
int m = errors.n_rows;
int n = errors.n_cols;
arma::mat simdata(m, n);
simdata.row(0) = arma::zeros<arma::mat>(1,n);
for (int row=1; row<m; row++) {
simdata.row(row) = simdata.row(row-1) * trans(coef) + errors.row(row);
}
return Rcpp::wrap(simdata);
"
## create the compiled function
rcppSim = cxxfunction(signature(a = "numeric", u = "numeric"), code, plugin = "RcppArmadillo")
set.seed(123)
a = matrix(c(0.5, 0.1, 0.1, 0.5), nrow = 2)
u = matrix(rnorm(10000), ncol = 2)
rcppData = rcppSim(a, u)
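## Added for reference (a sketch, not part of the original example): the same VAR(1)
## recursion written in plain R, handy for checking the Rcpp result.
rSim = function(coef, errors) {
  simdata = matrix(0, nrow(errors), ncol(errors))
  for (row in 2:nrow(errors)) {
    simdata[row, ] = simdata[row - 1, ] %*% t(coef) + errors[row, ]
  }
  simdata
}
rData = rSim(a, u)
stopifnot(all.equal(rData, rcppData))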
|
/chap_01/var_inline.R
|
no_license
|
ja-thomas/rcpp_examples
|
R
| false | false | 684 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.survexp.dk.R
\docType{data}
\name{survexp.dk}
\alias{survexp.dk}
\title{Ratetable of the Danish general population}
\format{
An object of class \code{ratetable} of dimension 111 x 180 x 2.
}
\usage{
survexp.dk
}
\description{
Object of class \code{ratetable} containing the daily hazards in the Danish general population
as reported by the Human Mortality Database (www.mortality.org).
}
\details{
The ratetable was generated by using the \code{relsurv::transrate.hmd} function.
The data were downloaded on 15-09-2017 separately for male and female Danish citizens.\cr
The data can be accessed through:\cr
Female: http://www.mortality.org/hmd/DNK/STATS/fltper_1x1.txt\cr
Male: http://www.mortality.org/hmd/DNK/STATS/mltper_1x1.txt\cr
}
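\examples{
## Minimal inspection sketch (added for illustration; the age x calendar-year x sex
## interpretation of the dimensions is an assumption):
class(survexp.dk)
dim(survexp.dk)
}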
\keyword{datasets}
|
/man/survexp.dk.Rd
|
no_license
|
cran/cuRe
|
R
| false | true | 866 |
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{cutree_1h.dendrogram}
\alias{cutree_1h.dendrogram}
\title{cutree for dendrogram (by 1 height only!)}
\usage{
cutree_1h.dendrogram(tree, h, order_clusters_as_data = TRUE,
use_labels_not_values = TRUE, warn = TRUE, ...)
}
\arguments{
\item{tree}{a dendrogram object}
\item{h}{numeric scalar (NOT a vector) with a height where the tree should be cut.}
\item{use_labels_not_values}{logical, defaults to TRUE. If the actual labels of the
clusters do not matter - and we want to gain speed (say, 10 times faster) -
then use FALSE (gives the "leaves order" instead of their labels).}
\item{order_clusters_as_data}{logical, defaults to TRUE. There are two ways by which
to order the clusters: 1) By the order of the original data. 2) by the order of the
labels in the dendrogram. In order to be consistent with \link[stats]{cutree}, this is set
to TRUE.}
\item{warn}{logical. Should the function report a warning in extreme cases.}
\item{...}{(not currently in use)}
}
\value{
\code{cutree_1h.dendrogram} returns an integer vector with group memberships
}
\description{
Cuts a dendrogram tree into several groups
by specifying the desired cut height (only a single height!).
}
\examples{
hc <- hclust(dist(USArrests[c(1,6,13,20, 23),]), "ave")
dend <- as.dendrogram(hc)
cutree(hc, h=50) # on hclust
cutree_1h.dendrogram(dend, h=50) # on a dendrogram
labels(dend)
# the default (ordered by original data's order)
cutree_1h.dendrogram(dend, h=50, order_clusters_as_data = TRUE)
# A different order of labels - order by their order in the tree
cutree_1h.dendrogram(dend, h=50, order_clusters_as_data = FALSE)
# make it faster
\dontrun{
require(microbenchmark)
microbenchmark(
cutree_1h.dendrogram(dend, h=50),
cutree_1h.dendrogram(dend, h=50,use_labels_not_values = FALSE)
)
# 0.8 vs 0.6 sec - for 100 runs
}
}
\author{
Tal Galili
}
\seealso{
\code{\link{hclust}}, \code{\link{cutree}}
}
|
/man/cutree_1h.dendrogram.Rd
|
no_license
|
xtmgah/dendextend
|
R
| false | false | 1,984 |
rd
|
#' @title List built targets.
#' @export
#' @family progress
#' @description List targets whose progress is `"built"`.
#' @return A character vector of built targets.
#' @inheritParams tar_progress
#' @param names Optional, names of the targets. If supplied, the
#' function restricts its output to these targets.
#' You can supply symbols
#' or `tidyselect` helpers like [all_of()] and [starts_with()].
#' @examples
#' if (identical(Sys.getenv("TAR_EXAMPLES"), "true")) {
#' tar_dir({ # tar_dir() runs code from a temporary directory.
#' tar_script({
#' list(
#' tar_target(x, seq_len(2)),
#' tar_target(y, 2 * x, pattern = map(x))
#' )
#' }, ask = FALSE)
#' tar_make()
#' tar_built()
#' tar_built(starts_with("y_")) # see also all_of()
#' })
#' }
tar_built <- function(
names = NULL,
store = targets::tar_config_get("store")
) {
progress <- progress_init(path_store = store)
progress <- tibble::as_tibble(progress$database$read_condensed_data())
names_quosure <- rlang::enquo(names)
names <- tar_tidyselect_eval(names_quosure, progress$name)
if (!is.null(names)) {
progress <- progress[match(names, progress$name), , drop = FALSE] # nolint
}
progress$name[progress$progress == "built"]
}
|
/R/tar_built.R
|
permissive
|
billdenney/targets
|
R
| false | false | 1,230 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tcplPrepOtpt.R
\name{tcplPrepOtpt}
\alias{tcplPrepOtpt}
\title{Map assay/chemical ID values to annotation information}
\usage{
tcplPrepOtpt(dat, ids = NULL)
}
\arguments{
\item{dat}{data.table, output from \code{\link{tcplLoadData}}}
\item{ids}{Character, (optional) a subset of ID fields to map}
}
\value{
The given data.table with chemical and assay information mapped
}
\description{
\code{tcplPrepOtpt} queries the chemical and assay information from the tcpl
database, and maps the annotation information to the given data.
}
\details{
\code{tcplPrepOtpt} is used to map chemical and assay identifiers to their
respective names and annotation information to create a human-readable table
that is more suitable for an export/output.
By default the function will map sample ID (spid), assay component id (acid),
and assay endpoint ID (aeid) values. However, if 'ids' is not null, the
function will only attempt to map the ID fields given by 'ids.'
}
\examples{
## Store the current config settings, so they can be reloaded at the end
## of the examples
conf_store <- tcplConfList()
tcplConfDefault()
## Load some example data
d1 <- tcplLoadData(1)
## Check for chemical name in 'dat'
"chnm" \%in\% names(d1) ## FALSE
## Map chemical annotation only
d2 <- tcplPrepOtpt(d1, ids = "spid")
"chnm" \%in\% names(d2) ## TRUE
"acnm" \%in\% names(d2) ## FALSE
## Map all annotations
d3 <- tcplPrepOtpt(d1) ## Also works if function is given d2
"chnm" \%in\% names(d2) ## TRUE
"acnm" \%in\% names(d2) ## TRUE
## Reset configuration
options(conf_store)
}
|
/man/tcplPrepOtpt.Rd
|
no_license
|
carolineshep/tcpl-toxcast-info
|
R
| false | true | 1,636 |
rd
|
### Welcome to this mini practice of using Github ###
### You will need to finish the following tasks, and then push your code to your repository ###
### Remember to write down your documentation along with your code ###
### Clear your R environment first using the following command ###
rm(list=ls())
### First generate 100 values from a normal distribution with mean=5 and standard deviation =1 ###
### Plot a histogram using these 100 values you just generated ###
### Plot the density of these 100 values you just generated (with blue color)###
### Comment on your density plot ###
### Now repeat the above 4 steps, but with sample size = 10000 ###
### Now find the 97.5 percent quantile of this data ###
### Plot the 97.5 percent quantile line (red in colour) on your plot ###
### Plot the 2.5 percent quantile line (green in colour) on your plot ###
### Remember to label your plot ###
### Add legend to your plot, indicating that which line represents which quantile ###
### Remember to save your code before you proceed ###
### Now generate a 10000 times 10 matrix, with each column generated a normal distribution with mean=5 and standard deviation =1 ###
### Call this matrix_full ###
### Find the mean of each column, and store the values in a vector ###
### Find the mean of each row, and store the values in a vector ###
### Now we magically make 30% of each column become missing data and rename this matrix as matrix_mis ###
### HINT: Missing values are denoted by NA in R ###
### Now try to find the means of each column ###
### Simply pick one column (column 10, since today is 10th October) from matrix_mis and call it vector_mis ###
### Use random sampling (commonly called the bootstrap method) to fill in the missing values; call this vector_fill ###
### Calculate the mean of this vector_fill ###
### Calculate the difference between mean of the matrix_full[,10] and this vector_fill ###
### Comment on your results ###
### Remember to save your code before you proceed ###
### Now, instead of just the 10th column, repeat the whole procedure for the whole matrix ###
##I HAVE MADE CHANGES##
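### One possible sketch for the first tasks (added for illustration only - the exercise expects your own code) ###
# x <- rnorm(100, mean = 5, sd = 1)
# hist(x, main = "Histogram of 100 draws from N(5, 1)", xlab = "x")
# plot(density(x), col = "blue", main = "Density of 100 draws from N(5, 1)")
# y <- rnorm(10000, mean = 5, sd = 1)
# plot(density(y), col = "blue", main = "Density of 10000 draws from N(5, 1)")
# abline(v = quantile(y, 0.975), col = "red")
# abline(v = quantile(y, 0.025), col = "green")
# legend("topright", legend = c("97.5% quantile", "2.5% quantile"), col = c("red", "green"), lty = 1)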
|
/Mini_Practice.r
|
no_license
|
Janlim94/learninghub
|
R
| false | false | 2,114 |
r
|
#' Set up the project
#'
#' \code{setup} sources env.R in the repo/project top level folder.
#'
#' @import here
#' @author Ben Anderson, \email{b.anderson@@soton.ac.uk}
#' @export
#' @family utils
#'
setup <- function() {
source(here::here("env.R"))
}
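# Added usage note (illustrative): called from anywhere inside the project,
#   setup()
# sources "<project root>/env.R", with the root located via here::here().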
|
/R/setup.R
|
permissive
|
CfSOtago/airQual
|
R
| false | false | 253 |
r
|
# link here: https://insightr.wordpress.com/2017/06/14/when-the-lasso-fails/
# notes: lasso based on two assumptions.
# 1. sparsity - only a small number of many available variables may be relevant
# 2. irrepresentable condition, irc. relevant variables and irrelevant variables are uncorrelated
# this demo is what happens when assumption 2 is violated
library(mvtnorm)
library(corrplot)
library(glmnet)
library(clusterGeneration)
k=10 # = Number of Candidate Variables
p=5 # = Number of Relevant Variables
N=500 # = Number of observations
betas=(-1)^(1:p) # = Values for beta = rep(c(-1, 1), 3)[1:p]
set.seed(12345) # = Seed for replication
sigma1=genPositiveDefMat(k,"unifcorrmat")$Sigma # = Sigma1 violates the irc
sigma2=sigma1 # = Sigma2 satisfies the irc
sigma2[(p+1):k,1:p]=0
sigma2[1:p,(p+1):k]=0
# note that the cov matrix divides into 4 theoretical blocks: rel-rel, irrel-irrel, rel-irrel and irrel-rel
# the IRC holds if every element of | Sigma_{irrel,rel} %*% solve(Sigma_{rel,rel}) %*% sign(betas) | is < 1
# = Verify the irrepresentable condition
irc1=sort(abs(sigma1[(p+1):k,1:p]%*%solve(sigma1[1:p,1:p])%*%sign(betas)))
irc2=sort(abs(sigma2[(p+1):k,1:p]%*%solve(sigma2[1:p,1:p])%*%sign(betas)))
c(max(irc1),max(irc2))
# = Have a look at the correlation matrices
par(mfrow=c(1,2))
corrplot(cov2cor(sigma1))
corrplot(cov2cor(sigma2))
X1=rmvnorm(N,sigma = sigma1) # = Variables for the design that violates the IRC = #
X2=rmvnorm(N,sigma = sigma2) # = Variables for the design that satisfies the IRC = #
e=rnorm(N) # = Error = #
y1=X1[,1:p]%*%betas+e # = Generate y for design 1 = #
y2=X2[,1:p]%*%betas+e # = Generate y for design 2 = #
lasso1=glmnet(X1,y1,nlambda = 100) # = Estimation for design 1 = #
lasso2=glmnet(X2,y2,nlambda = 100) # = Estimation for design 2 = #
## == Regularization path == ##
par(mfrow=c(1,2))
l1=log(lasso1$lambda)
matplot(as.matrix(l1),t(coef(lasso1)[-1,])
,type="l",lty=1,col=c(rep(1,9),2)
,ylab="coef",xlab="log(lambda)",main="Violates IRC")
l2=log(lasso2$lambda)
matplot(as.matrix(l2),t(coef(lasso2)[-1,])
,type="l",lty=1,col=c(rep(1,9),2)
,ylab="coef",xlab="log(lambda)",main="Satisfies IRC")
# adalasso corrects for this problem
lasso1.1=cv.glmnet(X1,y1)
w.=(abs(coef(lasso1.1)[-1])+1/N)^(-1)
adalasso1=glmnet(X1,y1,penalty.factor = w.)
# penalty.factor - from glmnet help
# Separate penalty factors can be applied to each coefficient.
# This is a number that multiplies lambda to allow differential shrinkage.
# Can be 0 for some variables, which implies no shrinkage, and that variable is always included in the model. Default is 1 for all variables (and implicitly infinity for variables listed in exclude). Note: the penalty factors are internally rescaled to sum to nvars, and the lambda sequence will reflect this change.
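# Added sketch (not in the original post): a tiny illustration of penalty.factor itself.
# A factor of 0 leaves the first variable unpenalized, so its coefficient should stay
# non-zero along the whole lambda path, while the remaining variables keep factor 1.
pf_demo = glmnet(X1, y1, penalty.factor = c(0, rep(1, k - 1)))
coef(pf_demo)[2, ] # path of the unpenalized first coefficient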
par(mfrow=c(1,2))
l1=log(lasso1$lambda)
matplot(as.matrix(l1),t(coef(lasso1)[-1,])
,type="l",lty=1,col=c(rep(1,9),2)
,ylab="coef",xlab="log(lambda)",main="LASSO")
l2=log(adalasso1$lambda)
matplot(as.matrix(l2),t(coef(adalasso1)[-1,]),type="l"
,lty=1,col=c(rep(1,9),2)
,ylab="coef",xlab="log(lambda)",main="adaLASSO")
# extra glmnet plot
plot(lasso1, xvar="dev", label = FALSE, col = c(rep(1, 8), 2, 1))
|
/lasso_fails.R
|
no_license
|
YenMuHsin/Statistical_Learning_Basics
|
R
| false | false | 3,240 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot1clickprojects_operations.R
\name{iot1clickprojects_list_tags_for_resource}
\alias{iot1clickprojects_list_tags_for_resource}
\title{Lists the tags (metadata key/value pairs) which you have assigned to the
resource}
\usage{
iot1clickprojects_list_tags_for_resource(resourceArn)
}
\arguments{
\item{resourceArn}{[required] The ARN of the resource whose tags you want to list.}
}
\value{
A list with the following syntax:\preformatted{list(
tags = list(
"string"
)
)
}
}
\description{
Lists the tags (metadata key/value pairs) which you have assigned to the
resource.
}
\section{Request syntax}{
\preformatted{svc$list_tags_for_resource(
resourceArn = "string"
)
}
}
\keyword{internal}
|
/cran/paws.internet.of.things/man/iot1clickprojects_list_tags_for_resource.Rd
|
permissive
|
paws-r/paws
|
R
| false | true | 775 |
rd
|
# > file written: Sat, 08 Dec 2018 00:12:13 +0100
# in this file, settings that are specific for a run on a dataset
# gives path to output folder
pipOutFold <- "OUTPUT_FOLDER/TCGAgbm_classical_proneural"
# full path (starting with /mnt/...)
# following format expected for the input
# colnames = samplesID
# rownames = geneID
# !!! gene IDs are expected not to be duplicated
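# Added sketch (illustrative only; the object name created by load() is an assumption):
# load(rnaseqDT_file) # should yield a counts table with samples as columns
# stopifnot(!any(duplicated(rownames(rnaseqDT)))) # gene IDs must be unique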
# *************************************************************************************************************************
# ************************************ SETTINGS FOR 0_prepGeneData
# *************************************************************************************************************************
# UPDATE 07.12.2018: for RSEM data, the "analog" FPKM file is provided separately (built in prepData)
rna_fpkmDT_file <- "/mnt/ed4/marie/other_datasets/TCGAgbm_classical_proneural/fpkmDT.Rdata"
rnaseqDT_file <- "/mnt/ed4/marie/other_datasets/TCGAgbm_classical_proneural/rnaseqDT_v2.Rdata"
my_sep <- "\t"
# input is Rdata or txt file ?
# TRUE if the input is Rdata
inRdata <- TRUE
# can be ensemblID, entrezID, geneSymbol
geneID_format <- "entrezID"
stopifnot(geneID_format %in% c("ensemblID", "entrezID", "geneSymbol"))
# are geneID rownames ? -> "rn" or numeric giving the column
geneID_loc <- "rn"
stopifnot(geneID_loc == "rn" | is.numeric(geneID_loc))
removeDupGeneID <- TRUE
# *************************************************************************************************************************
# ************************************ SETTINGS FOR 1_runGeneDE
# *************************************************************************************************************************
# labels for conditions
cond1 <- "classical"
cond2 <- "proneural"
# path to sampleID for each condition - should be Rdata ( ! sample1 for cond1, sample2 for cond2 ! )
sample1_file <- "/mnt/ed4/marie/other_datasets/TCGAgbm_classical_proneural/classical_ID.Rdata"
sample2_file <- "/mnt/ed4/marie/other_datasets/TCGAgbm_classical_proneural/proneural_ID.Rdata"
minCpmRatio <- 20/888
inputDataType <- "RSEM"
nCpu <- 20
# number of permutations
nRandomPermut <- 10000
step8_for_permutGenes <- TRUE
step8_for_randomTADsFix <- FALSE
step8_for_randomTADsGaussian <- FALSE
step8_for_randomTADsShuffle <- FALSE
step14_for_randomTADsShuffle <- FALSE
# > file edited: Mon, 04 Mar 2019 11:51:32 +0100
# path to output folder:
pipOutFold <- "/mnt/etemp/marie/Cancer_HiC_data_TAD_DA/PIPELINE/OUTPUT_FOLDER/GSE105194_cerebellum_40kb/TCGAgbm_classical_proneural"
# OVERWRITE THE DEFAULT SETTINGS FOR INPUT FILES - use TADs from the current Hi-C dataset
TADpos_file <- paste0(setDir, "/mnt/etemp/marie/Cancer_HiC_data_TAD_DA/GSE105194_cerebellum_40kb/genes2tad/all_assigned_regions.txt")
#chr1 chr1_TAD1 750001 1300000
#chr1 chr1_TAD2 2750001 3650000
#chr1 chr1_TAD3 3650001 4150000
gene2tadDT_file <- paste0(setDir, "/mnt/etemp/marie/Cancer_HiC_data_TAD_DA/GSE105194_cerebellum_40kb/genes2tad/all_genes_positions.txt")
#LINC00115 chr1 761586 762902 chr1_TAD1
#FAM41C chr1 803451 812283 chr1_TAD1
#SAMD11 chr1 860260 879955 chr1_TAD1
#NOC2L chr1 879584 894689 chr1_TAD1
# overwrite main_settings.R: nCpu <- 25
nCpu <- 20
# *************************************************************************************************************************
# ************************************ SETTINGS FOR PERMUTATIONS (5#_, 8c_)
# *************************************************************************************************************************
# number of permutations
nRandomPermut <- 10000
gene2tadAssignMethod <- "maxOverlap"
nRandomPermutShuffle <- 10000
step8_for_permutGenes <- TRUE
step8_for_randomTADsFix <- FALSE
step8_for_randomTADsGaussian <- FALSE
step8_for_randomTADsShuffle <- FALSE
step14_for_randomTADsShuffle <- FALSE
|
/PIPELINE/INPUT_FILES/GSE105194_cerebellum_40kb/run_settings_TCGAgbm_classical_proneural.R
|
no_license
|
marzuf/Cancer_HiC_data_TAD_DA
|
R
| false | false | 4,035 |
r
|
library(gofastr)
### Name: q_dtm
### Title: Quick DocumentTermMatrix
### Aliases: q_dtm q_dtm_stem
### Keywords: DocumentTermMatrix dtm
### ** Examples
(x <- with(presidential_debates_2012, q_dtm(dialogue, paste(time, tot, sep = "_"))))
tm::weightTfIdf(x)
(x2 <- with(presidential_debates_2012, q_dtm_stem(dialogue, paste(time, tot, sep = "_"))))
remove_stopwords(x2, stem=TRUE)
bigrams <- c('make sure', 'governor romney', 'mister president',
'united states', 'middle class', 'middle east', 'health care',
'american people', 'dodd frank', 'wall street', 'small business')
grep(" ", x$dimnames$Terms, value = TRUE) #no ngrams
(x3 <- with(presidential_debates_2012,
q_dtm(dialogue, paste(time, tot, sep = "_"), ngrams = bigrams)
))
grep(" ", x3$dimnames$Terms, value = TRUE) #ngrams
|
/data/genthat_extracted_code/gofastr/examples/q_dtm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 806 |
r
|
library(wooldridge)
### Name: return
### Title: return
### Aliases: return
### Keywords: datasets
### ** Examples
str(return)
|
/data/genthat_extracted_code/wooldridge/examples/return.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 134 |
r
|
ui = dashboardPage(
skin = 'blue',
dashboardHeader(
title = 'gnomAD Ancestry Estimation',
titleWidth = 300
),
dashboardSidebar(
width = 250,
fluidRow(
align = 'center',
h4('Hendricks Research Group'),
h4('University of Colorado Denver')
),
radioGroupButtons(
inputId = 'exge',
label = NULL,
choiceNames = c('Genome', 'Exome'),
choiceValues = c('genome', 'exome'),
selected = 'genome',
individual = TRUE,
width = '100%',
justified = TRUE,
status = 'primary',
checkIcon = list(
yes = icon("ok",
lib = "glyphicon"))
),
pickerInput(
inputId = 'ancdat',
label = 'Ancestry Group',
choices = c('AFR', 'AMR', 'OTH'),
choicesOpt = list(
subtext = c('African/African American', 'American/Latinx', 'Other')
),
selected = 'AFR',
options = list(
`live-search` = TRUE)
),
sidebarMenu(
id = 'menuselect',
menuItem("Genome-wide Ancestry Proportions",
icon = icon("chart-area"),
startExpanded = TRUE,
menuSubItem("Block Bootstrap",
tabName = "bb",
selected = TRUE),
menuSubItem("Random SNP Sample",
tabName = "ran")
),
menuItem("Ancestry Proportions by Chromosome", tabName = "chr", icon = icon("chart-bar")),
menuItem("ReadMe", tabName = "readme", icon = icon("readme"))
# menuItem("Github", icon = icon("code"),
# href = "https://github.com/hendriau/Mixtures",
# newtab = TRUE)
)
),
dashboardBody(
tags$head(
tags$style(HTML(".main-sidebar { font-size: 12px; }")) # change the font size to 12
),
tabItems(
tabItem(tabName = "bb",
fluidRow(
column(
width = 3,
box(
title = "Block Bootstrapping", width = NULL, status = "primary",
'We use block bootstrapping to estimate error for the ancestry proportions.
We resample 3,357 centiMorgan blocks 1,000 times for the plots and confidence intervals shown here.'
)
),
column(
width = 9,
tabBox(
title = "Proportion Estimates for Block Bootstrapping",
width = NULL, height = 440, side = 'right', selected = 'Visual',
tabPanel(
'Numeric',
withSpinner(tableOutput(
'infobb'
))
),
tabPanel(
'Visual',
withSpinner(plotOutput(
'plotbb',
height = 370
))
)
)
)
),
fluidRow(
column(width = 1),
column(
width = 10,
box(
title = "Distribution Plots and 95% Confidence Intervals", width = NULL,
status = "primary", height = 240,
withSpinner(plotOutput(
'distbb',
height = 170
))
)
)
)
),
tabItem(tabName = "ran",
fluidRow(
column(
width = 3,
box(
title = "Random SNP Sample", width = NULL, status = "primary",
'We sample N random SNPs across the 22 autosomes to estimate ancestry proportions.
We randomly sample 1,000 times for the plots and confidence intervals shown here.
N can be varied to evaluate our method with different numbers of SNPs.'
),
box(
title = 'N Random SNPs', width = NULL, status = "primary",
conditionalPanel(
condition = "input.exge == 'genome' ",
sliderTextInput(
inputId = 'randsnpnumge',
label = NULL,
choices = c(10, 50, 100, 500, 1000, 2500, 5000, 10000, 50000, 100000),
selected = '1000',
grid = TRUE,
hide_min_max = TRUE
)
),
conditionalPanel(
condition = "input.exge == 'exome' ",
sliderTextInput(
inputId = 'randsnpnumex',
label = NULL,
choices = c(10, 50, 100, 500, 1000, 2500, 5000, 7500, 9000),
selected = '1000',
grid = TRUE,
hide_min_max = TRUE
)
)
)
),
column(
width = 9,
tabBox(
title = "Proportion Estimates for Random SNP Sample",
width = NULL, height = 440, side = 'right', selected = 'Visual',
tabPanel(
'Numeric',
withSpinner(tableOutput(
'inforan'
))
),
tabPanel(
'Visual',
withSpinner(plotOutput(
'plotran',
height = 370
))
)
)
)
),
fluidRow(
column(width = 1),
column(
width = 10,
box(
title = "Distribution Plots and 95% Confidence Intervals", width = NULL,
status = "primary", height = 240,
withSpinner(plotOutput(
'distran',
height = 170
))
)
)
)
),
tabItem(tabName = "chr",
fluidRow(
column(
width = 3,
box(
title = "Chromosome", width = NULL, status = "primary",
'Estimated ancestry proportions by chromosome using all SNPs.'
)
),
column(
width = 9,
tabBox(
title = "Proportion Estimates by Chromosome",
width = NULL, height = 700, side = 'right', selected = 'Visual',
tabPanel(
'Numeric',
withSpinner(tableOutput(
'sumchr'
))
),
tabPanel(
'Visual',
withSpinner(plotOutput(
'plotchr',
height = 650
))
)
)
)
)
),
tabItem(tabName = "readme",
fluidRow(
column(
width = 8,
box(
title = "ReadMe", width = NULL, status = "primary", height = 450,
'Our reference panel was created from ',
a('1000 Genomes Project', href = "https://www.internationalgenome.org/", target="_blank"),
' (GRCh37/hg19) superpopulations (African, Non-Finish European, East Asian, South Asian) and an ',
a('Indigenous American population', href = "ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/working/20130711_native_american_admix_train", target="_blank"),
' (616,568 SNPs and 43 individuals, GRCh37/hg19). Tri-allelic SNPs and SNPs with missing
allele frequency information were removed, leaving 613,298 SNPs across the 22 autosomes.',
br(),
br(),
'We estimate the ancestry proportions from ',
a('gnomAD V2', href = 'https://gnomad.broadinstitute.org/', target="_blank"),
'(GRCh37/hg19). After merging with our reference panel we checked for allele matching and strand flips.
Our final dataset had 582,550 genome SNPs and 9,835 exome SNPs across the 22 autosomes.'
)
),
column(
width = 4,
box(
title = "Acknowledgements", width = NULL, status = "primary", height = 450,
strong("This work was a collaborative effort by:"),
br(),
"Ian S. Arriaga-Mackenzie, Gregory M. Matesi, Alexandria Ronco, Ryan Scherenberg,
Andrew Zerwick, Yinfei Wu, James Vance, Jordan R. Hall, Christopher R. Gignoux,
Megan Null, Audrey E. Hendricks",
br(),
strong("Additional Funding from:"),
br(),
"CU Denver Undergraduate Research Opportunity Program (UROP)",
br(),
"Education through Undergraduate Research and Creative Activities (EUReCA) program",
br(),
strong('Shiny App created and maintained by:'),
br(),
'Ian S. Arriaga MacKenzie',
br(),
a(actionButton(inputId = "email1", label = "email",
icon = icon("envelope", lib = "font-awesome")),
href="mailto:IAN.ARRIAGAMACKENZIE@ucdenver.edu"),
br(),
                    strong('Principal Investigator:'),
br(),
'Dr. Audrey E. Hendricks',
br(),
a(actionButton(inputId = "email2", label = "email",
icon = icon("envelope", lib = "font-awesome")),
href="mailto:AUDREY.HENDRICKS@ucdenver.edu")
)
)
),
fluidRow(
column(
width = 8,
box(
title = "Disclaimer", width = NULL, status = "primary",
'Under no circumstances shall authors of this website and ancestry estimation algorithm be liable for
any indirect, incidental, consequential, special or exemplary damages arising out of or in connection
with your access or use of or inability to access the ancestry estimation website or any associated software
and tools and any third party content and services, whether or not the damages were foreseeable and whether or
not the authors were advised of the possibility of such damages. By using the ancestry estimation platform
you agree to use it to promote scientific research, learning or health.'
)
),
column(
width = 4,
img(src='CUdenverlogo.png', align = "Center", height = 150, width = 240)
)
)
)
)
)
)
|
/AncEstTestApp/ui.R
|
no_license
|
ianarriagamackenzie/mixturesresearch
|
R
| false | false | 12,435 |
r
|
## Quantlet 1 - ImportPrepareData Load Packages used in Q1
library(foreign)
library(stringr)
library(data.table)
# Make sure you check your working directory so that the code works flawlessly!
getwd()
# Otherwise Set the Working Directory -> setwd('/Your/Path/to/Happiness')
### IMPORT, MERGE AND CLEAN ALL DATA
### We need two iterators: i steps through the list of years;
### k is always one higher than i because it reads the corresponding year's column of the feature-
### selection list (the first column holds the labels)
i = 1 # iterator to step through the list of years
k = 2 # iterator to step through the columns in variable list
# List all directories within the input data, non-recursive
list_dirs = list.dirs(path = "SOEPQ1_ImportPrepareData/input-data", recursive = FALSE)
# Extract the year name of the directories, so the last 4 digits
list_years = str_sub(list_dirs, -4)
# Create Variable names for every merged year based on the style merged[year]
list_varnames = paste("merged", list_years, sep = "")
# Load the variable list we cleaned manually in Excel as CSV
soep_selection = read.table("SOEPQ1_ImportPrepareData/variable-selection/soep-var-selection.csv", header = TRUE, sep = ";", check.names = FALSE)
# Get all Labels, unfiltered
labels = soep_selection[, 1]
# Create a vector to put object names of all years in it
datalist = c()
# Loop through all the years, import the data, merge, clean and label them
for (years in list_years) {
# Define Current List of import data based on the 'i' value
list_files = list.files(path = list_dirs[i], pattern = "", full.names = TRUE)
# Import all the data from the current list with the read.dta-Function (part of foreign package) for
# SPSS-Files
list_import = lapply(list_files, read.dta)
# Merge it into one file
data_merged = Reduce(function(x, y) merge(x, y, by = "persnr", all.x = TRUE), list_import)
# Cut the .x and .y values from the merge process, so that we have clean column names
colnames(data_merged) = gsub("\\.x|\\.y", "", colnames(data_merged))
# Get the variable list of the current year
current_list = sort(soep_selection[, k])
# ONLY take the data shortlisted for the current year
cleaned = data_merged[, which(names(data_merged) %in% current_list == TRUE)]
# Select the Label Column and the Variable Column of the current Year
soep_subcrit = c(1, k)
# Subset the Variable list so that only the label and the current year exist
soep_selection_sub = soep_selection[soep_subcrit]
# Delete NA-Values from the list
soep_selection_sub = na.omit(soep_selection_sub)
# Create a subset of the clean labels, where all codenames match, to make sure that the labels are
# correct
clean_labels = subset(soep_selection_sub, sort(soep_selection_sub[, 2]) == sort(names(cleaned)))
# Order Dataframe alphabetically
clean_sorted = cleaned[, order(names(cleaned))]
# Order Frame with the Labels based on the ID
ordered_colnames = clean_labels[order(clean_labels[2]), ]
# Label the columns properly
colnames(clean_sorted) = ordered_colnames[, 1]
# Assign data_merged to current merge[year]
assign(list_varnames[i], clean_sorted)
# Add Year Variable to a list so that we can access all years by a loop
datalist = c(datalist, list_varnames[i])
# Update our variables for the next round
i = i + 1
k = k + 1
}
# Merge all data into one dataframe and add a column with the respective year, called 'Wave'
# Create a new dataframe
merged_all = data.frame(matrix(ncol = nrow(soep_selection), nrow = 0))
# Name the dataframe using the first column of the csv
colnames(merged_all) <- soep_selection[, 1]
# Add 'Wave' column to the dataframe
merged_all$Wave = numeric(nrow(merged_all))
# Iterator to step through the years
z = 1
# For loop adding data of every year to the data frame
for (years in c(datalist)) {
# Get current year for the Wave column
current_year = list_years[z]
# Get dataset of the current year
current_data = get(datalist[z])
# Repeat the current year to fill the column 'Wave' of the respective year
Wave = rep(current_year, nrow(current_data))
# Add year-value to the 'Wave' column
current_data = cbind(Wave, current_data)
# Add the data to the merge dataframe
merged_all = rbindlist(list(merged_all, current_data), fill = TRUE)
# Iterator one up
z = z + 1
} # END OF FOR-LOOP
# Remove spaces in variable names and substitute them with a '.' - necessary for the dplyr package, which is
# handy for later analysis of our data
valid_column_names = make.names(names = names(merged_all), unique = TRUE, allow_ = TRUE)
names(merged_all) = valid_column_names
# Delete the intermediate variables to clean up the workspace
rm(list = datalist)
rm(list = c("clean_labels", "clean_sorted", "cleaned", "current_data", "data_merged", "datalist", "list_import",
"ordered_colnames", "soep_selection", "soep_selection_sub", "current_list", "current_year", "i", "k",
"labels", "list_dirs", "list_files", "soep_subcrit", "valid_column_names", "Wave", "years", "z"))
|
/SOEPQ1_ImportPrepareData/ImportPrepareData.R
|
no_license
|
nonstoptimm/spl-pirates
|
R
|
## In this assignment I coded a pair of functions that cache and compute the inverse of a matrix.
##
## Here is an explanation to both functions:
##
## 1. makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
##
## and 2. cacheSolve: This function computes the inverse of the special "matrix" returned by
##
## makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not
##
## changed), then the cachesolve then retrieves the inverse from the cache.
makeCacheMatrix <- function(M = matrix()) {
inverse <- NULL
set <- function(x) {
M <<- x;
inverse <<- NULL;
}
get <- function() return(M);
setinv <- function(inv) inverse <<- inv;
getinv <- function() return(inverse);
return(list(set = set, get = get, setinv = setinv, getinv = getinv))
}##makeCacheMatrix
## This is the second function cacheSolve as explained above.
cacheSolve <- function(M, ...) {
    inverse <- M$getinv()
if(!is.null(inverse)) {
message("Getting cached data...")
return(inverse)
}
data <- M$get()
    inverse <- solve(data, ...)
M$setinv(inverse)
return(inverse)
}##cacheSolve
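## A minimal usage sketch (not part of the original assignment; the matrix values
## are illustrative only). The first cacheSolve() call computes the inverse and
## stores it via setinv(); the second call returns the cached copy.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)  # computes and caches the inverse
cacheSolve(cm)  # prints "Getting cached data..." and reuses the stored inverse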
|
/cachematrix.R
|
no_license
|
Bachelier/ProgrammingAssignment2
|
R
|
# make the EDF and MSE results tables for the Ramsay horseshoe simulation
basefilename.mse<-"ramsay-mse-250-"
basefilename.edf<-"ramsay-edf-250-"
errlevs<-c(0.1,1,10)
sqrtn<-sqrt(250)
cat(" & & MSE & & & EDF & \\\\ \n")
mods<-c("TPRS","MDS (tprs)","Soap film")
cat(paste(mods,mods,sep=" & "))
cat("\\\\ \n")
for(errlev in errlevs){
cat(errlev," & ")
mse.dat<-read.csv(paste(basefilename.mse,errlev,".csv",sep=""))
mses<-c(mean(mse.dat$mds),mean(mse.dat$soap),mean(mse.dat$tprs))
ses<-c(sd(mse.dat$mds),sd(mse.dat$soap),sd(mse.dat$tprs))/sqrtn
edf.dat<-read.csv(paste(basefilename.edf,errlev,".csv",sep=""))
edfs<-c(mean(edf.dat$mds),mean(edf.dat$soap),mean(edf.dat$tprs))
edfse<-c(sd(edf.dat$mds),sd(edf.dat$soap),sd(edf.dat$tprs))/sqrtn
cat(round(mses[1],4)," (",round(ses[1],5),") & ",sep="")
cat(round(mses[2],4)," (",round(ses[2],5),") & ",sep="")
cat(round(mses[3],4)," (",round(ses[3],5),") &",sep="")
cat(round(edfs[1],4)," (",round(edfse[1],5),") & ",sep="")
cat(round(edfs[2],4)," (",round(edfse[2],5),") & ",sep="")
cat(round(edfs[3],4)," (",round(edfse[3],5),")\\\\ \n",sep="")
}
cat("\n\n")
|
/mds/sim/ramsay-table.R
|
no_license
|
distanceModling/phd-smoothing
|
R
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stitch.R
\name{decimalHighLow}
\alias{decimalHighLow}
\title{decimalHighLow}
\usage{
decimalHighLow(df)
}
\arguments{
\item{df}{data.frame with Month and DecYear columns}
}
\value{
list with DecHigh and DecLow (water year high/low decimal values)
}
\description{
decimalHighLow figures out the highest and lowest decimal year based on
water year. The input is a data frame with columns Month and DecYear.
}
\examples{
eList <- Choptank_eList
highLow <- decimalHighLow(eList$Sample)
DecHigh <- highLow[["DecHigh"]]
DecLow <- highLow[["DecLow"]]
}
|
/man/decimalHighLow.Rd
|
no_license
|
cran/EGRET
|
R
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{h2o.getTypes}
\alias{h2o.getTypes}
\title{Get the types-per-column}
\usage{
h2o.getTypes(x)
}
\arguments{
\item{x}{An H2OFrame}
}
\value{
A list of types per column
}
\description{
Get the types-per-column
}
|
/h2o_3.10.4.4/h2o/man/h2o.getTypes.Rd
|
no_license
|
JoeyChiese/gitKraken_test
|
R
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/thinkr.r
\docType{package}
\name{thinkr-package}
\alias{thinkr}
\alias{thinkr-package}
\title{thinkr: Tools for Cleaning Up Messy Files}
\description{
\if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}}
Some tools for cleaning up messy 'Excel' files to be suitable for R. People who have been working with 'Excel' for years built more or less complicated sheets with names, characters, formats that are not homogeneous. To be able to use them in R nowadays, we built a set of functions that will avoid the majority of importation problems and keep all the data at best.
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/Thinkr-open/thinkr}
\item Report bugs at \url{https://github.com/Thinkr-open/thinkr/issues}
}
}
\author{
\strong{Maintainer}: Vincent Guyader \email{vincent@thinkr.fr} (\href{https://orcid.org/0000-0003-0671-9270}{ORCID})
Authors:
\itemize{
\item Sébastien Rochette \email{sebastien@thinkr.fr} (\href{https://orcid.org/0000-0002-1565-9313}{ORCID})
}
Other contributors:
\itemize{
\item ThinkR [copyright holder]
}
}
\keyword{internal}
|
/man/thinkr-package.Rd
|
no_license
|
cran/thinkr
|
R
|
#' Clean .DBF
#'
#' Function to clean .DBF files
#' @param x input dataframe with data
#' @param y input dataframe with column names; if missing, defaults to NULL
#' @return \code{x} output dataframe
#' @export
#'
cleanDBF <- function(x,y = NULL) {
# Merge the date and time columns
x$Date <- as.POSIXct(paste(x$Date, x$Time), format="%Y-%m-%d %H:%M:%S")
# Remove the time column
x$Time <- NULL
# Remove 'Millitm' column
x$Millitm <- NULL
# Remove 'Marker' column
x$Marker <- NULL
# Remove 'Sts_XX' columns,
lastCol <- colnames(x)[ncol(x)]
n <- as.numeric(substr(lastCol,5,6))
for (i in 0:n) {
if (i < 10) {
name <- paste("Sts_0",i,sep = '')
}
else {
name <- paste("Sts_",i,sep = '')
}
x[[name]] <- NULL
}
# Remove rows that are only 0's
# First, subset all of the numeric data in order to use the 'rowSums' function
numericData <- subset(x[,2:ncol(x)])
# Second, the sum of the state functions = 4, therefore greater than 4 is equivalent to all zeros
x <- x[rowSums(numericData[,-1]) > 4, ]
if (!is.null(y)) { # If the column names have been read in
colnames(x)[2:ncol(x)] <- as.character(y[1:nrow(y),1]) # Name columns from 'Tagname' file
}
x <- subset(x, !duplicated(Date)) # Check for duplicates in date column
# Check for NAs
if (anyNA(x)) {
x <- na.omit(x)
}
return(x)
}
|
/R/cleanDBF.R
|
no_license
|
KNewhart/ADPCA
|
R
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift-utils.R
\name{table_attributes}
\alias{table_attributes}
\title{Get Table Attributes String}
\usage{
table_attributes(
diststyle = c("even", "all", "key"),
distkey = NULL,
compound_sort = NULL,
interleaved_sort = NULL
)
}
\arguments{
\item{diststyle}{Distribution style; defaults to "even"}
\item{distkey}{character. optional. Distribution key}
\item{compound_sort}{character vector. optional. Compound sort keys}
\item{interleaved_sort}{character vector. optional. Interleaved sort keys}
}
\value{
character
}
\description{
Get Table Attributes String
}
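\examples{
\dontrun{
## Hypothetical call sketching the documented arguments; the column names are
## illustrative and not taken from the package documentation.
table_attributes(diststyle = "key", distkey = "user_id",
                 compound_sort = c("user_id", "created_at"))
}
}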
|
/man/table_attributes.Rd
|
permissive
|
zapier/redshiftTools
|
R
|
#!/usr/bin/env Rscript --vanilla
args <- commandArgs( T )
libDir <- args[1]
archDirs <- args[-1]
cat("\nModifying libraries in:\n\t",
libDir, '\n')
cat("\nCombining with libraries in:\n\t",
paste( archDirs, collapse = '\n\t' ),
'\n')
# Scan libDir for libraries.
libFiles <- list.files(libDir, full = T,
pattern = '(\\.a)$|(\\.dylib)$')
# Remove symlinks
libFiles <- libFiles[ !nzchar(Sys.readlink(libFiles)) ]
cat('\nThe following libraries have been targeted for modification:\n\t',
paste( libFiles, collapse = '\n\t' ),
'\n')
for( lib in libFiles ){
libsToAdd <- character(length( archDirs ))
for( i in 1:length(libsToAdd) ){
libsToAdd[i] <- list.files( archDirs[i], full = T )[
list.files( archDirs[i] ) %in% basename( lib )
]
}
cat("Combining library:\n\t",
lib,
"\nWith:\n\t",
paste( libsToAdd, collapse = "\n\t" ),
"\n"
)
system(paste('lipo', lib, paste(libsToAdd, collapse=' '),
'-create -output', lib))
}
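## Example invocation (hypothetical paths): the first argument is the library
## directory to modify in place; the remaining arguments are the per-architecture
## directories whose matching libraries get combined in with lipo.
##   Rscript lipoSuck.R /opt/local/lib/universal /opt/local/lib/x86_64 /opt/local/lib/arm64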
|
/scripts/lipoSuck/lipoSuck.R
|
no_license
|
Sharpie/boneyard
|
R
|
# makeCacheMatrix takes a matrix as input and returns a list of functions that get/set the matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
# cacheSolve takes the list returned by makeCacheMatrix and returns the matrix's inverse using the cache if it has been calculated
cacheSolve <- function(x, ...) {
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
inv <- solve(x$get(), ...)
x$setinv(inv)
inv
}
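## A minimal usage sketch (matrix values are illustrative) showing that set()
## clears the cached inverse, so the next cacheSolve() call recomputes it:
cm <- makeCacheMatrix(matrix(c(4, 2, 7, 6), nrow = 2))
cacheSolve(cm)                           # computes and caches the inverse
cm$set(matrix(c(1, 0, 0, 1), nrow = 2))  # replacing the matrix resets the cache
cacheSolve(cm)                           # recomputes for the new matrix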
|
/cachematrix.R
|
no_license
|
howardpaget/ProgrammingAssignment2
|
R
|
##' Draw MCMC samples from the Spatial GLMM with known link function
##'
##' The four-parameter prior for \code{phi} is defined by
##' \deqn{\propto (\phi - \theta_4)^{\theta_2 -1} \exp\{-(\frac{\phi -
##' \theta_4}{\theta_1})^{\theta_3}\}}{propto (phi -
##' phiprior[4])^(phiprior[2]-1) *
##' exp(-((phi-phiprior[4])/phiprior[1])^phiprior[3])} for \eqn{\phi >
##' \theta_4}{phi > phiprior[4]}. The prior for \code{omg} is similar.
##' The prior parameters correspond to scale, shape, exponent, and
##' location. See \code{arXiv:1005.3274} for details of this
##' distribution.
##'
##' The GEV (Generalised Extreme Value) link is defined by \deqn{\mu =
##' 1 - \exp\{-\max(0, 1 + \nu x)^{\frac{1}{\nu}}\}}{mu = 1 -
##' \exp[-max(0, 1 + nu x)^(1/nu)]} for any real \eqn{\nu}{nu}. At
##' \eqn{\nu = 0}{nu = 0} it reduces to the complementary log-log
##' link.
##' @title MCMC samples from the Spatial GLMM
##' @param formula A representation of the model in the form
##' \code{response ~ terms}. The response must be set to \code{NA}'s
##' at the prediction locations (see the examples on how to do this
##' using the function \code{\link{stackdata}}). At the observed
##' locations the response is assumed to be a total of replicated
##' measurements. The number of replications is inputted using the
##' argument \code{weights}.
##' @param family The distribution of the data. The
##' \code{"GEVbinomial"} family is the binomial family with link the
##' GEV link (see Details).
##' @param data An optional data frame containing the variables in the
##' model.
##' @param weights An optional vector of weights. Number of replicated
##' samples for Gaussian and gamma, number of trials for binomial,
##' time length for Poisson.
##' @param subset An optional vector specifying a subset of
##' observations to be used in the fitting process.
##' @param offset See \code{\link[stats]{lm}}.
##' @param atsample A formula in the form \code{~ x1 + x2 + ... + xd}
##' with the coordinates of the sampled locations.
##' @param corrfcn Spatial correlation function. See
##' \code{\link{geoBayes_correlation}} for details.
##' @param linkp Parameter of the link function. A scalar value.
##' @param phi Optional starting value for the MCMC for the
##' spatial range parameter \code{phi}. Defaults to the mean of its
##' prior. If \code{corrtuning[["phi"]]} is 0, then this argument is required and
##' it corresponds to the fixed value of \code{phi}. This can be a
##' vector of the same length as Nout.
##' @param omg Optional starting value for the MCMC for the
##' relative nugget parameter \code{omg}. Defaults to the mean of
##' its prior. If \code{corrtuning[["omg"]]} is 0, then this argument is required
##' and it corresponds to the fixed value of \code{omg}. This can be
##' a vector of the same length as Nout.
##' @param kappa Optional starting value for the MCMC for the
##' spatial correlation parameter \code{kappa} (Matern smoothness or
##' exponential power). Defaults to the mean of
##' its prior. If \code{corrtuning[["kappa"]]} is 0 and it is needed for
##' the chosen correlation function, then this argument is required
##' and it corresponds to the fixed value of \code{kappa}. This can be
##' a vector of the same length as Nout.
##' @param Nout Number of MCMC samples to return. This can be a vector
##' for running independent chains.
##' @param Nthin The thinning of the MCMC algorithm.
##' @param Nbi The burn-in of the MCMC algorithm.
##' @param betm0 Prior mean for beta (a vector or scalar).
##' @param betQ0 Prior standardised precision (inverse variance)
##' matrix. Can be a scalar, vector or matrix. The first two imply a
##' diagonal with those elements. Set this to 0 to indicate a flat
##' improper prior.
##' @param ssqdf Degrees of freedom for the scaled inverse chi-square
##' prior for the partial sill parameter.
##' @param ssqsc Scale for the scaled inverse chi-square prior for the
##' partial sill parameter.
##' @param corrpriors A list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' prior distribution parameters. For \code{phi} and \code{omg} it
##' must be a vector of length 4. The generalized inverse gamma
##' prior is assumed and the input corresponds to the parameters
##' scale, shape, exponent, location in that order (see Details).
##' For \code{kappa} it must be a vector of length 2. A uniform
##' prior is assumed and the input corresponds to the lower and
##' upper bounds in that order.
##' @param corrtuning A vector or list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' random walk parameter for the Metropolis-Hastings step. Smaller values
##' increase the acceptance ratio. Set this to 0 for fixed
##' parameter value.
##' @param malatuning Tuning parameter for the MALA updates.
##' @param dispersion The fixed dispersion parameter.
##' @param longlat How to compute the distance between locations. If
##' \code{FALSE}, Euclidean distance, if \code{TRUE} Great Circle
##' distance. See \code{\link[sp]{spDists}}.
##' @param test Whether this is a trial run to monitor the acceptance
##' ratio of the random walk for \code{phi} and \code{omg}. If set
##' to \code{TRUE}, the acceptance ratio will be printed on the
##' screen every 100 iterations of the MCMC. Tune the \code{phisc}
##'   and \code{omgsc} parameters in order to achieve 20 to 30\%
##' acceptance. Set this to a positive number to change the default
##' 100. No thinning or burn-in are done when testing.
##' @return A list containing the objects \code{MODEL}, \code{DATA},
##' \code{FIXED}, \code{MCMC} and \code{call}. The MCMC samples are
##' stored in the object \code{MCMC} as follows:
##' \itemize{
##' \item \code{z} A matrix containing the MCMC samples for the
##' spatial random field. Each column is one sample.
##' \item \code{mu} A matrix containing the MCMC samples for the
##' mean response (a transformation of z). Each column is one sample.
##' \item \code{beta} A matrix containing the MCMC samples for the
##' regressor coefficients. Each column is one sample.
##' \item \code{ssq} A vector with the MCMC samples for the partial
##' sill parameter.
##' \item \code{phi} A vector with the MCMC samples for the spatial
##' range parameter, if sampled.
##' \item \code{omg} A vector with the MCMC samples for the relative
##' nugget parameter, if sampled.
##' \item \code{logLik} A vector containing the value of the
##' log-likelihood evaluated at each sample.
##' \item \code{acc_ratio} The acceptance ratio for the joint update
##' of the parameters \code{phi} and \code{omg}, if sampled.
##' \item \code{sys_time} The total computing time for the MCMC sampling.
##' \item \code{Nout}, \code{Nbi}, \code{Nthin} As in input. Used
##' internally in other functions.
##' }
##' The other objects contain input variables. The object \code{call}
##' contains the function call.
##' @examples \dontrun{
##' data(rhizoctonia)
##'
##' ### Create prediction grid
##' predgrid <- mkpredgrid2d(rhizoctonia[c("Xcoord", "Ycoord")],
##' par.x = 100, chull = TRUE, exf = 1.2)
##'
##' ### Combine observed and prediction locations
##' rhizdata <- stackdata(rhizoctonia, predgrid$grid)
##'
##' ### Define the model
##' corrf <- "spherical"
##' family <- "binomial.probit"
##' kappa <- 0
##' ssqdf <- 1
##' ssqsc <- 1
##' betm0 <- 0
##' betQ0 <- .01
##' phiprior <- c(100, 1, 1000, 100) # U(100, 200)
##' phisc <- 3
##' omgprior <- c(2, 1, 1, 0) # Exp(mean = 2)
##' omgsc <- .1
##'
##' ### MCMC sizes
##' Nout <- 100
##' Nthin <- 1
##' Nbi <- 0
##'
##' ### Trial run
##' emt <- mcsglmm_mala(Infected ~ 1, family, rhizdata, weights = Total,
##' atsample = ~ Xcoord + Ycoord,
##' Nout = Nout, Nthin = Nthin, Nbi = Nbi,
##' betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
##' corrpriors = list(phi = phiprior, omg = omgprior),
##' corrfcn = corrf, kappa = kappa,
##' corrtuning = list(phi = phisc, omg = omgsc, kappa = 0),
##' malatuning = .003, dispersion = 1, test = 10)
##'
##' ### Full run
##' emc <- update(emt, test = FALSE)
##'
##' emcmc <- mcmcmake(emc)
##' summary(emcmc[, c("phi", "omg", "beta", "ssq")])
##' plot(emcmc[, c("phi", "omg", "beta", "ssq")])
##' }
##' @importFrom sp spDists
##' @importFrom stats model.matrix model.response model.weights
##' as.formula update model.offset
##' @useDynLib geoBayes mcspsamtry mcspsample
##' @export
mcsglmm_mala <- function (formula, family = "gaussian",
data, weights, subset, offset,
atsample, corrfcn = "matern",
linkp, phi, omg, kappa,
Nout, Nthin = 1, Nbi = 0, betm0, betQ0, ssqdf, ssqsc,
corrpriors, corrtuning, malatuning,
dispersion = 1, longlat = FALSE, test = FALSE) {
cl <- match.call()
## Family
ifam <- .geoBayes_family(family)
if (ifam) {
family <- .geoBayes_models$family[ifam]
} else {
stop ("This family has not been implemented.")
}
if (.geoBayes_models$needlinkp[ifam]) {
if (missing(linkp))
stop ("Missing input linkp.")
} else {
linkp <- 0
}
## Correlation function
icf <- .geoBayes_correlation(corrfcn)
corrfcn <- .geoBayes_corrfcn$corrfcn[icf]
needkappa <- .geoBayes_corrfcn$needkappa[icf]
## Design matrix and data
if (missing(data)) data <- environment(formula)
if (length(formula) != 3) stop ("The formula input is incomplete.")
if ("|" == all.names(formula[[2]], TRUE, 1)) formula[[2]] <- formula[[2]][[2]]
mfc <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "weights", "offset"),
names(mfc), 0L)
mfc <- mfc[c(1L, m)]
mfc$formula <- formula
mfc$drop.unused.levels <- TRUE
mfc$na.action <- "na.pass"
mfc[[1L]] <- quote(stats::model.frame)
mf <- eval(mfc, parent.frame())
mt <- attr(mf, "terms")
FF <- model.matrix(mt,mf)
if (!all(is.finite(FF))) stop ("Non-finite values in the design matrix")
p <- NCOL(FF)
yy <- unclass(model.response(mf))
if (!is.vector(yy)) {
stop ("The response must be a vector")
}
yy <- as.double(yy)
ll <- model.weights(mf)
oofset <- as.vector(model.offset(mf))
if (!is.null(oofset)) {
if (length(oofset) != NROW(yy)) {
stop(gettextf("number of offsets is %d, should equal %d (number of observations)",
length(oofset), NROW(yy)), domain = NA)
} else {
oofset <- as.double(oofset)
}
} else {
oofset <- double(NROW(yy))
}
## All locations
atsample <- update(atsample, NULL ~ . + 0) # No response and no intercept
mfatc <- mfc
mfatc$weights = NULL
mfatc$formula = atsample
mfat <- eval(mfatc, parent.frame())
loc <- as.matrix(mfat)
if (!all(is.finite(loc))) stop ("Non-finite values in the locations")
if (corrfcn == "spherical" && NCOL(loc) > 3) {
stop ("Cannot use the spherical correlation for dimensions
grater than 3.")
}
## Split sample, prediction
ii <- is.finite(yy)
y <- yy[ii]
k <- sum(ii)
l <- ll[ii]
l <- if (is.null(l)) rep.int(1.0, k) else as.double(l)
if (any(!is.finite(l))) stop ("Non-finite values in the weights")
if (any(l <= 0)) stop ("Non-positive weights not allowed")
if (grepl("^binomial(\\..+)?$", family)) {
l <- l - y # Number of failures
}
F <- FF[ii, , drop = FALSE]
offset <- oofset[ii]
dm <- sp::spDists(loc[ii, , drop = FALSE], longlat = longlat)
k0 <- sum(!ii)
if (k0 > 0) {
F0 <- FF[!ii, , drop = FALSE]
dmdm0 <- sp::spDists(loc[ii, , drop = FALSE], loc[!ii, , drop = FALSE],
longlat = longlat)
offset0 <- oofset[!ii]
} else {
F0 <- dmdm0 <- offset0 <- numeric(0)
dim(F0) <- c(0, p)
dim(dmdm0) <- c(k, 0)
}
## Prior for ssq
ssqdf <- as.double(ssqdf)
  if (ssqdf <= 0) stop ("Argument ssqdf must be > 0")
  ssqsc <- as.double(ssqsc)
  if (ssqsc <= 0) stop ("Argument ssqsc must be > 0")
## Prior for beta
betaprior <- getbetaprior(betm0, betQ0, p)
betm0 <- betaprior$betm0
betQ0 <- betaprior$betQ0
## Other fixed parameters
dispersion <- as.double(dispersion)
if (dispersion <= 0) stop ("Invalid argument dispersion")
nu <- .geoBayes_getlinkp(linkp, ifam)
## MCMC samples
Nout <- as.integer(Nout)
if (any(Nout < 0)) stop ("Negative MCMC sample size entered.")
nch <- length(Nout) # Number of chains
Nmc <- Nout # Size of each chain
Nout <- sum(Nout) # Total MCMC size
Nbi <- as.integer(Nbi)
Nthin <- as.integer(Nthin)
lglk <- numeric(Nout)
z <- matrix(0, k, Nout)
z0 <- matrix(0, k0, Nout)
beta <- matrix(0, p, Nout)
ssq <- numeric(Nout)
  if (malatuning <= 0) stop ("Input malatuning must be > 0.")
## Starting values for correlation parameters
phisc <- corrtuning[["phi"]]
if (is.null(phisc) || !is.numeric(phisc) || phisc < 0)
stop ("Invalid tuning parameter for phi.")
if (phisc > 0) {
phipars <- check_gengamma_prior(corrpriors[["phi"]])
} else phipars <- rep.int(0, 4)
if (missing(phi)) {
if (phisc == 0) {
stop ("Argument phi needed for fixed phi")
} else {
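      ## Default start: the mean of the generalized inverse gamma prior, i.e.
      ## location + scale * Gamma((shape+1)/exponent) / Gamma(shape/exponent);
      ## the branch below guards the shape == -1 case, where Gamma(0) is undefined.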
if(phipars[2] == -1) {
tmp <- .1/abs(phipars[3])
} else {
tmp <- abs((phipars[2]+1)/phipars[3])
}
phistart <- phipars[4] + phipars[1]*gamma(tmp)/
gamma(phipars[2]/phipars[3])
}
} else {
phistart <- as.double(phi)
if (phisc > 0 && phistart <= phipars[4]) {
stop ("Starting value for phi not in the support of its prior")
}
}
phi <- numeric(Nout)
phi[cumsum(c(1, Nmc[-nch]))] <- phistart
omgsc <- corrtuning[["omg"]]
if (is.null(omgsc) || !is.numeric(omgsc) || omgsc < 0)
stop ("Invalid tuning parameter for omg.")
if (omgsc > 0) {
omgpars <- check_gengamma_prior(corrpriors[["omg"]])
} else omgpars <- rep.int(0, 4)
if (missing(omg)) {
if (omgsc == 0) {
stop ("Argument omg needed for fixed omg")
} else {
if(omgpars[2] == -1) {
tmp <- .1/abs(omgpars[3])
} else {
tmp <- abs((omgpars[2]+1)/omgpars[3])
}
omgstart <- omgpars[4] + omgpars[1]*gamma(tmp)/
gamma(omgpars[2]/omgpars[3])
}
} else {
omgstart <- as.double(omg)
if (omgsc > 0 && omgstart <= omgpars[4]) {
stop ("Starting value for omg not in the support of its prior")
}
}
omg <- numeric(Nout)
omg[cumsum(c(1, Nmc[-nch]))] <- omgstart
if (needkappa) {
kappasc <- corrtuning[["kappa"]]
} else {
kappasc <- 0
kappa <- 0
}
if (is.null(kappasc) || !is.numeric(kappasc) || kappasc < 0)
stop ("Invalid tuning parameter for kappa.")
if (kappasc > 0) {
kappapars <- check_unif_prior(corrpriors[["kappa"]])
} else kappapars <- c(0, 0)
if (missing(kappa)) {
if (kappasc == 0) {
stop ("Argument kappa needed for fixed kappa")
} else {
kappastart <- (kappapars[1] + kappapars[2])*.5
}
} else {
kappastart <- as.double(kappa)
}
if (kappasc > 0) {
kappastart <- .geoBayes_getkappa(kappastart, icf)
kappapars <- .geoBayes_getkappa(kappapars, icf)
if (kappastart >= kappapars[2] || kappastart <= kappapars[1]) {
stop ("Starting value for kappa not in the support of its prior")
}
}
kappa <- numeric(Nout)
kappa[cumsum(c(1, Nmc[-nch]))] <- kappastart
## Run code
if (test > 0) { # Running a test
if (is.logical(test)) test <- 100
test <- as.integer(test)
acc <- acc_z <- 0L
tm <- system.time({
RUN <- .Fortran("mcspsamtry_mala", ll = lglk, z = z, phi = phi, omg = omg,
kappa = kappa,
acc = acc,
as.double(y), as.double(l), as.double(F),
as.double(offset),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(phipars),
as.double(omgpars), as.double(kappapars),
as.double(phisc), as.double(omgsc), as.double(kappasc),
as.integer(icf),
as.double(nu), as.double(dispersion), as.double(dm),
as.integer(Nout), as.integer(test), as.integer(k),
as.integer(p), as.integer(ifam), as.double(malatuning),
acc_z = acc_z, PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
mm0 <- NULL
beta <- NULL
ssq <- NULL
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/Nout
acc_ratio_z <- RUN$acc_z/Nout
Nthin <- 1
Nbi <- 0
### out <- list(z = zz0, beta = beta, ssq = ssq, phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = y, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### dispersion = dispersion, locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
} else {
acc <- acc_z <- integer(nch)
tm <- system.time({
RUN <- .Fortran("mcspsample_mala", ll = lglk, z = z, z0 = z0,
mu = z, mu0 = z0,
beta = beta, ssq = ssq,
phi = phi, omg = omg, kappa = kappa, acc = acc,
as.double(y), as.double(l), as.double(F),
as.double(offset), as.double(F0), as.double(offset0),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(phipars), as.double(omgpars),
as.double(kappapars),
as.double(phisc), as.double(omgsc), as.double(kappasc),
as.integer(icf),
as.double(nu), as.double(dispersion), as.double(dm),
as.double(dmdm0), as.integer(nch), as.integer(Nmc),
as.integer(Nout), as.integer(Nbi),
as.integer(Nthin), as.integer(k), as.integer(k0),
as.integer(p), as.integer(ifam), as.double(malatuning),
acc_z = acc_z,
PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- mm0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
zz0[!ii, ] <- RUN$z0
mm0[ii, ] <- RUN$mu
mm0[!ii, ] <- RUN$mu0
beta <- RUN$beta
ssq <- RUN$ssq
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/(Nmc*Nthin + max(Nthin, Nbi))
acc_ratio_z <- RUN$acc_z/(Nmc*Nthin + max(Nthin, Nbi))
### out <- list(z = zz0, mu = mm0,
### beta = beta, ssq = ssq, phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = y, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### dispersion = dispersion, locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
}
MCMC <- FIXED <- MODEL <- DATA <- list()
MCMC$z <- zz0
MCMC$mu <- mm0
MCMC$beta <- beta
MCMC$ssq <- ssq
FIXED$linkp <- as.vector(linkp)
FIXED$linkp_num <- nu
if (phisc == 0) {
FIXED$phi <- phi[1]
} else {
MCMC$phi <- phi
}
if (omgsc == 0) {
FIXED$omg <- omg[1]
} else {
MCMC$omg <- omg
}
if (kappasc == 0) {
FIXED$kappa <- kappa[1]
} else {
MCMC$kappa <- kappa
}
MCMC$logLik <- ll
MCMC$acc_ratio <- acc_ratio
MCMC$acc_ratio_z <- acc_ratio_z
MCMC$sys_time <- tm
MCMC$Nout <- Nout
MCMC$Nbi <- Nbi
MCMC$Nthin <- Nthin
MCMC$whichobs <- ii
DATA$response <- y
DATA$weights <- l
DATA$modelmatrix <- F
DATA$offset <- offset
DATA$locations <- loc[ii, , drop = FALSE]
DATA$longlat <- longlat
MODEL$family <- family
MODEL$corrfcn <- corrfcn
MODEL$betm0 <- betm0
MODEL$betQ0 <- betQ0
MODEL$ssqdf <- ssqdf
MODEL$ssqsc <- ssqsc
MODEL$phipars <- phipars
MODEL$omgpars <- omgpars
MODEL$dispersion <- dispersion
out <- list(MODEL = MODEL, DATA = DATA, FIXED = FIXED, MCMC = MCMC, call = cl)
class(out) <- "geomcmc"
out
}
##' Draw MCMC samples from the transformed Gaussian model with known
##' link function
##'
##' Simulates from the posterior distribution of this model.
##' @title MCMC samples from the transformed Gaussian model
##' @param formula A representation of the model in the form
##' \code{response ~ terms}. The response must be set to \code{NA}'s
##' at the prediction locations (see the example in
##' \code{\link{mcsglmm}} for how to do this using
##' \code{\link{stackdata}}). At the observed locations the response
##' is assumed to be a total of replicated measurements. The number of
##' replications is inputted using the argument \code{weights}.
##' @param data An optional data frame containing the variables in the
##' model.
##' @param weights An optional vector of weights. Number of replicated
##' samples.
##' @param subset An optional vector specifying a subset of
##' observations to be used in the fitting process.
##' @param offset See \code{\link[stats]{lm}}.
##' @param atsample A formula in the form \code{~ x1 + x2 + ... + xd}
##' with the coordinates of the sampled locations.
##' @param corrfcn Spatial correlation function. See
##' \code{\link{geoBayes_correlation}} for details.
##' @param linkp Parameter of the link function. A scalar value.
##' @param phi Optional starting value for the MCMC for the
##' spatial range parameter \code{phi}. Defaults to the mean of its
##' prior. If \code{corrtuning[["phi"]]} is 0, then this argument is required and
##' it corresponds to the fixed value of \code{phi}. This can be a
##' vector of the same length as Nout.
##' @param omg Optional starting value for the MCMC for the
##' relative nugget parameter \code{omg}. Defaults to the mean of
##' its prior. If \code{corrtuning[["omg"]]} is 0, then this argument is required
##' and it corresponds to the fixed value of \code{omg}. This can be
##' a vector of the same length as Nout.
##' @param kappa Optional starting value for the MCMC for the
##' spatial correlation parameter \code{kappa} (Matern smoothness or
##' exponential power). Defaults to the mean of
##' its prior. If \code{corrtuning[["kappa"]]} is 0 and it is needed for
##' the chosen correlation function, then this argument is required
##' and it corresponds to the fixed value of \code{kappa}. This can be
##' a vector of the same length as Nout.
##' @param Nout Number of MCMC samples to return. This can be a vector
##' for running independent chains.
##' @param Nthin The thinning of the MCMC algorithm.
##' @param Nbi The burn-in of the MCMC algorithm.
##' @param betm0 Prior mean for beta (a vector or scalar).
##' @param betQ0 Prior standardised precision (inverse variance)
##' matrix. Can be a scalar, vector or matrix. The first two imply a
##' diagonal with those elements. Set this to 0 to indicate a flat
##' improper prior.
##' @param ssqdf Degrees of freedom for the scaled inverse chi-square
##' prior for the partial sill parameter.
##' @param ssqsc Scale for the scaled inverse chi-square prior for the
##' partial sill parameter.
##' @param tsqdf Degrees of freedom for the scaled inverse chi-square
##' prior for the measurement error parameter.
##' @param tsqsc Scale for the scaled inverse chi-square prior for the
##' measurement error parameter.
##' @param corrpriors A list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' prior distribution parameters. For \code{phi} and \code{omg} it
##' must be a vector of length 4. The generalized inverse gamma
##' prior is assumed and the input corresponds to the parameters
##' scale, shape, exponent, location in that order (see Details).
##' For \code{kappa} it must be a vector of length 2. A uniform
##' prior is assumed and the input corresponds to the lower and
##' upper bounds in that order.
##' @param malatuning Tuning parameter for the MALA updates.
##' @param corrtuning A vector or list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' random walk parameter for the Metropolis-Hastings step. Smaller values
##' increase the acceptance ratio. Set this to 0 for fixed
##' parameter value.
##' @param longlat How to compute the distance between locations. If
##' \code{FALSE}, Euclidean distance, if \code{TRUE} Great Circle
##' distance. See \code{\link[sp]{spDists}}.
##' @param test Whether this is a trial run to monitor the acceptance
##' ratio of the random walk for \code{phi} and \code{omg}. If set to
##' \code{TRUE}, the acceptance ratio will be printed on the screen
##' every 100 iterations of the MCMC. Tune the \code{phisc} and
##'   \code{omgsc} parameters in order to achieve 20 to 30\% acceptance.
##' Set this to a positive number to change the default 100. No
##' thinning or burn-in are done when testing.
##' @return A list containing the objects \code{MODEL}, \code{DATA},
##' \code{FIXED}, \code{MCMC} and \code{call}. The MCMC samples are
##' stored in the object \code{MCMC} as follows:
##' \itemize{
##' \item \code{z} A matrix containing the MCMC samples for the
##' spatial random field. Each column is one sample.
##' \item \code{mu} A matrix containing the MCMC samples for the
##' mean response (a transformation of z). Each column is one sample.
##' \item \code{beta} A matrix containing the MCMC samples for the
##' regressor coefficients. Each column is one sample.
##' \item \code{ssq} A vector with the MCMC samples for the partial
##' sill parameter.
##' \item \code{tsq} A vector with the MCMC samples for the
##' measurement error variance.
##' \item \code{phi} A vector with the MCMC samples for the spatial
##' range parameter, if sampled.
##' \item \code{omg} A vector with the MCMC samples for the relative
##' nugget parameter, if sampled.
##' \item \code{logLik} A vector containing the value of the
##' log-likelihood evaluated at each sample.
##' \item \code{acc_ratio} The acceptance ratio for the joint update
##' of the parameters \code{phi} and \code{omg}, if sampled.
##' \item \code{sys_time} The total computing time for the MCMC sampling.
##' \item \code{Nout}, \code{Nbi}, \code{Nthin} As in input. Used
##' internally in other functions.
##' }
##' The other objects contain input variables. The object \code{call}
##' contains the function call.
##' @examples \dontrun{
##' ### Load the data
##' data(rhizoctonia)
##' rhiz <- na.omit(rhizoctonia)
##' rhiz$IR <- rhiz$Infected/rhiz$Total # Incidence rate of the
##' # rhizoctonia disease
##'
##' ### Define the model
##' corrf <- "spherical"
##' ssqdf <- 1
##' ssqsc <- 1
##' tsqdf <- 1
##' tsqsc <- 1
##' betm0 <- 0
##' betQ0 <- diag(.01, 2, 2)
##' phiprior <- c(200, 1, 1000, 100) # U(100, 300)
##' phisc <- 1
##' omgprior <- c(3, 1, 1000, 0) # U(0, 3)
##' omgsc <- 1
##' linkp <- 1
##'
##' ## MCMC parameters
##' Nout <- 100
##' Nbi <- 0
##' Nthin <- 1
##'
##' samplt <- mcstrga_mala(Yield ~ IR, data = rhiz,
##' atsample = ~ Xcoord + Ycoord, corrf = corrf,
##' Nout = Nout, Nthin = Nthin,
##' Nbi = Nbi, betm0 = betm0, betQ0 = betQ0,
##' ssqdf = ssqdf, ssqsc = ssqsc,
##' tsqdf = tsqdf, tsqsc = tsqsc,
##' corrprior = list(phi = phiprior, omg = omgprior),
##' linkp = linkp,
##' corrtuning = list(phi = phisc, omg = omgsc, kappa = 0),
##' malatuning = .0002, test=10)
##'
##' sample <- update(samplt, test = FALSE)
##' }
##' @importFrom sp spDists
##' @importFrom stats model.matrix model.response model.weights
##' as.formula update model.offset
##' @useDynLib geoBayes trgasamtry trgasample
##' @export
mcstrga_mala <- function (formula,
data, weights, subset, offset,
atsample, corrfcn = "matern",
linkp, phi, omg, kappa,
Nout, Nthin = 1, Nbi = 0, betm0, betQ0, ssqdf, ssqsc,
tsqdf, tsqsc,
corrpriors, corrtuning, malatuning,
longlat = FALSE,
test = FALSE) {
cl <- match.call()
family <- "transformed.gaussian"
## Correlation function
icf <- .geoBayes_correlation(corrfcn)
corrfcn <- .geoBayes_corrfcn$corrfcn[icf]
needkappa <- .geoBayes_corrfcn$needkappa[icf]
## Design matrix and data
if (missing(data)) data <- environment(formula)
if (length(formula) != 3) stop ("The formula input is incomplete.")
if ("|" == all.names(formula[[2]], TRUE, 1)) formula[[2]] <- formula[[2]][[2]]
mfc <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "weights", "offset"),
names(mfc), 0L)
mfc <- mfc[c(1L, m)]
mfc$formula <- formula
mfc$drop.unused.levels <- TRUE
mfc$na.action <- "na.pass"
mfc[[1L]] <- quote(stats::model.frame)
mf <- eval(mfc, parent.frame())
mt <- attr(mf, "terms")
FF <- model.matrix(mt,mf)
if (!all(is.finite(FF))) stop ("Non-finite values in the design matrix")
p <- NCOL(FF)
yy <- unclass(model.response(mf))
if (!is.vector(yy)) {
stop ("The response must be a vector")
}
yy <- as.double(yy)
ll <- model.weights(mf)
oofset <- as.vector(model.offset(mf))
if (!is.null(oofset)) {
if (length(oofset) != NROW(yy)) {
stop(gettextf("number of offsets is %d, should equal %d (number of observations)",
length(oofset), NROW(yy)), domain = NA)
} else {
oofset <- as.double(oofset)
}
} else {
oofset <- double(NROW(yy))
}
## All locations
atsample <- update(atsample, NULL ~ . + 0) # No response and no intercept
mfatc <- mfc
mfatc$weights = NULL
mfatc$formula = atsample
mfat <- eval(mfatc, parent.frame())
loc <- as.matrix(mfat)
if (!all(is.finite(loc))) stop ("Non-finite values in the locations")
if (corrfcn == "spherical" && NCOL(loc) > 3) {
stop ("Cannot use the spherical correlation for dimensions
grater than 3.")
}
## Split sample, prediction
ii <- is.finite(yy)
y <- yy[ii]
k <- sum(ii)
l <- ll[ii]
l <- if (is.null(l)) rep.int(1.0, k) else as.double(l)
if (any(!is.finite(l))) stop ("Non-finite values in the weights")
if (any(l <= 0)) stop ("Non-positive weights not allowed")
ybar <- y/l
F <- FF[ii, , drop = FALSE]
offset <- oofset[ii]
dm <- sp::spDists(loc[ii, , drop = FALSE], longlat = longlat)
k0 <- sum(!ii)
if (k0 > 0) {
F0 <- FF[!ii, , drop = FALSE]
dmdm0 <- sp::spDists(loc[ii, , drop = FALSE], loc[!ii, , drop = FALSE],
longlat = longlat)
offset0 <- oofset[!ii]
} else {
F0 <- dmdm0 <- offset0 <- numeric(0)
dim(F0) <- c(0, p)
dim(dmdm0) <- c(k, 0)
}
## Prior for ssq
ssqdf <- as.double(ssqdf)
  if (ssqdf <= 0) stop ("Argument ssqdf must be > 0")
  ssqsc <- as.double(ssqsc)
  if (ssqsc <= 0) stop ("Argument ssqsc must be > 0")
## Prior for beta
betaprior <- getbetaprior(betm0, betQ0, p)
betm0 <- betaprior$betm0
betQ0 <- betaprior$betQ0
## Prior for tsq
tsqdf <- as.double(tsqdf)
  if (tsqdf <= 0) stop ("Argument tsqdf must be > 0")
  tsqsc <- as.double(tsqsc)
  if (tsqsc <= 0) stop ("Argument tsqsc must be > 0")
if (missing(linkp))
stop ("Missing input linkp.")
nu <- .geoBayes_getlinkp(linkp, family)
## MCMC samples
Nout <- as.integer(Nout)
if (any(Nout < 0)) stop ("Negative MCMC sample size entered.")
nch <- length(Nout) # Number of chains
Nmc <- Nout # Size of each chain
Nout <- sum(Nout) # Total MCMC size
Nbi <- as.integer(Nbi)
Nthin <- as.integer(Nthin)
lglk <- numeric(Nout)
z <- matrix(0, k, Nout)
z0 <- matrix(0, k0, Nout)
beta <- matrix(0, p, Nout)
ssq <- tsq <- numeric(Nout)
  if (malatuning <= 0) stop ("Input malatuning must be > 0.")
## Starting values for correlation parameters
phisc <- corrtuning[["phi"]]
if (is.null(phisc) || !is.numeric(phisc) || phisc < 0)
stop ("Invalid tuning parameter for phi.")
if (phisc > 0) {
phipars <- check_gengamma_prior(corrpriors[["phi"]])
} else phipars <- rep.int(0, 4)
if (missing(phi)) {
if (phisc == 0) {
stop ("Argument phi needed for fixed phi")
} else {
if(phipars[2] == -1) {
tmp <- .1/abs(phipars[3])
} else {
tmp <- abs((phipars[2]+1)/phipars[3])
}
phistart <- phipars[4] + phipars[1]*gamma(tmp)/
gamma(phipars[2]/phipars[3])
}
} else {
phistart <- as.double(phi)
if (phisc > 0 && phistart <= phipars[4]) {
stop ("Starting value for phi not in the support of its prior")
}
}
phi <- numeric(Nout)
phi[cumsum(c(1, Nmc[-nch]))] <- phistart
omgsc <- corrtuning[["omg"]]
if (is.null(omgsc) || !is.numeric(omgsc) || omgsc < 0)
stop ("Invalid tuning parameter for omg.")
if (omgsc > 0) {
omgpars <- check_gengamma_prior(corrpriors[["omg"]])
} else omgpars <- rep.int(0, 4)
if (missing(omg)) {
if (omgsc == 0) {
stop ("Argument omg needed for fixed omg")
} else {
if(omgpars[2] == -1) {
tmp <- .1/abs(omgpars[3])
} else {
tmp <- abs((omgpars[2]+1)/omgpars[3])
}
omgstart <- omgpars[4] + omgpars[1]*gamma(tmp)/
gamma(omgpars[2]/omgpars[3])
}
} else {
omgstart <- as.double(omg)
if (omgsc > 0 && omgstart <= omgpars[4]) {
stop ("Starting value for omg not in the support of its prior")
}
}
omg <- numeric(Nout)
omg[cumsum(c(1, Nmc[-nch]))] <- omgstart
if (needkappa) {
kappasc <- corrtuning[["kappa"]]
} else {
kappasc <- 0
kappa <- 0
}
if (is.null(kappasc) || !is.numeric(kappasc) || kappasc < 0)
stop ("Invalid tuning parameter for kappa.")
if (kappasc > 0) {
kappapars <- check_unif_prior(corrpriors[["kappa"]])
} else kappapars <- c(0, 0)
if (missing(kappa)) {
if (kappasc == 0) {
stop ("Argument kappa needed for fixed kappa")
} else {
kappastart <- (kappapars[1] + kappapars[2])*.5
}
} else {
kappastart <- as.double(kappa)
}
if (kappasc > 0) {
kappastart <- .geoBayes_getkappa(kappastart, icf)
kappapars <- .geoBayes_getkappa(kappapars, icf)
if (kappastart >= kappapars[2] || kappastart <= kappapars[1]) {
stop ("Starting value for kappa not in the support of its prior")
}
}
kappa <- numeric(Nout)
kappa[cumsum(c(1, Nmc[-nch]))] <- kappastart
## Run code
if (test > 0) { # Running a test
if (is.logical(test)) test <- 100
test <- as.integer(test)
acc <- acc_z <- 0L
tm <- system.time({
RUN <- .Fortran("trgasamtry_mala", ll = lglk, z = z, phi = phi, omg = omg,
kappa = kappa, acc = acc,
as.double(ybar), as.double(l), as.double(F),
as.double(offset),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(tsqdf), as.double(tsqsc),
as.double(phipars), as.double(omgpars),
as.double(kappapars),
as.double(phisc), as.double(omgsc),
as.double(kappasc), as.integer(icf),
as.double(nu), as.double(dm), as.integer(Nout),
as.integer(test), as.integer(k), as.integer(p),
as.double(malatuning), acc_z = acc_z,
PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
mm0 <- NULL
beta <- NULL
ssq <- NULL
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/Nout
acc_ratio_z <- RUN$acc_z/Nout
Nthin <- 1
Nbi <- 0
### out <- list(z = zz0, beta = beta, ssq = ssq, phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = y, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### tsqdf = tsqdf, tsqsc = tsqsc,
### locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
} else {
acc <- acc_z <- integer(nch)
tm <- system.time({
RUN <- .Fortran("trgasample_mala", ll = lglk, z = z, z0 = z0,
mu = z, mu0 = z0, beta = beta, ssq = ssq,
tsq = tsq, phi = phi, omg = omg,
kappa = kappa, acc = acc,
as.double(ybar),
as.double(l), as.double(F),
as.double(offset), as.double(F0), as.double(offset0),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(tsqdf), as.double(tsqsc),
as.double(phipars), as.double(omgpars),
as.double(kappapars),
as.double(phisc), as.double(omgsc), as.double(kappasc),
as.integer(icf),
as.double(nu), as.double(dm), as.double(dmdm0),
as.integer(nch), as.integer(Nmc),
as.integer(Nout), as.integer(Nbi), as.integer(Nthin),
as.integer(k), as.integer(k0), as.integer(p),
as.double(malatuning), acc_z = acc_z,
PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- mm0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
zz0[!ii, ] <- RUN$z0
mm0[ii, ] <- RUN$mu
mm0[!ii, ] <- RUN$mu0
beta <- RUN$beta
ssq <- RUN$ssq
tsq <- RUN$tsq
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/(Nmc*Nthin + max(Nthin, Nbi))
acc_ratio_z <- RUN$acc_z/(Nmc*Nthin + max(Nthin, Nbi))
### out <- list(z = zz0, mu = mm0, beta = beta, ssq = ssq, tsq = tsq,
### phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = ybar, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### tsqdf = tsqdf, tsqsc = tsqsc,
### locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
}
MCMC <- FIXED <- MODEL <- DATA <- list()
MCMC$z <- zz0
MCMC$mu <- mm0
MCMC$beta <- beta
MCMC$ssq <- ssq
MCMC$tsq <- tsq
FIXED$linkp <- linkp
FIXED$linkp_num <- nu
if (phisc == 0) {
FIXED$phi <- phi[1]
} else {
MCMC$phi <- phi
}
if (omgsc == 0) {
FIXED$omg <- omg[1]
} else {
MCMC$omg <- omg
}
if (kappasc == 0) {
FIXED$kappa <- kappa[1]
} else {
MCMC$kappa <- kappa
}
MCMC$logLik <- ll
MCMC$acc_ratio <- acc_ratio
MCMC$acc_ratio_z <- acc_ratio_z
MCMC$sys_time <- tm
MCMC$Nout <- Nout
MCMC$Nbi <- Nbi
MCMC$Nthin <- Nthin
MCMC$whichobs <- ii
DATA$response <- ybar
DATA$weights <- l
DATA$modelmatrix <- F
DATA$offset <- offset
DATA$locations <- loc[ii, , drop = FALSE]
DATA$longlat <- longlat
MODEL$family <- family
MODEL$corrfcn <- corrfcn
MODEL$betm0 <- betm0
MODEL$betQ0 <- betQ0
MODEL$ssqdf <- ssqdf
MODEL$ssqsc <- ssqsc
MODEL$tsqdf <- tsqdf
MODEL$tsqsc <- tsqsc
MODEL$phipars <- phipars
MODEL$omgpars <- omgpars
out <- list(MODEL = MODEL, DATA = DATA, FIXED = FIXED, MCMC = MCMC, call = cl)
class(out) <- "geomcmc"
out
}
## Source: cran/geoBayes, R/mcsp_mala.R (R source, no license specified, 41,011 bytes)
##' Draw MCMC samples from the Spatial GLMM with known link function
##'
##' The four-parameter prior for \code{phi} is defined by
##' \deqn{\propto (\phi - \theta_4)^{\theta_2 -1} \exp\{-(\frac{\phi -
##' \theta_4}{\theta_1})^{\theta_3}\}}{propto (phi -
##' phiprior[4])^(phiprior[2]-1) *
##' exp(-((phi-phiprior[4])/phiprior[1])^phiprior[3])} for \eqn{\phi >
##' \theta_4}{phi > phiprior[4]}. The prior for \code{omg} is similar.
##' The prior parameters correspond to scale, shape, exponent, and
##' location. See \code{arXiv:1005.3274} for details of this
##' distribution.
##'
##' The GEV (Generalised Extreme Value) link is defined by \deqn{\mu =
##' 1 - \exp\{-\max(0, 1 + \nu x)^{\frac{1}{\nu}}\}}{mu = 1 -
##' \exp[-max(0, 1 + nu x)^(1/nu)]} for any real \eqn{\nu}{nu}. At
##' \eqn{\nu = 0}{nu = 0} it reduces to the complementary log-log
##' link.
##' @title MCMC samples from the Spatial GLMM
##' @param formula A representation of the model in the form
##' \code{response ~ terms}. The response must be set to \code{NA}'s
##' at the prediction locations (see the examples on how to do this
##' using the function \code{\link{stackdata}}). At the observed
##' locations the response is assumed to be a total of replicated
##' measurements. The number of replications is inputted using the
##' argument \code{weights}.
##' @param family The distribution of the data. The
##' \code{"GEVbinomial"} family is the binomial family with link the
##' GEV link (see Details).
##' @param data An optional data frame containing the variables in the
##' model.
##' @param weights An optional vector of weights. Number of replicated
##' samples for Gaussian and gamma, number of trials for binomial,
##' time length for Poisson.
##' @param subset An optional vector specifying a subset of
##' observations to be used in the fitting process.
##' @param offset See \code{\link[stats]{lm}}.
##' @param atsample A formula in the form \code{~ x1 + x2 + ... + xd}
##' with the coordinates of the sampled locations.
##' @param corrfcn Spatial correlation function. See
##' \code{\link{geoBayes_correlation}} for details.
##' @param linkp Parameter of the link function. A scalar value.
##' @param phi Optional starting value for the MCMC for the
##' spatial range parameter \code{phi}. Defaults to the mean of its
##' prior. If \code{corrtuning[["phi"]]} is 0, then this argument is required and
##' it corresponds to the fixed value of \code{phi}. This can be a
##' vector of the same length as Nout.
##' @param omg Optional starting value for the MCMC for the
##' relative nugget parameter \code{omg}. Defaults to the mean of
##' its prior. If \code{corrtuning[["omg"]]} is 0, then this argument is required
##' and it corresponds to the fixed value of \code{omg}. This can be
##' a vector of the same length as Nout.
##' @param kappa Optional starting value for the MCMC for the
##' spatial correlation parameter \code{kappa} (Matern smoothness or
##' exponential power). Defaults to the mean of
##' its prior. If \code{corrtuning[["kappa"]]} is 0 and it is needed for
##' the chosen correlation function, then this argument is required
##' and it corresponds to the fixed value of \code{kappa}. This can be
##' a vector of the same length as Nout.
##' @param Nout Number of MCMC samples to return. This can be a vector
##' for running independent chains.
##' @param Nthin The thinning of the MCMC algorithm.
##' @param Nbi The burn-in of the MCMC algorithm.
##' @param betm0 Prior mean for beta (a vector or scalar).
##' @param betQ0 Prior standardised precision (inverse variance)
##' matrix. Can be a scalar, vector or matrix. The first two imply a
##' diagonal with those elements. Set this to 0 to indicate a flat
##' improper prior.
##' @param ssqdf Degrees of freedom for the scaled inverse chi-square
##' prior for the partial sill parameter.
##' @param ssqsc Scale for the scaled inverse chi-square prior for the
##' partial sill parameter.
##' @param corrpriors A list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' prior distribution parameters. For \code{phi} and \code{omg} it
##' must be a vector of length 4. The generalized inverse gamma
##' prior is assumed and the input corresponds to the parameters
##' scale, shape, exponent, location in that order (see Details).
##' For \code{kappa} it must be a vector of length 2. A uniform
##' prior is assumed and the input corresponds to the lower and
##' upper bounds in that order.
##' @param corrtuning A vector or list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' random walk parameter for the Metropolis-Hastings step. Smaller values
##' increase the acceptance ratio. Set this to 0 for fixed
##' parameter value.
##' @param malatuning Tuning parameter for the MALA updates.
##' @param dispersion The fixed dispersion parameter.
##' @param longlat How to compute the distance between locations. If
##' \code{FALSE}, Euclidean distance, if \code{TRUE} Great Circle
##' distance. See \code{\link[sp]{spDists}}.
##' @param test Whether this is a trial run to monitor the acceptance
##' ratio of the random walk for \code{phi} and \code{omg}. If set
##' to \code{TRUE}, the acceptance ratio will be printed on the
##' screen every 100 iterations of the MCMC. Tune the \code{phisc}
##' and \code{omgsc} parameters in order to achieve 20 to 30\%
##' acceptance. Set this to a positive number to change the default
##' 100. No thinning or burn-in are done when testing.
##' @return A list containing the objects \code{MODEL}, \code{DATA},
##' \code{FIXED}, \code{MCMC} and \code{call}. The MCMC samples are
##' stored in the object \code{MCMC} as follows:
##' \itemize{
##' \item \code{z} A matrix containing the MCMC samples for the
##' spatial random field. Each column is one sample.
##' \item \code{mu} A matrix containing the MCMC samples for the
##' mean response (a transformation of z). Each column is one sample.
##' \item \code{beta} A matrix containing the MCMC samples for the
##' regressor coefficients. Each column is one sample.
##' \item \code{ssq} A vector with the MCMC samples for the partial
##' sill parameter.
##' \item \code{phi} A vector with the MCMC samples for the spatial
##' range parameter, if sampled.
##' \item \code{omg} A vector with the MCMC samples for the relative
##' nugget parameter, if sampled.
##' \item \code{logLik} A vector containing the value of the
##' log-likelihood evaluated at each sample.
##' \item \code{acc_ratio} The acceptance ratio for the joint update
##' of the parameters \code{phi} and \code{omg}, if sampled.
##' \item \code{sys_time} The total computing time for the MCMC sampling.
##' \item \code{Nout}, \code{Nbi}, \code{Nthin} As in input. Used
##' internally in other functions.
##' }
##' The other objects contain input variables. The object \code{call}
##' contains the function call.
##' @examples \dontrun{
##' data(rhizoctonia)
##'
##' ### Create prediction grid
##' predgrid <- mkpredgrid2d(rhizoctonia[c("Xcoord", "Ycoord")],
##' par.x = 100, chull = TRUE, exf = 1.2)
##'
##' ### Combine observed and prediction locations
##' rhizdata <- stackdata(rhizoctonia, predgrid$grid)
##'
##' ### Define the model
##' corrf <- "spherical"
##' family <- "binomial.probit"
##' kappa <- 0
##' ssqdf <- 1
##' ssqsc <- 1
##' betm0 <- 0
##' betQ0 <- .01
##' phiprior <- c(100, 1, 1000, 100) # U(100, 200)
##' phisc <- 3
##' omgprior <- c(2, 1, 1, 0) # Exp(mean = 2)
##' omgsc <- .1
##'
##' ### MCMC sizes
##' Nout <- 100
##' Nthin <- 1
##' Nbi <- 0
##'
##' ### Trial run
##' emt <- mcsglmm_mala(Infected ~ 1, family, rhizdata, weights = Total,
##' atsample = ~ Xcoord + Ycoord,
##' Nout = Nout, Nthin = Nthin, Nbi = Nbi,
##' betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
##' corrpriors = list(phi = phiprior, omg = omgprior),
##' corrfcn = corrf, kappa = kappa,
##' corrtuning = list(phi = phisc, omg = omgsc, kappa = 0),
##' malatuning = .003, dispersion = 1, test = 10)
##'
##' ### Full run
##' emc <- update(emt, test = FALSE)
##'
##' emcmc <- mcmcmake(emc)
##' summary(emcmc[, c("phi", "omg", "beta", "ssq")])
##' plot(emcmc[, c("phi", "omg", "beta", "ssq")])
##' }
##' @importFrom sp spDists
##' @importFrom stats model.matrix model.response model.weights
##' as.formula update model.offset
##' @useDynLib geoBayes mcspsamtry mcspsample
##' @export
mcsglmm_mala <- function (formula, family = "gaussian",
data, weights, subset, offset,
atsample, corrfcn = "matern",
linkp, phi, omg, kappa,
Nout, Nthin = 1, Nbi = 0, betm0, betQ0, ssqdf, ssqsc,
corrpriors, corrtuning, malatuning,
dispersion = 1, longlat = FALSE, test = FALSE) {
cl <- match.call()
## Family
ifam <- .geoBayes_family(family)
if (ifam) {
family <- .geoBayes_models$family[ifam]
} else {
stop ("This family has not been implemented.")
}
if (.geoBayes_models$needlinkp[ifam]) {
if (missing(linkp))
stop ("Missing input linkp.")
} else {
linkp <- 0
}
## Correlation function
icf <- .geoBayes_correlation(corrfcn)
corrfcn <- .geoBayes_corrfcn$corrfcn[icf]
needkappa <- .geoBayes_corrfcn$needkappa[icf]
## Design matrix and data
if (missing(data)) data <- environment(formula)
if (length(formula) != 3) stop ("The formula input is incomplete.")
if ("|" == all.names(formula[[2]], TRUE, 1)) formula[[2]] <- formula[[2]][[2]]
mfc <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "weights", "offset"),
names(mfc), 0L)
mfc <- mfc[c(1L, m)]
mfc$formula <- formula
mfc$drop.unused.levels <- TRUE
mfc$na.action <- "na.pass"
mfc[[1L]] <- quote(stats::model.frame)
mf <- eval(mfc, parent.frame())
mt <- attr(mf, "terms")
FF <- model.matrix(mt,mf)
if (!all(is.finite(FF))) stop ("Non-finite values in the design matrix")
p <- NCOL(FF)
yy <- unclass(model.response(mf))
if (!is.vector(yy)) {
stop ("The response must be a vector")
}
yy <- as.double(yy)
ll <- model.weights(mf)
oofset <- as.vector(model.offset(mf))
if (!is.null(oofset)) {
if (length(oofset) != NROW(yy)) {
stop(gettextf("number of offsets is %d, should equal %d (number of observations)",
length(oofset), NROW(yy)), domain = NA)
} else {
oofset <- as.double(oofset)
}
} else {
oofset <- double(NROW(yy))
}
## All locations
atsample <- update(atsample, NULL ~ . + 0) # No response and no intercept
mfatc <- mfc
mfatc$weights = NULL
mfatc$formula = atsample
mfat <- eval(mfatc, parent.frame())
loc <- as.matrix(mfat)
if (!all(is.finite(loc))) stop ("Non-finite values in the locations")
if (corrfcn == "spherical" && NCOL(loc) > 3) {
stop ("Cannot use the spherical correlation for dimensions
grater than 3.")
}
## Split sample, prediction
ii <- is.finite(yy)
y <- yy[ii]
k <- sum(ii)
l <- ll[ii]
l <- if (is.null(l)) rep.int(1.0, k) else as.double(l)
if (any(!is.finite(l))) stop ("Non-finite values in the weights")
if (any(l <= 0)) stop ("Non-positive weights not allowed")
if (grepl("^binomial(\\..+)?$", family)) {
l <- l - y # Number of failures
}
F <- FF[ii, , drop = FALSE]
offset <- oofset[ii]
dm <- sp::spDists(loc[ii, , drop = FALSE], longlat = longlat)
k0 <- sum(!ii)
if (k0 > 0) {
F0 <- FF[!ii, , drop = FALSE]
dmdm0 <- sp::spDists(loc[ii, , drop = FALSE], loc[!ii, , drop = FALSE],
longlat = longlat)
offset0 <- oofset[!ii]
} else {
F0 <- dmdm0 <- offset0 <- numeric(0)
dim(F0) <- c(0, p)
dim(dmdm0) <- c(k, 0)
}
## Prior for ssq
ssqdf <- as.double(ssqdf)
  if (ssqdf <= 0) stop ("Argument ssqdf must be > 0")
ssqsc <- as.double(ssqsc)
  if (ssqsc <= 0) stop ("Argument ssqsc must be > 0")
## Prior for beta
betaprior <- getbetaprior(betm0, betQ0, p)
betm0 <- betaprior$betm0
betQ0 <- betaprior$betQ0
## Other fixed parameters
dispersion <- as.double(dispersion)
if (dispersion <= 0) stop ("Invalid argument dispersion")
nu <- .geoBayes_getlinkp(linkp, ifam)
## MCMC samples
Nout <- as.integer(Nout)
if (any(Nout < 0)) stop ("Negative MCMC sample size entered.")
nch <- length(Nout) # Number of chains
Nmc <- Nout # Size of each chain
Nout <- sum(Nout) # Total MCMC size
Nbi <- as.integer(Nbi)
Nthin <- as.integer(Nthin)
lglk <- numeric(Nout)
z <- matrix(0, k, Nout)
z0 <- matrix(0, k0, Nout)
beta <- matrix(0, p, Nout)
ssq <- numeric(Nout)
  if (malatuning <= 0) stop ("Input malatuning must be > 0.")
## Starting values for correlation parameters
phisc <- corrtuning[["phi"]]
if (is.null(phisc) || !is.numeric(phisc) || phisc < 0)
stop ("Invalid tuning parameter for phi.")
if (phisc > 0) {
phipars <- check_gengamma_prior(corrpriors[["phi"]])
} else phipars <- rep.int(0, 4)
if (missing(phi)) {
if (phisc == 0) {
stop ("Argument phi needed for fixed phi")
} else {
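      ## Default start for phi: the mean of its generalised gamma prior (see Details)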
if(phipars[2] == -1) {
tmp <- .1/abs(phipars[3])
} else {
tmp <- abs((phipars[2]+1)/phipars[3])
}
phistart <- phipars[4] + phipars[1]*gamma(tmp)/
gamma(phipars[2]/phipars[3])
}
} else {
phistart <- as.double(phi)
if (phisc > 0 && phistart <= phipars[4]) {
stop ("Starting value for phi not in the support of its prior")
}
}
phi <- numeric(Nout)
phi[cumsum(c(1, Nmc[-nch]))] <- phistart
omgsc <- corrtuning[["omg"]]
if (is.null(omgsc) || !is.numeric(omgsc) || omgsc < 0)
stop ("Invalid tuning parameter for omg.")
if (omgsc > 0) {
omgpars <- check_gengamma_prior(corrpriors[["omg"]])
} else omgpars <- rep.int(0, 4)
if (missing(omg)) {
if (omgsc == 0) {
stop ("Argument omg needed for fixed omg")
} else {
if(omgpars[2] == -1) {
tmp <- .1/abs(omgpars[3])
} else {
tmp <- abs((omgpars[2]+1)/omgpars[3])
}
omgstart <- omgpars[4] + omgpars[1]*gamma(tmp)/
gamma(omgpars[2]/omgpars[3])
}
} else {
omgstart <- as.double(omg)
if (omgsc > 0 && omgstart <= omgpars[4]) {
stop ("Starting value for omg not in the support of its prior")
}
}
omg <- numeric(Nout)
omg[cumsum(c(1, Nmc[-nch]))] <- omgstart
if (needkappa) {
kappasc <- corrtuning[["kappa"]]
} else {
kappasc <- 0
kappa <- 0
}
if (is.null(kappasc) || !is.numeric(kappasc) || kappasc < 0)
stop ("Invalid tuning parameter for kappa.")
if (kappasc > 0) {
kappapars <- check_unif_prior(corrpriors[["kappa"]])
} else kappapars <- c(0, 0)
if (missing(kappa)) {
if (kappasc == 0) {
stop ("Argument kappa needed for fixed kappa")
} else {
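      ## Default start for kappa: midpoint (i.e. mean) of its uniform prior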
kappastart <- (kappapars[1] + kappapars[2])*.5
}
} else {
kappastart <- as.double(kappa)
}
if (kappasc > 0) {
kappastart <- .geoBayes_getkappa(kappastart, icf)
kappapars <- .geoBayes_getkappa(kappapars, icf)
if (kappastart >= kappapars[2] || kappastart <= kappapars[1]) {
stop ("Starting value for kappa not in the support of its prior")
}
}
kappa <- numeric(Nout)
kappa[cumsum(c(1, Nmc[-nch]))] <- kappastart
## Run code
if (test > 0) { # Running a test
if (is.logical(test)) test <- 100
test <- as.integer(test)
acc <- acc_z <- 0L
tm <- system.time({
RUN <- .Fortran("mcspsamtry_mala", ll = lglk, z = z, phi = phi, omg = omg,
kappa = kappa,
acc = acc,
as.double(y), as.double(l), as.double(F),
as.double(offset),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(phipars),
as.double(omgpars), as.double(kappapars),
as.double(phisc), as.double(omgsc), as.double(kappasc),
as.integer(icf),
as.double(nu), as.double(dispersion), as.double(dm),
as.integer(Nout), as.integer(test), as.integer(k),
as.integer(p), as.integer(ifam), as.double(malatuning),
acc_z = acc_z, PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
mm0 <- NULL
beta <- NULL
ssq <- NULL
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/Nout
acc_ratio_z <- RUN$acc_z/Nout
Nthin <- 1
Nbi <- 0
### out <- list(z = zz0, beta = beta, ssq = ssq, phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = y, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### dispersion = dispersion, locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
} else {
acc <- acc_z <- integer(nch)
tm <- system.time({
RUN <- .Fortran("mcspsample_mala", ll = lglk, z = z, z0 = z0,
mu = z, mu0 = z0,
beta = beta, ssq = ssq,
phi = phi, omg = omg, kappa = kappa, acc = acc,
as.double(y), as.double(l), as.double(F),
as.double(offset), as.double(F0), as.double(offset0),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(phipars), as.double(omgpars),
as.double(kappapars),
as.double(phisc), as.double(omgsc), as.double(kappasc),
as.integer(icf),
as.double(nu), as.double(dispersion), as.double(dm),
as.double(dmdm0), as.integer(nch), as.integer(Nmc),
as.integer(Nout), as.integer(Nbi),
as.integer(Nthin), as.integer(k), as.integer(k0),
as.integer(p), as.integer(ifam), as.double(malatuning),
acc_z = acc_z,
PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- mm0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
zz0[!ii, ] <- RUN$z0
mm0[ii, ] <- RUN$mu
mm0[!ii, ] <- RUN$mu0
beta <- RUN$beta
ssq <- RUN$ssq
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/(Nmc*Nthin + max(Nthin, Nbi))
acc_ratio_z <- RUN$acc_z/(Nmc*Nthin + max(Nthin, Nbi))
### out <- list(z = zz0, mu = mm0,
### beta = beta, ssq = ssq, phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = y, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### dispersion = dispersion, locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
}
MCMC <- FIXED <- MODEL <- DATA <- list()
MCMC$z <- zz0
MCMC$mu <- mm0
MCMC$beta <- beta
MCMC$ssq <- ssq
FIXED$linkp <- as.vector(linkp)
FIXED$linkp_num <- nu
if (phisc == 0) {
FIXED$phi <- phi[1]
} else {
MCMC$phi <- phi
}
if (omgsc == 0) {
FIXED$omg <- omg[1]
} else {
MCMC$omg <- omg
}
if (kappasc == 0) {
FIXED$kappa <- kappa[1]
} else {
MCMC$kappa <- kappa
}
MCMC$logLik <- ll
MCMC$acc_ratio <- acc_ratio
MCMC$acc_ratio_z <- acc_ratio_z
MCMC$sys_time <- tm
MCMC$Nout <- Nout
MCMC$Nbi <- Nbi
MCMC$Nthin <- Nthin
MCMC$whichobs <- ii
DATA$response <- y
DATA$weights <- l
DATA$modelmatrix <- F
DATA$offset <- offset
DATA$locations <- loc[ii, , drop = FALSE]
DATA$longlat <- longlat
MODEL$family <- family
MODEL$corrfcn <- corrfcn
MODEL$betm0 <- betm0
MODEL$betQ0 <- betQ0
MODEL$ssqdf <- ssqdf
MODEL$ssqsc <- ssqsc
MODEL$phipars <- phipars
MODEL$omgpars <- omgpars
MODEL$dispersion <- dispersion
out <- list(MODEL = MODEL, DATA = DATA, FIXED = FIXED, MCMC = MCMC, call = cl)
class(out) <- "geomcmc"
out
}
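## The GEV link described in the Details above maps the linear predictor x to
## the mean mu = 1 - exp(-max(0, 1 + nu*x)^(1/nu)) and reduces to the
## complementary log-log link as nu -> 0. The sketch below is an illustration
## only: the helper name gev_link_mean is hypothetical and not part of
## geoBayes, which evaluates the link internally (in the Fortran routines).
gev_link_mean <- function(x, nu) {
  if (abs(nu) < .Machine$double.eps^0.5) {
    ## nu = 0: complementary log-log limit
    1 - exp(-exp(x))
  } else {
    1 - exp(-pmax(0, 1 + nu * x)^(1 / nu))
  }
}
## e.g. gev_link_mean(0.3, nu = 0.1) is close to gev_link_mean(0.3, nu = 0)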
##' Draw MCMC samples from the transformed Gaussian model with known
##' link function
##'
##' Simulates from the posterior distribution of this model.
##' @title MCMC samples from the transformed Gaussian model
##' @param formula A representation of the model in the form
##' \code{response ~ terms}. The response must be set to \code{NA}'s
##' at the prediction locations (see the example in
##' \code{\link{mcsglmm}} for how to do this using
##' \code{\link{stackdata}}). At the observed locations the response
##' is assumed to be a total of replicated measurements. The number of
##' replications is inputted using the argument \code{weights}.
##' @param data An optional data frame containing the variables in the
##' model.
##' @param weights An optional vector of weights. Number of replicated
##' samples.
##' @param subset An optional vector specifying a subset of
##' observations to be used in the fitting process.
##' @param offset See \code{\link[stats]{lm}}.
##' @param atsample A formula in the form \code{~ x1 + x2 + ... + xd}
##' with the coordinates of the sampled locations.
##' @param corrfcn Spatial correlation function. See
##' \code{\link{geoBayes_correlation}} for details.
##' @param linkp Parameter of the link function. A scalar value.
##' @param phi Optional starting value for the MCMC for the
##' spatial range parameter \code{phi}. Defaults to the mean of its
##' prior. If \code{corrtuning[["phi"]]} is 0, then this argument is required and
##' it corresponds to the fixed value of \code{phi}. This can be a
##' vector of the same length as Nout.
##' @param omg Optional starting value for the MCMC for the
##' relative nugget parameter \code{omg}. Defaults to the mean of
##' its prior. If \code{corrtuning[["omg"]]} is 0, then this argument is required
##' and it corresponds to the fixed value of \code{omg}. This can be
##' a vector of the same length as Nout.
##' @param kappa Optional starting value for the MCMC for the
##' spatial correlation parameter \code{kappa} (Matern smoothness or
##' exponential power). Defaults to the mean of
##' its prior. If \code{corrtuning[["kappa"]]} is 0 and it is needed for
##' the chosen correlation function, then this argument is required
##' and it corresponds to the fixed value of \code{kappa}. This can be
##' a vector of the same length as Nout.
##' @param Nout Number of MCMC samples to return. This can be a vector
##' for running independent chains.
##' @param Nthin The thinning of the MCMC algorithm.
##' @param Nbi The burn-in of the MCMC algorithm.
##' @param betm0 Prior mean for beta (a vector or scalar).
##' @param betQ0 Prior standardised precision (inverse variance)
##' matrix. Can be a scalar, vector or matrix. The first two imply a
##' diagonal with those elements. Set this to 0 to indicate a flat
##' improper prior.
##' @param ssqdf Degrees of freedom for the scaled inverse chi-square
##' prior for the partial sill parameter.
##' @param ssqsc Scale for the scaled inverse chi-square prior for the
##' partial sill parameter.
##' @param tsqdf Degrees of freedom for the scaled inverse chi-square
##' prior for the measurement error parameter.
##' @param tsqsc Scale for the scaled inverse chi-square prior for the
##' measurement error parameter.
##' @param corrpriors A list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' prior distribution parameters. For \code{phi} and \code{omg} it
##' must be a vector of length 4. The generalized inverse gamma
##' prior is assumed and the input corresponds to the parameters
##' scale, shape, exponent, location in that order (see Details).
##' For \code{kappa} it must be a vector of length 2. A uniform
##' prior is assumed and the input corresponds to the lower and
##' upper bounds in that order.
##' @param malatuning Tuning parameter for the MALA updates.
##' @param corrtuning A vector or list with the components \code{phi},
##' \code{omg} and \code{kappa} as needed. These correspond to the
##' random walk parameter for the Metropolis-Hastings step. Smaller values
##' increase the acceptance ratio. Set this to 0 for fixed
##' parameter value.
##' @param longlat How to compute the distance between locations. If
##' \code{FALSE}, Euclidean distance, if \code{TRUE} Great Circle
##' distance. See \code{\link[sp]{spDists}}.
##' @param test Whether this is a trial run to monitor the acceptance
##' ratio of the random walk for \code{phi} and \code{omg}. If set to
##' \code{TRUE}, the acceptance ratio will be printed on the screen
##' every 100 iterations of the MCMC. Tune the \code{phisc} and
##' \code{omgsc} parameters in order to achieve 20 to 30\% acceptance.
##' Set this to a positive number to change the default 100. No
##' thinning or burn-in are done when testing.
##' @return A list containing the objects \code{MODEL}, \code{DATA},
##' \code{FIXED}, \code{MCMC} and \code{call}. The MCMC samples are
##' stored in the object \code{MCMC} as follows:
##' \itemize{
##' \item \code{z} A matrix containing the MCMC samples for the
##' spatial random field. Each column is one sample.
##' \item \code{mu} A matrix containing the MCMC samples for the
##' mean response (a transformation of z). Each column is one sample.
##' \item \code{beta} A matrix containing the MCMC samples for the
##' regressor coefficients. Each column is one sample.
##' \item \code{ssq} A vector with the MCMC samples for the partial
##' sill parameter.
##' \item \code{tsq} A vector with the MCMC samples for the
##' measurement error variance.
##' \item \code{phi} A vector with the MCMC samples for the spatial
##' range parameter, if sampled.
##' \item \code{omg} A vector with the MCMC samples for the relative
##' nugget parameter, if sampled.
##' \item \code{logLik} A vector containing the value of the
##' log-likelihood evaluated at each sample.
##' \item \code{acc_ratio} The acceptance ratio for the joint update
##' of the parameters \code{phi} and \code{omg}, if sampled.
##' \item \code{sys_time} The total computing time for the MCMC sampling.
##' \item \code{Nout}, \code{Nbi}, \code{Nthin} As in input. Used
##' internally in other functions.
##' }
##' The other objects contain input variables. The object \code{call}
##' contains the function call.
##' @examples \dontrun{
##' ### Load the data
##' data(rhizoctonia)
##' rhiz <- na.omit(rhizoctonia)
##' rhiz$IR <- rhiz$Infected/rhiz$Total # Incidence rate of the
##' # rhizoctonia disease
##'
##' ### Define the model
##' corrf <- "spherical"
##' ssqdf <- 1
##' ssqsc <- 1
##' tsqdf <- 1
##' tsqsc <- 1
##' betm0 <- 0
##' betQ0 <- diag(.01, 2, 2)
##' phiprior <- c(200, 1, 1000, 100) # U(100, 300)
##' phisc <- 1
##' omgprior <- c(3, 1, 1000, 0) # U(0, 3)
##' omgsc <- 1
##' linkp <- 1
##'
##' ## MCMC parameters
##' Nout <- 100
##' Nbi <- 0
##' Nthin <- 1
##'
##' samplt <- mcstrga_mala(Yield ~ IR, data = rhiz,
##' atsample = ~ Xcoord + Ycoord, corrf = corrf,
##' Nout = Nout, Nthin = Nthin,
##' Nbi = Nbi, betm0 = betm0, betQ0 = betQ0,
##' ssqdf = ssqdf, ssqsc = ssqsc,
##' tsqdf = tsqdf, tsqsc = tsqsc,
##' corrprior = list(phi = phiprior, omg = omgprior),
##' linkp = linkp,
##' corrtuning = list(phi = phisc, omg = omgsc, kappa = 0),
##' malatuning = .0002, test=10)
##'
##' sample <- update(samplt, test = FALSE)
##' }
##' @importFrom sp spDists
##' @importFrom stats model.matrix model.response model.weights
##' as.formula update model.offset
##' @useDynLib geoBayes trgasamtry trgasample
##' @export
mcstrga_mala <- function (formula,
data, weights, subset, offset,
atsample, corrfcn = "matern",
linkp, phi, omg, kappa,
Nout, Nthin = 1, Nbi = 0, betm0, betQ0, ssqdf, ssqsc,
tsqdf, tsqsc,
corrpriors, corrtuning, malatuning,
longlat = FALSE,
test = FALSE) {
cl <- match.call()
family <- "transformed.gaussian"
## Correlation function
icf <- .geoBayes_correlation(corrfcn)
corrfcn <- .geoBayes_corrfcn$corrfcn[icf]
needkappa <- .geoBayes_corrfcn$needkappa[icf]
## Design matrix and data
if (missing(data)) data <- environment(formula)
if (length(formula) != 3) stop ("The formula input is incomplete.")
if ("|" == all.names(formula[[2]], TRUE, 1)) formula[[2]] <- formula[[2]][[2]]
mfc <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "weights", "offset"),
names(mfc), 0L)
mfc <- mfc[c(1L, m)]
mfc$formula <- formula
mfc$drop.unused.levels <- TRUE
mfc$na.action <- "na.pass"
mfc[[1L]] <- quote(stats::model.frame)
mf <- eval(mfc, parent.frame())
mt <- attr(mf, "terms")
FF <- model.matrix(mt,mf)
if (!all(is.finite(FF))) stop ("Non-finite values in the design matrix")
p <- NCOL(FF)
yy <- unclass(model.response(mf))
if (!is.vector(yy)) {
stop ("The response must be a vector")
}
yy <- as.double(yy)
ll <- model.weights(mf)
oofset <- as.vector(model.offset(mf))
if (!is.null(oofset)) {
if (length(oofset) != NROW(yy)) {
stop(gettextf("number of offsets is %d, should equal %d (number of observations)",
length(oofset), NROW(yy)), domain = NA)
} else {
oofset <- as.double(oofset)
}
} else {
oofset <- double(NROW(yy))
}
## All locations
atsample <- update(atsample, NULL ~ . + 0) # No response and no intercept
mfatc <- mfc
mfatc$weights = NULL
mfatc$formula = atsample
mfat <- eval(mfatc, parent.frame())
loc <- as.matrix(mfat)
if (!all(is.finite(loc))) stop ("Non-finite values in the locations")
if (corrfcn == "spherical" && NCOL(loc) > 3) {
stop ("Cannot use the spherical correlation for dimensions
grater than 3.")
}
## Split sample, prediction
ii <- is.finite(yy)
y <- yy[ii]
k <- sum(ii)
l <- ll[ii]
l <- if (is.null(l)) rep.int(1.0, k) else as.double(l)
if (any(!is.finite(l))) stop ("Non-finite values in the weights")
if (any(l <= 0)) stop ("Non-positive weights not allowed")
ybar <- y/l
F <- FF[ii, , drop = FALSE]
offset <- oofset[ii]
dm <- sp::spDists(loc[ii, , drop = FALSE], longlat = longlat)
k0 <- sum(!ii)
if (k0 > 0) {
F0 <- FF[!ii, , drop = FALSE]
dmdm0 <- sp::spDists(loc[ii, , drop = FALSE], loc[!ii, , drop = FALSE],
longlat = longlat)
offset0 <- oofset[!ii]
} else {
F0 <- dmdm0 <- offset0 <- numeric(0)
dim(F0) <- c(0, p)
dim(dmdm0) <- c(k, 0)
}
## Prior for ssq
ssqdf <- as.double(ssqdf)
  if (ssqdf <= 0) stop ("Argument ssqdf must be > 0")
ssqsc <- as.double(ssqsc)
  if (ssqsc <= 0) stop ("Argument ssqsc must be > 0")
## Prior for beta
betaprior <- getbetaprior(betm0, betQ0, p)
betm0 <- betaprior$betm0
betQ0 <- betaprior$betQ0
## Prior for tsq
tsqdf <- as.double(tsqdf)
  if (tsqdf <= 0) stop ("Argument tsqdf must be > 0")
tsqsc <- as.double(tsqsc)
  if (tsqsc <= 0) stop ("Argument tsqsc must be > 0")
if (missing(linkp))
stop ("Missing input linkp.")
nu <- .geoBayes_getlinkp(linkp, family)
## MCMC samples
Nout <- as.integer(Nout)
if (any(Nout < 0)) stop ("Negative MCMC sample size entered.")
nch <- length(Nout) # Number of chains
Nmc <- Nout # Size of each chain
Nout <- sum(Nout) # Total MCMC size
Nbi <- as.integer(Nbi)
Nthin <- as.integer(Nthin)
lglk <- numeric(Nout)
z <- matrix(0, k, Nout)
z0 <- matrix(0, k0, Nout)
beta <- matrix(0, p, Nout)
ssq <- tsq <- numeric(Nout)
  if (malatuning <= 0) stop ("Input malatuning must be > 0.")
## Starting values for correlation parameters
phisc <- corrtuning[["phi"]]
if (is.null(phisc) || !is.numeric(phisc) || phisc < 0)
stop ("Invalid tuning parameter for phi.")
if (phisc > 0) {
phipars <- check_gengamma_prior(corrpriors[["phi"]])
} else phipars <- rep.int(0, 4)
if (missing(phi)) {
if (phisc == 0) {
stop ("Argument phi needed for fixed phi")
} else {
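      ## Default start for phi: the mean of its generalised gamma prior (see Details)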
if(phipars[2] == -1) {
tmp <- .1/abs(phipars[3])
} else {
tmp <- abs((phipars[2]+1)/phipars[3])
}
phistart <- phipars[4] + phipars[1]*gamma(tmp)/
gamma(phipars[2]/phipars[3])
}
} else {
phistart <- as.double(phi)
if (phisc > 0 && phistart <= phipars[4]) {
stop ("Starting value for phi not in the support of its prior")
}
}
phi <- numeric(Nout)
phi[cumsum(c(1, Nmc[-nch]))] <- phistart
omgsc <- corrtuning[["omg"]]
if (is.null(omgsc) || !is.numeric(omgsc) || omgsc < 0)
stop ("Invalid tuning parameter for omg.")
if (omgsc > 0) {
omgpars <- check_gengamma_prior(corrpriors[["omg"]])
} else omgpars <- rep.int(0, 4)
if (missing(omg)) {
if (omgsc == 0) {
stop ("Argument omg needed for fixed omg")
} else {
if(omgpars[2] == -1) {
tmp <- .1/abs(omgpars[3])
} else {
tmp <- abs((omgpars[2]+1)/omgpars[3])
}
omgstart <- omgpars[4] + omgpars[1]*gamma(tmp)/
gamma(omgpars[2]/omgpars[3])
}
} else {
omgstart <- as.double(omg)
if (omgsc > 0 && omgstart <= omgpars[4]) {
stop ("Starting value for omg not in the support of its prior")
}
}
omg <- numeric(Nout)
omg[cumsum(c(1, Nmc[-nch]))] <- omgstart
if (needkappa) {
kappasc <- corrtuning[["kappa"]]
} else {
kappasc <- 0
kappa <- 0
}
if (is.null(kappasc) || !is.numeric(kappasc) || kappasc < 0)
stop ("Invalid tuning parameter for kappa.")
if (kappasc > 0) {
kappapars <- check_unif_prior(corrpriors[["kappa"]])
} else kappapars <- c(0, 0)
if (missing(kappa)) {
if (kappasc == 0) {
stop ("Argument kappa needed for fixed kappa")
} else {
kappastart <- (kappapars[1] + kappapars[2])*.5
}
} else {
kappastart <- as.double(kappa)
}
if (kappasc > 0) {
kappastart <- .geoBayes_getkappa(kappastart, icf)
kappapars <- .geoBayes_getkappa(kappapars, icf)
if (kappastart >= kappapars[2] || kappastart <= kappapars[1]) {
stop ("Starting value for kappa not in the support of its prior")
}
}
kappa <- numeric(Nout)
kappa[cumsum(c(1, Nmc[-nch]))] <- kappastart
## Run code
if (test > 0) { # Running a test
if (is.logical(test)) test <- 100
test <- as.integer(test)
acc <- acc_z <- 0L
tm <- system.time({
RUN <- .Fortran("trgasamtry_mala", ll = lglk, z = z, phi = phi, omg = omg,
kappa = kappa, acc = acc,
as.double(ybar), as.double(l), as.double(F),
as.double(offset),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(tsqdf), as.double(tsqsc),
as.double(phipars), as.double(omgpars),
as.double(kappapars),
as.double(phisc), as.double(omgsc),
as.double(kappasc), as.integer(icf),
as.double(nu), as.double(dm), as.integer(Nout),
as.integer(test), as.integer(k), as.integer(p),
as.double(malatuning), acc_z = acc_z,
PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
mm0 <- NULL
beta <- NULL
ssq <- NULL
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/Nout
acc_ratio_z <- RUN$acc_z/Nout
Nthin <- 1
Nbi <- 0
### out <- list(z = zz0, beta = beta, ssq = ssq, phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = y, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### tsqdf = tsqdf, tsqsc = tsqsc,
### locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
} else {
acc <- acc_z <- integer(nch)
tm <- system.time({
RUN <- .Fortran("trgasample_mala", ll = lglk, z = z, z0 = z0,
mu = z, mu0 = z0, beta = beta, ssq = ssq,
tsq = tsq, phi = phi, omg = omg,
kappa = kappa, acc = acc,
as.double(ybar),
as.double(l), as.double(F),
as.double(offset), as.double(F0), as.double(offset0),
as.double(betm0), as.double(betQ0), as.double(ssqdf),
as.double(ssqsc), as.double(tsqdf), as.double(tsqsc),
as.double(phipars), as.double(omgpars),
as.double(kappapars),
as.double(phisc), as.double(omgsc), as.double(kappasc),
as.integer(icf),
as.double(nu), as.double(dm), as.double(dmdm0),
as.integer(nch), as.integer(Nmc),
as.integer(Nout), as.integer(Nbi), as.integer(Nthin),
as.integer(k), as.integer(k0), as.integer(p),
as.double(malatuning), acc_z = acc_z,
PACKAGE = "geoBayes")
})
## Store samples
ll <- RUN$ll
zz0 <- mm0 <- matrix(NA, NROW(yy), Nout)
zz0[ii, ] <- RUN$z
zz0[!ii, ] <- RUN$z0
mm0[ii, ] <- RUN$mu
mm0[!ii, ] <- RUN$mu0
beta <- RUN$beta
ssq <- RUN$ssq
tsq <- RUN$tsq
phi <- RUN$phi
### attr(phi, 'fixed') <- phisc == 0
omg <- RUN$omg
### attr(omg, 'fixed') <- omgsc == 0
### attr(nu, 'fixed') <- TRUE
kappa <- RUN$kappa
acc_ratio <- RUN$acc/(Nmc*Nthin + max(Nthin, Nbi))
acc_ratio_z <- RUN$acc_z/(Nmc*Nthin + max(Nthin, Nbi))
### out <- list(z = zz0, mu = mm0, beta = beta, ssq = ssq, tsq = tsq,
### phi = phi, omg = omg, nu = nu,
### logLik = ll, acc_ratio = acc_ratio, sys_time = tm,
### Nout = Nout, Nbi = Nbi, Nthin = Nthin,
### response = ybar, weights = l, modelmatrix = F, family = family,
### betm0 = betm0, betQ0 = betQ0, ssqdf = ssqdf, ssqsc = ssqsc,
### corrfcn = corrfcn, kappa = kappa,
### tsqdf = tsqdf, tsqsc = tsqsc,
### locations = loc[ii, , drop = FALSE],
### longlat = longlat, whichobs = ii)
}
MCMC <- FIXED <- MODEL <- DATA <- list()
MCMC$z <- zz0
MCMC$mu <- mm0
MCMC$beta <- beta
MCMC$ssq <- ssq
MCMC$tsq <- tsq
FIXED$linkp <- linkp
FIXED$linkp_num <- nu
if (phisc == 0) {
FIXED$phi <- phi[1]
} else {
MCMC$phi <- phi
}
if (omgsc == 0) {
FIXED$omg <- omg[1]
} else {
MCMC$omg <- omg
}
if (kappasc == 0) {
FIXED$kappa <- kappa[1]
} else {
MCMC$kappa <- kappa
}
MCMC$logLik <- ll
MCMC$acc_ratio <- acc_ratio
MCMC$acc_ratio_z <- acc_ratio_z
MCMC$sys_time <- tm
MCMC$Nout <- Nout
MCMC$Nbi <- Nbi
MCMC$Nthin <- Nthin
MCMC$whichobs <- ii
DATA$response <- ybar
DATA$weights <- l
DATA$modelmatrix <- F
DATA$offset <- offset
DATA$locations <- loc[ii, , drop = FALSE]
DATA$longlat <- longlat
MODEL$family <- family
MODEL$corrfcn <- corrfcn
MODEL$betm0 <- betm0
MODEL$betQ0 <- betQ0
MODEL$ssqdf <- ssqdf
MODEL$ssqsc <- ssqsc
MODEL$tsqdf <- tsqdf
MODEL$tsqsc <- tsqsc
MODEL$phipars <- phipars
MODEL$omgpars <- omgpars
out <- list(MODEL = MODEL, DATA = DATA, FIXED = FIXED, MCMC = MCMC, call = cl)
class(out) <- "geomcmc"
out
}
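## The corrpriors entries for phi and omg parameterise the four-parameter
## generalised (shifted) gamma prior described in the Details above, with
## unnormalised density (x - location)^(shape - 1) *
## exp(-((x - location)/scale)^exponent) for x > location, and prior mean
## location + scale * gamma((shape + 1)/exponent) / gamma(shape/exponent),
## which matches the default starting values computed above (up to the
## special case handled when shape == -1). The helper below is an
## illustrative sketch only; dgengamma_unnorm is a hypothetical name, not a
## geoBayes function.
dgengamma_unnorm <- function(x, pars) {
  ## pars = c(scale, shape, exponent, location), as in corrpriors
  scale <- pars[1]; shape <- pars[2]; expo <- pars[3]; loc <- pars[4]
  out <- numeric(length(x))
  ok <- x > loc
  out[ok] <- (x[ok] - loc)^(shape - 1) * exp(-((x[ok] - loc) / scale)^expo)
  out
}
## e.g. the documented example prior c(100, 1, 1000, 100) is roughly uniform
## on (100, 200): curve(dgengamma_unnorm(x, c(100, 1, 1000, 100)), 100, 300)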
## ----echo=TRUE-----------------------------------------------------------
# loading library
library(BHTSpack)
# Generating a data set of 100 8x10 plates, each plate containing 80 compounds.
# A total of 8000 compounds. 40% of the compounds are hits.
Z = data.create(N=80, nr=8, nc=10, M=100, p=0.4, s=1234)
# Generating the data set as before, but this time adding plate noise to all compounds
Z = data.create(N=80, nr=8, nc=10, M=100, p=0.4, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
# Running the model with 200 iterations
system.time(b.est <- bhts(Z[["Z"]], iters=200, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
# Compute threshold (r) for significant hit probabilities at FDR=0.05
res = r.fdr(b.est, fdr=0.05)
names(res)
res[["r"]]
# Significant compound hit list
head(res[["res"]])
# Trace plots of hit compound activity
ptrace(b.est, "mu1", ndisc=100, nr=3, nc=4)
# ACF plots of hit compound activity
ptrace(b.est, "mu1", ndisc=100, nr=3, nc=4, type="acf")
sessionInfo()
## ----echo=TRUE-----------------------------------------------------------
# loading library
library(BHTSpack)
# Generating a data set of 100 8x10 plates, each plate containing 80 compounds.
# A total of 8000 compounds. 40% of the compounds are hits.
Z = data.create(N=80, nr=8, nc=10, M=100, p=0.4, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
# Running the model with 200 iterations
b.est = bhts(Z[["Z"]], iters=200, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE)
# create an html file
#bhts2HTML(res, dir="/dir/", fname="tophits")
## ----echo=TRUE-----------------------------------------------------------
library(BHTSpack)
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.4, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
I = unlist(Z[["I"]])
B = unlist(Z[["B"]])
Z = unlist(Z[["Z"]])
plot(density(Z[I==1 & B==0]), xlim=range(Z), ylim=c(0,6), col="black", lty=2, ylab="Density", main="", xlab="Raw Value")
lines(density(Z[I==1 & B==0]), col="blue", lty=2)
lines(density(Z[I==2 & B==0]), col="green", lty=2)
lines(density(Z[I==3 & B==0]), col="yellow", lty=2)
lines(density(Z[I==4 & B==0]), col="red", lty=2)
lines(density(Z[B==0]), col="black", lty=2, lwd=2)
lines(density(Z[I==1 & B==1]), col="blue", lty=3)
lines(density(Z[I==2 & B==1]), col="green", lty=3)
lines(density(Z[I==3 & B==1]), col="yellow", lty=3)
lines(density(Z[I==4 & B==1]), col="red", lty=3)
lines(density(Z[B==1]), col="black", lty=3, lwd=2)
legend("topright", legend=c("Component 1", "Component 2", "Component 3", "Component 4", "All Components", "Non-Hits", "Hits"),
col=c("blue", "green", "yellow", "red", "black", "black", "black"), lty=c(1, 1, 1, 1, 1, 2, 3), lwd=c(1, 1, 1, 1, 1, 2, 2))
## ----echo=TRUE-----------------------------------------------------------
#library(BHTSpack)
#library(pROC)
#library(sights)
#score = function(t, sdat, B){
# res = unlist(lapply(sdat, as.vector))
# ind = rep(0, length(res))
# ind[res>t] = 1
# a = auc(B, ind)
# return(a)
#}
### Left Column
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.1, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.1, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)
## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 21, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.5, 0.75))
#abline(v=btmax, col="red", lty=2)
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=btmax)
#r = seq(-4, 21, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))
## Bottom plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))
### Middle Column
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.05, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.05, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)
## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 21, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.5, 0.75))
#abline(v=btmax, col="red", lty=2)
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=btmax)
#r = seq(-5, 26, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))
## Bottom plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))
### Right Column
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.01, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
#Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.01, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)
## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 23, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.5, 0.75))
#abline(v=btmax, col="red", lty=2)
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=btmax)
#r = seq(-5, 28, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#axis(1, at=c(-5, 5, 10, 15))
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))
## Bottom plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))
## ----echo=TRUE-----------------------------------------------------------
library(BHTSpack)
#library(pROC)
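# Note: aucfunc() below relies on auc() from the pROC package (left commented out above);
# load pROC before calling it.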
aucfunc = function(dat, B){
Btab = data.frame(hitind=unlist(B))
Btab = data.frame(IDmatch=rownames(Btab), Btab)
Res = merge(dat, Btab, by="IDmatch")
return(auc(Res[["hitind"]], Res[["hatpai"]]))
}
## Left plot
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.1, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
mu = mean(unlist(Z[["Z"]]))
mu00 = seq(mu, 0, -mu/25)
mu10 = seq(mu, 2*mu, mu/25)
#res = lapply(1:25, function(x){print(x); res=bhts(Z[["Z"]], iters=7000, H=10, K=10, mu00[x], mu10[x], a.alpha=10,
#b.alpha=5, a.tau=10, b.tau=5, s=1234); return(res);})
#hatpai = lapply(res, function(x){unlist(x[["hatpai"]])})
#hatpai = lapply(hatpai, function(x){data.frame(IDmatch=names(x), hatpai=x)})
#AUC = unlist(lapply(hatpai, aucfunc, Z[["B"]]))
#plot((mu10-mu00)[1:25], AUC, pch=16, xlab=expression(paste(mu[1][0]-mu[0][0])), cex=1.5, cex.lab=1.5, ylim=c(0.8, 0.9))
#abline(v=mu, col="red", lty=2, lwd=2)
## Middle plot
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.05, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
mu = mean(unlist(Z[["Z"]]))
mu00 = seq(mu, 0, -mu/25)
mu10 = seq(mu, 2*mu, mu/25)
#res = lapply(1:25, function(x){print(x); res=bhts(Z[["Z"]], iters=7000, H=10, K=10, mu00[x], mu10[x], a.alpha=10,
#b.alpha=5, a.tau=10, b.tau=5, s=1234); return(res);})
#hatpai = lapply(res, function(x){unlist(x[["hatpai"]])})
#hatpai = lapply(hatpai, function(x){data.frame(IDmatch=names(x), hatpai=x)})
#AUC = unlist(lapply(hatpai, aucfunc, Z[["B"]]))
#plot((mu10-mu00)[1:25], AUC, pch=16, xlab=expression(paste(mu[1][0]-mu[0][0])), cex=1.5, cex.lab=1.5, ylim=c(0.8, 0.9))
#abline(v=mu, col="red", lty=2, lwd=2)
## Right plot
Z = data.create(N=80, nr=8, nc=10, M=1000, p=0.01, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
mu = mean(unlist(Z[["Z"]]))
mu00 = seq(mu, 0, -mu/25)
mu10 = seq(mu, 2*mu, mu/25)
#res = lapply(1:25, function(x){print(x); res=bhts(Z[["Z"]], iters=7000, H=10, K=10, mu00[x], mu10[x], a.alpha=10,
#b.alpha=5, a.tau=10, b.tau=5, s=1234); return(res);})
#hatpai = lapply(res, function(x){unlist(x[["hatpai"]])})
#hatpai = lapply(hatpai, function(x){data.frame(IDmatch=names(x), hatpai=x)})
#AUC = unlist(lapply(hatpai, aucfunc, Z[["B"]]))
#plot((mu10-mu00)[1:25], AUC, pch=16, xlab=expression(paste(mu[1][0]-mu[0][0])), cex=1.5, cex.lab=1.5, ylim=c(0.8, 0.9))
#abline(v=mu, col="red", lty=2, lwd=2)
## ----echo=TRUE-----------------------------------------------------------
#library(BHTSpack)
#library(pROC)
#library(sights)
#score = function(t, sdat, B){
# res = unlist(lapply(sdat, as.vector))
# ind = rep(0, length(res))
# ind[res>t] = 1
# a = auc(B, ind)
# return(a)
#}
#Z = data.create(N=80, nr=8, nc=10, M=5000, p=0.00021, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"))
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
#Z = data.create(N=80, nr=8, nc=10, M=5000, p=0.00021, s=1234, covrow=read.csv("covrow.csv"), covcol=read.csv("covcol.csv"), mat=TRUE)
## Top plot
#bs = unlist(lapply(Z[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
#rs = unlist(lapply(Z[["Z"]], function(x){matrix(normR(as.vector(t(x)), 8, 10), 8, 10, byrow=TRUE)}))
#summary(rs)
#r = seq(-4, 30, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Z[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#r = seq(-5, 29, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Z[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Z[["B"]]), bhitind, col="red")
#lines.roc(unlist(Z[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Z[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Z[["B"]]), bhitind), 3),
#")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))
## ----echo=TRUE-----------------------------------------------------------
#library(BHTSpack)
#library(pROC)
#library(sights)
#library(gdata)
#score = function(t, s, B){
# ind = rep(0, length(s))
# ind[s>t] = 1
# a = auc(B, ind)
# return(a)
#}
## It is assumed that data files are in a folder "temp"
## read data
#dat = read.csv("temp/EColiFilamentation2006_screeningdata.csv", sep="\t")
#dim(dat)
## read hit indicators
#hits = read.csv("temp/CompoundSearchResults.csv", sep=",")
#dim(hits)
#hits = data.frame(hits, hits=rep(1,nrow(hits)))
#hits = data.frame(ChembankId=hits[["ChemBank.Id"]], hitind=rep(1,nrow(hits)))
## merge with hit indicator
#dat = merge(dat, hits, by="ChembankId", all.x=TRUE)
#dim(dat)
#dat[["hitind"]][is.na(dat[["hitind"]])] = 0
## merge with map
#map = read.xls("map.xlsx")
#dat = merge(dat, map, by="AssayName")
## Organism DRC39 at 24h
#dat = subset(dat, Organism=="DRC39" & ExpTime=="24h")
#plates = unique(as.character(dat[["Plate"]]))
#unique(as.character(dat[["WellType"]]))
#dat = subset(dat, WellType=="compound-treatment")
#dat = lapply(plates, function(x){d=subset(dat, Plate==x)})
#names(dat) = plates
#l = unlist(lapply(dat, nrow))
#table(l)
## include only 352-well plates
#dat = dat[l==352]
#unique(as.character(unlist(lapply(dat, function(x){x$AssayName}))))
#sum(is.na(unlist(lapply(dat, function(x){x$RawValueA}))))
#sum(unlist(lapply(dat, function(x){x$hitind})))
#sum(!is.na(unlist(lapply(dat, function(x){x$RawValueA}))))
## sorting wells row-wise
#dat = lapply(dat, function(x){ix=sort.int(as.character(x[["Well"]]), index.return=TRUE)[["ix"]]; return(x[ix,]);})
## extracting raw values, hit indicators and well names
#Z = lapply(dat, function(x){x[["RawValueA"]]})
#B = lapply(dat, function(x){x[["hitind"]]})
#W = lapply(dat, function(x){x[["Well"]]})
## constructing plates of raw values, row-wise
#Z = lapply(Z, function(x){matrix(x, 16, 22, byrow=TRUE)})
## naming rows and columns of plates
#Z = lapply(Z, function(x){rownames(x)=LETTERS[1:16]; colnames(x)=formatC(seq(1,22),flag=0,digits=1); return(x);})
## constructing plates of indicator variables (row-wise) and vectorizing (column-wise) each plate
#B = lapply(B, function(x){as.vector(matrix(x, 16, 22, byrow=TRUE))})
## constructing plates of well names (row-wise) and vectorizing (column-wise) each plate
#W = lapply(W, function(x){as.vector(matrix(x, 16, 22, byrow=TRUE))})
## Left plot
#plot(density(unlist(Z)[unlist(B)==0]), col="blue", ylab="Density", main="", xlim=range(unlist(Z)), xlab="Raw Value")
#lines(density(unlist(Z)[unlist(B)==1]), col="red")
#legend("topright", legend=c("Non-Hits", "Hits"), col=c("blue", "red"), lty=c(1,1))
## normalizing plates of raw values
#Z = lapply(Z, function(x){(x-mean(x))/sd(x)})
## naming indicator variables
#bn = names(B)
#B = lapply(1:length(B), function(x){names(B[[x]])=W[[x]]; return(B[[x]]);})
#names(B) = bn
## construct object for B-score and R-score methods
#Zmat = list(Z=Z, B=B)
## construct object for BHTS method
## vectorizing (column-wise) each plate of raw values and naming them with well names
#zn = names(Z)
#Z = lapply(1:length(Z), function(x){d=as.vector(Z[[x]]); names(d)=W[[x]]; return(d);})
#names(Z) = zn
#Z = list(Z=Z, B=B)
## Run BHTS
#system.time(b.est <- bhts(Z[["Z"]], iters=7000, H=10, K=10, a.alpha=10, b.alpha=5, a.tau=10, b.tau=5, s=1234, store=TRUE))
#hatpai = unlist(b.est[["hatpai"]])
#res = data.frame(IDmatch=names(hatpai), hatpai)
#Btab = data.frame(IDmatch=names(unlist(Z[["B"]])), hitind=unlist(Z[["B"]]))
#res = merge(res, Btab, by="IDmatch")
## Run B-score
#bs = unlist(lapply(Zmat[["Z"]], function(x){medpolish(x)[["residuals"]]/mad(x)}))
#summary(bs)
## Middle plot
#r = seq(-31, 9, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, bs, unlist(Zmat[["B"]]))}))
#summary(AUC)
#btmax = r[which.max(AUC)]
#plot(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="red", ylim=c(0.44, 0.56))
#abline(v=btmax, col="red", lty=2)
#axis(1)
#axis(1, at=btmax)
## Run R-score
#rs = unlist(lapply(Zmat[["Z"]], function(x){matrix(normR(as.vector(t(x)), 16, 22), 16, 22, byrow=TRUE)}))
#summary(rs)
#r = seq(-45, 29, 0.5)
#AUC = unlist(lapply(r, function(x){score(x, rs, unlist(Zmat[["B"]]))}))
#summary(AUC)
#rtmax = r[which.max(AUC)]
#lines(r, AUC, type="l", xlab="Threshold", ylab="AUC", lwd=2, xaxt="n", col="green")
#abline(v=rtmax, col="green", lty=2)
#axis(1, at=rtmax)
#legend("topright", legend=c("R-score", "B-score"), col=c("green", "red"), lty=c(1,1))
## Right plot
#rhitind = rep(0, length(rs))
#rhitind[rs>rtmax] = 1
#bhitind = rep(0, length(bs))
#bhitind[bs>btmax] = 1
#plot.roc(res[["hitind"]], res[["hatpai"]], col="blue")
#lines.roc(unlist(Zmat[["B"]]), bhitind, col="red")
#lines.roc(unlist(Zmat[["B"]]), rhitind, col="green")
#legend("bottomright", legend=c(paste("BHTS", " (AUC=", round(auc(res[["hitind"]], res[["hatpai"]]), 3), ")", sep=""), paste("R-score",
#" (AUC=", round(auc(unlist(Zmat[["B"]]), rhitind), 3), ")", sep=""), paste("B-score", " (AUC=", round(auc(unlist(Zmat[["B"]]), bhitind),
#3), ")", sep="")), col=c("blue", "green", "red"), lty=c(1,1,1))
|
/data/genthat_extracted_code/BHTSpack/vignettes/BHTSpackManual.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 19,770 |
r
|
#########################################
# Try it with the Economy Watchers Survey data
library(lda)
library(reshape2)
library(ggplot2)
library(RMeCab)
library(RMySQL)
con<-dbConnect(dbDriver("MySQL"),dbname="watcher",host="zaaa16d.qr.com",user="root")
dbGetQuery(con,"set names utf8")
data.tmp<-dbSendQuery(con,"select * from now_description")
data.now<-fetch(data.tmp,n=-1)
dbDisconnect(con)
|
/senti/watcher_lda.R
|
no_license
|
oleglr/forecast
|
R
| false | false | 389 |
r
|
library(faraway)
library(tidyverse)
library(KernSmooth)
# Fix exb data
exb <- as_tibble(exb)
## Loess
smr <- loess(waiting ~ eruptions, data=faithful)
ggplot(faithful) +
geom_point(aes(x=eruptions,y=waiting)) +
ggtitle("Old Faithful (Loess, span=0.75)") +
geom_line(aes(x=eruptions, y=fitted(smr)), col='blue')
smr <- loess(y ~ x, data=exa, span=0.22)
ggplot(exa) +
geom_point(aes(x=x,y=y)) +
ggtitle("Example A (Loess, span=0.75)") +
geom_line(aes(x=x, y=fitted(smr)), col='blue') +
geom_line(aes(x=x,y=m), col='red')
smr <- loess(y ~ x, data=exb, family='symmetric')
ggplot(as.data.frame(exb)) +
geom_point(aes(x=x,y=y)) +
ggtitle("Example B (Robust Loess, span=0.75)") +
geom_line(aes(x=x, y=fitted(smr)), col='blue') +
geom_line(aes(x=x,y=m), col='red')
## geom_smooth (uses non-robust loess)
ggplot(faithful) +
geom_point(aes(x=eruptions,y=waiting)) +
geom_smooth(aes(x=eruptions,y=waiting), span=0.3)
ggplot(exa) +
geom_point(aes(x=x,y=y)) +
geom_smooth(aes(x=x,y=y), method='loess',
span=0.22)
ggplot(exb) +
geom_point(aes(x=x,y=y)) +
geom_smooth(aes(x=x,y=y), method='loess')
# Smoothing splines
lambda <- 0.001
smr <- smooth.spline(faithful$eruptions, faithful$waiting, lambda=lambda)
smr <- data.frame(x=smr$x,y=smr$y)
ggplot(faithful) +
geom_point(aes(x=eruptions,y=waiting)) +
ggtitle(paste("Old Faithful (Smoothing spline, lambda=",lambda, sep="")) +
geom_line(data=smr, aes(x=x, y=y), col='blue')
smr <- smooth.spline(faithful$eruptions, faithful$waiting, cv=TRUE)
smr <- data.frame(x=smr$x,y=smr$y)
ggplot(faithful) +
geom_point(aes(x=eruptions,y=waiting)) +
ggtitle("Old Faithful (Smoothing spline, lambda chosen by CV)") +
geom_line(data=smr, aes(x=x, y=y), col='blue')
smr <- smooth.spline(exa$x,exa$y, cv=TRUE)
smr <- data.frame(x=smr$x,y=smr$y)
ggplot(exa) +
geom_point(aes(x=x,y=y)) +
ggtitle("Example A (Smoothing spline, lambda chosen by CV)") +
geom_line(data=smr, aes(x=x, y=y), col='blue') +
geom_line(aes(x=x,y=m), col='red')
smr <- smooth.spline(exb$x,exb$y, cv=TRUE)
smr <- data.frame(x=smr$x,y=smr$y)
ggplot(exb) +
geom_point(aes(x=x,y=y)) +
ggtitle("Example B (Smoothing spline, lambda chosen by CV)") +
geom_line(data=smr, aes(x=x, y=y), col='blue') +
geom_line(aes(x=x,y=m), col='red')
## Regression splines
library(splines)
fit <- lm(waiting ~ ns(eruptions, df=6), faithful)
ggplot(faithful) +
geom_point(aes(x=eruptions,y=waiting)) +
ggtitle("Old Faithful (Natural splines, 6 df)") +
geom_line(aes(x=eruptions, y=fitted(fit)), col='blue')
fit <- lm(y ~ ns(x, knots=c(0.5, 0.65,0.75, 0.8,0.9)), exa)
ggplot(exa) +
geom_point(aes(x=x,y=y)) +
ggtitle("Example A (Natural splines, 12 df)") +
geom_line(aes(x=x, y=fitted(fit)), col='blue') +
geom_line(aes(x=x,y=m), col='red')
fit <- lm(y ~ ns(x, df=3), exb)
ggplot(as.data.frame(exb)) +
geom_point(aes(x=x,y=y)) +
ggtitle("Example B (Natural splines, 3 df)") +
geom_line(aes(x=x, y=fitted(fit)), col='blue') +
geom_line(aes(x=x,y=m), col='red')
ggplot(exa) +
geom_point(aes(x=x,y=y)) +
geom_smooth(aes(x=x,y=y), method='gam',
formula = y ~ s(x,k=12))
lomod <- loess(sr ~ pop15 + ddpi, data=savings)
xg <- seq(21,48,len=20)
yg <- seq(0,17,len=20)
zg <- expand.grid(pop15=xg,ddpi=yg)
par(mar=c(0,0,0,0))
persp(xg, yg, predict(lomod, zg), theta=-30,
ticktype="detailed", col=heat.colors(500),
xlab="pop15", ylab="ddpi", zlab="savings rate")
smod <- mgcv::gam(sr ~ s(pop15, ddpi), data=savings)
mgcv::vis.gam(smod, ticktype="detailed",theta=-30)
|
/Examples/2018-09-18.R
|
no_license
|
mnblanco/Forecasting
|
R
| false | false | 3,574 |
r
|
rm(list=ls())
#loading libraries
library(here)
library(reshape2)
library(dplyr)
library(corrplot)
library(countrycode)
# PHOENIX dataset uses three sources:
source1<-read.csv("/Users/zhanna.terechshenko/MA/DATA/Phoenix/ClineCenterHistoricalPhoenixEventData/PhoenixFBIS_1995-2004.csv")
source2<-read.csv("/Users/zhanna.terechshenko/MA/DATA/Phoenix/ClineCenterHistoricalPhoenixEventData/PhoenixNYT_1945-2005.csv")
source3<-read.csv("/Users/zhanna.terechshenko/MA/DATA/Phoenix/ClineCenterHistoricalPhoenixEventData/PhoenixSWB_1979-2015.csv")
sources <-rbind(source1, source2, source3)
#international
# only GOV and MIL actors included
phoenix.data1 = sources %>%
filter(source_root != target_root) %>%
filter(source_agent=="GOV" | source_agent=="MIL") %>%
filter(source_root!="") %>%
filter(target_agent=="GOV" | target_agent=='MIL') %>%
filter(target_root!="") %>%
filter(is.na(year)==F) %>%
filter(year >=2001 & year <=2014) %>%
filter(source_root!="PSE" & source_root!="HKG" & # I exclude non-recognized states, such as Hong Kong, Palestine,
source_root!="NGO" & source_root!="IGO" & source_root!="MNC" &
source_root!="BMU" & source_root!="ABW" & source_root!="AIA" &
source_root!="COK" & source_root!="CYM") %>%
filter(target_root!="PSE" & target_root!="HKG" &
target_root!="NGO" & target_root!="IGO" & target_root!="MNC" &
target_root!="BMU" & target_root!="ABW" & target_root!="AIA" &
target_root!="COK" & target_root!="CYM") %>%
mutate(cow1 = countrycode(source_root, 'iso3c', 'cown')) %>% # I convert the names of the countries to COW code
mutate(cow1 = ifelse(source_root=='SRB', '345', cow1)) %>%
mutate(cow1 = ifelse(source_root=='TMP', '860', cow1)) %>%
mutate(cow1 = ifelse(source_root=='SUN', '365', cow1)) %>%
mutate(cow1 = ifelse(source_root=='KSV', '347', cow1)) %>%
mutate(cow2 = countrycode(target_root, 'iso3c', 'cown')) %>%
mutate(cow2 = ifelse(target_root=='SRB', '345', cow2)) %>%
mutate(cow2 = ifelse(target_root=='TMP', '860', cow2)) %>%
mutate(cow2 = ifelse(target_root=='SUN', '365', cow2)) %>%
mutate(cow2 = ifelse(target_root=='KSV', '347', cow2)) %>%
mutate(ccode = cow1) %>%
mutate(vcp = ifelse(quad_class==1, 1, 0)) %>% # verbal cooperation
mutate(mcp = ifelse(quad_class==2, 1, 0)) %>% # material cooperation
mutate(vcf = ifelse(quad_class==3, 1, 0)) %>% # verbal conflict
mutate(mcf = ifelse(quad_class==4, 1, 0)) %>% # material conflict
select(ccode, year, month, cow1, cow2, vcp, mcp, vcf, mcf)
phoenix.data2 = phoenix.data1 %>%
mutate(ccode = cow2)
pho = rbind(phoenix.data1, phoenix.data2)
#Aggregate by country-month
pho.data = pho %>%
select(ccode, year, month, vcp, mcp, vcf, mcf) %>%
melt(id.vars = c('ccode','year', 'month')) %>%
dcast(ccode+year+month~variable, fun.aggregate=sum)
names(pho.data)<-c('ccode', 'year', 'month','vcp', 'mcp', 'vcf', 'mcf')
write.csv(pho.data, "pho_international.csv")
# Select domestic crises based on gov/mil vs rebels
phoenix.data3 = sources %>%
filter(source_root == target_root) %>%
filter(source_agent=="GOV" | source_agent=="MIL" | source_agent=="REB") %>%
filter(source_root!="") %>%
filter(target_agent=="GOV" | target_agent=='MIL' | target_agent=="REB") %>%
filter(target_root!="") %>%
filter(is.na(year)==F) %>%
filter(year >=2001 & year <=2014) %>%
filter(source_root!="PSE" & source_root!="HKG" & # exclude non-recognized states
source_root!="NGO" & source_root!="IGO" & source_root!="MNC" &
source_root!="BMU" & source_root!="ABW" & source_root!="AIA" &
source_root!="COK" & source_root!="CYM") %>%
filter(target_root!="PSE" & target_root!="HKG" &
target_root!="NGO" & target_root!="IGO" & target_root!="MNC" &
target_root!="BMU" & target_root!="ABW" & target_root!="AIA" &
target_root!="COK" & target_root!="CYM") %>%
mutate(cow1 = countrycode(source_root, 'iso3c', 'cown')) %>% # convert to cow code
mutate(cow1 = ifelse(source_root=='SRB', '345', cow1)) %>%
mutate(cow1 = ifelse(source_root=='TMP', '860', cow1)) %>%
mutate(cow1 = ifelse(source_root=='SUN', '365', cow1)) %>%
mutate(cow1 = ifelse(source_root=='KSV', '347', cow1)) %>%
mutate(cow2 = countrycode(target_root, 'iso3c', 'cown')) %>%
mutate(cow2 = ifelse(target_root=='SRB', '345', cow2)) %>%
mutate(cow2 = ifelse(target_root=='TMP', '860', cow2)) %>%
mutate(cow2 = ifelse(target_root=='SUN', '365', cow2)) %>%
mutate(cow2 = ifelse(target_root=='KSV', '347', cow2)) %>%
mutate(ccode = cow1) %>%
mutate(vcp = ifelse(quad_class==1, 1, 0)) %>% # verbal cooperation
mutate(mcp = ifelse(quad_class==2, 1, 0)) %>% # material cooperation
mutate(vcf = ifelse(quad_class==3, 1, 0)) %>% # verbal conflict
mutate(mcf = ifelse(quad_class==4, 1, 0)) %>% # material conflict
select(ccode, year, month, vcp, mcp, vcf, mcf)
# Aggregate by country-month
pho.data3 = phoenix.data3 %>%
melt(id.vars = c('ccode','year', 'month')) %>%
dcast(ccode+year+month~variable, fun.aggregate=sum)
write.csv(pho.data3, "pho_domestic.csv")
|
/pho_processing.R
|
no_license
|
ZTerechshenko/Forecasting
|
R
| false | false | 5,152 |
r
|
# set the type to fit
estimator <- "Muthen"
# set the working directory
try({
baseDir <- "/nas/longleaf/home/mgiordan/forumPres"
setwd(baseDir)
})
try({
baseDir <- "C:/users/mgiordan/git/mlmcfasimulation/presentationSim"
setwd(baseDir)
})
# reading in the parameters of the model
simParams <- readRDS("SimParams.rds")
designMatrix <- simParams$designMatrix
iterationsPer <- simParams$iterationsPer
wModelTrue <- simParams$wModelTrue
wModelMis <- simParams$wModelMis
wModelMis1 <- simParams$wModelMis1
wModelMis2 <- simParams$wModelMis2
wModelMis3 <- simParams$wModelMis3
bModelTrue <- simParams$bModelTrue
#----------------------------------------------------------------------------
# Should not need to edit below this line
#----------------------------------------------------------------------------
# load relevant packages
try({
library("lavaan", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
library("MIIVsem", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
library("nlme", lib.loc="/nas/longleaf/home/mgiordan/Rlibs")
})
try({
library("lavaan")
library("MIIVsem")
library("nlme")
})
# source relevant functions
try({
source("SimulationFunctions.R") # for longleaf
})
try({
source("../SimulationFunctions.R") # for my computer
})
# subset just the estimator we want
designMatrix <- designMatrix[which(designMatrix$estimators==estimator),]
for (i in 9001:9200) {
print(i)
  # if the current row is the FIML estimator, skip it because FIML is fit in Mplus
if (designMatrix$estimators[[i]]=="FIML") {
next
}
# set the model spec
if (designMatrix$modelSpec[[i]]=="trueModel") {
wModel <- wModelTrue
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec") {
wModel <- wModelMis
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec1") {
wModel <- wModelMis1
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec2") {
wModel <- wModelMis2
bModel <- bModelTrue
}
if (designMatrix$modelSpec[[i]]=="misSpec3") {
wModel <- wModelMis3
bModel <- bModelTrue
}
# read in data
df <- read.table(designMatrix$dfName[[i]])
names(df) <- c(paste0("y", 1:6), "cluster")
df$id <- 1:nrow(df)
fit <- tryCatch({
mlcfaMIIV(withinModel = wModel,
betweenModel = bModel,
estimator = designMatrix$estimators[[i]],
allIndicators = paste0("y", 1:6),
l1Var = "id",
l2Var = "cluster",
df = df)
}, warning = function(e) {
message(e)
return("model did not fit properly")
}, error = function(e) {
message(e)
return("model did not fit properly")
})
#save as RDS
saveRDS(fit, file = designMatrix$rdsName[[i]])
}
|
/presentationSim/ZsimRun_muthen46.R
|
no_license
|
mlgiordano1/mlmCFASimulation
|
R
| false | false | 2,841 |
r
|
## This set of functions calculates the inverse of a matrix and
## saves the result so it does not need to be recalculated.
## Takes a matrix 'x'
## Returns a list of 4 functions
makeCacheMatrix <- function(x = matrix()) {
##Initialize inverse as null
inv <- NULL
        ## Replaces matrix 'x' with new matrix 'y' and deletes
## any saved inverse
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x ## Returns the matrix 'x'
setSolve <- function(solve) inv <<- solve ## Saves the inverse
getSolve <- function() inv ## Returns the saved inverse
## Return a list of the four functions
list(set = set, get = get,
setSolve = setSolve,
getSolve = getSolve)
}
## Takes a matrix 'x' created with the function makeCacheMatrix
## Returns a matrix that is the inverse of 'x'
cacheSolve <- function(x, ...) {
## Pull the matrix's cache so we can find out if it has
## already been calculated.
inv <- x$getSolve()
## If the inverse of the matrix has already been calculated,
## then return the cached value
if(!is.null(inv)) {
message("Getting cached inverse.")
return(inv)
}
data <- x$get()
## Inverse is not cached, so calculate the inverse
inv <- solve(data, ...)
## Save the inverse with setSolve() so we do not have to
## calculate it again
x$setSolve(inv)
inv
}
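## Illustrative usage sketch (not part of the original file): the 2x2 matrix below
## is an arbitrary invertible example chosen only to show the caching behaviour.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)   # first call computes the inverse via solve() and caches it
cacheSolve(cm)   # second call prints "Getting cached inverse." and reuses the cache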
|
/cachematrix.R
|
no_license
|
Tyrannactus/ProgrammingAssignment2
|
R
| false | false | 1,452 |
r
|
#' @title Tuning Functional Neural Networks
#'
#' @description
#' A convenience function for the user that implements a simple grid search for the purpose of tuning. For each combination
#' in the grid, a cross-validated error is calculated. The best combination is returned along with additional information.
#' This function only works for scalar responses.
#'
#' @return The following are returned:
#'
#' `Parameters` -- The final list of hyperparameters chosen by the tuning process.
#'
#' `All_Information` -- A list object containing the errors for every combination in the grid. Each element of the list
#' corresponds to a different choice of number of hidden layers.
#'
#' `Best_Per_Layer` -- An object that returns the best parameter combination for each choice of hidden layers.
#'
#' `Grid_List` -- An object containing information about all combinations tried by the tuning process.
#'
#' @details No additional details for now.
#'
#' @param tune_list This is a list object containing the values from which to develop the grid. For each of the hyperparameters
#' that can be tuned for (`num_hidden_layers`, `neurons`, `epochs`, `val_split`, `patience`, `learn_rate`, `num_basis`,
#' `activation_choice`), the user inputs a set of values to try. Note that the combinations are found based on the number of
#' hidden layers. For example, if `num_hidden_layers` = 3 and `neurons` = c(8, 16), then the combinations will begin as
#' c(8, 8, 8), c(8, 8, 16), ..., c(16, 16, 16). Example provided below.
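#'
#' As a rough illustration (this snippet is not run by the package), the neuron combinations for
#' `num_hidden_layers` = 3 and `neurons` = c(8, 16) are the 2^3 = 8 rows of the grid that the tuner
#' expands internally with base R:
#' \preformatted{
#' expand.grid(rep(list(c(8, 16)), 3), stringsAsFactors = FALSE)
#' }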
#'
#' @param resp For scalar responses, this is a vector of the observed dependent variable. For functional responses,
#' this is a matrix where each row contains the basis coefficients defining the functional response (for each observation).
#'
#' @param func_cov The form of this depends on whether the `raw_data` argument is true or not. If true, then this is
#' a list of k matrices. The dimensionality of the matrices should be the same (n x p) where n is the number of
#' observations and p is the number of longitudinal observations. If `raw_data` is false, then the input should be a tensor
#' with dimensionality b x n x k where b is the number of basis functions used to define the functional covariates, n is
#' the number of observations, and k is the number of functional covariates.
#'
#' @param scalar_cov A matrix contained the multivariate information associated with the data set. This is all of your
#' non-longitudinal data.
#'
#' @param basis_choice A vector of size k (the number of functional covariates) with either "fourier" or "bspline" as the inputs.
#' This is the choice for the basis functions used for the functional weight expansion. If you only specify one, with k > 1,
#' then the argument will repeat that choice for all k functional covariates.
#'
#' @param domain_range List of size k. Each element of the list is a 2-dimensional vector containing the upper and lower
#' bounds of the k-th functional weight.
#'
#' @param batch_size Size of the batch for stochastic gradient descent.
#'
#' @param decay_rate A modification to the learning rate that decreases the learning rate as more and more learning
#' iterations are completed.
#'
#' @param nfolds The number of folds to be used in the cross-validation process.
#'
#' @param cores For the purpose of parallelization.
#'
#' @param raw_data If TRUE, then user does not need to create functional observations beforehand. The function will
#' internally take care of that pre-processing.
#'
#' @examples
#' \donttest{
#' # libraries
#' library(fda)
#'
#' # Loading data
#' data("daily")
#'
#' # Obtaining response
#' total_prec = apply(daily$precav, 2, mean)
#'
#' # Creating functional data
#' temp_data = array(dim = c(65, 35, 1))
#' tempbasis65 = create.fourier.basis(c(0,365), 65)
#' timepts = seq(1, 365, 1)
#' temp_fd = Data2fd(timepts, daily$tempav, tempbasis65)
#'
#' # Data set up
#' temp_data[,,1] = temp_fd$coefs
#'
#' # Creating grid
#' tune_list_weather = list(num_hidden_layers = c(2),
#' neurons = c(8, 16),
#' epochs = c(250),
#' val_split = c(0.2),
#' patience = c(15),
#' learn_rate = c(0.01, 0.1),
#' num_basis = c(7),
#' activation_choice = c("relu", "sigmoid"))
#'
#' # Running Tuning
#' weather_tuned = fnn.tune(tune_list_weather,
#' total_prec,
#' temp_data,
#' basis_choice = c("fourier"),
#' domain_range = list(c(1, 24)),
#' nfolds = 2)
#'
#' # Looking at results
#' weather_tuned
#' }
#'
#' @export
# @import keras tensorflow fda.usc fda ggplot2 ggpubr caret pbapply reshape2 flux Matrix doParallel
# Grid-search tuning of FNN hyperparameters; each combination is scored by cross-validated error
fnn.tune = function(tune_list,
resp,
func_cov,
scalar_cov = NULL,
basis_choice,
domain_range,
batch_size = 32,
decay_rate = 0,
nfolds = 5,
cores = 4,
raw_data = FALSE){
# Parallel apply set up
#plan(multiprocess, workers = cores)
#### Output size
if(is.vector(resp) == TRUE){
output_size = 1
} else {
output_size = ncol(resp)
}
if(raw_data == TRUE){
dim_check = length(func_cov)
} else {
dim_check = dim(func_cov)[3]
}
#### Creating functional observations in the case of raw data
if(raw_data == TRUE){
# Taking in data
dat = func_cov
# Setting up array
temp_tensor = array(dim = c(31, nrow(dat[[1]]), length(dat)))
for (t in 1:length(dat)) {
# Getting appropriate obs
curr_func = dat[[t]]
# Getting current domain
curr_domain = domain_range[[1]] # BE CAREFUL HERE - ALL DOMAINS NEED TO BE THE SAME IN THIS CASE
# Creating basis (using bspline)
basis_setup = create.bspline.basis(rangeval = c(curr_domain[1], curr_domain[2]),
nbasis = 31,
norder = 4)
# Time points
time_points = seq(curr_domain[1], curr_domain[2], length.out = ncol(curr_func))
# Making functional observation
temp_fd = Data2fd(time_points, t(curr_func), basis_setup)
# Storing data
temp_tensor[,,t] = temp_fd$coefs
}
# Saving as appropriate names
func_cov = temp_tensor
}
if(output_size == 1){
# Setting up function
tune_func = function(x,
nfolds,
resp,
func_cov,
scalar_cov,
basis_choice,
domain_range,
batch_size,
decay_rate,
raw_data){
# Setting seed
use_session_with_seed(
1,
disable_gpu = FALSE,
disable_parallel_cpu = FALSE,
quiet = TRUE
)
# Clearing irrelevant information
colnames(x) <- NULL
rownames(x) <- NULL
# Running model
model_results = fnn.cv(nfolds,
resp,
func_cov = func_cov,
scalar_cov = scalar_cov,
basis_choice = basis_choice,
num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
hidden_layers = current_layer,
neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
activations_in_layers = as.character(x[1:current_layer]),
domain_range = domain_range,
epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
loss_choice = "mse",
metric_choice = list("mean_squared_error"),
val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])),
patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
early_stopping = TRUE,
print_info = FALSE,
batch_size = batch_size,
decay_rate = decay_rate,
raw_data = FALSE)
# Putting together
list_returned <- list(MSPE = model_results$MSPE$Overall_MSPE,
num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
hidden_layers = current_layer,
neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
activations_in_layers = as.character(x[1:current_layer]),
epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])))
# Clearing backend
K <- backend()
K$clear_session()
# Returning
return(list_returned)
}
# Saving MSPEs
Errors = list()
All_Errors = list()
Grid_List = list()
# Setting up tuning parameters
for (i in 1:length(tune_list$num_hidden_layers)) {
# Current layer number
current_layer = tune_list$num_hidden_layers[i]
# Creating data frame of list
df = expand.grid(rep(list(tune_list$neurons), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
df2 = expand.grid(rep(list(tune_list$num_basis), length(basis_choice)), stringsAsFactors = FALSE)
df3 = expand.grid(rep(list(tune_list$activation_choice), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
colnames(df2)[length(basis_choice)] <- "Var2.y"
colnames(df3)[i] <- "Var2.z"
# Getting grid
pre_grid = expand.grid(df$Var1,
Var2.y = df2$Var2.y,
Var2.z = df3$Var2.z,
tune_list$epochs,
tune_list$val_split,
tune_list$patience,
tune_list$learn_rate)
# Merging
combined <- unique(merge(df, pre_grid, by = "Var1"))
combined2 <- unique(merge(df2, combined, by = "Var2.y"))
final_grid <- suppressWarnings(unique(merge(df3, combined2, by = "Var2.z")))
# Saving grid
Grid_List[[i]] = final_grid
# Now, we can pass on the combinations to the model
results = pbapply(final_grid, 1, tune_func,
nfolds = nfolds,
resp = resp,
func_cov = func_cov,
scalar_cov = scalar_cov,
basis_choice = basis_choice,
domain_range = domain_range,
batch_size = batch_size,
decay_rate = decay_rate,
raw_data = FALSE)
# Initializing
MSPE_vals = c()
# Collecting results
for (u in 1:length(results)) {
MSPE_vals[u] <- as.vector(results[[u]][1])
}
# All Errors
All_Errors[[i]] = results
# Getting best
Errors[[i]] = results[[which.min(do.call(c, MSPE_vals))]]
# Printing where we are at
cat("\n")
message(paste0("Done tuning for: ", current_layer, " hidden layers."))
}
# Initializing
MSPE_after = c()
# Getting best set of parameters
for (i in 1:length(tune_list$num_hidden_layers)) {
MSPE_after[i] = Errors[[i]]$MSPE
}
# Selecting minimum
best = which.min(MSPE_after)
# Returning best set of parameters
return(list(Parameters = Errors[[best]],
All_Information = All_Errors,
Best_Per_Layer = Errors,
Grid_List = Grid_List))
} else {
stop("Tuning isn't available yet for functional responses")
}
}
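# --- Illustrative sketch (added for clarity; not part of the original source) ---
# The raw_data branch above converts each raw n x p matrix of longitudinal
# observations into a 31 x n matrix of B-spline coefficients. A minimal
# standalone version of that conversion, assuming a single hypothetical raw
# matrix `raw_mat` observed on a common domain c(0, 1):
library(fda)
raw_mat <- matrix(rnorm(10 * 50), nrow = 10, ncol = 50)           # 10 curves, 50 time points each
dom <- c(0, 1)
bspline_basis <- create.bspline.basis(rangeval = dom, nbasis = 31, norder = 4)
time_pts <- seq(dom[1], dom[2], length.out = ncol(raw_mat))
fd_obj <- Data2fd(time_pts, t(raw_mat), bspline_basis)            # columns of t(raw_mat) are curves
dim(fd_obj$coefs)                                                 # 31 basis coefficients x 10 curves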
|
/R/fnn.tune.R
|
no_license
|
b-thi/FuncNN
|
R
| false | false | 12,958 |
r
|
#' @title Tuning Functional Neural Networks
#'
#' @description
#' A convenience function for the user that implements a simple grid search for the purpose of tuning. For each combination
#' in the grid, a cross-validated error is calculated. The best combination is returned along with additional information.
#' This function only works for scalar responses.
#'
#' @return The following are returned:
#'
#' `Parameters` -- The final list of hyperparameters chosen by the tuning process.
#'
#' `All_Information` -- A list object containing the errors for every combination in the grid. Each element of the list
#' corresponds to a different choice of number of hidden layers.
#'
#' `Best_Per_Layer` -- An object that returns the best parameter combination for each choice of hidden layers.
#'
#' `Grid_List` -- An object containing information about all combinations tried by the tuning process.
#'
#' @details No additional details for now.
#'
#' @param tune_list This is a list object containing the values from which to develop the grid. For each of the hyperparameters
#' that can be tuned for (`num_hidden_layers`, `neurons`, `epochs`, `val_split`, `patience`, `learn_rate`, `num_basis`,
#' `activation_choice`), the user inputs a set of values to try. Note that the combinations are found based on the number of
#' hidden layers. For example, if `num_hidden_layers` = 3 and `neurons` = c(8, 16), then the combinations will begin as
#' c(8, 8, 8), c(8, 8, 16), ..., c(16, 16, 16). Example provided below.
#'
#' @param resp For scalar responses, this is a vector of the observed dependent variable. For functional responses,
#' this is a matrix where each row contains the basis coefficients defining the functional response (for each observation).
#'
#' @param func_cov The form of this depends on whether the `raw_data` argument is true or not. If true, then this is
#' a list of k matrices. The dimensionality of the matrices should be the same (n x p) where n is the number of
#' observations and p is the number of longitudinal observations. If `raw_data` is false, then the input should be a tensor
#' with dimensionality b x n x k where b is the number of basis functions used to define the functional covariates, n is
#' the number of observations, and k is the number of functional covariates.
#'
#' @param scalar_cov A matrix containing the multivariate information associated with the data set. This is all of your
#' non-longitudinal data.
#'
#' @param basis_choice A vector of size k (the number of functional covariates) with either "fourier" or "bspline" as the inputs.
#' This is the choice for the basis functions used for the functional weight expansion. If you only specify one, with k > 1,
#' then the argument will repeat that choice for all k functional covariates.
#'
#' @param domain_range List of size k. Each element of the list is a 2-dimensional vector containing the upper and lower
#' bounds of the k-th functional weight.
#'
#' @param batch_size Size of the batch for stochastic gradient descent.
#'
#' @param decay_rate A modification to the learning rate that decreases the learning rate as more and more learning
#' iterations are completed.
#'
#' @param nfolds The number of folds to be used in the cross-validation process.
#'
#' @param cores For the purpose of parallelization.
#'
#' @param raw_data If TRUE, then the user does not need to create functional observations beforehand. The function will
#' internally take care of that pre-processing.
#'
#' @examples
#' \donttest{
#' # libraries
#' library(fda)
#'
#' # Loading data
#' data("daily")
#'
#' # Obtaining response
#' total_prec = apply(daily$precav, 2, mean)
#'
#' # Creating functional data
#' temp_data = array(dim = c(65, 35, 1))
#' tempbasis65 = create.fourier.basis(c(0,365), 65)
#' timepts = seq(1, 365, 1)
#' temp_fd = Data2fd(timepts, daily$tempav, tempbasis65)
#'
#' # Data set up
#' temp_data[,,1] = temp_fd$coefs
#'
#' # Creating grid
#' tune_list_weather = list(num_hidden_layers = c(2),
#' neurons = c(8, 16),
#' epochs = c(250),
#' val_split = c(0.2),
#' patience = c(15),
#' learn_rate = c(0.01, 0.1),
#' num_basis = c(7),
#' activation_choice = c("relu", "sigmoid"))
#'
#' # Running Tuning
#' weather_tuned = fnn.tune(tune_list_weather,
#' total_prec,
#' temp_data,
#' basis_choice = c("fourier"),
#' domain_range = list(c(1, 24)),
#' nfolds = 2)
#'
#' # Looking at results
#' weather_tuned
#' }
#'
#' @export
# @import keras tensorflow fda.usc fda ggplot2 ggpubr caret pbapply reshape2 flux Matrix doParallel
# Grid-search tuning for functional neural networks (scalar responses only)
fnn.tune = function(tune_list,
resp,
func_cov,
scalar_cov = NULL,
basis_choice,
domain_range,
batch_size = 32,
decay_rate = 0,
nfolds = 5,
cores = 4,
raw_data = FALSE){
# Parallel apply set up
#plan(multiprocess, workers = cores)
#### Output size
if(is.vector(resp) == TRUE){
output_size = 1
} else {
output_size = ncol(resp)
}
if(raw_data == TRUE){
dim_check = length(func_cov)
} else {
dim_check = dim(func_cov)[3]
}
#### Creating functional observations in the case of raw data
if(raw_data == TRUE){
# Taking in data
dat = func_cov
# Setting up array
temp_tensor = array(dim = c(31, nrow(dat[[1]]), length(dat)))
for (t in 1:length(dat)) {
# Getting appropriate obs
curr_func = dat[[t]]
# Getting current domain
curr_domain = domain_range[[1]] # BE CAREFUL HERE - ALL DOMAINS NEED TO BE THE SAME IN THIS CASE
# Creating basis (using bspline)
basis_setup = create.bspline.basis(rangeval = c(curr_domain[1], curr_domain[2]),
nbasis = 31,
norder = 4)
# Time points
time_points = seq(curr_domain[1], curr_domain[2], length.out = ncol(curr_func))
# Making functional observation
temp_fd = Data2fd(time_points, t(curr_func), basis_setup)
# Storing data
temp_tensor[,,t] = temp_fd$coefs
}
# Saving as appropriate names
func_cov = temp_tensor
}
if(output_size == 1){
# Setting up function
tune_func = function(x,
nfolds,
resp,
func_cov,
scalar_cov,
basis_choice,
domain_range,
batch_size,
decay_rate,
raw_data){
# Setting seed
use_session_with_seed(
1,
disable_gpu = FALSE,
disable_parallel_cpu = FALSE,
quiet = TRUE
)
# Clearing irrelevant information
colnames(x) <- NULL
rownames(x) <- NULL
# Running model
model_results = fnn.cv(nfolds,
resp,
func_cov = func_cov,
scalar_cov = scalar_cov,
basis_choice = basis_choice,
num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
hidden_layers = current_layer,
neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
activations_in_layers = as.character(x[1:current_layer]),
domain_range = domain_range,
epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
loss_choice = "mse",
metric_choice = list("mean_squared_error"),
val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])),
patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
early_stopping = TRUE,
print_info = FALSE,
batch_size = batch_size,
decay_rate = decay_rate,
raw_data = FALSE)
# Putting together
list_returned <- list(MSPE = model_results$MSPE$Overall_MSPE,
num_basis = as.numeric(as.character((x[(current_layer + 1):(length(basis_choice) + current_layer)]))),
hidden_layers = current_layer,
neurons_per_layer = as.numeric(as.character(x[(length(basis_choice) + current_layer + 1):((length(basis_choice) + current_layer) + current_layer)])),
activations_in_layers = as.character(x[1:current_layer]),
epochs = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 1])),
val_split = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 2])),
patience_param = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 3])),
learn_rate = as.numeric(as.character(x[((length(basis_choice) + current_layer) + current_layer) + 4])))
# Clearing backend
K <- backend()
K$clear_session()
# Returning
return(list_returned)
}
# Saving MSPEs
Errors = list()
All_Errors = list()
Grid_List = list()
# Setting up tuning parameters
for (i in 1:length(tune_list$num_hidden_layers)) {
# Current layer number
current_layer = tune_list$num_hidden_layers[i]
# Creating data frame of list
df = expand.grid(rep(list(tune_list$neurons), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
df2 = expand.grid(rep(list(tune_list$num_basis), length(basis_choice)), stringsAsFactors = FALSE)
df3 = expand.grid(rep(list(tune_list$activation_choice), tune_list$num_hidden_layers[i]), stringsAsFactors = FALSE)
colnames(df2)[length(basis_choice)] <- "Var2.y"
colnames(df3)[i] <- "Var2.z"
# Getting grid
pre_grid = expand.grid(df$Var1,
Var2.y = df2$Var2.y,
Var2.z = df3$Var2.z,
tune_list$epochs,
tune_list$val_split,
tune_list$patience,
tune_list$learn_rate)
# Merging
combined <- unique(merge(df, pre_grid, by = "Var1"))
combined2 <- unique(merge(df2, combined, by = "Var2.y"))
final_grid <- suppressWarnings(unique(merge(df3, combined2, by = "Var2.z")))
# Saving grid
Grid_List[[i]] = final_grid
# Now, we can pass on the combinations to the model
results = pbapply(final_grid, 1, tune_func,
nfolds = nfolds,
resp = resp,
func_cov = func_cov,
scalar_cov = scalar_cov,
basis_choice = basis_choice,
domain_range = domain_range,
batch_size = batch_size,
decay_rate = decay_rate,
raw_data = FALSE)
# Initializing
MSPE_vals = c()
# Collecting results
for (u in 1:length(results)) {
MSPE_vals[u] <- as.vector(results[[u]][1])
}
# All Errors
All_Errors[[i]] = results
# Getting best
Errors[[i]] = results[[which.min(do.call(c, MSPE_vals))]]
# Printing where we are at
cat("\n")
message(paste0("Done tuning for: ", current_layer, " hidden layers."))
}
# Initializing
MSPE_after = c()
# Getting best set of parameters
for (i in 1:length(tune_list$num_hidden_layers)) {
MSPE_after[i] = Errors[[i]]$MSPE
}
# Selecting minimum
best = which.min(MSPE_after)
# Returning best set of parameters
return(list(Parameters = Errors[[best]],
All_Information = All_Errors,
Best_Per_Layer = Errors,
Grid_List = Grid_List))
} else {
stop("Tuning isn't available yet for functional responses")
}
}
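# --- Illustrative sketch (added for clarity; not part of the original source) ---
# The documentation above notes that tuning combinations are expanded per
# choice of hidden-layer count: with 3 hidden layers and neurons = c(8, 16),
# every layer independently takes one of the candidate values. A minimal
# base-R illustration of that expansion (the values here are hypothetical):
neurons <- c(8, 16)
activations <- c("relu", "sigmoid")
n_layers <- 3
neuron_grid <- expand.grid(rep(list(neurons), n_layers))
activation_grid <- expand.grid(rep(list(activations), n_layers), stringsAsFactors = FALSE)
nrow(neuron_grid)      # 2^3 = 8 rows: (8,8,8), (8,8,16), ..., (16,16,16)
nrow(activation_grid)  # 8 rows; fnn.tune crosses these with epochs, val_split, patience, learn_rate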
|
# Test case 95
Input <- matrix(c(1,2,
1,2,
1,2), byrow = TRUE, nrow = 3);
Output <- matrix(c(4,4,
3,4,
3,4), byrow = TRUE, nrow = 3);
Link <- matrix(c(2,
2,
2), byrow = TRUE, nrow = 3);
K = 2; # 2 divisions
N = 3; # Amount of DMUs
sum_m = 2; # Amount of inputs
sum_r = 2; # Amount of outputs
sum_l = 1; # Amount of Link variables
# Distinguish the Amount vector:
Amount = matrix(c(1,1,1,1,1), byrow=TRUE, nrow=1);
Amount_Input = c(1,1);
Amount_Output = c(1,1);
Amount_Link = c(1);
weights = matrix(c(0.5,0.5), byrow=TRUE, nrow=1);
direction = "non";
link_con = 1; # fix
return_to_scale = "CRS" ;
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
# Loading all the functions:
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 95",{
# Slack_transformation:
weightsNSBM <- matrix(c( 1,2,2,2,2,2,2,3,3,4,4,
2,2,2,2,2,2,2,3,3,4,4,
3,2,2,2,2,2,2,3,3,4,4), byrow = TRUE, nrow = 3);
t <- matrix(c(1,
2,
3), byrow = TRUE, nrow = 3);
lambda <- matrix(c( 2,2,2,2,2,2,
1,1,1,1,1,1,
2/3,2/3,2/3,2/3,2/3,2/3), byrow = TRUE, nrow = 3);
slack_plus <- matrix(c( 3,3,
3/2,3/2,
1,1), byrow = TRUE, nrow = 3);
slack_minus <- matrix(c( 4,4,
2,2,
4/3,4/3), byrow = TRUE, nrow = 3);
# nsbm_division
DivEffNSBM <- matrix(c( -12/7,-4/7,
-1/1.5,0,
-1/4,4/15), byrow = TRUE, nrow = 3);
# projection_frontier
Input_proj <- matrix(c( -3,-2,
-1,0,
-1/3,2/3), byrow = TRUE, nrow = 3);
Output_proj <- matrix(c( 7,7,
4.5,5.5,
4,5), byrow = TRUE, nrow = 3);
Link_proj <- Link;
#########################################
#########################################
#########################################
# slacks_transformation:
expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$t, t, check.attributes = FALSE)
expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$slack_plus, slack_plus, check.attributes = FALSE)
expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$slack_minus, slack_minus, check.attributes = FALSE)
# nsbm.division
expect_equal(nsbm.division(direction, slack_plus, slack_minus, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, Link_obj), DivEffNSBM, check.attributes = FALSE)
# projection.frontier:
expect_equal(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Input_Proj, Input_proj, check.attributes = FALSE)
expect_equal(round(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Output_Proj,3), Output_proj, check.attributes = FALSE)
expect_equal(round(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Link_Proj,4), Link_proj, check.attributes = FALSE)
})
|
/2_nsbm_approach/Nsbm.function/tests/Test_case_95.R
|
no_license
|
thomaskrupa/thesis
|
R
| false | false | 3,838 |
r
|
# Test case 95
Input <- matrix(c(1,2,
1,2,
1,2), byrow = TRUE, nrow = 3);
Output <- matrix(c(4,4,
3,4,
3,4), byrow = TRUE, nrow = 3);
Link <- matrix(c(2,
2,
2), byrow = TRUE, nrow = 3);
K = 2; # 2 divisions
N = 3; # Amount of DMUs
sum_m = 2; # Amount of inputs
sum_r = 2; # Amount of outputs
sum_l = 1; # Amount of Link variables
# Distinguish the Amount vector:
Amount = matrix(c(1,1,1,1,1), byrow=TRUE, nrow=1);
Amount_Input = c(1,1);
Amount_Output = c(1,1);
Amount_Link = c(1);
weights = matrix(c(0.5,0.5), byrow=TRUE, nrow=1);
direction = "non";
link_con = 1; # fix
return_to_scale = "CRS" ;
NIRS = 0;
Link_obj = 0; # No Link variable in the objective function
# Loading all the functions:
setwd(getwd())
setwd("..")
setwd("00_pkg_src")
setwd("Nsbm.function")
setwd("R")
source("load_all_func.R");
load_all_func();
setwd("..")
setwd("..")
setwd("..")
setwd("tests")
test_that("Test case 95",{
# Slack_transformation:
weightsNSBM <- matrix(c( 1,2,2,2,2,2,2,3,3,4,4,
2,2,2,2,2,2,2,3,3,4,4,
3,2,2,2,2,2,2,3,3,4,4), byrow = TRUE, nrow = 3);
t <- matrix(c(1,
2,
3), byrow = TRUE, nrow = 3);
lambda <- matrix(c( 2,2,2,2,2,2,
1,1,1,1,1,1,
2/3,2/3,2/3,2/3,2/3,2/3), byrow = TRUE, nrow = 3);
slack_plus <- matrix(c( 3,3,
3/2,3/2,
1,1), byrow = TRUE, nrow = 3);
slack_minus <- matrix(c( 4,4,
2,2,
4/3,4/3), byrow = TRUE, nrow = 3);
# nsbm_division
DivEffNSBM <- matrix(c( -12/7,-4/7,
-1/1.5,0,
-1/4,4/15), byrow = TRUE, nrow = 3);
# projection_frontier
Input_proj <- matrix(c( -3,-2,
-1,0,
-1/3,2/3), byrow = TRUE, nrow = 3);
Output_proj <- matrix(c( 7,7,
4.5,5.5,
4,5), byrow = TRUE, nrow = 3);
Link_proj <- Link;
#########################################
#########################################
#########################################
# slacks_transformation:
expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$t, t, check.attributes = FALSE)
expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$slack_plus, slack_plus, check.attributes = FALSE)
expect_equal(slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)$slack_minus, slack_minus, check.attributes = FALSE)
# nsbm.division
expect_equal(nsbm.division(direction, slack_plus, slack_minus, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, K, N, sum_m, sum_r, Link_obj), DivEffNSBM, check.attributes = FALSE)
# projection.frontier:
expect_equal(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Input_Proj, Input_proj, check.attributes = FALSE)
expect_equal(round(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Output_Proj,3), Output_proj, check.attributes = FALSE)
expect_equal(round(projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link, Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)$Link_Proj,4), Link_proj, check.attributes = FALSE)
})
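# --- Illustrative note (added for clarity; not part of the original test) ---
# Each expectation above re-evaluates slacks.transformation() or
# projection.frontier() with identical arguments. A sketch of the same checks
# that computes each result once and reuses it (same inputs as in the test;
# shown commented out because it only runs inside the test_that() block above):
# st <- slacks.transformation(direction, weightsNSBM, K, N, sum_m, sum_r, sum_l, Link_obj)
# expect_equal(st$t, t, check.attributes = FALSE)
# expect_equal(st$slack_plus, slack_plus, check.attributes = FALSE)
# expect_equal(st$slack_minus, slack_minus, check.attributes = FALSE)
# proj <- projection.frontier(link_con, slack_plus, slack_minus, lambda, Input, Output, Link,
#                             Amount_Input, Amount_Output, Amount_Link, N, K, sum_m, sum_r, sum_l)
# expect_equal(proj$Input_Proj, Input_proj, check.attributes = FALSE)
# expect_equal(round(proj$Output_Proj, 3), Output_proj, check.attributes = FALSE)
# expect_equal(round(proj$Link_Proj, 4), Link_proj, check.attributes = FALSE)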
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testnestedclang2-package.R
\docType{package}
\name{testnestedclang2-package}
\alias{testnestedclang2}
\alias{testnestedclang2-package}
\title{testnestedclang2: What the Package Does (One Line, Title Case)}
\description{
What the package does (one paragraph).
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/DavisVaughan/testnestedclang2}
\item Report bugs at \url{https://github.com/DavisVaughan/testnestedclang2/issues}
}
}
\author{
\strong{Maintainer}: First Last \email{first.last@example.com} (\href{https://orcid.org/YOUR-ORCID-ID}{ORCID})
}
\keyword{internal}
|
/man/testnestedclang2-package.Rd
|
permissive
|
DavisVaughan/testnestedclang2
|
R
| false | true | 668 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testnestedclang2-package.R
\docType{package}
\name{testnestedclang2-package}
\alias{testnestedclang2}
\alias{testnestedclang2-package}
\title{testnestedclang2: What the Package Does (One Line, Title Case)}
\description{
What the package does (one paragraph).
}
\seealso{
Useful links:
\itemize{
\item \url{https://github.com/DavisVaughan/testnestedclang2}
\item Report bugs at \url{https://github.com/DavisVaughan/testnestedclang2/issues}
}
}
\author{
\strong{Maintainer}: First Last \email{first.last@example.com} (\href{https://orcid.org/YOUR-ORCID-ID}{ORCID})
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drop_zero.R
\name{drop_zero}
\alias{drop_zero}
\title{Drop Zero Count Elements}
\usage{
drop_zero(x, ...)
}
\arguments{
\item{x}{A \code{\link[synonym]{get_synonym}} object.}
\item{\ldots}{ignored.}
}
\value{
Returns a list with \code{NA} elements removed.
}
\description{
The \code{\link[synonym]{get_synonym}} terms that are found in the key but
that do not match the relevant distance return an \code{NA}. This function
conveniently drops these elements.
}
\examples{
get_synonym(c('cat', 'dog', 'chicken', 'dfsf'))
drop_zero(
get_synonym(c('cat', 'dog', 'chicken', 'dfsf'))
)
}
|
/man/drop_zero.Rd
|
no_license
|
trinker/synonym
|
R
| false | true | 667 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drop_zero.R
\name{drop_zero}
\alias{drop_zero}
\title{Drop Zero Count Elements}
\usage{
drop_zero(x, ...)
}
\arguments{
\item{x}{A \code{\link[synonym]{get_synonym}} object.}
\item{\ldots}{ignored.}
}
\value{
Returns a list with \code{NA} elements removed.
}
\description{
The \code{\link[synonym]{get_synonym}} terms that are found in the key but
that do not match the relevant distance return an \code{NA}. This function
conveniently drops these elements.
}
\examples{
get_synonym(c('cat', 'dog', 'chicken', 'dfsf'))
drop_zero(
get_synonym(c('cat', 'dog', 'chicken', 'dfsf'))
)
}
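# --- Illustrative sketch (added for clarity; not the synonym package's implementation) ---
# The .Rd file above documents dropping NA elements from a get_synonym() result.
# A minimal base-R illustration of that idea; drop_zero_sketch and syns are
# hypothetical names introduced here for the example only:
drop_zero_sketch <- function(x, ...) {
  Filter(function(el) !(length(el) == 1 && is.na(el)), x)
}
syns <- list(cat = c("feline", "kitty"), dfsf = NA)
drop_zero_sketch(syns)   # only the 'cat' element remains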
|
# setwd("C:/Users/srinija/Dropbox/Orders")
library(lubridate)
library(zoo)
# library(reshape)
net_orders <- read.csv("./Order(Log).csv", stringsAsFactors = F)
net_orders_i <- subset(net_orders, net_orders$Country != "India" & net_orders$Country != "-")
########################################################## ONLY FOR Present Month -----
p <- as.Date('2017-09-01') ##First day of current month
q<-3 ## start day of the current month for considering cancellations
Y<-'2017' ## cancellation year
r<-as.Date('2017-10-02') ##(next month end day for considering cancellations)
## Gross----
gross_orders_march <- subset(net_orders_i, as.yearmon(net_orders_i$placed_at) == as.yearmon(p) & net_orders_i$Removal_Date == "-")
report <- data.frame(unique(net_orders_i$Country), stringsAsFactors = F)
colnames(report) <- c("Country")
gross_orders_march <- subset(gross_orders_march, gross_orders_march$original_duration_months%%3 == 0)
gross_orders_march <- subset(gross_orders_march, grepl('Ray',gross_orders_march$Master_Plan_2) | grepl('Fabric', gross_orders_march$Master_Plan_2))
##Ray New Acquisitions (Gross)
temp<-subset(gross_orders_march,gross_orders_march$HUR_Flag=="Hunt" & grepl('Ray',gross_orders_march$Master_Plan_2))
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = function(x) length(unique(x)))},error=function(e){print("empty dataframe orders_country")})
colnames(temp) <- c("x", "freq")
report$Ray_New_Acq<-temp$freq[match(report$Country,temp$x)]
##Reach Slots
temp<-subset(gross_orders_march, grepl('Fabric',gross_orders_march$Master_Plan_2))
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = length)},error=function(e){print("empty dataframe reachorders_country")})
colnames(temp) <- c("x", "freq")
report$Reach_slots<-temp$freq[match(report$Country,temp$x)]
##temporary MasterPlan column
gross_orders_march$MasterPlan<-"Fabric"
gross_orders_march$MasterPlan[gross_orders_march$Master_Plan_2 != "Fabric"] <- "Ray"
##Gross billings
tryCatch({temp <- aggregate(Revenue~MasterPlan+Country, data = gross_orders_march, FUN = sum)},error=function(e){print("empty dataframe gross revenue country")})
temp_ray <- subset(temp, temp$MasterPlan == "Ray")
report$ray_revenue <- temp_ray$Revenue[match(report$Country, temp_ray$Country)]
temp_fabric <- subset(temp, temp$MasterPlan == "Fabric")
report$reach_revenue <- temp_fabric$Revenue[match(report$Country, temp_fabric$Country)]
report[is.na(report)] <- 0
report$total_revenue <- report$ray_revenue + report$reach_revenue
##Cancellations----
net_orders_i<-subset(net_orders_i,net_orders_i$Removal_Date != "-")
cn_orders_march <- subset(net_orders_i,
                          ((substr(net_orders_i$Removal_Date, 6, 7) == sprintf("%02d", month(p)) & day(as.Date(net_orders_i$Removal_Date))>q)
                           | (substr(net_orders_i$Removal_Date, 6, 7) == sprintf("%02d", month(r)) & day(as.Date(net_orders_i$Removal_Date))< day(r)))
# & substr(net_orders_i$Removal_Date, 1, 4) == year(r)
& month(net_orders_i$placed_at) != month(p)
& year(as.Date(net_orders_i$Removal_Date))==Y)
cn_orders_march <- subset(cn_orders_march, grepl('Ray',cn_orders_march$Master_Plan_2) | grepl('Fabric', cn_orders_march$Master_Plan_2))
cn_orders_march$MasterPlan<-"Fabric"
cn_orders_march$MasterPlan[grepl('Ray',cn_orders_march$Master_Plan_2)]<-"Ray"
## cancelled ray accounts hunts
temp<-subset(cn_orders_march,cn_orders_march$MasterPlan=="Ray" & cn_orders_march$HUR_Flag=="Hunt")
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = function(x) length(unique(x)))},error=function(e){print("no Ray hunts cancelled orders")})
report$ray_accounts_cn <- temp$practice_id[match(report$Country, temp$Country)]
## cancelled reach slots
temp<-subset(cn_orders_march,cn_orders_march$MasterPlan=="Fabric")
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = length)},error=function(e){print("no Reach slots cancelled")})
report$reach_slots_cn <- temp$practice_id[match(report$Country, temp$Country)]
##billings cancelled
temp<-subset(cn_orders_march,cn_orders_march$MasterPlan=="Ray" | cn_orders_march$MasterPlan=="Fabric")
tryCatch({temp<-aggregate(Revenue~Country+MasterPlan, data = temp, FUN = sum)},error=function(e){print("no cancellations")})
temp1<-subset(temp,temp$MasterPlan=="Ray")
report$ray_cn<-temp1$Revenue[match(report$Country,temp1$Country)]
temp1<-subset(temp,temp$MasterPlan== "Fabric")
report$reach_cn<-temp1$Revenue[match(report$Country,temp1$Country)]
report[is.na(report)]<-0
report$total_cn<-report$ray_cn+report$reach_cn
##Net----
report$Ray_Net<-report$ray_revenue-report$ray_cn
report$Reach_Net<-report$reach_revenue-report$reach_cn
report$Total_Net<-report$Ray_Net+report$Reach_Net
##Trend past##----
##International----
Int_orders <- read.csv("./Order(Log).csv", stringsAsFactors = F)
# Int_orders<- subset(Int_orders, Int_orders$Country != "India" & Int_orders$Country != "-")
Int_orders<-subset(Int_orders,Int_orders$Removal_Date == "-")
# Int_orders<-subset(Int_orders,Int_orders$original_duration_months%%3==0 & Int_orders$original_duration_months!=0)
Int_orders$MasterPlan<-"Reach"
Int_orders$MasterPlan[!grepl('fabric',tolower(Int_orders$Master_Plan_2))]<-"Ray"
Int_orders$month_name<-(as.yearmon(Int_orders$placed_at))
Int_orders$per_month<-Int_orders$Revenue*30.5/Int_orders$duration_days
Int_orders$dur_actual<- Int_orders$duration_days/30.5
Int_orders[is.na(Int_orders)]<-0
Int_Ray<-subset(Int_orders,Int_orders$MasterPlan=="Ray")
Int_Reach<-subset(Int_orders,Int_orders$MasterPlan=="Reach")
## Reach_Master_Orders
Master_Reach<-read.csv("./Reach_Orders.csv",stringsAsFactors = F)
Int_Reach$tag<-Master_Reach$tag[match(Int_Reach$order_id,Master_Reach$order_id)]
Int_Reach$Renewal_subid<-Master_Reach$Renewal_subid[match(Int_Reach$order_id,Master_Reach$order_id)]
Int_Reach$Renewal_date<-Master_Reach$Renewal_date[match(Int_Reach$order_id,Master_Reach$order_id)]
##Cleared orders
# sum(Int_orders$Revenue)
#
# C1<-Int_orders %>%
# group_by(month_name,MasterPlan) %>%
# # mutate(HURFlag,Int_orders$HUR_Flag != )
# filter(MasterPlan=="Ray")%>%
# summarise(Cleared_Revenue = sum(Revenue)) %>%
# select(month_name, MasterPlan, Cleared_Revenue) %>%
# ungroup()
# filter(country %in% input$country) %>%
|
/Int_dashboard.R
|
no_license
|
srinijav4/RShiny
|
R
| false | false | 6,383 |
r
|
# setwd("C:/Users/srinija/Dropbox/Orders")
library(lubridate)
library(zoo)
# library(reshape)
net_orders <- read.csv("./Order(Log).csv", stringsAsFactors = F)
net_orders_i <- subset(net_orders, net_orders$Country != "India" & net_orders$Country != "-")
########################################################## ONLY FOR Present Month -----
p <- as.Date('2017-09-01') ##First day of current month
q<-3 ## start day of the current month for considering cancellations
Y<-'2017' ## cancellation year
r<-as.Date('2017-10-02') ##(next month end day for considering cancellations)
## Gross----
gross_orders_march <- subset(net_orders_i, as.yearmon(net_orders_i$placed_at) == as.yearmon(p) & net_orders_i$Removal_Date == "-")
report <- data.frame(unique(net_orders_i$Country), stringsAsFactors = F)
colnames(report) <- c("Country")
gross_orders_march <- subset(gross_orders_march, gross_orders_march$original_duration_months%%3 == 0)
gross_orders_march <- subset(gross_orders_march, grepl('Ray',gross_orders_march$Master_Plan_2) | grepl('Fabric', gross_orders_march$Master_Plan_2))
##Ray New Acquisitions (Gross)
temp<-subset(gross_orders_march,gross_orders_march$HUR_Flag=="Hunt" & grepl('Ray',gross_orders_march$Master_Plan_2))
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = function(x) length(unique(x)))},error=function(e){print("empty dataframe orders_country")})
colnames(temp) <- c("x", "freq")
report$Ray_New_Acq<-temp$freq[match(report$Country,temp$x)]
##Reach Slots
temp<-subset(gross_orders_march, grepl('Fabric',gross_orders_march$Master_Plan_2))
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = length)},error=function(e){print("empty dataframe reachorders_country")})
colnames(temp) <- c("x", "freq")
report$Reach_slots<-temp$freq[match(report$Country,temp$x)]
##temporary MasterPlan column
gross_orders_march$MasterPlan<-"Fabric"
gross_orders_march$MasterPlan[gross_orders_march$Master_Plan_2 != "Fabric"] <- "Ray"
##Gross billings
tryCatch({temp <- aggregate(Revenue~MasterPlan+Country, data = gross_orders_march, FUN = sum)},error=function(e){print("empty dataframe gross revenue country")})
temp_ray <- subset(temp, temp$MasterPlan == "Ray")
report$ray_revenue <- temp_ray$Revenue[match(report$Country, temp_ray$Country)]
temp_fabric <- subset(temp, temp$MasterPlan == "Fabric")
report$reach_revenue <- temp_fabric$Revenue[match(report$Country, temp_fabric$Country)]
report[is.na(report)] <- 0
report$total_revenue <- report$ray_revenue + report$reach_revenue
##Cancellations----
net_orders_i<-subset(net_orders_i,net_orders_i$Removal_Date != "-")
cn_orders_march <- subset(net_orders_i,
                          ((substr(net_orders_i$Removal_Date, 6, 7) == sprintf("%02d", month(p)) & day(as.Date(net_orders_i$Removal_Date))>q)
                           | (substr(net_orders_i$Removal_Date, 6, 7) == sprintf("%02d", month(r)) & day(as.Date(net_orders_i$Removal_Date))< day(r)))
# & substr(net_orders_i$Removal_Date, 1, 4) == year(r)
& month(net_orders_i$placed_at) != month(p)
& year(as.Date(net_orders_i$Removal_Date))==Y)
cn_orders_march <- subset(cn_orders_march, grepl('Ray',cn_orders_march$Master_Plan_2) | grepl('Fabric', cn_orders_march$Master_Plan_2))
cn_orders_march$MasterPlan<-"Fabric"
cn_orders_march$MasterPlan[grepl('Ray',cn_orders_march$Master_Plan_2)]<-"Ray"
## cancelled ray accounts hunts
temp<-subset(cn_orders_march,cn_orders_march$MasterPlan=="Ray" & cn_orders_march$HUR_Flag=="Hunt")
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = function(x) length(unique(x)))},error=function(e){print("no Ray hunts cancelled orders")})
report$ray_accounts_cn <- temp$practice_id[match(report$Country, temp$Country)]
## cancelled reach slots
temp<-subset(cn_orders_march,cn_orders_march$MasterPlan=="Fabric")
tryCatch({temp<-aggregate(practice_id~Country, data = temp, FUN = length)},error=function(e){print("no Reach slots cancelled")})
report$reach_slots_cn <- temp$practice_id[match(report$Country, temp$Country)]
##billings cancelled
temp<-subset(cn_orders_march,cn_orders_march$MasterPlan=="Ray" | cn_orders_march$MasterPlan=="Fabric")
tryCatch({temp<-aggregate(Revenue~Country+MasterPlan, data = temp, FUN = sum)},error=function(e){print("no cancellations")})
temp1<-subset(temp,temp$MasterPlan=="Ray")
report$ray_cn<-temp1$Revenue[match(report$Country,temp1$Country)]
temp1<-subset(temp,temp$MasterPlan== "Fabric")
report$reach_cn<-temp1$Revenue[match(report$Country,temp1$Country)]
report[is.na(report)]<-0
report$total_cn<-report$ray_cn+report$reach_cn
##Net----
report$Ray_Net<-report$ray_revenue-report$ray_cn
report$Reach_Net<-report$reach_revenue-report$reach_cn
report$Total_Net<-report$Ray_Net+report$Reach_Net
##Trend past##----
##International----
Int_orders <- read.csv("./Order(Log).csv", stringsAsFactors = F)
# Int_orders<- subset(Int_orders, Int_orders$Country != "India" & Int_orders$Country != "-")
Int_orders<-subset(Int_orders,Int_orders$Removal_Date == "-")
# Int_orders<-subset(Int_orders,Int_orders$original_duration_months%%3==0 & Int_orders$original_duration_months!=0)
Int_orders$MasterPlan<-"Reach"
Int_orders$MasterPlan[!grepl('fabric',tolower(Int_orders$Master_Plan_2))]<-"Ray"
Int_orders$month_name<-(as.yearmon(Int_orders$placed_at))
Int_orders$per_month<-Int_orders$Revenue*30.5/Int_orders$duration_days
Int_orders$dur_actual<- Int_orders$duration_days/30.5
Int_orders[is.na(Int_orders)]<-0
Int_Ray<-subset(Int_orders,Int_orders$MasterPlan=="Ray")
Int_Reach<-subset(Int_orders,Int_orders$MasterPlan=="Reach")
## Reach_Master_Orders
Master_Reach<-read.csv("./Reach_Orders.csv",stringsAsFactors = F)
Int_Reach$tag<-Master_Reach$tag[match(Int_Reach$order_id,Master_Reach$order_id)]
Int_Reach$Renewal_subid<-Master_Reach$Renewal_subid[match(Int_Reach$order_id,Master_Reach$order_id)]
Int_Reach$Renewal_date<-Master_Reach$Renewal_date[match(Int_Reach$order_id,Master_Reach$order_id)]
##Cleared orders
# sum(Int_orders$Revenue)
#
# C1<-Int_orders %>%
# group_by(month_name,MasterPlan) %>%
# # mutate(HURFlag,Int_orders$HUR_Flag != )
# filter(MasterPlan=="Ray")%>%
# summarise(Cleared_Revenue = sum(Revenue)) %>%
# select(month_name, MasterPlan, Cleared_Revenue) %>%
# ungroup()
# filter(country %in% input$country) %>%
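# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The script above repeatedly aggregates a metric by Country and match()-es the
# result back into `report`. A small hypothetical helper capturing that pattern
# (add_country_metric and its arguments are assumptions, not existing code):
add_country_metric <- function(report, dat, value_col, out_col, fun = sum) {
  agg <- aggregate(dat[[value_col]], by = list(Country = dat$Country), FUN = fun)
  report[[out_col]] <- agg$x[match(report$Country, agg$Country)]
  report
}
# Toy usage:
toy <- data.frame(Country = c("US", "US", "UK"), Revenue = c(10, 5, 7))
rep_df <- data.frame(Country = c("US", "UK", "AU"))
add_country_metric(rep_df, toy, "Revenue", "total_revenue")   # US = 15, UK = 7, AU = NA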
|