| content (large_string, lengths 0-6.46M) | path (large_string, lengths 3-331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5-125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 classes) | text (string, lengths 0-6.46M) |
|---|---|---|---|---|---|---|---|---|---|
# Assignment Week 1
# setwd("E:/Biblioteca/Exploratory Coursera/Week1")
# Plot 3
as.numeric.factor <- function(x) {as.numeric(levels(x))[x]}
d <- read.csv(file = "household_power_consumption.txt", sep=";")
d <- transform(d, Date = as.Date(Date,format="%d/%m/%Y"))
# Apply the date filter
d <- rbind( d[(d$Date == "2007-02-01"),],d[(d$Date == "2007-02-02"),])
d <- transform(d, DateTime = strftime(paste(Date, Time) ,format="%Y-%m-%d %T"))
d <- transform(d, Time = as.character.factor(Time))
d <- transform(d, Time = strftime(paste(Date,Time,sep=" "),format="%D%T", tz=""))
d <- cbind(d, Dat <- as.POSIXct(d$DateTime))
names(d)[length(d)] <- "Dat"
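# Dat holds the combined date-time as POSIXct and is used as the x-axis for the plots below.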
dkw <-as.data.frame( d[ as.character(d$Global_active_power) != "?" ,
c("Dat","Sub_metering_1","Sub_metering_2","Sub_metering_3") ] )
dkw <- cbind(dkw,
as.numeric.factor(dkw$Sub_metering_1),
as.numeric.factor(dkw$Sub_metering_2),
as.numeric(dkw$Sub_metering_3))
names(dkw)[ (length(dkw)-2) : length(dkw)] <- c("Sub_metering_1_n","Sub_metering_2_n","Sub_metering_3_n")
plot(dkw$Dat ,
dkw$Sub_metering_1_n,
type = "l",
main= "",
xlab= "",
ylab = "Energy sub metering")
lines(dkw$Dat ,
dkw$Sub_metering_2_n,
col="red")
lines(dkw$Dat ,
dkw$Sub_metering_3_n,
col="blue")
legend("topright",
c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1),
col=c("black","blue","red"),
cex = 0.5
)
dev.copy(png, file="plot3.png",width=480, height=480)
dev.off()
| /plot3.R | no_license | smatioli/ExploratoryDataAnalysis | R | false | false | 1,636 | r |
# Set command line args, set defaults so we can run the script from R console as well
args = commandArgs(trailingOnly = TRUE)
if(length(args) > 2) {
reportName = args[1]
csvFile = args[2]
description = args[3]
outputDir = args[1]
} else {
reportName = 'test'
csvFile = '/home/corpaul/workspace/gumby/tests/data/test_session1.csv'
#csvFile = '/home/corpaul/workspace/gumby/tests/data/nosetests_test_magnetlink1.csv'
description = 'Test run from R console'
outputDir = "/home/corpaul/workspace/gumby/output/perf_reports/"
}
# Read data
csvData = read.csv(csvFile)
# Make summary
rowCount = nrow(csvData)
#totalBytes = sum(as.numeric(as.character(data$BYTES)))
totalBytes = sum(csvData$BYTES)
sink(sprintf("%s/summary.txt", outputDir))
cat(sprintf("Report name: %s\n", reportName))
cat(sprintf("Data monitored for: %s\n", description))
cat(sprintf("Total number of write transactions is: %d\n", rowCount))
cat(sprintf("Total bytes written is: %d\n", totalBytes))
sink()
csvData$type = 'other'
csvData$type[grep("etilqs", csvData$FILE)] <- "temp_table"
csvData$type[grep("db-wal", csvData$FILE)] <- "commit"
csvData$type[grep("torrent", csvData$FILE)] <- "torrent"
# get top IO writes per stacktrace (30 largest by bytes)
bytescountPerStacktrace = aggregate(csvData$BYTES, by=list(csvData$TRACE, csvData$type, csvData$PROCESS), FUN=sum)
counts <- aggregate(csvData$BYTES, by=list(csvData$TRACE),FUN="length")
colnames(bytescountPerStacktrace) <- c("TRACE", "FILE", "PROCESS", "BYTES")
colnames(counts) <- c("TRACE", "count")
bytescountPerStacktrace = merge(bytescountPerStacktrace, counts, by = "TRACE")
colnames(bytescountPerStacktrace) <- c("TRACE", "FILE", "PROCESS", "BYTES", "COUNT")
top20PerStacktrace = bytescountPerStacktrace [with(bytescountPerStacktrace , order(-BYTES))[1:30],]
write.csv(top20PerStacktrace, file=sprintf("%s/top20_per_stacktrace.csv", outputDir))
# calculate percentage and write to file for comparison
library(plyr)
perc<-ddply(bytescountPerStacktrace,.(TRACE=TRACE), summarize, PERC = (BYTES/totalBytes)*100)
bytescountPerStacktrace = merge(bytescountPerStacktrace, perc, by = "TRACE")
#totalBytes
sum(bytescountPerStacktrace$PERC)
write.csv(bytescountPerStacktrace, file=sprintf("%s/summary_per_stacktrace.csv", outputDir))
# get top 20 IO writes per filename
bytescountPerFilename = aggregate(csvData$BYTES, by=list(csvData$FILE, csvData$type, csvData$PROCESS), FUN=sum)
colnames(bytescountPerFilename) = c("FILE", "type", "PROCESS", "BYTES")
topsize = min(30, length(bytescountPerFilename$FILE))
top20PerFilename = bytescountPerFilename[with(bytescountPerFilename, order(-BYTES))[1:topsize],]
write.csv(top20PerFilename , file=sprintf("%s/top20_per_filename.csv", outputDir))
# get the largest individual write operations
topLargestWrites = csvData[with(csvData, order(-BYTES))[1:topsize],]
write.csv(topLargestWrites , file=sprintf("%s/top_largest_writes.csv", outputDir))
# graph top 50 writes per filename
#top100PerFilename = bytescountPerFilename[with(bytescountPerFilename, order(-BYTES))[1:50],]
#bytescountPerFilename$type <- factor(bytescountPerFilename$type) # it must be a factor
#bytescountPerFilename$color[bytescountPerFilename$type=='sqlite'] <- "red"
#bytescountPerFilename$color[bytescountPerFilename$type=='torrent'] <- "blue"
#bytescountPerFilename$color[bytescountPerFilename$type=='other'] <- "green"
topPerFilename = bytescountPerFilename[with(bytescountPerFilename, order(-BYTES))[1:topsize],]
library(ggplot2)
# Remove specific paths
#topPerFilename = as.data.frame(sapply(topPerFilename,gsub,pattern="/home/user/Desktop/TriblerDownloads",replacement=""))
#topPerFilename$BYTES = as.numeric(levels(topPerFilename$BYTES)[topPerFilename$BYTES])
minVal = min(topPerFilename$BYTES)
maxVal = max(topPerFilename$BYTES)
# order by bytes
ticksSeq = seq(0, maxVal, by=maxVal/10)
#qplot(x = BYTES, y = FILE, data = topPerFilename, geom = "point", colour=topPerFilename$color, facets = ~ topPerFilename$color )
topPerFilename$FILE <- reorder(topPerFilename$FILE, -topPerFilename$BYTES)
p = ggplot(data = topPerFilename, aes(x = BYTES, y = FILE, colour = type))
p + geom_point() +
scale_x_continuous(breaks=ticksSeq, limits = c(0,maxVal), expand = c(0,0)) +
theme(axis.text.x=element_text(angle = 90))
ggsave(file=sprintf("%s/top_per_filename.svg", outputDir), width=12, height=6, dpi=100)
#dotchart(topPerFilename$BYTES,labels=topPerFilename$FILE,cex=.7,groups= topPerFilename$type,
# main="Bytes written to file\ngrouped by type of write",
# xlab="Bytes written", gcolor="black", color=topPerFilename$color)
#jpeg(filename = sprintf("python/PerformanceReports/%s/topPerFilename.jpg", reportName))
#timePerStacktrace = aggregate(data$TIME, by=list(data$TRACE, data$FILE, data$PROCESS), FUN=sum)
#top20TimePerStacktrace = timePerStacktrace [with(timePerStacktrace , order(-x))[1:20],]
#colnames(top20TimePerStacktrace) <- c("TRACE", "FILE", "PROCESS", "TIME(us)")
#write.csv(top20TimePerStacktrace, file=sprintf("python/PerformanceReports/%s/Top20TimePerStacktrace.csv", reportName))
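# A hypothetical invocation sketch (paths and names are placeholders):
#   Rscript scripts/r/io_writes_report.R myreport /path/to/writes.csv "test run"
# Note: when arguments are supplied, outputDir is taken from args[1] (the report name), see above.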
| /scripts/r/io_writes_report.R | no_license | vladum/gumby | R | false | false | 5,118 | r |
#!/usr/bin/env Rscript
# This script extracts specified samples from read counts dataset and
# experiment design table.
###################### Define internally used functions ######################
# Determine script invocation method.
getInvokeMethod <- function()
{
# Get all command arguments including R system arguments.
cmd_args <- commandArgs(FALSE)
# Get the value of "--file" option.
matched_pattern <- regexpr("(?<=^--file=).+", cmd_args, perl=TRUE)
prog_cmd <- regmatches(cmd_args, matched_pattern)
n_prog_cmd <- length(prog_cmd)
# Get the availability of "--args" option.
args_opt_avail <- "--args" %in% cmd_args
# Determine invocation method based on n_prog_cmd and args_opt_avail.
invoke_method <- NULL
if(n_prog_cmd == 0) invoke_method <- "R"
else if(n_prog_cmd == 1) invoke_method <- "Rscript"
else invoke_method <- "unknown"
# Return invocation method.
return(invoke_method)
}
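# For reference: invoking the script as "Rscript <script> ..." typically makes commandArgs(FALSE)
# contain a "--file=<script path>" entry (plus "--args" when arguments follow), whereas an
# interactive R session or source() yields no "--file=" entry; exact entries vary by platform.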
# Check the command line of script invocation by Rscript.
checkInvokeCommandLine <- function(invoke_method)
{
# Initialize checking status.
cmd_status <- TRUE
# Only check command line for Rscript invocation.
if(invoke_method == "Rscript")
{
# Get all command arguments including R system arguments.
cmd_args <- commandArgs(FALSE)
# Get the value of "--file" option.
matched_pattern <- regexpr("(?<=^--file=).+", cmd_args, perl=TRUE)
prog_cmd <- regmatches(cmd_args, matched_pattern)
prog_name <- basename(prog_cmd)
# Get the command line of script invocation by Rscript.
cmd_args <- commandArgs(TRUE)
if(length(cmd_args) < 1)
{
cat(paste("Usage:", prog_cmd, "[Counts Directory] [Function Directory]\n"))
cat("\n")
cat("Arguments:\n\n")
cat("\t[Counts Directory] is a directory containing read counts and experiment design.\n\n")
cat("\t[Function Directory] is a directory with all rquired R functions (Optional).\n\n")
cmd_status <- FALSE
}
}
else
{
warning("Script command line is only available for Rscript invocation!")
# Comment out the following line to allow running this script in an interactive R console.
#cmd_status <- FALSE
}
# Return checking status.
return(cmd_status)
}
# Obtain the path of read counts directory.
getCountsDir <- function(invoke_method="Rscript", default_dir=getwd())
{
# invoke_method: "Rscript" or "R".
counts_dir <- NULL
# Determine the function/config path.
if(invoke_method == "Rscript")
{
# For the invocation by Rscript at system terminal.
cmd_args <- commandArgs(TRUE)
if(length(cmd_args) > 0) counts_dir <- cmd_args[1]
else
{
warning("No counts directory is provided!")
counts_dir <- default_dir
}
if(is.null(counts_dir)) warning("counts_dir is NULL when invoke_method is \"Rscript\"!")
}
else if(invoke_method == "R")
{
# For the invocation by R command or "source" at R terminal.
counts_dir <- default_dir
if(is.null(counts_dir)) warning("counts_dir is NULL when invoke_method is \"R\"!")
}
else warning("invoke_method must be either \"R\" or \"Rscript\"!")
# Return the function/config path.
return(counts_dir)
}
# Obtain the path of R function scripts.
getFuncDir <- function(invoke_method="Rscript", default_dir=getwd())
{
# invoke_method: "Rscript" or "R".
func_dir <- NULL
# Determine the function/config path.
if(invoke_method == "Rscript")
{
# For the invocation by Rscript at system terminal.
# First of all, check the command line for user-specified function path.
cmd_args <- commandArgs(TRUE)
if(length(cmd_args) > 1) func_dir <- cmd_args[2]
else
{
# Secondly, retrieve function path from the path of this main program.
cmd_args <- commandArgs(FALSE)
# Get the value of "--file" option.
matched_pattern <- regexpr("(?<=^--file=).+", cmd_args, perl=TRUE)
prog_cmd <- regmatches(cmd_args, matched_pattern)
func_dir <- dirname(prog_cmd)
}
if(is.null(func_dir))
{
# Thirdly, use the default value for function path.
warning("No argument is assigned to func_dir when invoke_method is \"Rscript\"!")
func_dir <- default_dir
}
}
else if(invoke_method == "R")
{
# For the invocation by R command or "source" at R terminal.
func_dir <- default_dir
if(is.null(func_dir)) warning("func_dir is NULL when invoke_method is \"R\"!")
}
else warning("invoke_method must be either \"R\" or \"Rscript\"!")
# Return the function/config path.
return(func_dir)
}
# Set default directories for differential comparison analysis.
setDefaultDEMDirs <- function(data_set="default")
{
# Initialize default directories.
user_home <- Sys.getenv("HOME")
if(data_set == "default")
{
default_func_dir <- file.path(user_home, "LINCS/DEG/Programs")
default_counts_dir <- file.path(user_home, "LINCS/DEG/Repo/Counts")
}
else
{
default_func_dir <- NULL
default_counts_dir <- NULL
}
# Copy all variables from current function's environment to its parent's environment.
for(obj in ls()) assign(obj, get(obj,environment()), parent.env(environment()))
}
################### Set up basic computational environment ###################
# Initialize default directories.
# Set default directories for differential comparison analysis.
setDefaultDEMDirs()
# Determine script invocation method.
invoke_method <- getInvokeMethod()
if(invoke_method == "unknown") stop("Script must be invoked by either R or Rscript!")
# Obtain invoked R script command.
stopifnot(checkInvokeCommandLine(invoke_method))
# Determine directory path of read counts data.
counts_dir <- getCountsDir(invoke_method=invoke_method, default_dir=default_counts_dir)
stopifnot(!is.null(counts_dir) && length(counts_dir)==1 && is.character(counts_dir))
if(!dir.exists(counts_dir)) stop(paste0("Counts directory \"", counts_dir, "\" doesn't exist!"))
counts_dir <- normalizePath(counts_dir)
# Determine directory path of all required R function scripts.
func_dir <- getFuncDir(invoke_method=invoke_method, default_dir=default_func_dir)
stopifnot(!is.null(func_dir) && length(func_dir)==1 && is.character(func_dir))
if(!dir.exists(func_dir)) stop(paste0("Function directory \"", func_dir, "\" doesn't exist!"))
func_dir <- normalizePath(func_dir)
# Load the function for extracting read counts and experiment design.
library(tools)
source(file.path(func_dir, "convertReadCountsFileName.R"), local=TRUE)
source(file.path(func_dir, "extractGeneExpSamples.R"), local=TRUE)
# Extract specified samples from read counts dataset and experiment design table.
write_file <- TRUE
ext_name <- "tsv"
# Plate-1 dataset (20150409).
sample_id_name <- "RNAseq"
sample_id_series <- "20150409"
sample_id <- paste(sample_id_name, sample_id_series, sep="_")
read_counts_file <- paste(sample_id, "unq.refseq.umi", "dat", sep=".")
read_counts_file <- file.path(counts_dir, read_counts_file)
exprt_design_file <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series, "txt", sep=".")
exprt_design_file <- file.path(counts_dir, exprt_design_file)
if(file.exists(read_counts_file) && file.exists(exprt_design_file))
{
sample_type <- "LINCS"
cat("Extracting", paste(sample_id_name,sample_id_series,sample_type,sep="-"), "samples...\n")
read_counts_name <- "Read-Counts"
read_counts_file_new <- convertReadCountsFileName(read_counts_file, read_counts_name)
file.copy(read_counts_file, read_counts_file_new)
read_counts_datasets_20150409_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_new, mol_name_col=1, sample_name_filter="T96s4_", exprt_design_file=exprt_design_file, sample_names=c(paste("H",6:8,sep="")), sample_keep=FALSE, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_new)
cat("Done\n")
}
# Plate-2 dataset (20150503).
sample_id_name <- "RNAseq"
sample_id_series <- "20150503"
sample_id <- paste(sample_id_name, sample_id_series, sep="_")
read_counts_file <- paste(sample_id, "unq.refseq.umi", "dat", sep=".")
read_counts_file <- file.path(counts_dir, read_counts_file)
exprt_design_file <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series, "txt", sep=".")
exprt_design_file <- file.path(counts_dir, exprt_design_file)
if(file.exists(read_counts_file) && file.exists(exprt_design_file))
{
sample_type <- "LINCS"
cat("Extracting", paste(sample_id_name,sample_id_series,sample_type,sep="-"), "samples...\n")
read_counts_name <- "Read-Counts"
read_counts_file_new <- convertReadCountsFileName(read_counts_file, read_counts_name)
file.copy(read_counts_file, read_counts_file_new)
read_counts_datasets_20150503_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_new, mol_name_col=1, sample_name_filter="T96s2_", exprt_design_file=exprt_design_file, sample_names=c(paste("H",7:12,sep="")), sample_keep=FALSE, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_new)
cat("Done\n")
}
# Plate-3 dataset (20150712).
sample_id_name <- "RNAseq"
sample_id_series <- "20150712"
sample_id_series_set2 <- paste(sample_id_series, "Set2", sep="-")
sample_id_set2 <- paste(sample_id_name, sample_id_series_set2, sep="_")
read_counts_file_set2 <- paste(sample_id_set2, "unq.refseq.umi", "dat", sep=".")
read_counts_file_set2 <- file.path(counts_dir, read_counts_file_set2)
exprt_design_file_set2 <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series_set2, "txt", sep=".")
exprt_design_file_set2 <- file.path(counts_dir, exprt_design_file_set2)
sample_id_series_set3 <- paste(sample_id_series, "Set3", sep="-")
sample_id_set3 <- paste(sample_id_name, sample_id_series_set3, sep="_")
read_counts_file_set3 <- paste(sample_id_set3, "unq.refseq.umi", "dat", sep=".")
read_counts_file_set3 <- file.path(counts_dir, read_counts_file_set3)
exprt_design_file_set3 <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series_set3, "txt", sep=".")
exprt_design_file_set3 <- file.path(counts_dir, exprt_design_file_set3)
if(file.exists(read_counts_file_set2) && file.exists(exprt_design_file_set2) && file.exists(read_counts_file_set3) && file.exists(exprt_design_file_set3))
{
read_counts_name <- "Read-Counts"
exprt_design_name <- "Experiment-Design"
sample_type <- "LINCS"
cat("Extracting", paste(sample_id_name,sample_id_series,sample_type,sep="-"), "samples...\n")
# Extract dataset 2.
read_counts_file_set2_new <- convertReadCountsFileName(read_counts_file_set2, read_counts_name)
file.copy(read_counts_file_set2, read_counts_file_set2_new)
read_counts_datasets_20150712_set2_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_set2_new, mol_name_col=1, sample_name_filter="T384s1_", exprt_design_file=exprt_design_file_set2, sample_names=c(paste("P",19:24,sep="")), sample_keep=FALSE, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_set2_new)
# Extract dataset 3.
read_counts_file_set3_new <- convertReadCountsFileName(read_counts_file_set3, read_counts_name)
file.copy(read_counts_file_set3, read_counts_file_set3_new)
read_counts_datasets_20150712_set3_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_set3_new, mol_name_col=1, sample_name_filter="T384s1_", exprt_design_file=exprt_design_file_set3, sample_names=c(paste("P",19:24,sep="")), sample_keep=FALSE, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_set3_new)
# Merge dataset 2 and 3 to a single dataset.
read_counts_datasets_20150712_lincs <- NULL
read_counts_datasets_20150712_lincs$exprt_design <- read_counts_datasets_20150712_set2_lincs$exprt_design
read_counts_datasets_20150712_lincs$read_counts <- read_counts_datasets_20150712_set2_lincs$read_counts + read_counts_datasets_20150712_set3_lincs$read_counts
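# Note: the element-wise addition above assumes the Set2 and Set3 read-count tables share the
# same gene (row) and sample (column) ordering.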
read_counts_file <- paste(paste(sample_id_name,read_counts_name,sep="-"), sample_type, sample_id_series, ext_name, sep=".")
read_counts_file <- file.path(counts_dir, read_counts_file)
exprt_design_file <- paste(paste(sample_id_name,exprt_design_name,sep="-"), sample_type, sample_id_series, ext_name, sep=".")
exprt_design_file <- file.path(counts_dir, exprt_design_file)
write.table(read_counts_datasets_20150712_lincs$read_counts, file=read_counts_file, quote=FALSE, sep="\t", na="")
write.table(read_counts_datasets_20150712_lincs$exprt_design, file=exprt_design_file, quote=FALSE, sep="\t", na="", row.names=FALSE)
cat("Done\n")
}
# Plate-4 datasets (20151120).
sample_id_name <- "RNAseq"
sample_id_series <- "20151120"
sample_id_series_set1 <- paste(sample_id_series, "Set1", sep="-")
sample_id_set1 <- paste(sample_id_name, sample_id_series_set1, sep="_")
read_counts_file_set1 <- paste(sample_id_set1, "unq.refseq.umi", "dat", sep=".")
read_counts_file_set1 <- file.path(counts_dir, read_counts_file_set1)
exprt_design_file_set1 <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series_set1, "txt", sep=".")
exprt_design_file_set1 <- file.path(counts_dir, exprt_design_file_set1)
sample_id_series_set2_k107 <- paste(sample_id_series, "Set2-K107", sep="-")
sample_id_set2_k107 <- paste(sample_id_name, sample_id_series_set2_k107, sep="_")
read_counts_file_set2_k107 <- paste(sample_id_set2_k107, "unq.refseq.umi", "dat", sep=".")
read_counts_file_set2_k107 <- file.path(counts_dir, read_counts_file_set2_k107)
exprt_design_file_set2_k107 <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series_set2_k107, "txt", sep=".")
exprt_design_file_set2_k107 <- file.path(counts_dir, exprt_design_file_set2_k107)
sample_id_series_set2_k108 <- paste(sample_id_series, "Set2-K108", sep="-")
sample_id_set2_k108 <- paste(sample_id_name, sample_id_series_set2_k108, sep="_")
read_counts_file_set2_k108 <- paste(sample_id_set2_k108, "unq.refseq.umi", "dat", sep=".")
read_counts_file_set2_k108 <- file.path(counts_dir, read_counts_file_set2_k108)
exprt_design_file_set2_k108 <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series_set2_k108, "txt", sep=".")
exprt_design_file_set2_k108 <- file.path(counts_dir, exprt_design_file_set2_k108)
if(file.exists(read_counts_file_set1) && file.exists(exprt_design_file_set1) && file.exists(read_counts_file_set2_k107) && file.exists(exprt_design_file_set2_k107) && file.exists(read_counts_file_set2_k108) && file.exists(exprt_design_file_set2_k108))
{
read_counts_name <- "Read-Counts"
exprt_design_name <- "Experiment-Design"
sample_type <- "LINCS-PC"
cat("Extracting", paste(sample_id_name,sample_id_series,sample_type,sep="-"), "samples...\n")
# Extract dataset 1.
read_counts_file_set1_new <- convertReadCountsFileName(read_counts_file_set1, read_counts_name)
file.copy(read_counts_file_set1, read_counts_file_set1_new)
read_counts_datasets_20151120_set1_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_set1_new, mol_name_col=1, sample_name_filter="T384s1_", exprt_design_file=exprt_design_file_set1, sample_names=c("A1","A24","P1","P24","F9",paste("P",9:23,sep="")), sample_keep=FALSE, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_set1_new)
# Extract dataset 2-K107.
read_counts_file_set2_k107_new <- convertReadCountsFileName(read_counts_file_set2_k107, read_counts_name)
file.copy(read_counts_file_set2_k107, read_counts_file_set2_k107_new)
read_counts_datasets_20151120_set2_k107_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_set2_k107_new, mol_name_col=1, sample_name_filter="T384s1_", exprt_design_file=exprt_design_file_set2_k107, sample_names=c("A1","A24","P1","P24","F9",paste("P",9:23,sep="")), sample_keep=FALSE, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_set2_k107_new)
# Extract dataset 2 K108.
read_counts_file_set2_k108_new <- convertReadCountsFileName(read_counts_file_set2_k108, read_counts_name)
file.copy(read_counts_file_set2_k108, read_counts_file_set2_k108_new)
read_counts_datasets_20151120_set2_k108_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_set2_k108_new, mol_name_col=1, sample_name_filter="T384s1_", exprt_design_file=exprt_design_file_set2_k108, sample_names=c("A1","A24","P1","P24","F9",paste("P",9:23,sep="")), sample_keep=FALSE, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_set2_k108_new)
# Merge dataset 1, 2-K107 and 2-K108 to a single dataset.
read_counts_datasets_20151120_lincs <- NULL
read_counts_datasets_20151120_lincs$exprt_design <- read_counts_datasets_20151120_set1_lincs$exprt_design
read_counts_datasets_20151120_lincs$read_counts <- read_counts_datasets_20151120_set1_lincs$read_counts + read_counts_datasets_20151120_set2_k107_lincs$read_counts + read_counts_datasets_20151120_set2_k108_lincs$read_counts
read_counts_file <- paste(paste(sample_id_name,read_counts_name,sep="-"), sample_type, sample_id_series, ext_name, sep=".")
read_counts_file <- file.path(counts_dir, read_counts_file)
exprt_design_file <- paste(paste(sample_id_name,exprt_design_name,sep="-"), sample_type, sample_id_series, ext_name, sep=".")
exprt_design_file <- file.path(counts_dir, exprt_design_file)
write.table(read_counts_datasets_20151120_lincs$read_counts, file=read_counts_file, quote=FALSE, sep="\t", na="")
write.table(read_counts_datasets_20151120_lincs$exprt_design, file=exprt_design_file, quote=FALSE, sep="\t", na="", row.names=FALSE)
cat("Done\n")
}
# Conventional dataset (20151223).
sample_id_name <- "RNAseq"
sample_id_series <- "20151223"
sample_id <- paste(sample_id_name, sample_id_series, sep="_")
read_counts_file <- paste(sample_id, "ReadCounts", "tsv", sep=".")
read_counts_file <- file.path(counts_dir, read_counts_file)
exprt_design_file <- paste(paste(sample_id_name,"Experiment-Design",sep="-"), sample_id_series, "txt", sep=".")
exprt_design_file <- file.path(counts_dir, exprt_design_file)
if(file.exists(read_counts_file) && file.exists(exprt_design_file))
{
sample_type <- "LINCS"
cat("Extracting", paste(sample_id_name,sample_id_series,sample_type,sep="-"), "samples...\n")
read_counts_name <- "Read-Counts"
read_counts_file_new <- convertReadCountsFileName(read_counts_file, read_counts_name)
file.copy(read_counts_file, read_counts_file_new)
read_counts_datasets_20151223_lincs <- extractGeneExpSamples(read_counts_file=read_counts_file_new, mol_name_col=1, exprt_design_file=exprt_design_file, write_file=write_file, extra_name=sample_type, ext_name=ext_name, swap_series_type=TRUE, func_dir=func_dir)
file.remove(read_counts_file_new)
cat("Done\n")
}
| /Comparison/src/Extract-Gene-Expression-Samples.R | no_license | g4x86/mRNA-Seq-Pipeline | R | false | false | 19,572 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/harss_settings.R
\name{harss_settings}
\alias{harss_settings}
\title{Generate a list of settings for automated factor slice sampling}
\usage{
harss_settings(n_harss_updates = 1, bracket_update_interval = 100,
bracket_limits = c(0, Inf))
}
\arguments{
\item{n_harss_updates}{number of hit-and-run updates.}
\item{bracket_update_interval}{how often the slice bracket width should be updated.}
\item{bracket_limits}{limits for the slice bracket width, defaults to (0,Inf).}
}
\value{
list with additional settings for hit and run slice sampling
}
\description{
Generate a list of settings for automated factor slice sampling
}
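A minimal usage sketch (hypothetical values; assumes the package providing harss_settings, e.g. stemr, is attached):
settings <- harss_settings(n_harss_updates = 2, bracket_update_interval = 50, bracket_limits = c(0, 10))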
| /man/harss_settings.Rd | no_license | mschauer/stemr | R | false | true | 709 | rd |
rankall <- function(outcome, num = "best") {
dataIn <- read.csv("outcome-of-care-measures.csv", stringsAsFactors = FALSE, na.strings = "Not Available")
if (outcome %in% c("pneumonia", "heart attack", "heart failure") == FALSE) {
stop("invalid outcome")}
if (outcome == "heart attack") indx <- 11
if (outcome == "heart failure") indx <- 17
if (outcome == "pneumonia") indx <- 23
hosp <- dataIn[, c(2,7,indx)]
hosp[,2] <- as.factor(hosp[,2])
states <- levels(hosp[,2])
hosp <- hosp[complete.cases(hosp),]
output <- data.frame()
for (i in states) {
sdata <- hosp[hosp$State == i, ]
orsdata <- sdata[order(sdata[,3],sdata[,1]),]
if (num == "best") {nu <- 1}
else if (num == "worst") {nu <- nrow(sdata)}
else {nu <- num}
output <- rbind(output, orsdata[nu, c(1,2)])
}
colnames(output) <- c("hospital", "state")
rownames(output) <- states
return(output)
}
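# A hedged usage sketch (assumes "outcome-of-care-measures.csv" is in the working directory):
# rankall("heart attack", 20)    # 20th-ranked hospital for heart attack in every state
# rankall("pneumonia", "worst")  # worst-ranked hospital for pneumonia in every state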
| /R_programming/rankall.R | no_license | JagusTin23/data-science-specialization | R | false | false | 999 | r |
# load data
data(tu)
# attach it
attach(tu)
# create the model
sabre.model.1<-sabre(TU~YEAR+AGE+EVNO+SUPR+HRS+factor(NOEM)+SEX1+PROM+factor(SC80)-1,case=CASE)
# examine the results
sabre.model.1
| /demo/tu.R | no_license | cran/sabreR | R | false | false | 195 | r |
source("setup_data.R")
### Configure graphic device
png(filename = "plot3.png",
width = 480, height = 480, units = "px", pointsize = 12,
bg = "white", res = NA, type = c("cairo", "cairo-png", "Xlib", "quartz"))
par(mar=c(4.1, 5.1, 4.1, 4.1))
### Draw graphs
with(consumptions, {
plot(
DateTime,
Sub_metering_1,
type = "l",
ylab = "Energy sub metering",
xlab = ""
)
lines(
DateTime,
Sub_metering_2,
#type = "l",
col = "red"
)
lines(
DateTime,
Sub_metering_3,
#type = "l",
col = "blue"
)
})
legend(
"topright",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"),
lty = 1,
lwd = 2,
cex=0.9
)
### Flush PNG device to disk
dev.off()
| /plot3.R | no_license | jimleroyer/ExData_Plotting1 | R | false | false | 776 | r |
source("setup_data.R")
### Configure graphic device
png(filename = "plot3.png",
width = 480, height = 480, units = "px", pointsize = 12,
bg = "white", res = NA, type = c("cairo", "cairo-png", "Xlib", "quartz"))
par(mar=c(4.1, 5.1, 4.1, 4.1))
### Draw graphs
with(consumptions, {
plot(
DateTime,
Sub_metering_1,
type = "l",
ylab = "Energy sub metering",
xlab = ""
)
lines(
DateTime,
Sub_metering_2,
#type = "l",
col = "red"
)
lines(
DateTime,
Sub_metering_3,
#type = "l",
col = "blue"
)
})
legend(
"topright",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"),
lty = 1,
lwd = 2,
cex=0.9
)
### Flush PNG device to disk
dev.off()
|
### Simulating microsatellite data with forward time simulations:
#setwd("/Users/christineewers/Desktop/EffSize_package/EffSize_hackathon")
library(rmetasim)
library(adegenet)
library(pegas)
#?source("rmetasim2adegenet.R")
ne <- function(t, mu) {
x <- t / (4*mu)
return(x)
}
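# Worked example (hypothetical values): with t = 4 and mu = 0.01,
# ne(4, 0.01) returns 4 / (4 * 0.01) = 100, i.e. an effective population size of 100.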
exampleS <- matrix(c(0.5, 0, 0.5, 0.3), byrow=T, nrow = 2)
exampleR <- matrix(c(0, 1.1, 0, 0), byrow=T, nrow = 2)
exampleM <- matrix(c(0, 0, 0, 1), byrow=T, nrow = 2)
landscapeS <- matrix(0,2,2)
landscapeR <- landscapeS
landscapeM <- landscapeS
mu <- 0.01
dummy <- landscape.new.empty()
dummy <- landscape.new.intparam(dummy,s=2,cg=0,ce=0,totgen=1000)
dummy <- landscape.new.floatparam(dummy, s=0)
dummy <- landscape.new.switchparam(dummy, mp=0)
dummy <- landscape.new.local.demo(dummy, S=exampleS, R=exampleR, M=exampleM)
dummy <- landscape.new.epoch(dummy,S=landscapeS,R=landscapeR,M=landscapeM,extinct=c(0),carry=(1000)) #carry limits final popsize, the larger carry, the more alleles
dummy <- landscape.new.locus(dummy,type=1,ploidy=2,transmission=0,mutationrate=mu,numalleles=1) # the number of alleles will not increase if mutationrate=0
dummy <- landscape.new.locus(dummy,type=1,ploidy=2,transmission=0,mutationrate=mu,numalleles=1) # the number of alleles will not increase if mutationrate=0
dummy <- landscape.new.individuals(dummy,PopulationSizes=c(50,50)) # need a popsize for each stage in each population, two stages here.
dummy.gen0 <- landscape.simulate(dummy, 500) #simulate 500 time-clicks -> equilibrium? if I simulate 1000 generations, there are no differences in allele freq -> why?
dummy.gen1 <- landscape.simulate(dummy.gen0, 1) # advance to the next generation (+1)
dummy.gen2 <- landscape.simulate(dummy.gen1, 1) # advance to the next generation (+1)
#make the output into an adegenet object
#?g0 <- landscape.make.genind(dummy.gen0)
#?g1 <- landscape.make.genind(dummy.gen1)
#?g2 <- landscape.make.genind(dummy.gen2)
#?g0@pop.names <- "0"
#?g1@pop.names <- "1"
#?g2@pop.names <- "2"
#?g <- repool(g0, g1, g2)
#?g@loc.n.all # 2 loci: 6 and 5 alleles
# subsample 50 individuals
#?g0S <- g0[sample(1:999, size=50, replace=F)]
#?g1S <- g1[sample(1:999, size=50, replace=F)]
#?g2S <- g2[sample(1:999, size=50, replace=F)]
#?simG3 <-repool(g0S, g1S, g2S)
#?simG <- g0S
|
/R/simulate_genind_obj.R
|
no_license
|
georgeshirreff/multiNe
|
R
| false | false | 2,280 |
r
|
# Getting the raster stacks together...
library(raster)
library(tidyverse)
library(future.apply)
clim.proj<- readRDS("~/Box/RES Data/CMIP5_SST/ProcessedSSTProjectionsWithRasters_ECW.rds")
clim.proj$Scenario<- ifelse(grepl("RCP85", clim.proj$Path), "RCP85", "RCP45")
# A very big dataset, try to reduce it
dates.keep<- seq(as.Date("1980-01-16"), as.Date("2060-01-15"), by = "day")
dates.keep2<- paste("X", gsub("-", ".", as.character(dates.keep)), sep = "")
raster_to_df<- function(raster.stack){
df.out<- raster::as.data.frame(raster.stack, xy = TRUE)
return(df.out)
}
clim.data<- clim.proj %>%
dplyr::select(., Scenario, Proj.SST) %>%
mutate(., "Proj.SST.DataFrame" = map(Proj.SST, raster_to_df)) %>%
dplyr::select(., Scenario, Proj.SST.DataFrame) %>%
unnest(cols = "Proj.SST.DataFrame") %>%
gather(., Year, SST, -Scenario, -x, -y) %>%
dplyr::filter(., Year %in% dates.keep2)
clim.data$Year<- gsub("X", "", clim.data$Year)
rm(clim.proj)
# Mean, rcp45 and rcp85
clim.summs<- clim.data %>%
separate("Year", into = c("Year", "Month", "Day")) %>%
group_by(Scenario, Year, Month, x, y) %>%
summarize("Mean" = mean(SST, na.rm = TRUE),
"Pct5th" = quantile(SST, probs = c(0.05), na.rm = TRUE, names = FALSE),
"Pct95th" = quantile(SST, probs = c(0.95), na.rm = TRUE, names = FALSE))
clim.summs<- clim.summs %>%
group_by(Scenario, Year, Month) %>%
nest()
df_to_rast<- function(df, stat) {
if(FALSE){
df<- clim.summs$data[[1]]
}
df.temp<- df %>%
dplyr::select(x, y, stat)
rast.temp<- rasterFromXYZ(df.temp)
return(rast.temp)
}
clim.summs<- clim.summs %>%
mutate(., "RasterStack.Mean" = map2(data, "Mean", df_to_rast),
"RasterStack.Pct05" = map2(data, "Pct5th", df_to_rast),
"RasterStack.Pct95" = map2(data, "Pct95th", df_to_rast))
# Okay, now save them....
scenarios<- c("RCP45", "RCP85")
for(i in seq_along(scenarios)){
scenario.use<- scenarios[i]
dat.use<- clim.summs %>%
dplyr::filter(., Scenario %in% scenario.use)
# Mean
mean.stack.out<- raster::stack(dat.use$RasterStack.Mean)
names(mean.stack.out)<- paste(dat.use$Year, dat.use$Month)
writeRaster(mean.stack.out, paste(res.data.path, "CMIP5_SST/ECW_", scenario.use, "_mu.grd", sep = ""), overwrite = TRUE)
# Pct5th
pct5th.stack.out<- raster::stack(dat.use$RasterStack.Pct05)
names(pct5th.stack.out)<- paste(dat.use$Year, dat.use$Month)
writeRaster(pct5th.stack.out, paste(res.data.path, "CMIP5_SST/ECW_", scenario.use, "_5th.grd", sep = ""), overwrite = TRUE)
# Pct95th
pct95th.stack.out<- raster::stack(dat.use$RasterStack.Pct95)
names(pct95th.stack.out)<- paste(dat.use$Year, dat.use$Month)
writeRaster(pct95th.stack.out, paste(res.data.path, "CMIP5_SST/ECW_", scenario.use, "_95th.grd", sep = ""), overwrite = TRUE)
}
fishSDM.prediction.df<- function(rcp85.mu.dir, rcp85.pct05.dir, rcp85.pct95.dir, rcp45.mu.dir, rcp45.pct05.dir, rcp45.pct95.dir, oisst.dir, sp.in, dates.baseline, dates.future, seasonal.mu, season, model.dat) {
library(tidyverse)
library(maptools)
library(raster)
library(rgeos)
library(geosphere)
library(zoo)
suppressWarnings(sapply(list.files(pattern = "[.]R$", path = "~/Dropbox/Andrew/Work/GMRI/AllRFunctions/", full.names = TRUE), source))
if(FALSE) {
proj.path = "~/Box/Mills Lab/Projects/ECW_FishClimate/"
rcp85.mu.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_mu.grd", sep = "")
rcp85.pct05.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_5th.grd", sep = "")
rcp85.pct95.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_95th.grd", sep = "")
rcp45.mu.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_mu.grd", sep = "")
rcp45.pct05.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_5th.grd", sep = "")
rcp45.pct95.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_95th.grd", sep = "")
oisst.dir<- paste(proj.path, "Data/OISSTThroughFeb2020.grd", sep = "")
sp.in<- "~/Box/RES Data/Shapefiles/"
dates.baseline = c("2014-10-16", "2015-10-16", "2016-10-16", "2017-10-16", "2018-10-16")
dates.future = c("2025-10-16", "2040-10-16", "2055-10-16", "2100-10-16")
seasonal.mu<- TRUE
season<- "Fall"
model.dat<- paste(proj.path, "Data/ECWmodel.dat.rds", sep = "")
plot<- TRUE
}
## Projections
proj.wgs84<- CRS("+init=epsg:4326") #WGS84
proj.utm<- CRS("+init=epsg:2960") #UTM 19
##### Start
## Baseline SSTs
# Empty stack
pred.rast.stack<- stack()
# Add OISST
name.ind<- nlayers(pred.rast.stack)+1
stack0<- raster::stack(oisst.dir)
# Move to monthly?
oisst.min<- gsub("X", "", min(names(stack0)))
oisst.min.date<- as.Date(gsub("[.]", "-", oisst.min))
oisst.max<- gsub("X", "", max(names(stack0)))
oisst.max.date<- as.Date(gsub("[.]", "-", oisst.max))
# Calculate monthly mean temperature -- this would be compared to the sstclim data (monthly climate ensemble)
oisst.dates<- seq.Date(from = oisst.min.date, to = oisst.max.date, by = "day")
oisst.dat<- setZ(stack0, oisst.dates)
# Aggregate daily to monthly data
oisst.monthly <- zApply(oisst.dat, by = as.yearmon, mean)
#### Mean seasonal temperature
if(seasonal.mu) {
    # Baseline
#Baseline stack, store seasonal means and then average them all
sst.stack<- stack()
years<- c("2014", "2015", "2016", "2017", "2018")
for(i in seq_along(years)){
dates.use<- switch(season,
"Fall" = paste(c("Sep", "Oct", "Nov"), rep(years[i]), sep = "."),
"Spring" = paste(c("Mar", "Apr", "May"), rep(years[i]), sep = "."),
"Summer" = paste(c("Jun", "Jul", "Aug", "Sep"), rep(years[i]), sep = "."))
sst.temp<- calc(oisst.monthly[[which(names(oisst.monthly) %in% dates.use)]], mean)
sst.stack<- stack(sst.stack, sst.temp)
print(years[i])
}
names(sst.stack)<- paste(season, years, sep = ".")
sst.basemeans<- calc(sst.stack[[c(1,2,3,4,5)]], mean)
pred.rast.stack<- stack(pred.rast.stack, sst.basemeans)
names(pred.rast.stack)[c(1)]<- c("Baseline")
# Climate
name.ind<- nlayers(pred.rast.stack)+1
stack.temp<- raster::stack(rcp85.mu.dir)
crs(stack.temp)<- proj.wgs84
stack0<- resample(stack.temp, oisst.monthly[[1]])
clim.dates<- seq.Date(from = as.Date("1980-01-16"), to = as.Date("2060-02-15"), by = "month")
clim.stack<- setZ(stack0, clim.dates)
clim.stack.zind<- getZ(clim.stack)
#Baseline stack, store seasonal means and then average them all
rcp85.mu.stack<- stack()
years<- c("2055")
for(i in seq_along(years)){
dates.use<- switch(season,
"Fall" = as.Date(paste(rep(years[i]), c("09-16", "10-16", "11-16"), sep = "-")),
"Spring" = as.Date(paste(rep(years[i]), c("03-16", "04-16", "05-16"), sep = "-")),
"Summer" = as.Date(paste(rep(years[i]), c("06-16", "07-16", "08-16", "09-16"), sep = "-")))
clim.mu.temp<- calc(clim.stack[[which(clim.stack.zind %in% dates.use)]], mean)
rcp85.mu.stack<- stack(rcp85.mu.stack, clim.mu.temp)
print(years[i])
}
names(rcp85.mu.stack)<- paste(season, years, "rcp85.mu", sep = ".")
# Climate 5th
name.ind<- nlayers(pred.rast.stack)+1
stack.temp<- raster::stack(rcp85.pct05.dir)
crs(stack.temp)<- proj.wgs84
stack0<- resample(stack.temp, oisst.monthly[[1]])
clim.dates<- seq.Date(from = as.Date("1980-01-16"), to = as.Date("2060-02-15"), by = "month")
clim.stack<- setZ(stack0, clim.dates)
clim.stack.zind<- getZ(clim.stack)
#Baseline stack, store seasonal means and then average them all
rcp85.pct05.stack<- stack()
years<- c("2055")
for(i in seq_along(years)){
dates.use<- switch(season,
"Fall" = as.Date(paste(rep(years[i]), c("09-16", "10-16", "11-16"), sep = "-")),
"Spring" = as.Date(paste(rep(years[i]), c("03-16", "04-16", "05-16"), sep = "-")),
"Summer" = as.Date(paste(rep(years[i]), c("06-16", "07-16", "08-16", "09-16"), sep = "-")))
clim.pct05.temp<- calc(clim.stack[[which(clim.stack.zind %in% dates.use)]], mean)
rcp85.pct05.stack<- stack(rcp85.pct05.stack, clim.pct05.temp)
print(years[i])
}
names(rcp85.pct05.stack)<- paste(season, years, "rcp85.pct05", sep = ".")
# Climate -- 95th
name.ind<- nlayers(pred.rast.stack)+1
stack.temp<- raster::stack(rcp85.pct95.dir)
crs(stack.temp)<- proj.wgs84
stack0<- resample(stack.temp, oisst.monthly[[1]])
clim.dates<- seq.Date(from = as.Date("1980-01-16"), to = as.Date("2060-02-15"), by = "month")
clim.stack<- setZ(stack0, clim.dates)
clim.stack.zind<- getZ(clim.stack)
#Baseline stack, store seasonal means and then average them all
rcp85.pct95.stack<- stack()
years<- c("2055")
for(i in seq_along(years)){
dates.use<- switch(season,
"Fall" = as.Date(paste(rep(years[i]), c("09-16", "10-16", "11-16"), sep = "-")),
"Spring" = as.Date(paste(rep(years[i]), c("03-16", "04-16", "05-16"), sep = "-")),
"Summer" = as.Date(paste(rep(years[i]), c("06-16", "07-16", "08-16", "09-16"), sep = "-")))
clim.pct95.temp<- calc(clim.stack[[which(clim.stack.zind %in% dates.use)]], mean)
rcp85.pct95.stack<- stack(rcp85.pct95.stack, clim.pct95.temp)
print(years[i])
}
names(rcp85.pct95.stack)<- paste(season, years, "rcp85.pct95", sep = ".")
# RCP45
# Climate
name.ind<- nlayers(pred.rast.stack)+1
stack.temp<- raster::stack(rcp45.mu.dir)
crs(stack.temp)<- proj.wgs84
stack0<- resample(stack.temp, oisst.monthly[[1]])
clim.dates<- seq.Date(from = as.Date("1980-01-16"), to = as.Date("2060-02-15"), by = "month")
clim.stack<- setZ(stack0, clim.dates)
clim.stack.zind<- getZ(clim.stack)
#Baseline stack, store seasonal means and then average them all
rcp45.mu.stack<- stack()
years<- c("2055")
for(i in seq_along(years)){
dates.use<- switch(season,
"Fall" = as.Date(paste(rep(years[i]), c("09-16", "10-16", "11-16"), sep = "-")),
"Spring" = as.Date(paste(rep(years[i]), c("03-16", "04-16", "05-16"), sep = "-")),
"Summer" = as.Date(paste(rep(years[i]), c("06-16", "07-16", "08-16", "09-16"), sep = "-")))
clim.mu.temp<- calc(clim.stack[[which(clim.stack.zind %in% dates.use)]], mean)
rcp45.mu.stack<- stack(rcp45.mu.stack, clim.mu.temp)
print(years[i])
}
names(rcp45.mu.stack)<- paste(season, years, "rcp45.mu", sep = ".")
# Climate 5th
name.ind<- nlayers(pred.rast.stack)+1
stack.temp<- raster::stack(rcp45.pct05.dir)
crs(stack.temp)<- proj.wgs84
stack0<- resample(stack.temp, oisst.monthly[[1]])
clim.dates<- seq.Date(from = as.Date("1980-01-16"), to = as.Date("2060-02-15"), by = "month")
clim.stack<- setZ(stack0, clim.dates)
clim.stack.zind<- getZ(clim.stack)
#Baseline stack, store seasonal means and then average them all
rcp45.pct05.stack<- stack()
years<- c("2055")
for(i in seq_along(years)){
dates.use<- switch(season,
"Fall" = as.Date(paste(rep(years[i]), c("09-16", "10-16", "11-16"), sep = "-")),
"Spring" = as.Date(paste(rep(years[i]), c("03-16", "04-16", "05-16"), sep = "-")),
"Summer" = as.Date(paste(rep(years[i]), c("06-16", "07-16", "08-16", "09-16"), sep = "-")))
clim.pct05.temp<- calc(clim.stack[[which(clim.stack.zind %in% dates.use)]], mean)
rcp45.pct05.stack<- stack(rcp45.pct05.stack, clim.pct05.temp)
print(years[i])
}
names(rcp45.pct05.stack)<- paste(season, years, "rcp45.pct05", sep = ".")
# Climate -- 95th
name.ind<- nlayers(pred.rast.stack)+1
stack.temp<- raster::stack(rcp45.pct95.dir)
crs(stack.temp)<- proj.wgs84
stack0<- resample(stack.temp, oisst.monthly[[1]])
clim.dates<- seq.Date(from = as.Date("1980-01-16"), to = as.Date("2060-02-15"), by = "month")
clim.stack<- setZ(stack0, clim.dates)
clim.stack.zind<- getZ(clim.stack)
#Baseline stack, store seasonal means and then average them all
rcp45.pct95.stack<- stack()
years<- c("2055")
for(i in seq_along(years)){
dates.use<- switch(season,
"Fall" = as.Date(paste(rep(years[i]), c("09-16", "10-16", "11-16"), sep = "-")),
"Spring" = as.Date(paste(rep(years[i]), c("03-16", "04-16", "05-16"), sep = "-")),
"Summer" = as.Date(paste(rep(years[i]), c("06-16", "07-16", "08-16", "09-16"), sep = "-")))
clim.pct95.temp<- calc(clim.stack[[which(clim.stack.zind %in% dates.use)]], mean)
rcp45.pct95.stack<- stack(rcp45.pct95.stack, clim.pct95.temp)
print(years[i])
}
names(rcp45.pct95.stack)<- paste(season, years, "rcp45.pct95", sep = ".")
# Add it to the pred rast
pred.rast.stack<- stack(pred.rast.stack, rcp85.mu.stack, rcp85.pct05.stack, rcp85.pct95.stack, rcp45.mu.stack, rcp45.pct05.stack, rcp45.pct95.stack)
# Other predictors
# Add depth
neshelf.bathy<- raster(paste(sp.in, "NEShelf_Etopo1_bathy.tiff", sep = ""))
proj4string(neshelf.bathy)<- proj.wgs84
DEPTH<- resample(neshelf.bathy, pred.rast.stack[[1]])
pred.rast.stack<- stack(pred.rast.stack, DEPTH)
# Get these values out
pred.df<- raster::as.data.frame(pred.rast.stack, xy = T)
points.wgs84<- pred.df
coordinates(points.wgs84)<- ~x+y
proj4string(points.wgs84)<- proj.wgs84
return(pred.df)
} else {
for(i in 1:length(oisst.windows)) {
window<- round(oisst.windows[i]/30, 0)
dates.stack.temp<- stack()
for(k in 1:length(dates.baseline)) {
date.new<- unlist(strsplit(gsub("-", ".", format(as.Date(dates.baseline[k]), "%Y-%b")), "[.]"))
date2<- paste(date.new[2], date.new[1], sep = ".")
stack.start<- which(names(oisst.monthly) == date2)
stack.t<- oisst.monthly[[(stack.start-(window-1)):stack.start]]
datemu.t<- calc(stack.t, mean)
dates.stack.temp<- stack(dates.stack.temp, datemu.t)
}
mu.t<- calc(dates.stack.temp, mean)
pred.rast.stack<- stack(pred.rast.stack, mu.t)
names(pred.rast.stack)[name.ind]<- paste("d", oisst.windows[i], "MU.OISST", sep = "")
name.ind<- name.ind+1
print(paste(window, " is done", sep = ""))
}
## Climate projections SSTs
name.ind<- nlayers(pred.rast.stack)+1
stack0<- stack(climate.dir)
pred.rast.stack<- raster::resample(pred.rast.stack, stack0[[1]])
for(i in 1:length(dates.future)) {
year<- format(as.Date(dates.future[[i]]), "%Y")
stack.start<- which(gsub("[.]", "-", gsub("X", "", names(stack0))) == dates.future[i])
for(k in 1:length(climate.windows)) {
window<- round(climate.windows[k]/30, 0)
stack.t<- stack0[[(stack.start-(window-1)):stack.start]]
datemu.t<- calc(stack.t, mean)
pred.rast.stack<- stack(pred.rast.stack, datemu.t)
names(pred.rast.stack)[name.ind]<- paste(year, ".", window, "MO.Clim", sep = "")
name.ind<- name.ind+1
print(paste(dates.future[i], climate.windows[k], " is done", sep = " "))
}
}
# Add depth
neshelf.bathy<- raster(paste(sp.in, "NEShelf_etopo1_bathy_reclass.tif", sep = ""))
proj4string(neshelf.bathy)<- proj.wgs84
depth.temp<- projectRaster(neshelf.bathy, crs = proj.utm)
DEPTH<- resample(depth.temp, pred.rast.stack[[1]])
pred.rast.stack<- stack(pred.rast.stack, DEPTH)
# Add TRI
TRI.temp<- terrain(DEPTH, opt = "TRI")
TRI<- resample(TRI.temp, pred.rast.stack[[1]])
pred.rast.stack<- stack(pred.rast.stack, TRI)
# Get these variables
#Mask out points outside of NELME
nelme.rast<- pred.rast.stack[[1]]
nelme.rast[]<- NA
nelme<- readShapePoly(paste("~/Dropbox/Andrew/Work/GMRI/AllGIS/nelme.shp", sep = ""))
proj4string(nelme)<- proj.wgs84
nelme.utm<- spTransform(nelme, proj.utm)
nelme.buff<- gBuffer(nelme.utm, width = 40000)
nelme.rast<- rasterize(nelme.buff, nelme.rast)
pred.rast.stack.m<- mask(pred.rast.stack, mask = nelme.rast, inverse = FALSE)
# Species specific biomass
# Load it
temp.space <- new.env()
temp.df <- load(model.dat, temp.space)
dat.all <- get(temp.df, temp.space)
rm(temp.space)
# Add in new along/cross shelf position
proj.wgs84<- CRS("+init=epsg:4326") #WGS84
proj.utm<- CRS("+init=epsg:2960") #UTM 19
pts<- data.frame("x" = dat.all$DECDEG_BEGLON, "y" = dat.all$DECDEG_BEGLAT)
coordinates(pts)<- ~x+y
proj4string(pts)<- proj.utm
pts.sp<- data.frame(spTransform(pts, proj.wgs84))
dat.all$SHELF_POS<- distCosine(pts.sp, cbind(-75, 35), r=6378137)/1000
# Filter
species.all<- read.csv("~/Dropbox/Andrew/Work/GMRI/AllData/Assesmentfishspecies.csv")
dat<- filter(dat.all, COMNAME %in% species.all$COMNAME)
  # Formatting datasets for this specific modeling structure
dat$BIOMASS.ADJ<- ifelse(dat$bio.abund.flag == "FLAG1", 1, dat$BIOMASS) # Abundance, but no biomass recorded. Set these records to biomass = 1.
dat$BIOMASS.ADJ<- ifelse(is.na(dat$bio.abund.flag), 0, dat$BIOMASS.ADJ) # True absences, change NA to 0.
# Create BIOMASS.MOD for modeling log biomass
dat<- dat %>%
mutate(.,
"BIOMASS.LOG" = log(BIOMASS.ADJ),
"BIOMASS.MOD" = ifelse(BIOMASS.LOG == -Inf | BIOMASS.LOG <=0, NA, BIOMASS.LOG),
"RANDOM" = rnorm(nrow(.)))
# Create Mean Biomass
load("~/Dropbox/Andrew/Work/GMRI/AllData/stratum.area.Rdata")
dat<- left_join(dat, stratum.area, by = "STRATUM")
# Weighted mean biomass
# Get STRATUM code and STRATUM_AREA and only unique observations for each combo
t1<- dplyr::select(dat, STRATUM, STRATUM_AREA)
t2<- t1[!duplicated(t1["STRATUM"]),]
totstrwt<-sum(t2$STRATUM_AREA, na.rm = TRUE)
# Get STRATUM proportion area relative to total area surveyed
ratio.dat<- data.frame("STRATUM" = t2$STRATUM, "STRATUM.RATIO" = t2$STRATUM_AREA/totstrwt)
# Get a count of the unique number of tows per stratum per year
tows_unique<- dat[!duplicated(dat["ID"]),]
numtows<-aggregate(ID ~ EST_YEAR + STRATUM, length, data= tows_unique)
colnames(numtows)<-c('EST_YEAR','STRATUM','COUNT')
dat<- left_join(dat, numtows, by = c("EST_YEAR", "STRATUM"))
dat<- left_join(dat, ratio.dat, by = "STRATUM")
# Calculate total species biomass per strata per year
  strat.biomass<-aggregate(BIOMASS.ADJ ~ EST_YEAR + SEASON + SVSPP + STRATUM + STRATUM.RATIO, sum, data = dat)
strat.biomass<- left_join(strat.biomass, numtows, by = c("EST_YEAR", "STRATUM"))
# Mean biomass per year/species/strata
strat.biomass$mean.biomass<- strat.biomass$BIOMASS.ADJ/strat.biomass$COUNT
# Area weighted mean biomass per year/species/strata
strat.biomass$BIOMASS.WMEAN<-strat.biomass$mean.biomass*strat.biomass$STRATUM.RATIO
# Mean Annual Biomass
annual.seasonal.wtmean<- strat.biomass %>%
group_by(., EST_YEAR, SEASON, SVSPP) %>%
summarise(., "BIOMASS.WMEAN" = sum(BIOMASS.WMEAN))
avg.seasonal.wtmean<- annual.seasonal.wtmean %>%
group_by(., SEASON, SVSPP) %>%
summarize(., "AVG.BIOMASS.WMEAN" = mean(BIOMASS.WMEAN, na.rm = T))
annual.seasonal.wtmean<- annual.seasonal.wtmean %>%
left_join(., avg.seasonal.wtmean, by = c("SEASON", "SVSPP")) %>%
mutate(., "BIOMASS.WMEAN.ANOM" = BIOMASS.WMEAN - AVG.BIOMASS.WMEAN)
# Add back to full data
  dat<- left_join(dat, annual.seasonal.wtmean, by = c("EST_YEAR", "SEASON", "SVSPP"))
# Get baseline observations
dat$TRAIN.TEST<- ifelse(as.Date(dat$DATE) >= "1986-01-01" & as.Date(dat$DATE) <= "2010-12-31", "TRAIN",
ifelse(as.Date(dat$DATE) >= "2011-01-01" & as.Date(dat$DATE) <= "2016-12-31", "TEST", "Neither"))
dat.test<- dat %>%
group_by(., COMNAME, TRAIN.TEST) %>%
dplyr::filter(., TRAIN.TEST == "TEST") %>%
nest(.key = "TEST.DATA") %>%
arrange(COMNAME)
# Rasters ready
stack.id<- nlayers(pred.rast.stack.m)
count.rast.temp<- pred.rast.stack.m[[1]]
count.rast.temp[]<- NA
names(count.rast.temp)<- "Count"
for(i in seq_along(as.character(unique(dat.test$COMNAME)))) {
temp<- dat.test$TEST.DATA[[i]]
temp.sp<- temp
coordinates(temp.sp)<- ~DECDEG_BEGLON+DECDEG_BEGLAT
proj4string(temp.sp)<- proj.utm
# Get average weighted mean biomass in each grid cell
temp$cell.id<- extract(count.rast.temp, temp.sp, cellnumbers=TRUE)[,1]
temp2<- temp %>%
group_by(cell.id) %>%
dplyr::summarise(., mean = mean(BIOMASS.ADJ))
# Save values in raster
out.rast<- count.rast.temp
out.rast[temp2$cell.id]<- temp2$mean
# Add em to the stack
pred.rast.stack.m<- stack(pred.rast.stack.m, out.rast)
names(pred.rast.stack.m)[nlayers(pred.rast.stack.m)]<- paste(dat.test$COMNAME[i], ".", "Biomass", sep = "")
}
# Now values
points<- data.frame(coordinates(pred.rast.stack.m[[1]]))
coordinates(points)<- ~x+y
proj4string(points)<- proj.utm
pred.df<- data.frame(raster::extract(pred.rast.stack.m, points))
names(pred.df)[21]<- "DEPTH"
names(pred.df)[22]<- "TRI"
# Add sediment
neshelf.sediment<- readShapePoly(paste(sp.in, "TNC_benthicsediment.shp", sep = ""))
proj4string(neshelf.sediment)<- proj.wgs84
neshelf.sediment.utm<- spTransform(neshelf.sediment, proj.utm)
neshelf.sediment.pts<- over(points, neshelf.sediment.utm)$GRPSED
pred.df$SED.TYPE<- as.factor(neshelf.sediment.pts)
points.wgs84<- spTransform(points, proj.wgs84)
pred.df$x<- coordinates(points.wgs84)[,1]
pred.df$y<- coordinates(points.wgs84)[,2]
return(pred.df)
}
}
fall.rast.pred<- fishSDM.prediction.df(rcp45.mu.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_mu.grd", sep = ""),
                                       rcp45.pct05.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_5th.grd", sep = ""),
                                       rcp45.pct95.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_95th.grd", sep = ""),
                                       rcp85.mu.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_mu.grd", sep = ""),
                                       rcp85.pct05.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_5th.grd", sep = ""),
                                       rcp85.pct95.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_95th.grd", sep = ""),
                                       oisst.dir = paste(proj.path, "Data/OISSTThroughFeb2020.grd", sep = ""),
                                       sp.in = "~/Box/RES Data/Shapefiles/",
                                       dates.baseline = c("2014-10-16", "2015-10-16", "2016-10-16", "2017-10-16", "2018-10-16"),
                                       dates.future = c("2025-10-16", "2040-10-16", "2055-10-16", "2100-10-16"),
                                       seasonal.mu = TRUE,
                                       season = "Fall",
                                       model.dat = paste(proj.path, "Data/ECWmodel.dat.rds", sep = ""))
fall.rast.pred$SEASON<- "FALL"
saveRDS(fall.rast.pred, file = paste(proj.path, "Data/fall.rast.preds.rds", sep = ""))
spring.rast.pred<- fishSDM.prediction.df(rcp45.mu.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_mu.grd", sep = ""),
                                         rcp45.pct05.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_5th.grd", sep = ""),
                                         rcp45.pct95.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP45_95th.grd", sep = ""),
                                         rcp85.mu.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_mu.grd", sep = ""),
                                         rcp85.pct05.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_5th.grd", sep = ""),
                                         rcp85.pct95.dir = paste(res.data.path, "CMIP5_SST/ECW_RCP85_95th.grd", sep = ""),
                                         oisst.dir = paste(proj.path, "Data/OISSTThroughFeb2020.grd", sep = ""),
                                         sp.in = "~/Box/RES Data/Shapefiles/",
                                         dates.baseline = c("2014-04-16", "2015-04-16", "2016-04-16", "2017-04-16", "2018-04-16"),
                                         dates.future = c("2025-04-16", "2040-04-16", "2055-04-16", "2100-04-16"),
                                         seasonal.mu = TRUE,
                                         season = "Spring",
                                         model.dat = paste(proj.path, "Data/ECWmodel.dat.rds", sep = ""))
spring.rast.pred$SEASON<- "SPRING"
saveRDS(spring.rast.pred, file = paste(proj.path, "Data/spring.rast.preds.rds", sep = ""))
|
/Code/SDM_PredandProjectionVariables.R
|
no_license
|
aallyn/ECW_FishClimate
|
R
| false | false | 24,494 |
r
|
library(psychometric)
### Name: CI.Rsq
### Title: Confidence Interval for R-squared
### Aliases: CI.Rsq
### Keywords: htest models
### ** Examples
# see section 3.6.2 Cohen et al (2003)
# 95 percent CI
CI.Rsq(.5032, 62, 4, level = .95)
# 80 percent CI
CI.Rsq(.5032, 62, 4, level = .80)
|
/data/genthat_extracted_code/psychometric/examples/CI.Rsq.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 294 |
r
|
#Coursera ProgrammingAssignment2
# A pair of functions that cache the inverse of a matrix.
## This function creates a matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse_matrix) m <<- inverse_matrix
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the matrix returned by makeCacheMatrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
# Test data and code to check assignment
testm <- matrix(runif(9), 3, 3)
a <- makeCacheMatrix(testm)
cacheSolve(a)
cacheSolve(a)
|
/cachematrix.R
|
no_license
|
gmw12/ProgrammingAssignment2
|
R
| false | false | 985 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/housekeeping.R
\name{GetPcawgSbsPentanucleotideSignaturesData}
\alias{GetPcawgSbsPentanucleotideSignaturesData}
\title{Returns the PCAWG SBS penta-nucleotide reference signature data}
\usage{
GetPcawgSbsPentanucleotideSignaturesData()
}
\value{
A data.frame of the PCAWG SBS penta-nucleotide mutational signatures.
}
\description{
This function returns the PCAWG single base substitution
(SBS) pentanucleotide context mutational signatures data.
}
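% Hedged usage sketch (not part of the generated Rd): it assumes only the
% zero-argument call shown in the usage section and that the return value is
% a data.frame, as described above.
\examples{
\dontrun{
pcawg.sbs.penta <- GetPcawgSbsPentanucleotideSignaturesData()
dim(pcawg.sbs.penta)            # dimensions of the reference signature table
head(colnames(pcawg.sbs.penta)) # signature / context column names
}
}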
|
/man/GetPcawgSbsPentanucleotideSignaturesData.Rd
|
no_license
|
Honglab-Research/MutaliskR
|
R
| false | true | 526 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{fat_192x96x1}
\alias{fat_192x96x1}
\title{Fat 192x96x1}
\format{
A list of tensors. Image resolution is 192x96.
\describe{
\item{train}{number of samples 141}
\item{test}{number of samples 10}
}
}
\usage{
fat_192x96x1
}
\description{
Fat 192x96x1
}
\keyword{datasets}
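% Hedged usage sketch (not part of the generated Rd): assumes the dataset is
% lazy-loaded with the package and is a list with $train and $test elements,
% as the format section states.
\examples{
\dontrun{
str(fat_192x96x1$train)   # 141 training images, 192x96 resolution
str(fat_192x96x1$test)    # 10 test images
}
}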
|
/man/fat_192x96x1.Rd
|
permissive
|
ventri2020/rraysplot
|
R
| false | true | 372 |
rd
|
## Graphics
plot(1:100, 1:100, type="n", xlab="", ylab="")
curve(x^2/100, add=TRUE)
text(x=80, y=50, "This is a graph of")
text(x=80, y=45, "the equation")
text(x=80, y=37, expression(y == frac(1,100) * x^2))
points(x=c(20, 60, 90), y=c(4, 36, 81), pch=6)
points(x=rep(100,10), y=seq(0,90,10), pch=seq(1,20,2))
#"a=the y-intercept" and "b=the slope" of the desired line.
abline(a=-18, b=1.1, col="red")
text(x=19, y=0, "A")
text(x=100, y=95, "B") ## add my labels
abline(h=20, lty=2) # abline(h=20, lty="dashed") also works
abline(v=20, lty=3) # abline(v=20, lty="dotted") also works
lines(x=c(40, 40, 60, 60), y=c(80, 100, 100, 80), type="b")
lines(x=c(40, 60), y=c(80, 80), type="l") # type="lower case L", not "one"
title(main="A Drawing To Put On the Refrigerator!")
title(xlab="This is the x-axis", col.lab="blue", cex.lab=1.5)
## stacked barplots
data(UCBAdmissions)
par(mfrow=c(1,2))
margin.table(UCBAdmissions, c(1,3)) -> Admit.by.Dept
barplot(Admit.by.Dept)
barplot(Admit.by.Dept, beside=T, ylim=c(0,1000), legend=T,
main="Admissions by Department")
# histograms
data(faithful) # This is optional.
attach(faithful)
hist(waiting)
hist(waiting, breaks=seq(40,100,10), right=F) # include xlim
hist(waiting, prob=T) # To plot the smoothed curve on top of a histogram, set the "prob=" option to TRUE
lines(density(waiting))
detach(faithful)
# Numerical Summaries by Groups
data(chickwts) # Weight gain by type of diet.
attach(chickwts)
plot(feed, weight) # boxplot(weight ~ feed) will also work
#boxplot(feed~weight)
title(main="Body Weight of Chicks by Type of Diet")
## adding means
means = tapply(weight, feed, mean)
points(x=1:6, y=means, pch=12) # pch=16 is a filled circle
detach(chickwts)
# Scatterplots
data(mammals, package="MASS")
attach(mammals)
plot(log(body), log(brain)) # plot(x=body, y=brain, log="xy") is similar (try it)
scatter.smooth(log(body), log(brain))
detach(mammals)
|
/code/r/Eg_Graphics.R
|
no_license
|
anilshankarphd/hciresearch
|
R
| false | false | 2,023 |
r
|
source("utility_functions_kdd.R")
nSim <- 3000
nfeat <- 3
parallel <- TRUE
saveCheckPoints <- seq(100, 3000, by = 100)
outFileName <- "sim_study_H0c_H1d.RData"
set.seed(12345)
mySeeds <- sample(seq(1e+4, 1e+5, by = 1), nSim, replace = FALSE)
out <- matrix(NA, nSim, 4)
colnames(out) <- c("AUC.0",
"AUC.u",
"confoundingPval",
"cov(C,Y)")
nseq <- seq(300, 500, by = 1)
for (i in seq(nSim)) {
cat(i, "\n")
## simulate data
set.seed(mySeeds[i])
p11 <- runif(1, 0.05, 0.45)
p10 <- p11
p00 <- 0.5 - p11
p01 <- 0.5 - p11
my.n <- sample(nseq, 1)
my.rho <- runif(1, 0.2, 0.8)
my.beta <- runif(1, 0.1, 1)
dat <- GenerateData(n = my.n,
nfeat,
p11 = p11,
p10 = p10,
p01 = p01,
p00 = p00,
alpha = c(my.beta, 0), ## beta (label effect), theta (conf effect)
rho = my.rho,
binVarNames = c("disease", "gender"))
dat$disease <- factor(dat$disease, labels = c("control", "case"))
dat$gender <- factor(dat$gender, labels = c("female", "male"))
idxTrain <- seq(1, round(my.n/2), by = 1)
idxTest <- setdiff(seq(my.n), idxTrain)
ntest <- length(idxTest)
obs <- GetAUC(dat,
idxTrain,
idxTest,
labelName = "disease",
featNames = colnames(dat)[-c(1, 2)],
negClassName = "control",
posClassName = "case")
AUC.0 <- obs$aucObs
aux1 <- RestrictedPermAUC(dat,
idxTrain,
idxTest,
nperm = ntest,
labelName = "disease",
confName = "gender",
featNames = colnames(dat)[-c(1, 2)],
negClassName = "control",
posClassName = "case",
verbose = FALSE,
parallel = parallel)
rnull <- aux1$restrictedPermNull
meanRestrictedPermNull <- mean(rnull)
varRestrictedPermNull <- var(rnull)
avarStandNull <- as.numeric(obs$approxVar["v"])
out[i, "AUC.0"] <- AUC.0
out[i, "AUC.u"] <- (AUC.0 - meanRestrictedPermNull) * sqrt(avarStandNull/varRestrictedPermNull) + 0.5
out[i, "confoundingPval"] <- pnorm(meanRestrictedPermNull, 0.5, sqrt(avarStandNull/ntest), lower.tail = FALSE)
out[i, "cov(C,Y)"] <- p11 * p00 - p10 * p01
if (i %in% saveCheckPoints) {
cat("saving ", i, " H0 confounding, H1 disease simulations", "\n")
save(out, file = outFileName, compress = TRUE)
}
}
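## A minimal sketch (not part of the original study code) of inspecting the
## saved results; it only assumes the 'out' matrix and the output file name
## defined above, and runs only once the checkpoint file exists.
if (file.exists(outFileName)) {
  load(outFileName)                      # restores 'out'
  done <- complete.cases(out)            # rows simulated so far
  print(summary(out[done, "AUC.0"] - out[done, "AUC.u"]))  # raw minus adjusted AUC
  hist(out[done, "confoundingPval"], breaks = 30,
       main = "Confounding p-values", xlab = "p-value")
}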
|
/run_simulations_H0c_H1d.R
|
no_license
|
echaibub/codeForKDD2019
|
R
| false | false | 2,725 |
r
|
source("utility_functions_kdd.R")
nSim <- 3000
nfeat <- 3
parallel <- TRUE
saveCheckPoints <- seq(100, 3000, by = 100)
outFileName <- "sim_study_H0c_H1d.RData"
set.seed(12345)
mySeeds <- sample(seq(1e+4, 1e+5, by = 1), nSim, replace = FALSE)
out <- matrix(NA, nSim, 4)
colnames(out) <- c("AUC.0",
"AUC.u",
"confoundingPval",
"cov(C,Y)")
nseq <- seq(300, 500, by = 1)
for (i in seq(nSim)) {
cat(i, "\n")
## simulate data
set.seed(mySeeds[i])
p11 <- runif(1, 0.05, 0.45)
p10 <- p11
p00 <- 0.5 - p11
p01 <- 0.5 - p11
my.n <- sample(nseq, 1)
my.rho <- runif(1, 0.2, 0.8)
my.beta <- runif(1, 0.1, 1)
dat <- GenerateData(n = my.n,
nfeat,
p11 = p11,
p10 = p10,
p01 = p01,
p00 = p00,
alpha = c(my.beta, 0), ## beta (label effect), theta (conf effect)
rho = my.rho,
binVarNames = c("disease", "gender"))
dat$disease <- factor(dat$disease, labels = c("control", "case"))
dat$gender <- factor(dat$gender, labels = c("female", "male"))
idxTrain <- seq(1, round(my.n/2), by = 1)
idxTest <- setdiff(seq(my.n), idxTrain)
ntest <- length(idxTest)
obs <- GetAUC(dat,
idxTrain,
idxTest,
labelName = "disease",
featNames = colnames(dat)[-c(1, 2)],
negClassName = "control",
posClassName = "case")
AUC.0 <- obs$aucObs
aux1 <- RestrictedPermAUC(dat,
idxTrain,
idxTest,
nperm = ntest,
labelName = "disease",
confName = "gender",
featNames = colnames(dat)[-c(1, 2)],
negClassName = "control",
posClassName = "case",
verbose = FALSE,
parallel = parallel)
rnull <- aux1$restrictedPermNull
meanRestrictedPermNull <- mean(rnull)
varRestrictedPermNull <- var(rnull)
avarStandNull <- as.numeric(obs$approxVar["v"])
out[i, "AUC.0"] <- AUC.0
out[i, "AUC.u"] <- (AUC.0 - meanRestrictedPermNull) * sqrt(avarStandNull/varRestrictedPermNull) + 0.5
out[i, "confoundingPval"] <- pnorm(meanRestrictedPermNull, 0.5, sqrt(avarStandNull/ntest), lower.tail = FALSE)
out[i, "cov(C,Y)"] <- p11 * p00 - p10 * p01
if (i %in% saveCheckPoints) {
cat("saving ", i, " H0 confounding, H1 disease simulations", "\n")
save(out, file = outFileName, compress = TRUE)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bluer.R
\docType{package}
\name{bluer}
\alias{bluer}
\alias{bluer-package}
\title{Functions to work with Instron Bluehill 2.0 RawData files}
\description{
Bluehill works with samples (created when you start a test) containing
specimens. The user chooses a sample name and a folder in which to save the
sample & results (the study folder).
}
\details{
within the study folder are:
\preformatted{
<sample_name>.is_<test_type> an xml file describing the sample/test
<sample_name>.im_<test_type> an xml file describing the test method
<sample_name>.id_<test_type> a binary file (magic number TDAT)
containing the recorded data & results
if the data or results have been exported they appear as:
<sample_name>.is_<test_type>_RawData a folder containing exported raw data
as Specimen_RawData_<sample_no>.csv
<sample_name>.is_<test_type>_Results.csv any calculated results
}
the <test_type>s available on our system are:
\tabular{rrl}{
\tab tens \tab tension\cr
\tab comp \tab compression\cr
\tab \tab \cr
\tab tcyclic \tab tension profile\cr
\tab ccyclic \tab compression profile\cr
\tab \tab \cr
\tab trelax \tab tension creep/relaxation\cr
\tab crelax \tab compression creep/relaxation\cr
\tab \tab \cr
\tab flex \tab bending\cr
}
Bluehill 2.0 exported data are structured in folders by sample & specimen.
The sample folder name is chosen by the user at the start of a test.
Each sample uses a single test method (which may be changed during the test,
complicating matters)
\preformatted{
project
study
Sample_1
Specimen_1
...
Specimen_n
...
sample_n
Specimen_1
...
Specimen_n
}
For each sample, all specimens should have the same columns. Specimen files
can optionally contain a header, which may contain a Specimen Label.
Each Specimen represents one continuous run of the testing machine (what
could normally be thought of as a 'test'). If there are results files, they
are in the study folder and named sample_n_Results.csv
At RPH we have used two conventions: each Specimen in a separate test folder
(so only a single Specimen_Rawdata_1.csv file per folder) or multiple
Specimens in a test folder, which will have several Specimen_Rawdata_n.csv
files in the same folder.
}
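\examples{
\dontrun{
# A hypothetical sketch (not generated from the package source) of gathering
# every exported raw-data file under a study folder, following the naming
# convention described above; "my_study" is a made-up path.
raw_files <- list.files("my_study", pattern = "^Specimen_RawData_",
                        recursive = TRUE, full.names = TRUE)
raw_data <- lapply(raw_files, read.csv)
}
}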
|
/man/bluer.Rd
|
no_license
|
yadbor/bluer
|
R
| false | true | 2,401 |
rd
|
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
##set the value of the matrix
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x ##get the value of the matrix
setsolve <- function(solve) m <<- solve ##set the value of its inverse
getsolve <- function() m ##get the value of its inverse
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
##If the inverse has already been calculated (and the matrix has not changed),
##then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getsolve()
##Deal with the situation when the inverse has already been calculated
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m ## Return a matrix that is the inverse of 'x'
}
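## A small usage sketch (not part of the assignment code above; the matrix
## values are arbitrary) showing the cache being hit on the second call.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2))
cacheSolve(m)  # computes the inverse and stores it
cacheSolve(m)  # prints "getting cached data" and returns the cached inverse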
|
/cachematrix.R
|
no_license
|
coco11111/ProgrammingAssignment2
|
R
| false | false | 1,082 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/population.R
\name{get_elderly_population}
\alias{get_elderly_population}
\title{Get elderly population data (5 year age-breakdown for 80-84, 85-89 and 90+)}
\usage{
get_elderly_population(country = NULL, iso3c = NULL, simple_SEIR = FALSE)
}
\arguments{
\item{country}{Country name}
\item{iso3c}{ISO 3C Country Code}
\item{simple_SEIR}{Logical. Is the population for the \code{simple_SEIR}.
Default = FALSE}
}
\value{
Population data.frame
}
\description{
Get elderly population data (5 year age-breakdown for 80-84, 85-89 and 90+)
}
|
/man/get_elderly_population.Rd
|
permissive
|
mrc-ide/squire
|
R
| false | true | 614 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formulas.R
\name{ml_prepare_response_features_intercept}
\alias{ml_prepare_features}
\alias{ml_prepare_inputs}
\alias{ml_prepare_response_features_intercept}
\title{Pre-process the Inputs to a Spark ML Routine}
\usage{
ml_prepare_response_features_intercept(x = NULL, response, features,
intercept, envir = parent.frame(),
categorical.transformations = new.env(parent = emptyenv()),
ml.options = ml_options())
ml_prepare_features(x, features, envir = parent.frame(),
ml.options = ml_options())
}
\arguments{
\item{x}{An object coercable to a Spark DataFrame (typically, a
\code{tbl_spark}).}
\item{response}{The name of the response vector (as a length-one character
vector), or a formula, giving a symbolic description of the model to be
fitted. When \code{response} is a formula, it is used in preference to other
parameters to set the \code{response}, \code{features}, and \code{intercept}
parameters (if available). Currently, only simple linear combinations of
existing parameters are supported; e.g. \code{response ~ feature1 + feature2 + ...}.
The intercept term can be omitted by using \code{- 1} in the model fit.}
\item{features}{The name of features (terms) to use for the model fit.}
\item{intercept}{Boolean; should the model be fit with an intercept term?}
\item{envir}{The \R environment in which the \code{response}, \code{features}
and \code{intercept} bindings should be mutated. (Typically, the parent frame).}
\item{categorical.transformations}{An \R environment used to record what
categorical variables were binarized in this procedure. Categorical
variables that are included in the model formula will be transformed into
binary variables, and the generated mappings will be stored in this
environment.}
\item{ml.options}{Optional arguments, used to affect the model generated. See
\code{\link{ml_options}} for more details.}
}
\description{
Pre-process / normalize the inputs typically passed to a
Spark ML routine.
}
\details{
Pre-processing of these inputs typically involves:
\enumerate{
\item Handling the case where \code{response} is itself a formula
describing the model to be fit, thereby extracting the names
of the \code{response} and \code{features} to be used,
\item Splitting categorical features into dummy variables (so they
can easily be accommodated + specified in the underlying
Spark ML model fit),
\item Mutating the associated variables \emph{in the specified environment}.
}
Please take heed of the last point, as while this is useful in practice,
the behavior will be very surprising if you are not expecting it.
}
\examples{
\dontrun{
# note that ml_prepare_features, by default, mutates the 'features'
# binding in the same environment in which the function was called
local({
ml_prepare_features(features = ~ x1 + x2 + x3)
print(features) # c("x1", "x2", "x3")
})
}
}
|
/man/ml_prepare_inputs.Rd
|
permissive
|
mahesh2013/sparklyr
|
R
| false | true | 2,938 |
rd
|
#' @param areacols Optional vector of colors for each area if model has
#' multiple areas. A NULL value will be replaced by a default set of colors.
|
/man-roxygen/areacols.R
|
no_license
|
r4ss/r4ss
|
R
| false | false | 147 |
r
|
exam<-read.csv('C:/Users/student/Desktop/공부/멀캠TIL/dataset/R/r데이터분석_Data/Data/csv_exam.csv')
exam
library(dplyr)
exam %>% summarise(mean_math=mean(math))
exam %>% summarise(sum_math=sum(math)) # this is a sum, not a mean, so name it accordingly
# mean math score by class
exam %>%
group_by(class) %>%
summarise(mean_math=mean(math))
exam %>%
group_by(class) %>%
summarise(mm=mean(math),
sm=sum(math),
md=median(math),
cnt=n())
library(ggplot2)
mpg %>%
group_by(manufacturer) %>%
summarise(mc=mean(cty)) %>%
head()
mpg %>%
group_by(manufacturer,drv) %>%
summarise(mc=mean(cty))
mpg %>%
group_by(manufacturer) %>%
filter(class=='suv') %>%
mutate(tot=(cty+hwy)/2) %>%
summarise(mt=mean(tot)) %>%
arrange(desc(mt)) %>%
head(5)
test1<-data.frame(id=c(1,2,3,4,5),
midterm=c(60,80,70,90,55))
test2<-data.frame(id=c(1,2,3,4,5),
final=c(70,80,40,80,75))
# join
total<-left_join(test1,test2,by='id')
total
name<-data.frame(class=c(1,2,3,4,5),
teacher=c('kim','lee','park','choi','cho'))
left_join(exam,name,by='class')
## bind
test1<-data.frame(id=c(1,2,3,4,5),
midterm=c(60,80,70,90,55))
test2<-data.frame(id=c(6,7,8,9,10),
midterm=c(70,80,40,80,75))
bind_rows(test1,test2)
exam %>% filter(class==1 & math>=50)
exam %>% filter(class %in% c(1,3,5))
exam %>% select(id,math)
exam$test<-ifelse(exam$english>=60,'pass','fail')
exam %>% mutate(test=ifelse(english>=60,'pass','fail')) %>%
arrange(test)
test1<-data.frame(id=c(1,2,3,4,5),
midterm=c(60,80,70,90,55))
test2<-data.frame(id=c(1,2,3,4,5),
final=c(70,80,40,80,75))
left_join(test1,test2,by='id')
## handling missing values (NA)
df<-data.frame(sex=c('M','F',NA,'M','F'),score=c(5,4,3,5,NA))
is.na(df)
table(is.na(df))
table(is.na(df$score))
mean(df$score)
sum(df$score)
df %>% filter(is.na(score))
df_nomiss<-df %>% filter(!is.na(score))
mean(df_nomiss$score)
df_nomiss<-df %>% filter(!is.na(score)&!is.na(sex))
df_nomiss
df
df_nomiss2<-na.omit(df)
mean(df$score,na.rm = T)
sum(df$score,na.rm = T)
exam<-read.csv('C:/Users/student/Desktop/공부/멀캠TIL/dataset/R/r데이터분석_Data/Data/csv_exam.csv')
exam[c(3,5,15),'math']<-NA
exam %>%
summarise(mm=mean(math,na.rm = T),
sm=sum(math,na.rm = T),
med=median(math,na.rm = T))
exam$math<-ifelse(is.na(exam$math),55,exam$math)
exam$math
mean(exam$math)
df<-data.frame(sex=c(1,2,1,3,2,1),score=c(5,4,3,4,2,6))
table(df$sex)
table(df$score)
df$sex<-ifelse(df$sex==3,NA,df$sex)
df$score<-ifelse(df$score>5,NA,df$score)
df %>%
filter(!is.na(sex)&!is.na(score)) %>%
group_by((sex)) %>%
summarise(ms=mean(score))
boxplot(mpg$hwy)
boxplot(mpg$hwy)$stats
mpg$hwy<-ifelse(mpg$hwy<12 | mpg$hwy>37,NA,mpg$hwy)
table(is.na(mpg$hwy))
mpg %>% group_by(drv) %>% summarise(mean_hwy=mean(hwy,na.rm=T))
table(is.na(df$score))
## visualization
# set up the plot background (no layers yet)
ggplot(data=mpg,aes(x=displ,y=hwy))
# scatter plot
ggplot(data=mpg,aes(x=displ,y=hwy))+
geom_point()+
xlim(3,6)+
ylim(10,30)
df_mpg<-mpg %>%
group_by(drv) %>%
summarise(mean_hwy=mean(hwy))
ggplot(data=df_mpg,aes(x=drv,y=mean_hwy))+geom_col()
economics
ggplot(data=economics,aes(x=date,y=unemploy))+geom_line()
ggplot(data=economics,aes(x=date,y=unemploy))+geom_point()
# read SPSS files
install.packages('foreign')
library(foreign)
library(dplyr) # data preprocessing
library(ggplot2) # visualization
library(readxl) # read Excel files
raw_welfare<-read.spss('C:/Users/student/Desktop/공부/멀캠TIL/dataset/R/r데이터분석_Data/Data/Koweps_hpc10_2015_beta1.sav',to.data.frame=T)
welfare<-raw_welfare
str(welfare)
View(welfare)
dim(welfare)
str(welfare)
summary(welfare)
welfare<-rename(welfare,
sex=h10_g3,birth=h10_g4,marriage=h10_g10,
religion=h10_g11,code_job=h10_eco9,
income=p1002_8aq1,region=h10_reg7)
welfare
class(welfare$sex)
table(welfare$sex)
welfare$sex=ifelse(welfare$sex==9,NA,welfare$sex)
table(is.na(welfare$sex))
welfare$sex<-ifelse(welfare$sex==1,'male','female')
table(welfare$sex)
qplot(welfare$sex)
class(welfare$income)
summary(welfare$income)
qplot(welfare$income)+xlim(0,1000)
welfare$income<-ifelse(welfare$income %in% c(0,9999),NA,welfare$income)
table(is.na(welfare$income))
sex_income<-welfare %>% filter(!is.na(income)) %>%
group_by(sex) %>%
summarise(mi=mean(income))
ggplot(data=sex_income,aes(x=sex,y=mi))+geom_col()
summary(welfare$birth)
table(is.na(welfare$birth))
welfare$birth<-ifelse(welfare$birth==9999,NA,welfare$birth)
table(is.na(welfare$birth))
welfare$age<-2015-welfare$birth+1
summary(welfare$age)
qplot(welfare$age)
welfare$birth<-ifelse(welfare$birth==9999,NA,welfare$birth)
table(is.na(welfare$birth))
age_income<-welfare %>%
filter(!is.na(income)) %>%
group_by(age) %>%
summarise(mi=mean(income))
ggplot(age_income,
aes(x=age,y=mi))+geom_line()
welfare<-welfare %>%
mutate(ageg=ifelse(age<30,'young',ifelse(age<=59,'middle','old')))
welfare$ageg
table(welfare$ageg)
qplot(welfare$ageg)
ageg_income<-welfare %>% filter(!is.na(income)) %>%
group_by(ageg) %>%
summarise(mi=mean(income))
ggplot(ageg_income,aes(x=ageg,y=mi))+
geom_col()+
scale_x_discrete(limits=c('young','middle','old'))
# income difference by sex and age group
sex_income<-welfare %>%
filter(!is.na(income)) %>%
group_by(ageg,sex) %>%
summarise(mi=mean(income))
ggplot(sex_income,aes(x=ageg,y=mi,fill=sex))+
geom_col()+
scale_x_discrete(limits=c('young','middle','old'))
ggplot(sex_income,aes(x=ageg,y=mi,fill=sex))+
geom_col(position = 'dodge')+
scale_x_discrete(limits=c('young','middle','old'))
# income by sex and age
sex_age<-welfare %>%
filter(!is.na(income)) %>%
group_by(age,sex) %>%
summarise(mi=mean(income))
ggplot(sex_age,aes(x=age,y=mi,col=sex))+geom_line()
# income difference by job
library(readxl)
list_job<-read_excel('C:/Users/student/Desktop/공부/멀캠TIL/dataset/R/r데이터분석_Data/Data/Koweps_Codebook.xlsx',sheet=2,col_names=T)
welfare<-left_join(welfare,list_job,by='code_job') # 'by' (not 'id') names the join key
welfare$job
welfare$code_job
welfare %>%
filter(!is.na(code_job)) %>%
select(code_job,job) %>% head(10)
job_income<-welfare %>%
filter(!is.na(job)&!is.na(income)) %>%
group_by(job) %>%
summarise(mi=mean(income))
top10<-job_income %>% arrange(desc(mi)) %>% head(10)
ggplot(top10,aes(x=reorder(job,mi),y=mi))+
geom_col()+coord_flip()
top10<-job_income %>% arrange(desc(mi)) %>% head(10)
ggplot(top10,aes(x=reorder(job,-mi),y=mi))+
geom_col()+coord_flip()
# which jobs are most common for each sex
# (sex was already recoded to 'male'/'female' above; re-running the ifelse here
#  would overwrite every value with 'female', so the recode is not repeated)
sex_job<-welfare %>%
filter(!is.na(job)&!is.na(code_job)) %>%
group_by(sex) %>%
count(job) %>% arrange(desc(n))
femalejob<-sex_job %>%
filter(sex=='female')%>% head(10)
femalejob
malejob<-sex_job %>%
filter(sex=='male') %>% head(10)
malejob
ggplot(femalejob,aes(x=reorder(job,n),y=n))+geom_col()+coord_flip()
ggplot(malejob,aes(x=reorder(job,n),y=n))+geom_col()+coord_flip()
|
/bigdata_AI/R_TIL/0204 R분석연습.R
|
no_license
|
eunsung-git/TIL
|
R
| false | false | 7,150 |
r
|
library(dplyr)
library(Matrix)
library(reshape2)
library(tibble)
library(parallel)
options('mc.cores' = 16) # Use 16 cores
# Un-normalize ---------------
# Load the normalized count matrix and associated meta...
counts.norm <- readMM('data/raw/counts.singlecell.normalized.mtx') %>% as('dgCMatrix')
metadata <- read.csv('data/raw/meta.singlecell.csv')
# Back-transform the normalized count matrix to a proper count matrix.
# Multiplication occurs column-wise and we want row-wise (across cells), so doubly transpose.
# At least on my machine, this doesn't fit in memory, so divide into chunks
counts.unlogged <- do.call(cbind, mclapply(cumsum(c(rep(6534, 15), 6549)), function(chunk) {
2^counts.norm[, (chunk - ifelse(chunk == 104559, 6548, 6533)):chunk] - 1
})) %>% as.matrix %>% Matrix(sparse = T) # round-trip through dense to work around a sparse-coercion issue
counts.raw <- t(t(counts.unlogged) * metadata$UMIs) / 10000
rm(counts.unlogged)
# Bulkize ---------------
# For each sample, spit back row means and then merge the columns together.
counts.bulkized.raw <- do.call(cbind, mclapply(unique(metadata$sample), function(sample) {
counts.raw[, which(metadata$sample == sample)] %>% Matrix::rowSums(na.rm = T)
})) %>% Matrix(sparse = T)
counts.bulkized.norm <- do.call(cbind, mclapply(unique(metadata$sample), function(sample) {
counts.norm[, which(metadata$sample == sample)] %>% Matrix::rowSums(na.rm = T)
})) %>% Matrix(sparse = T)
rm(counts.raw, counts.norm)
# Save our files.
writeMM(counts.bulkized.raw, 'data/processed/counts.bulkized.raw.mtx')
writeMM(counts.bulkized.norm, 'data/processed/counts.bulkized.norm.mtx')
# Make Long ---------------
counts.bulk <- readMM('data/raw/counts.bulk.mtx')
counts.bulk <- counts.bulk %>% as.matrix %>% as.data.frame
counts.bulkized.raw <- counts.bulkized.raw %>% as.matrix %>% as.data.frame
counts.bulkized.norm <- counts.bulkized.norm %>% as.matrix %>% as.data.frame
# Sample ordering is preserved, but they each use different naming schemes. Normalize them...
colnames(counts.bulkized.raw) <- unique(metadata$sample)
rownames(counts.bulkized.raw) <- read.csv('data/processed/genes.bulkized.csv', header = F)$V1
colnames(counts.bulkized.norm) <- colnames(counts.bulkized.raw)
rownames(counts.bulkized.norm) <- rownames(counts.bulkized.raw)
colnames(counts.bulk) <- colnames(counts.bulkized.raw)
rownames(counts.bulk) <- read.csv('data/raw/genes.bulk.csv', header = F)$V1
# Melt the data into long format...
counts.bulk <- counts.bulk %>% rownames_to_column('gene') %>% melt(value.name = 'bulk', id.vars = 'gene', variable.name = 'sample')
counts.bulkized.raw <- counts.bulkized.raw %>% rownames_to_column('gene') %>% melt(value.name = 'bulkized.raw', id.vars = 'gene', variable.name = 'sample')
counts.bulkized.norm <- counts.bulkized.norm %>% rownames_to_column('gene') %>% melt(value.name = 'bulkized.norm', id.vars = 'gene', variable.name = 'sample')
# Finally, merge the data together and save.
merge(counts.bulk, counts.bulkized.raw, by = c('gene', 'sample')) %>%
merge(counts.bulkized.norm, by = c('gene', 'sample')) %>% saveRDS('data/processed/counts.merged.long.rds')
rm(counts.bulk, counts.bulkized.raw, counts.bulkized.norm, metadata)
# Helper Functions ---------------
#' loadCounts
#' Load the count matrices into the global environment.
#'
#' @param asLog Logical; whether or not to return counts as log counts. Default is F.
#' @param filterGenes Logical; whether or not to only include genes in common between samples. Default is T.
#' @param bulkized Character; the filename of the bulkized counts to read. Default is 'counts.bulkized.norm.mtx'.
#'
#' @return NULL, but adds expr.bulk and expr.bulkized to the global environment.
loadCounts <- function(asLog = F, filterGenes = T, bulkized = 'counts.bulkized.norm.mtx') {
expr.bulkized <<- readMM(paste0('data/processed/', bulkized)) %>% as('dgCMatrix')
expr.bulk <<- readMM('data/raw/counts.bulk.mtx') %>% as('dgCMatrix')
meta <- read.csv('data/raw/meta.singlecell.csv')
colnames(expr.bulkized) <<- unique(meta$sample)
colnames(expr.bulk) <<- unique(meta$sample)
rownames(expr.bulkized) <<- as.character(read.csv('data/processed/genes.bulkized.csv', header = F)$V1)
rownames(expr.bulk) <<- as.character(read.csv('data/raw/genes.bulk.csv', header = F)$V1)
if(filterGenes) {
genes <- intersect(rownames(expr.bulkized), rownames(expr.bulk))
expr.bulkized <<- expr.bulkized[genes, ]
expr.bulk <<- expr.bulk[genes, ]
}
if(asLog) {
expr.bulkized <<- log2(1 + expr.bulkized)
expr.bulk <<- log2(1 + expr.bulk)
}
}
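## A minimal usage sketch, assuming the processed .mtx files written above
## already exist on disk. Note that loadCounts() assigns expr.bulk and
## expr.bulkized into the global environment rather than returning them.
if (file.exists('data/processed/counts.bulkized.norm.mtx')) {
  loadCounts(asLog = TRUE)
  print(dim(expr.bulk))
  print(cor(expr.bulk[, 1], expr.bulkized[, 1])) # bulk vs bulkized, first sample
}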
|
/Scripts/load_data.R
|
no_license
|
SaelinB/STAT540_Project
|
R
| false | false | 4,582 |
r
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.2281053610894e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613099065-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 342 |
r
|
set.seed(1234)
# read data
p.clim <- read.csv("./Clean.Data/clim-hourly.csv") # past climate
p.clim$Date <- as.POSIXct(strptime(p.clim$Date, "%Y-%m-%d %H:%M:%S","GMT"))
f.tide <- read.csv("./Clean.Data/future-tides.csv") # future tide
f.tide$Date <- as.POSIXct(strptime(f.tide$Date, "%Y-%m-%d %H:%M:%S", "GMT"))
# put the variable of study on a logarithmic scale
log.hs <- log(p.clim$hs)
# load code for Harmonic Analysis
source("R.functions/Harmonic.R")
# split signal in log.hs into periodic and residual components
split.lg.hs <- SubstractPeriodSignal(log.hs, c(1/(365.25*24),1/(8)), c(15,5))
ts.orig <- log.hs #original time series
ts.peri <- split.lg.hs$periodic
ts.stat <- split.lg.hs$residual
p.clim$lg.hs.p <- split.lg.hs$periodic
p.clim$lg.hs.r <- split.lg.hs$residual
##############################################################
# ------ study of the point process of excedances -----------#
##############################################################
# ---- compute the threshold of the stationary -----------------
# ------ part to compute all storm candidates ------------------
thres.orig <- log(2.5) #original threshold
thres.stat <- thres.orig - max(ts.peri) #candidates threshold
source("R.functions/ExtractPOTpp.R")
pots <- ExtractPOTPointProcess(ts.stat,thres.stat)
storms <- p.clim[ p.clim$lg.hs.r > thres.stat,]
str(pots)
true.exc <-storms$lg.hs.p+storms$lg.hs.r > thres.orig
plot(storms$lg.hs.p, storms$lg.hs.r, col=ifelse(true.exc,"red","black"))
lines(seq(-1.3,0.1,0.01),thres.orig -seq(-1.3,0.1,0.01))
#############################################################################
# -- computation of the empirical distribution function with smoothing ---- #
# ------------------ for the storms inter arrival times ------------------- #
# #
############################################################################
sample <- pots$nex.exc
dens.lg <- density(log(sample))
plot(dens.lg)
xfit <- exp(dens.lg$x)
yfit <- dens.lg$y*exp(-dens.lg$x)
#load auxiliary function CDF (cumulative distribution function)
source("R.functions/funcs_HA.R")
#function to sample from the smoothed empirical distribution (inverse-CDF sampling)
r.Approx.Empirical <- function(nsample=1, xfit,yfit){
approx(CDF(xfit,yfit),xfit,runif(nsample))[[2]]
}
resample <- r.Approx.Empirical(length(sample), xfit,yfit)
new.nex.exc <- as.integer(resample-min(resample)+1)
op <- par(mfrow=c(2,2))
h <- hist(sample,probability=T,breaks=100)
#lines(xfit,yfit,xlim=c(0,max(sample)),col="red")
plot(h$mids,h$density,log='xy')
lines(xfit,yfit, col="red",type='l')
h <- hist(new.nex.exc,breaks=100,prob=T)
plot(h$mids, h$density, log='xy')
lines(xfit,yfit, col="red",type='l')
par(op)
####################################################
# Part 3: Generate a sample #
####################################################
FindStorm <- function(nex.exc, pots){
aux <- which(pots$nex.exc == nex.exc)
if (length(aux)==0){
i.aux <-which.min(abs(pots$nex.exc - nex.exc))
aux <- which(pots$nex.exc== pots$nex.exc[i.aux])
index <- aux[sample(1:length(aux),1)]
} else {
index <- aux[sample(1:length(aux),1)]
}
index
}
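## A small usage sketch (hypothetical gap value): FindStorm() returns the index
## of a stored storm whose time-to-next-exceedance is closest to the requested
## value, breaking ties at random. It is kept for reference even though the
## generation loop below matches storms on lg.hs.p instead.
# FindStorm(24, pots)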
interv <- range(p.clim$lg.hs.p)
wid <- abs(interv[1]-interv[2])/100
# storms generation
n.str <- as.POSIXct("20150101 00:00",format ="%Y%m%d %H:%M") #next strom time
i <- 1
future <- as.POSIXct("20650101 00:00",format ="%Y%m%d %H:%M")
s.t <- vector() # storm start time
s.ind <- vector() # storm index to be cloned
gap <- vector() # time to the next storm
while(n.str < future){
s.t[i] <- as.POSIXct(n.str, origin="1970-01-01 00:00.00 UTC") #storm date
gap[i] <- max(as.integer(r.Approx.Empirical(1, xfit,yfit)),1)
#time to next storm
# select a storm with similar value of lg.hs.p for the corresponding
aux.date <- paste("2004-",format(n.str, "%m-%d %H:%M"), sep="")
date.hs.p <- as.POSIXct(strptime(aux.date, "%Y-%m-%d %H:%M", "GMT"))
lg.hs.p <- p.clim[which(p.clim$Date == date.hs.p),"lg.hs.p"] #value of lg.hs.p at the start of the storm
s.ind[i] <- sample(which(abs(p.clim$lg.hs.p[pots$p.exc] - lg.hs.p)<wid),1)
# yf <- yearfraction(date=n.str) #year fraction
# n.gap.aux<- r.pp.cond(lambda3,thet.lam3,yf) #gap in year units
# s.ind[i] <- FindStorm(gap[i],pots) #storm index
n.dur <- pots$c.siz[s.ind[i]] #storm duration hours
n.str <- as.POSIXct(s.t[i] + 3600*(gap[i] + n.dur),
origin="1970-01-01 00:00.00 UTC") #time to next storm
i <- i+1
}
new.storms <- data.frame(s.times= as.POSIXct(s.t,
origin="1970-01-01 00:00.00 UTC"),
s.index= s.ind, s.dur= pots$c.siz[s.ind],
s.gap=gap)
length(pots[[1]])/11
length(new.storms[[1]])/50
storm.climate <- data.frame(Date = as.POSIXct(vector(mode="numeric"),
origin= '1970-01-01 00:00.00 UTC'),
hs = vector(mode="numeric"), fp = vector(mode="numeric"),
tm = vector(mode="numeric"), dir = vector(mode="numeric"),
U10 = vector(mode="numeric"), V10 = vector(mode="numeric"),
a.tide=vector(mode="numeric"), res= vector(mode="numeric"))
i.aux<-1
for(i in 1:length(new.storms$s.times)){
print(paste(i," out of ",length(new.storms$s.times),sep=""))
for(j in 1:pots$c.siz[new.storms$s.ind[i]]){
i.past <- pots$p.exc[new.storms$s.ind[i]] + j-1
date.fut <- as.POSIXct(new.storms$s.times[i] + 3600*(j-1),
origin="1970-01-01 00:00.00 UTC")
# index.fut.tide <- which(f.tide$Date == date.fut
tide.fut <- f.tide[which(f.tide$Date == date.fut),]$Level
aux.date <- paste("2004-",format(date.fut, "%m-%d %H:%M"), sep="")
date.hs.p <- as.POSIXct(strptime(aux.date, "%Y-%m-%d %H:%M", "GMT"))
lg.hs.p <- p.clim[which(p.clim$Date == date.hs.p),"lg.hs.p"]
lg.hs.r <- p.clim[i.past,"lg.hs.r"]
storm.climate[i.aux,"hs"] = exp(lg.hs.p + lg.hs.r)
storm.climate[i.aux,"Date"] = date.fut
storm.climate[i.aux, c("fp", "tm", "dir", "U10", "V10", "res")] =
p.clim[i.past, c("fp", "tm", "dir", "U10", "V10", "res")]
storm.climate[i.aux, "a.tide"] = tide.fut
i.aux <- i.aux + 1
}
}
true.storms <- storm.climate[storm.climate$hs>2.5,]
write.csv(true.storms, "New.Data/storms-Harm-2.csv",row.names=F)
|
/R.old.code/Harm-Analisis-2.R
|
no_license
|
paurabassa/storm-generation
|
R
| false | false | 6,461 |
r
|
#################################################################################################
# Basic R and Intro to Data Manipulation and Visualization #
# SAPPK - Institut Teknologi Bandung #
# #
# Script names : Basic_R #
# Purpose : This script contains line commands that performs basic tasks, and #
# introduction to data manipulation and visualization for Urban Analytics #
# Programmer : Adenantera Dwicaksono #
# First Created : 10/29/2019 12:08 PM #
# Last updated : 10/29/2019 #
#################################################################################################
#################################################################################################
# Note: This comment provides information about any requirements that need to be met before
# running the script. It tells other users important requirements to execute the script
# without errors.
#
# Requirements:
# - R and R studio are properly install in the desktop
#
#################################################################################################
#################################################################################################
# Note: This comments describes any processes that will be performed by the script
#
# This script performs the following steps:
# Step 1: Install packages, load required library packages, and set working directories
#
#################################################################################################
##################
# 1: Install packages, load required library packages, and set working directories
##################
#The following packages are essential for the processes below and have already been
#installed on my machine, so there is no need to reinstall them. If the script is run
#on another machine, these packages must be installed first.
#install.packages("tidyverse")
#install.packages("dplyr")
#install.packages("microbenchmark")
# open libraries of spatial utilities
library(tidyverse)
library(microbenchmark)
# set working directory
wd <- 'D:/Gdrive/ITB/Workshop/Labscan/01_Basic R' # a string object containing
# the location of the main working directory
setwd(wd) # This set the working directory
#set data folder
input.dir <- paste0(wd,"/",'Data')
#specify output folder
output.dir <- paste0(wd,"/",'Output')
# check folder contents
dir()
##############################################
# 2. Demo of R as an over-powered calculator
##############################################
## R is an over-powered calculator - you can use it to calculate numbers
# Example: Type the following into the console or run them from the script.
1 + 1
sqrt(9) ## sqrt() is a function.
sqrt(1+5+6-3) ## We can pass values to functions and get results.
# EXERCISE: # What is the value of (1+3)*(1+4+6) ? Write the "equation" here and run it.
## ASSIGNMENT-OPERATORS
# The equals sign can be used to assign value to a variable.
a = 1
a
# But the preferred assignment operator is "<-".
# RStudio Shortcut: [Alt] [-] will insert the R assignment operator.
a <- 1
a
## EXERCISE - Create an object "sank" and give it a value of "1912".
##############################################
# 3. Data Types in R: ATOMIC DATA TYPE
##############################################
# Atomic data contains only a single value or object
# Numeric
pi <- 3.14159
#We can confirm this using typeof() function
typeof(pi)
# Integers - Integers are funny. They have to be created consciously.
starboard <- 10
typeof(starboard)
# We can (double) confirm this:
is.integer(starboard)
# we can assign a number to an integer using as.integer()
port <- as.integer(starboard)
port
typeof(port)
# What would happen if a float data is converted integer?
port <- as.integer(3.14159)
port
# LOGICAL variables are often created in the process of comparing things.
starboard <- 10; port <- 15;
# Question: Is this TRUE or FALSE?
starboard == port
# Character
# You can use " or ' to create a character
# Some character vars:
first_name <- "Captain"
last_name <- "Smith"
# Let's put them together using the paste() function
paste(first_name, last_name, sep=" ")
##############################################
# 3. Data Types in R: VECTORS
##############################################
# - A vector is a sequence of data elements all of the same type
# - Vectors are 1-dimensional *(length)*.
# - Vectors can contain only one data type *(integer, character, date)*.
# - Vectors containing multiple data types are characters.
# Example:
yard_arm <- c(1,2,3,4,5)
yard_arm
## VECTORS-LENGTH
# The variable yard_arm has five distinct values in it.
length(yard_arm)
# Vector Indexing using "[]"
## Returns the THIRD value stored in yard_arm.
yard_arm[3]
## Returns ALL values in yard_arm with a value greater than 2.
yard_arm[ yard_arm>2 ] ## Easy way to filter data.
## Arithmetic Operations on Vectors
starboard <- c(25,30,40,45,50)
port <- c(90,70,50,30,10)
starboard + port
starboard <- c(25,30,40,45,50)
port <- 3
starboard * port
## We can do math on vectors.
starboard <- c(25,30,40,45,50)
mean(starboard)
sd(starboard)
## Other Functions for Vectors:
# - mean() finds the arithmetic mean of a vector.
# - median() finds the median of a vector.
# - sd() and var() finds the standard deviation and variance of a vector respectively.
# - min() and max() finds the minimum and maximum of a vector respectively.
# - sort() returns a vector that is sorted.
# - summary() returns a 5 number summary of the numbers in a vector.
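## A quick demo of a few of the functions listed above; the numbers are
## arbitrary example values.
crew <- c(12, 7, 30, 18, 25)
median(crew)   # 18
sort(crew)     # 7 12 18 25 30
summary(crew)  # five-number summary plus the mean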
##############################################
# 3. Data Types in R: ARRAYS
##############################################
# - Arrays are still vectors in R, but with extra dimension attributes added to them.
# - A regular vector has a single dimension
# - A matrix has 2 dimensions
# - An array can have up to n dimensions.
# Let's start with a simple vector
x <- c(1,2,3,4) # It is a vector with 4 elements
# We can turn this vector into an array by specifying some dimensions on it
x.array <- array(x, dim=c(2,2))
x.array
# Let's check if it is a vector or an array
is.vector(x.array)
is.array(x.array)
# We can also have R tell us the type of elements in the array
typeof(x.array)
#Check the structure of the array
str(x.array)
#Find out other attributes of an array
attributes(x.array)
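# Because an array can have more than two dimensions, here is a small illustrative
# 3-dimensional example (2 rows x 3 columns x 2 layers); the values are arbitrary:
y <- array(1:12, dim = c(2, 3, 2))
y
dim(y)
y[1, 2, 2] # row 1, column 2, layer 2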
###################################
# 4. Data Types in R: Matrix and Data Frames
###################################
# Two main object types that can be used to store tabular data in R include the data frame and matrix.
# Data frames can contain columns of multiple types (e.g. character, numeric, etc.), while
# a matrix can contain only a single type.
# You can create these within R manually or by reading in other common formats such as spreadsheets
# or csv files.
# A data frame can be created using the data.frame() function.
# Create two vectors
a <- rep(2010:2017, each = 4) # this uses the rep() function to repeat values
b <- round(runif(32, 0, 40)) # runif can be used to generate random numbers - in this case between 0 and 40
# Create data frame
c <- data.frame(a,b)
# Check the content of the data frame:
head(c)
tail(c)
#Create a sequence of numbers
a <- 1:25 #The colon signifies a range
head(a)
#Create a matrix with 5 rows and 5 columns
b <- matrix(a,nrow=5, ncol=5)
b
# It is possible to perform algebra operations on a matrix
b*10
b*b
# When a matrix prints, the columns and rows show their index as a set of numbers within square brackets.
# These can be used to extract values from the matrix.
# These are formatted as [row number, column number]. For example:
#Extract first row
b[1,]
#Extract fourth column
b[,4]
#Extract third and fourth columns
b[,3:4] # The colon is used to define a numeric vector between the two numbers
#Extract first and fifth rows
b[c(1,5),] # The c() is used to create a numeric vector with the numbers separated by a comma
#Extract the value in the third row and fourth column
b[3,4]
# You can also reference the column names themselves using the $ symbol, for example:
c$a
#A different way of returning the column called "a"
c[,"a"]
# We can also find out what a data frame column names are using the colnames() function:
colnames(c)
# we can also use the same function to set new column names:
colnames(c) <- c("Year","Count")
## The nice thing with working with matrices is that we can use apply() function
## The apply() function allows someone to use an R function or user-defined
## function with a matrix
## The syntax of apply function is:
## apply(m, dimcode, f, arguments)
## With:
## - m: matrix you wish to use.
## - dimcode = 1 if you want to apply function to rows;
## dimcode= 2 if you want to apply to columns
## - f: function you wish to use
## arguments: specific arguments for function being used.
# let's use matrix b for this
b
# Calculate the mean for each row
apply(b,1,mean)
# Calculate the mean for each column
apply(b,2,mean)
#let's create our self-made function
func.1 <- function(x) (2*x)^20
#Let's use it onto matrix b by row
apply(b,1,func.1)
###################################
# 4. Data Types in R: List
###################################
## - list is a structure that can combine objects of different types.
## - A list is actually a vector but it does differ in comparison to
## the other types of vectors:
## - Other vectors are atomic vectors
##   - A list is a type of vector called a recursive vector, meaning we can have a list
##     within a list
# Let's imagine that you're going to create a list for one patient containing
# the name, the amount of the bill due, and a Boolean indicator of whether
# the patient has insurance or not
a <- list(name="Jack", owed=55, insurance=FALSE)
a
#another way to make a list
a.alt <- vector(mode="list")
a.alt[["name"]] <- "Jack"
a.alt[["owed"]] <- 55
a.alt[["insurance"]] <- FALSE
a.alt
# There are different ways in which we can index a list
a[["name"]]
a[[1]]
a$name
#The difference between indexing with single and double brackets
a[1]
class(a[1]) #With the single bracket we have a list with the name element only.
a[[1]]
class(a[[1]]) #double brackets extract the value out
###################################
# 4. Data Types in R: Tibble
###################################
# - "Tibbles" are a new modern data frame.
# - It keeps many important features of the original data frame.
# - It removes many of the outdated features.
# - They are another amazing feature added to R by Hadley Wickham.
# - We will use them in the tidyverse to replace the older outdated dataframe
# that we just learned about.
# Compared to Data Frames
# - A tibble never changes the input type: no more worrying about character
#   strings being automatically turned into factors.
# - A tibble can have columns that are lists.
# - A tibble can have non-standard variable names: It can start with a number or
# contain spaces.
# - It never creates row names.
#Load the tidyverse package
library(tidyverse)
#create a tibble from scratch
try <- tibble(x = 1:3, y = list(1:5, 1:10, 1:20))
try
# We can see that y is displayed as a list.
# If we try to do this with a traditional data frame we get an error:
try <- as.data.frame(c(x = 1:3, y = list(1:5, 1:10, 1:20)))
try
# A tibble can be made by coercing an existing object with as_tibble().
# This works similarly to as.data.frame(), but is a much more efficient process.
l <- replicate(26, sample(100), simplify = FALSE)
names(l) <- letters
microbenchmark::microbenchmark(
as_tibble(l),
as.data.frame(l)
)
## Tibbles vs Data Frames
# There are a couple key differences between tibbles and data frames.
# - Printing.
# - Subsetting.
tibble(
a = lubridate::now() + runif(1e3) * 86400,
b = lubridate::today() + runif(1e3) * 30,
c = 1:1e3,
d = runif(1e3),
e = sample(letters, 1e3, replace = TRUE)
)
# Subsetting: We can index a tibble in the ways we are used to, but now we
# can also use a pipe
df <- tibble(x = runif(5), y = rnorm(5))
df %>% .$x
df %>% .[["x"]]
df %>% .[[1]]
###################################
# 5. Getting External Data into R and Perform Basic Data manipulation using dplyr
###################################
# A common way in which data can be stored externally is the use of .csv files.
# These are text files, and have a very simple format where columns of attributes are
# separated by a comma, and each row by a carriage return.
# In the following example you will read in some U.S. Census Bureau, 2010-2014
# American Community Survey (ACS) 5-Year Estimate data. This was downloaded from the
# American Fact Finder website. The data are for census tracts in San Francisco and
# relate to median earnings in the past 12 months.
#Read CSV file - creates a data frame called earnings
earnings <- read_csv("./Data/ACS_14_5YR_S2001_with_ann.csv")
# inspect the data
str(earnings)
# preview the data
View(earnings)
#Show column headings
colnames(earnings)
#UID - Tract ID
#pop - estimated total population over 16 with income
#pop_m - estimated total population over 16 with income (margin of error)
#earnings - estimated median earnings
#earnings_m - estimated median earnings (margin of error)
# It is possible to show the structure of the object using the str() function.
str(earnings)
# We're going to learn some of the most common dplyr functions:
# - select(): subset columns
# - filter(): subset rows on conditions
# - mutate(): create new columns by using information from other columns
# - group_by() and summarize(): create summary statistics on grouped data
# - arrange(): sort results
# - count(): count discrete values
# Selecting columns and filtering rows
# To select columns of a data frame, use select().
# The first argument to this function is the data frame (here, earnings),
# and the subsequent arguments are the columns to keep.
select(earnings, UID,pop, earnings)
# To select all columns except certain ones,
# put a "-" in front of the variable to exclude it.
select(earnings, -pop_m, -earnings_m)
# To choose rows based on a specific criteria, use filter():
filter(earnings, pop >4000)
# Piping
# Pipes are a recent addition to R. They let you take the output of one function
# and send it directly to the next, which is useful when you need to do many
# things to the same dataset.
# Pipes in R look like %>% and are made available via the magrittr package,
# installed automatically with dplyr. If you use RStudio, you can type the
# pipe with Ctrl + Shift + M if you have a PC or Cmd + Shift + M if you have a Mac.
earnings %>%
filter(pop > 4000) %>%
select(UID,pop, earnings)
# If we want to create a new object with this smaller version of the data,
# we can assign it a new name:
earnings_sml <- earnings %>%
filter(pop > 4000) %>%
select(UID,pop, earnings)
earnings_sml
# You'll want to create new columns based on the values in existing columns. We're
# going to use mutate
earnings %>%
mutate(earnings = as.numeric(earnings), earnings_m = as.numeric(earnings_m))
earnings %>%
mutate(earnings = as.numeric(earnings), earnings_m = as.numeric(earnings_m)) %>%
mutate(earnings_rp = earnings * 13500, earnings_m_rp = earnings_m * 13500)
# Split-apply-combine data analysis and the summarize() function
# Create groups of high-earning and low-earning tracts
earnings.new <- earnings %>%
mutate(earnings = as.numeric(earnings)) %>%
  mutate(e_hilo = if_else(earnings >= mean(earnings, na.rm = TRUE), "High", "Low")) # na.rm guards against NAs introduced by as.numeric()
#Use function command with group_by
earnings.new %>%
group_by(e_hilo) %>%
summarize(mean_earnings = mean(earnings, na.rm = TRUE))
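# arrange() and count() were listed above but not demonstrated; a brief sketch
# using the earnings.new object created above:
earnings.new %>%
  arrange(desc(earnings)) %>% # sort tracts from highest to lowest earnings
  select(UID, earnings, e_hilo) %>%
  head()
earnings.new %>%
  count(e_hilo) # number of tracts in each earnings group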
###################################
# 6. Getting External Data into R - Reading Spatial Data
###################################
# Spatial data are distributed in a variety of formats, but commonly as Shapefiles.
# These can be read into R using a number of packages; the process is illustrated here with "rgdal".
# The following code loads a Census Tract Shapefile which was downloaded from the SF OpenData.
#Download and install package
install.packages("rgdal")
#Load package
library(rgdal)
# Read Shapefile
SF <- readOGR(dsn = "D:/Gdrive/ITB/Teaching/2019-2020/PL3102/01_Basic R/Data", layer = "tl_2010_06075_tract10")
# This has created a SpatialPolygonsDataFrame object, and we can view
# the tract boundaries using the plot() function:
plot(SF)
# The San Francisco peninsula is shown, however, the formal boundaries extend into the ocean
# and also include the Farallon Islands. For cartographic purposes it may not be desirable to
# show these extents, and later we will explore how these can be cleaned up.
#The slotNames() function prints their names.
slotNames(SF)
#Show the top rows of the data object
head(SF@data)
# The "data" slot contains a data frame with a row of attributes for each of the spatial polygons
# contained within the SF object; thus, each row equates to one polygon.
# Other slots contain useful information such as the spatial projection.
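# For example, the coordinate reference system is stored in the "proj4string" slot;
# either of these will display it:
SF@proj4string
proj4string(SF)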
###################################
# 7. Creating Spatial Data
###################################
# Sometimes it is necessary to create a spatial object from scratch, which is most common for
# point data given that only a single co-ordinate is required for each feature.
# This can be achieved using the SpatialPointsDataFrame() function and is used within this
# example to create a 311 point dataset. 311 data record non-emergency calls within the US,
# and in this case are those which occurred within San Francisco between
# January and December 2016. The 311 data used here have been simplified from the original
# data to only a few variables, and those calls without spatial references have been removed.
# Read csv into R
data_311 <- read.csv("./Data/311.csv")
# Have a look at the structure
head(data_311)
# Create the SpatialPointsDataFrame
SP_311 <- SpatialPointsDataFrame(coords = data.frame(data_311$Lon, data_311$Lat),
data = data.frame(data_311$CaseID,data_311$Category),
proj4string = SF@proj4string)
# Show the results
plot(SP_311)
###################################
# 8. Subsetting Data
###################################
# It is often necessary to subset data; either restricting a data frame to a set of
# columns or rows; or in the case of spatial data, creating an extract for a particular
# set of geographic features. Subsetting can occur in a number of different ways
#Create a table of frequencies by the categories used within the 311 data
table(data_311$Category)
# Use the subset() function to extract rows from the data which relate to Sewer Issues
sewer_issues <- subset(data_311,Category == "Sewer Issues")
# Use the square brackets "[]" to perform the same task
sewer_issues <- data_311[data_311$Category == "Sewer Issues",]
# Extract a list of IDs for the "Sewer Issues"
sewer_issues_IDs <- subset(data_311,Category == "Sewer Issues", select = "CaseID")
# Subsetting can also be useful for spatial data. In the example above the full extent of
# San Francisco was plotted, however, for cartographic purposes it may be preferable to
# remove the "Farallon Islands". This has a GEOID10 of "06075980401" which can be used to
# remove this from a plot:
plot(SF[SF@data$GEOID10 != "06075980401",]) # Removes Farallon Islands from the plot
# This can also be quite useful if you want to plot only a single feature, for example:
plot(SF[SF@data$GEOID10 == "06075980401",]) # Only plots Farallon Islands
# You can also use the same syntax to create a new object - for example:
SF <- SF[SF@data$GEOID10 != "06075980401",] # Overwrites the SF object
plot(SF)
###################################
# 9. Clipping Spatial Data
###################################
# Clipping is a process of subsetting using overlapping spatial data.
# The following code uses the outline of the coast of the U.S. to clip the boundaries of
# the SF spatial data frame object:
#Load library
library("raster")
#Read in coastal outline (Source from - https://www.census.gov/geo/maps-data/data/cbf/cbf_counties.html)
coast <- readOGR(dsn = paste0(wd,"/Data"), layer = "cb_2015_us_county_500k")
# Clip the SF spatial data frame object to the coastline
SF_clipped <- crop(SF, coast)
#Plot the results
plot(SF_clipped)
###################################
# 10. Merging Tabular Data
###################################
# So far we have utilized a single data frame or spatial object; however, it is often
# the case that in order to generate information, data from multiple sources are required.
# Where data share a common "key", these can be used to combine / link tables together.
# This might for example be an identifier for a zone; and is one of the reasons
# why most statistical agencies adopt standard sets of geographic codes to identify areas.
# In the earlier imported data "earnings" this included a UID column which relates to a Tract ID.
# We can now import an additional data table called bachelors - this also includes the same ID.
#Read CSV file - creates a data frame called bachelors
bachelors <- read_csv("./Data/ACS_14_5YR_S1501_with_ann.csv")
# Using the matching ID columns on both datasets we can link them together to create a
# new object with the merge() function:
#Perform the merge
SF_Tract_ACS <- merge(x=earnings,y=bachelors,by.x="UID",by.y="UID")
# An alternative method to the above, but a shortened version as the ID columns are
# the same on both tables
SF_Tract_ACS <- merge(earnings,bachelors,by="UID")
#You can also use all.x=TRUE (or all.y=TRUE) to keep all the rows from either the x or y
# table - for more details type ?merge()
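# A purely illustrative left join (the object name below is invented for this
# example): keep every earnings record even where no bachelors match exists.
SF_Tract_ACS_left <- merge(earnings, bachelors, by = "UID", all.x = TRUE)
nrow(SF_Tract_ACS_left)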
# The combined table now looks like
head(SF_Tract_ACS) # shows the top of the table
###################################
# 11. Removing and Creating Attributes
###################################
# It is sometimes necessary to remove variables from a tabular object or to create new values.
# In the following example we will remove some unwanted columns in the SF_clipped object,
# leaving just the zone id for each polygon.
#Remind yourself what the data look like...
head(SF_clipped@data)
SF_clipped@data <- data.frame(SF_clipped@data[,"GEOID10"]) #Makes a new version of the @data slot with just the values of the GEOID10 column - this is wrapped with the data.frame() function
#The data frame within the data slot now looks as follows
head(SF_clipped)
# One thing you may not like on this new data frame is the column heading which has got a bit messy.
# We can clean this up using the colnames() function.
colnames(SF_clipped@data) <- "GEOID10" #Update column names
head(SF_clipped@data) #Check the updated values
# These tract IDs are supposed to match with those in the "SF_Tract_ACS" object, however,
# if you are very observant you will notice that there is one issue; the above have a
# leading zero.
head(SF_Tract_ACS) # show the top of the SF_Tract_ACS object
# As such, in this instance we will create a new column on the SF_Tract_ACS data frame with a
# new ID that will match the SF GEOID10 column. We can achieve this using the $ symbol and will
# call this new variable "GEOID10".
# Creates a new variable with a leading zero
SF_Tract_ACS$GEOID10 <- paste0("0",SF_Tract_ACS$UID)
head(SF_Tract_ACS)
# The earnings data had some values that were stored as factors rather than numeric or integers,
# and the same is true for both the bachelors data; and now the combined SF_Tract_ACS object.
# We can check this again as follows:
str(SF_Tract_ACS)
# We can also remove the UID column. A quick way of doing this for a single variable is to use "NULL":
SF_Tract_ACS$UID <- NULL
# We will now convert the factor variables to numerics. The first stage will be to remove
# the "-" and "**" characters from the variables with the gsub() function, replacing these
# with NA values. This also has the effect of converting the factors to characters.
#Replace the "-" and "*" characters
SF_Tract_ACS$earnings <- gsub("-",NA,SF_Tract_ACS$earnings,fixed=TRUE) #replace the "-" values with NA
SF_Tract_ACS$earnings_m <- gsub("**",NA,SF_Tract_ACS$earnings_m,fixed=TRUE) #replace the "**" values with NA
SF_Tract_ACS$Bachelor_Higher <- gsub("-",NA,SF_Tract_ACS$Bachelor_Higher,fixed=TRUE) #replace the "-" values with NA
SF_Tract_ACS$Bachelor_Higher_m <- gsub("**",NA,SF_Tract_ACS$Bachelor_Higher_m,fixed=TRUE) #replace the "**" values with NA
# We will now convert these to numeric values:
SF_Tract_ACS$earnings <- as.numeric(SF_Tract_ACS$earnings)
SF_Tract_ACS$earnings_m <- as.numeric(SF_Tract_ACS$earnings_m)
SF_Tract_ACS$Bachelor_Higher <- as.numeric(SF_Tract_ACS$Bachelor_Higher)
SF_Tract_ACS$Bachelor_Higher_m <- as.numeric(SF_Tract_ACS$Bachelor_Higher_m )
# Now all the variables other than the "GEOID10" are stored as integers or numerics:
str(SF_Tract_ACS)
###################################
# 12. Merging Spatial Data
###################################
# It is also possible to join tabular data onto a spatial object (e.g. SpatialPolygonsDataFrame)
# in the same way as with regular data frames. In this example, we will join the newly
#created SF_Tract_ACS data onto the SF_clipped data frame.
SF_clipped <- merge(SF_clipped,SF_Tract_ACS, by="GEOID10") # merge
head(SF_clipped@data)#show the attribute data
###################################
# 13.Spatial Joins
###################################
# Earlier in this practical we created a SpatialPointsDataFrame which we later cropped
# using the point.in.poly() function to create the "SP_311_PIP" object.
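# NOTE: SP_311_PIP is not created anywhere in this script. The lines below are a
# minimal sketch of that missing clipping step, assuming the spatialEco package
# (which provides point.in.poly()); adjust if your original practical built it differently.
# install.packages("spatialEco")
library(spatialEco)
SP_311_PIP <- point.in.poly(SP_311, SF) # keeps the 311 points falling inside the SF tracts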
#As a reminder of what this looks like it is plotted below:
plot(SP_311_PIP)
# We will now clean up the associated data frame by removing all of the attributes apart
# from the category ("data_311.Category") and then add a sensible column name.
SP_311_PIP@data <- data.frame(SP_311_PIP@data[,"data_311.Category"])#subset data
colnames(SP_311_PIP@data) <- "Category" #update column names
# Although point.in.poly() was used to clip a dataset to an extent earlier,
# the other really useful feature of this point in polygon function is that
# it also appends the attributes of the polygon to the point. For example,
# we might be interested in finding out which census tracts each of the 311 calls resides within.
# As such, we will implement another point in polygon analysis to create a new object SF_clipped_311:
SF_clipped_311 <- point.in.poly(SP_311_PIP, SF) # point in polygon
#Cleanup the attributes
SF_clipped_311@data <- SF_clipped_311@data[,c("GEOID10","Category")] #note that we don't need to use the data.frame() function as we are keeping more than one column
#Show the top rows of the data
head(SF_clipped_311@data)
###################################
# Writing out and saving your data
###################################
# In order to share data it is often useful to write data frames or spatial
# objects back out of R as external files. This is very simple, and R supports multiple formats.
# In these examples, a CSV file and a Shapefile are both created.
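# R objects can also be saved in R's native serialized format; an optional aside
# (the file name is just illustrative):
saveRDS(SF_Tract_ACS, "SF_Tract_ACS.rds")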
#In this example we write out a CSV file from the data slot of the SpatialPointsDataFrame SF_clipped_311
write.csv(SF_clipped_311@data,"311_Tract_Coded.csv")
#This will write out a Shapefile for San Francisco - note, a warning is returned as the column
# names are a little longer than are allowed within a Shapefile and as such are
# automatically shortened.
writeOGR(SF_clipped, ".", "SF_clipped", driver="ESRI Shapefile")
###################################
# Basic plotting
###################################
#Import the dataset
ozone_airq_df <- read.csv("./Data/daily_44201_2017.csv")
#install package
install.packages("tidyverse")
library(tidyverse)
#We just need a few variables:
ozone_airq_df2 <- ozone_airq_df[,c('State.Code', 'Site.Num', 'Latitude', 'Longitude', 'Date.Local',
'State.Name', 'County.Name', 'CBSA.Name', 'AQI')]
# NOTE: the original filter/aesthetic columns (stcofips, dateL, SiteID) are not in the
# selected data, so they are derived here from the columns that were kept
g1 <- ozone_airq_df2 %>%
  filter(State.Name == "California", County.Name == "Orange") %>% # Orange County (FIPS 06059)
  mutate(dateL = as.Date(Date.Local),   # date for the x-axis
         SiteID = factor(Site.Num)) %>% # one colour per monitoring site
  ggplot() +
  geom_point(aes(x = dateL, y = AQI, color = SiteID)) +
  geom_smooth(aes(x = dateL, y = AQI, color = SiteID), method = "loess") +
  scale_colour_brewer(palette = "Set2") +
  labs(x = "Month", y = "Air Quality Index")
library(plotly)
ggplotly(g1)
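# A static copy of the plot could also be written to disk with ggsave();
# the file name below is just illustrative:
ggsave("aqi_orange_county.png", plot = g1, width = 8, height = 5)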
|
/Basic_R_new.R
|
no_license
|
adenant/Basic-R
|
R
| false | false | 29,812 |
r
|
|
\name{stsm-get-methods}
\docType{methods}
\alias{stsm-get-methods}
\alias{get.cpar}
\alias{get.cpar,stsm-method}
\alias{get.nopars}
\alias{get.nopars,stsm-method}
\alias{get.pars}
\alias{get.pars,stsm-method}
\title{Getter Methods for Class \code{stsm}}
\description{
Get access to the information stored in the slots \code{cpar}, \code{nopars} and
\code{pars} in objects of class \code{\link{stsm}}.
}
\usage{
\S4method{get.cpar}{stsm}(x, rescale = FALSE)
\S4method{get.nopars}{stsm}(x, rescale = FALSE)
\S4method{get.pars}{stsm}(x, rescale = FALSE, gradient = FALSE)
}
\arguments{
\item{x}{an object of class \code{\link{stsm}}.}
\item{rescale}{logical. If \code{TRUE}, relative variance parameters are
rescaled into absolute variances. Ignored if \code{x@cpar} is null.}
\item{gradient}{logical. If \code{TRUE}, first order derivatives of
\code{\link{transPars}} with respect to the parameters in the slot \code{pars} are returned.}
}
\details{
\emph{Transformation of the parameters of the model.}
The method \code{\link{transPars}} allows parameterizing the model
in terms of an auxiliary vector of parameters.
The output of \code{get.pars} is returned in terms of the actual parameters of the model, i.e.,
the variances and the autoregressive coefficients if they are part of the model.
With the standard parameterization, \code{x@transPars = NULL}, \code{get.pars(x)} returns
the output stored in \code{x@pars}.
When the model is parameterized in terms of an auxiliary set of parameters
\eqn{\theta}, \code{get.pars} returns the variance parameters instead of the values of
\eqn{\theta} that are stored in \code{x@pars}.
For example, with \code{x@transPars = "square"} (where the variances are \eqn{\theta^2}),
\code{get.pars} returns \eqn{\theta^2} while \code{x@pars} contains the vector \eqn{\theta}.
\emph{Absolute and relative variances.}
%NOTE this paragraph is included in 'sts-model.Rd',
%repeated here for introduction and a standalone explanation
The model can be defined in terms of relative variances.
In this case, the variance that acts as a scaling parameter is stored in the slot \code{cpar}.
Otherwise, \code{cpar} is null and ignored.
Typically, the scaling parameter will be chosen to be the variance parameter that is
concentrated out of the likelihood function.
%
If \code{rescale = TRUE}, the relative variance parameters are rescaled into
absolute variance parameters (i.e., they are multiplied by \code{x@cpar})
and then returned by these methods.
If \code{rescale = FALSE}, relative variance parameters are returned, that is,
the variances divided by the scaling parameter \code{cpar}.
Since the scaling parameter is one of the variances, the relative variance stored
in \code{cpar} is \eqn{1} (the parameter divided by itself).
\emph{Transformation of parameters in a model defined in terms of relative
variances.} When a model is defined so that the parameters are the relative variances
(\code{cpar} is not null) and a parameterization \code{transPars} is also specified,
then the transformation of parameters is applied to the relative variances,
not to the absolute variances. The relative variances are first transformed and
afterwards they are rescaled back to absolute variances if requested by
setting \code{rescale = TRUE}.
The transformation \code{transPars} is applied to the parameters defined in
\code{pars}; \code{cpar} is assumed to be chosen following other rationale; usually,
it is the value that maximizes the likelihood since one of the variance parameters
can be concentrated out of the likelihood function.
\emph{Note.}
When \code{cpar} is not null, it is more convenient to store in the slots
\code{pars} and \code{nopars} the values of the relative variances, while
the slot \code{cpar} stores the value of the scaling parameter rather than
the relative variance (which will be \eqn{1}).
If the relative values were stored, then the scaling parameter would
need to be recomputed each time the value is requested by \code{get.cpar}.
Assuming that \code{cpar} is the parameter that is concentrated out of the likelihood function,
the expression that maximizes the likelihood should be evaluated whenever
the value is requested to be printed or to do any other operation.
To avoid this, the scaling value is directly stored. This approach also makes
sense with the way the method \code{\link{set.cpar}} works.
\emph{Note for users.}
For those users that are not familiar with the design and internal structure of the
class \code{\link{stsm}}, it is safer to use the get and set methods
rather than retrieving or modifying the contents of the slots through the \code{@} and
\code{@<-} operators.
}
\value{
\item{get.cpar}{named numeric of length one.}
\item{get.nopars}{named numeric vector.}
\item{get.pars}{named numeric vector.}
}
\seealso{
\code{\link{stsm}}.
}
\examples{
# sample models with arbitrary parameter values
# model in standard parameterization
# internal parameter values are the same as the model parameter
m <- stsm.model(model = "llm+seas", y = JohnsonJohnson,
pars = c("var1" = 2, "var2" = 15, "var3" = 30))
m@pars
get.pars(m)
# model parameterized, the variances are the square
# of an auxiliary vector of parameters
m <- stsm.model(model = "llm+seas", y = JohnsonJohnson,
pars = c("var1" = 2, "var2" = 15, "var3" = 30), transPars = "square")
# auxiliary vector of parameters
m@pars
# parameters of the model, variances
get.pars(m)
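# the 'gradient' argument documented in the usage section can also be requested;
# this call is a sketch based on that documentation (derivatives of the
# transformation with respect to the auxiliary parameters)
get.pars(m, gradient = TRUE)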
# model rescaled, variances are relative to 'var1'
m <- stsm.model(model = "llm+seas", y = JohnsonJohnson,
pars = c("var2" = 15, "var3" = 30), cpar = c("var1" = 2))
# internal values
m@pars
m@cpar
# relative variances
get.pars(m)
get.cpar(m)
# absolute variances
get.pars(m, rescale = TRUE)
get.cpar(m, rescale = TRUE)
# model defined in terms of relative variances
# and with the parameterization \code{transPars = "square"};
# the transformation is applied to the relative variances,
# the relative variances are first transformed and afterwards
# they are rescaled back to absolute variances if requested
m <- stsm.model(model = "llm+seas", y = JohnsonJohnson,
pars = c("var2" = 3, "var3" = 6), cpar = c("var1" = 2),
transPars = "square")
c(get.cpar(m, rescale = FALSE), get.pars(m, rescale = FALSE))
c(get.cpar(m, rescale = TRUE), get.pars(m, rescale = TRUE))
# when 'cpar' is defined, 'nopars' is also interpreted as a relative variance
# and therefore it is rescaled if absolute variances are requested
m <- stsm.model(model = "llm+seas", y = JohnsonJohnson,
pars = c("var2" = 3), cpar = c("var1" = 2), nopars = c("var3" = 6),
transPars = NULL)
v <- c(get.cpar(m, rescale = FALSE), get.pars(m, rescale = FALSE), get.nopars(m, rescale = FALSE))
v[c("var1", "var2", "var3")]
v <- c(get.cpar(m, rescale = TRUE), get.pars(m, rescale = TRUE), get.nopars(m, rescale = TRUE))
v[c("var1", "var2", "var3")]
# 'nopars' is rescaled as shown in the previous example
# but it is not affected by the parameterization chosen for 'pars'
m <- stsm.model(model = "llm+seas", y = JohnsonJohnson,
pars = c("var2" = 3), cpar = c("var1" = 2), nopars = c("var3" = 6),
transPars = "square")
v <- c(get.cpar(m, rescale = FALSE), get.pars(m, rescale = FALSE), get.nopars(m, rescale = FALSE))
v[c("var1", "var2", "var3")]
v <- c(get.cpar(m, rescale = TRUE), get.pars(m, rescale = TRUE), get.nopars(m, rescale = TRUE))
v[c("var1", "var2", "var3")]
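# added illustration (not in the original example): with a non-null 'transPars',
# the 'gradient' argument of 'get.pars' returns the first order derivatives of
# the transformation with respect to the auxiliary parameters
m <- stsm.model(model = "llm+seas", y = JohnsonJohnson,
  pars = c("var1" = 2, "var2" = 15, "var3" = 30), transPars = "square")
get.pars(m, gradient = TRUE)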
}
\keyword{methods}
|
/man/stsm-methods-get.Rd
|
no_license
|
ccrostirolla/stsm
|
R
| false | false | 7,425 |
rd
|
#File for declaring global data.table objects for CRAN binding check
utils::globalVariables(c('tissue.data','physiology.data'))
|
/httk/R/globals.R
|
no_license
|
jrsfeir/CompTox-ExpoCast-httk
|
R
| false | false | 127 |
r
|
#Reading household_power_consumption_1st_and_2nd_Feb_2007.txt file
r<-read.table("household_power_consumption_1st_and_2nd_Feb_2007.txt",
header = TRUE,
sep = ";",
na.strings = "?"
)
#creating Plot 3
png(file="plot3.png",width=480,height=480) #opening device png to plot
plot(strptime(paste(r$Date,r$Time),format = "%d/%m/%Y %H:%M:%S"), #x variable
r$Sub_metering_1,type="l", #y variable
     ylab = "Energy sub metering",
xlab=""
) #Black line done!
lines(strptime(paste(r$Date,r$Time),format = "%d/%m/%Y %H:%M:%S"),
r$Sub_metering_2,
col="red"
) #Red line done!
lines(strptime(paste(r$Date,r$Time),format = "%d/%m/%Y %H:%M:%S"),
r$Sub_metering_3,
col="blue"
) #Blue line done!
legend("topright",
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=1
) #Legend done!
dev.off() #closing device png
rm(r) #cleaning memory
|
/plot3.R
|
no_license
|
cpenc/130Mb_file-2_million_records-plot-histogram-time_series-scatter-multi_variables_legends_layouts_PNG
|
R
| false | false | 1,114 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scraper_utils.R
\name{id}
\alias{id}
\title{Extracts id from OLX flat rental offer}
\usage{
id(webpage)
}
\arguments{
\item{webpage}{an xml2 document, as returned by \code{xml2::read_html}}
}
\value{
integer
}
\description{
Extracts id from an OLX flat rental offer
}
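% Added illustrative example (not part of the original documentation): the URL
% below is a placeholder for an OLX flat rental offer page, not a real address.
\examples{
\dontrun{
webpage <- xml2::read_html("https://www.olx.pl/oferta/some-flat-rental-offer")
id(webpage)
}
}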
|
/man/id.Rd
|
permissive
|
p-drozd/OlxScraperPkg
|
R
| false | true | 309 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/confusion_stats.R
\name{confusion_stats}
\alias{confusion_stats}
\title{Confusion Stats}
\usage{
confusion_stats(Actual.score, Predicted.score, positiveC = "W")
}
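% Added illustrative example (not part of the original documentation): the
% score vectors below are made-up class labels with positive class "W".
\examples{
\dontrun{
Actual.score <- c("W", "L", "W", "W", "L", "L")
Predicted.score <- c("W", "W", "W", "L", "L", "W")
confusion_stats(Actual.score, Predicted.score, positiveC = "W")
}
}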
|
/man/confusion_stats.Rd
|
no_license
|
ntyndall/mltools
|
R
| false | true | 241 |
rd
|
## Have total emissions from PM2.5 decreased in Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
library(plyr)
library(dplyr)
## Read the RDS file
NEI <- readRDS('summarySCC_PM25.rds')
## Filter for Baltimore data
balt <- filter(NEI, fips == '24510')
## Summarize the data by year, summing the emissions
emi <- ddply(balt, .(year), summarize, totalEmissions = sum(Emissions))
## Remove scientific notation
options(scipen=10)
## Open the png device
png(file='plot2.png')
## Plot the total emissions by year and fit a linear model
plot(emi$year, emi$totalEmissions, xaxt='n', xlab='Year', ylab='Total PM2.5 Emissions (tons)', main='Total PM2.5 Emissions by Year in Baltimore City, Maryland', pch=19)
model <- lm(emi$totalEmissions ~ emi$year)
abline(model, lwd=2, col='blue')
## Plot the relevant years as axis labels
axis(1, emi$year)
## Close the graphics device
dev.off()
|
/plot2.R
|
no_license
|
eideal/PM25EmissionsAnalysis
|
R
| false | false | 895 |
r
|
perform_validation <- function(filename_manualPages = NULL, tabsheet_manualPages = NULL,
ds_machinePages = NULL,
compOut_dir = NULL) {
#################################################
  ##  Preprocess the manually created dataset.  ##
#################################################
# defOrig_fileName <- "./input files/Define specs CDISC SDTM completed - manually.xlsx"
# sheetVarTab_name <- "Variables"
defOrig_fileName <- filename_manualPages
sheetVarTab_name <- tabsheet_manualPages
## Read Define specs with page numbers populated manually
defineOrigin_variableTab_manual <- readxl::read_excel(path = file.path(defOrig_fileName),
sheet = sheetVarTab_name,
col_names = TRUE)
defineOrigin_variableTab_manual_CRF <- defineOrigin_variableTab_manual %>%
dplyr::filter(Origin %in% c("CRF")) %>%
dplyr::select(Order, Dataset, Variable, Pages)
################################################
## Compare manually created dataset against ##
## machine created dataset. ##
################################################
res_comparison <- compareDF::compare_df(df_new = ds_machinePages, df_old = defineOrigin_variableTab_manual_CRF, group_col = c("Pages"))
# res_comparison$html_output
## preprocess comparison result before export
comp_df <- dplyr::tbl_df(res_comparison$comparison_df)
chg_summary_df <- as.data.frame(res_comparison$change_summary)
chg_summary <- t(chg_summary_df)
colnames(chg_summary) <- rownames(chg_summary_df)
out_comparison <- list(comp_df, chg_summary)
## export comparison report
# filename_out <- file.path(paste("./../../../04 Output/comparison report_", Sys.Date(), ".txt", sep = ""))
filename_out <- file.path(paste(compOut_dir, "/comparison report_", Sys.Date(), ".txt", sep = ""))
cat(capture.output(print(out_comparison, row.names = FALSE), file=filename_out))
print("")
print("<!-- ############################################################################### -->")
print(paste("To see comparison report, please go to this path: ", filename_out, sep = ""))
print("<!-- ############################################################################### -->")
}
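## Illustrative usage sketch (not part of the original source). The dataset,
## output directory and machine-generated pages below are hypothetical examples
## of the expected inputs, kept commented out on purpose.
# ds_machine <- data.frame(Order = 1L, Dataset = "DM", Variable = "AGE", Pages = "12")
# perform_validation(filename_manualPages = "./input files/Define specs CDISC SDTM completed - manually.xlsx",
#                    tabsheet_manualPages = "Variables",
#                    ds_machinePages = ds_machine,
#                    compOut_dir = "./output")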
|
/prod/backup/functions/backup/perfom_validation.R
|
no_license
|
Hw1OCS/aCRFExtractor_OCS-version
|
R
| false | false | 2,401 |
r
|
\name{ET.Penman}
\alias{ET.Penman}
\title{Penman Formulation
}
\description{
Implementing the Penman formulation for estimating open-water evaporation or potential evapotranspiration
}
\usage{
\method{ET}{Penman}(data, constants, ts="daily", solar="sunshine hours",
wind="yes", windfunction_ver=1948, alpha = 0.08, z0 = 0.001, \dots)
}
\arguments{
\item{data}{
A list which contains the following items (climate variables) required by Penman formulation:\cr
\emph{Tmax}, \emph{Tmin}, \emph{RHmax}, \emph{RHmin}, \emph{Rs} or \emph{n} or \emph{Cd}, \emph{u2} or \emph{uz}
}
\item{constants}{
A list named \code{constants} consists of constants required for the calculation of Penman formulation which must contain the following items:\cr
\emph{Elev} - ground elevation above mean sea level in m,\cr
\emph{lambda} - latent heat of vaporisation = 2.45 MJ.kg^-1,\cr
\emph{lat_rad} - latitude in radians,\cr
\emph{Gsc} - solar constant = 0.0820 MJ.m^-2.min^-1,\cr
\emph{z} - height of wind instrument in m,\cr
\emph{sigma} - Stefan-Boltzmann constant = 4.903*10^-9 MJ.K^-4.m^-2.day^-1.\cr
\cr
The following constants are also required when argument \code{solar} has value of \code{sunshine hours}:\cr
\emph{as} - fraction of extraterrestrial radiation reaching earth on sunless days,\cr
\emph{bs} - difference between the fraction of extraterrestrial radiation reaching the earth on full-sun days and that on sunless days.
}
\item{ts}{
Must be either \code{daily}, \code{monthly} or \code{annual}, which indicates the desired time step that the output ET estimates should be on.
Default is \code{daily}.
}
\item{solar}{
Must be either \code{data}, \code{sunshine hours}, \code{cloud} or \code{monthly precipitation}:\cr
\code{data} indicates that solar radiation data is to be used directly for calculating evapotranspiration; \cr
\code{sunshine hours} indicates that solar radiation is to be calculated using the real data of sunshine hours;\cr
\code{cloud} indicates that sunshine hours are to be estimated from cloud data; \cr
\code{monthly precipitation} indicates that solar radiation is to be calculated directly from monthly precipitation.\cr
Default is \code{sunshine hours}.
}
\item{wind}{
Must be either \code{yes} or \code{no}.\cr
\code{yes} indicates that the calculation will use real data of wind speed; \cr
\code{no} indicates that the alternative calculation without using wind data will be used in the Penman formulation (Valiantzas 2006, Equation 33).\cr
Default is \code{yes}.
}
\item{windfunction_ver}{
The version of Penman wind function that will be used within the Penman formulation. Must be either \code{1948} or \code{1956}. \cr
\code{1948} is for applying Penman's 1948 wind function (Penman, 1948); \cr
\code{1956} is for applying Penman's 1956 wind function (Penman, 1956).\cr
Default is \code{1948}.
}
\item{alpha}{
Any numeric value between 0 and 1 (dimensionless), albedo of evaporative surface representing the portion of the incident radiation that is reflected back at the surface. \cr
Default is 0.08 for an open-water surface, which corresponds to the calculation of Penman open-water evaporation; all other values will trigger the calculation of Penman potential evapotranspiration.
}
\item{z0}{
Any value (metres), roughness height of the evaporative surface. \cr
Default is 0.001 for an open-water surface, which corresponds to the calculation of Penman open-water evaporation; all other values will trigger the calculation of Penman potential evapotranspiration.
}
\item{\dots}{
Dummy for generic function, no need to define.
}
}
\details{
The alternative calculation options can be selected through arguments \code{solar}, \code{wind} and \code{windfunction_ver}, please see \code{Arguments} for details.\cr
User-defined evaporative surface is allowed through arguments \code{alpha} and \code{z0}, please see \code{Arguments} for details.
}
\value{
The function prints a calculation summary to the screen containing the following elements:\cr
- ET model name and ET quantity estimated\cr
- Evaporative surface with values of albedo and roughness height\cr
- Option for calculating solar radiation (i.e. the value of argument \code{solar})\cr
- If actual wind data has been used for calculation (i.e. the value of argument \code{wind}) and which version of Penman wind function has been used (i.e. the value of argument \code{windfunction_ver})\cr
- Time step of the output ET estimates (i.e. the value of argument \code{ts})\cr
- Units of the output ET estimates\cr
- Time duration of the ET estimation\cr
- Number of ET estimates obtained in the entire time-series\cr
- Basic statistics of the estimated ET time-series including \emph{mean}, \emph{max} and \emph{min} values.\cr
\cr
The function also generates a list containing the following components, which is saved into a \code{csv} file named as \emph{ET_Penman.csv} in the working directory:
\item{ET.Daily}{
Daily aggregated estimations of Penman open-water evaporation or potential evapotranspiration.
}
\item{ET.Monthly}{
Monthly aggregated estimations of Penman open-water evaporation or potential evapotranspiration.
}
\item{ET.Annual}{
Annually aggregated estimations of Penman open-water evaporation or potential evapotranspiration.
}
\item{ET.MonthlyAve}{
Monthly averaged estimations of daily Penman open-water evaporation or potential evapotranspiration.
}
\item{ET.AnnualAve}{
Annually averaged estimations of daily Penman open-water evaporation or potential evapotranspiration.
}
\item{ET_formulation}{
Name of the formulation used which equals to \code{Penman}.
}
\item{ET_type}{
Type of the estimation obtained which is either \code{Open-water Evaporation} or \code{Potential Evapotranspiration}.
}
\item{message1}{
A message to inform the users about how solar radiation has been calculated by using which data.
}
\item{message2}{
A message to inform the users about if actual wind data has been used in the calculations or alternative calculations has been performed without wind data, and which version of the Penman wind function has been used.
}
}
\references{
McMahon, T., Peel, M., Lowe, L., Srikanthan, R. & McVicar, T. 2012. \emph{Estimating actual, potential, reference crop and pan evaporation using standard meteorological data: a pragmatic synthesis}. Hydrology and Earth System Sciences Discussions, 9, 11829-11910.
Penman, H. L. 1948. \emph{Natural evaporation from open water, bare soil and grass}. Proceedings of the Royal Society of London. Series A. Mathematical and Physical Sciences, 193, 120-145.
Valiantzas, J. D. 2006. \emph{Simplified versions for the Penman evaporation equation using routine weather data}. Journal of Hydrology, 331, 690-702.
Penman, H. L. 1956. \emph{Evaporation: An introductory survey}. Netherlands Journal of Agricultural Science, 4, 9-29.
}
\author{
Danlu Guo
}
\seealso{
\code{\link{ET}},\code{\link{processeddata}},\code{\link{defaultconstants}},\code{\link{constants}}
}
\examples{
# Use processed existing data set and constants from Kent Town, Adelaide
data("processeddata")
data("constants")
# Call ET.Penman under the generic function ET
results <- ET.Penman(data, constants, ts="daily",
solar="sunshine hours", wind="yes", windfunction_ver = "1948",
alpha = 0.08, z0 = 0.001)
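# Added illustration (not part of the original example): the same estimation
# without wind data, using the documented alternative wind = "no" option
results_nowind <- ET.Penman(data, constants, ts="daily",
     solar="sunshine hours", wind="no", alpha = 0.08, z0 = 0.001)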
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Penman}
\keyword{evapotranspiration}
\keyword{open-water evaporation}
\keyword{potential evapotranspiration}
% __ONLY ONE__ keyword per line
|
/man/ET.Penman.Rd
|
no_license
|
AllysonS/Evapotranspiration
|
R
| false | false | 7,760 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.secretsmanager_operations.R
\name{put_resource_policy}
\alias{put_resource_policy}
\title{Attaches the contents of the specified resource-based permission policy to a secret}
\usage{
put_resource_policy(SecretId, ResourcePolicy)
}
\arguments{
\item{SecretId}{[required] Specifies the secret that you want to attach the resource-based policy to. You can specify either the ARN or the friendly name of the secret.
If you specify an ARN, we generally recommend that you specify a complete ARN. You can specify a partial ARN too---for example, if you don't include the final hyphen and six random characters that Secrets Manager adds at the end of the ARN when you created the secret. A partial ARN match can work as long as it uniquely matches only one secret. However, if your secret has a name that ends in a hyphen followed by six characters (before Secrets Manager adds the hyphen and six characters to the ARN) and you try to use that as a partial ARN, then those characters cause Secrets Manager to assume that you're specifying a complete ARN. This confusion can cause unexpected results. To avoid this situation, we recommend that you don't create secret names that end with a hyphen followed by six characters.}
\item{ResourcePolicy}{[required] A JSON-formatted string that's constructed according to the grammar and syntax for an AWS resource-based policy. The policy in the string identifies who can access or manage this secret and its versions. For information on how to format a JSON parameter for the various command line tool environments, see \href{http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json}{Using JSON for Parameters} in the \emph{AWS CLI User Guide}.}
}
\description{
Attaches the contents of the specified resource-based permission policy to a secret. A resource-based policy is optional. Alternatively, you can use IAM identity-based policies that specify the secret's Amazon Resource Name (ARN) in the policy statement's \code{Resources} element. You can also use a combination of both identity-based and resource-based policies. The affected users and roles receive the permissions that are permitted by all of the relevant policies. For more information, see \href{http://docs.aws.amazon.com/secretsmanager/latest/userguide/auth-and-access_resource-based-policies.html}{Using Resource-Based Policies for AWS Secrets Manager}. For the complete description of the AWS policy syntax and grammar, see \href{http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html}{IAM JSON Policy Reference} in the \emph{IAM User Guide}.
}
\details{
\strong{Minimum permissions}
To run this command, you must have the following permissions:
\itemize{
\item secretsmanager:PutResourcePolicy
}
\strong{Related operations}
\itemize{
\item To retrieve the resource policy that's attached to a secret, use GetResourcePolicy.
\item To delete the resource-based policy that's attached to a secret, use DeleteResourcePolicy.
\item To list all of the currently available secrets, use ListSecrets.
}
}
\section{Accepted Parameters}{
\preformatted{put_resource_policy(
SecretId = "string",
ResourcePolicy = "string"
)
}
}
\examples{
# The following example shows how to add a resource-based policy to a
# secret.
\donttest{put_resource_policy(
ResourcePolicy = "{\\n\\"Version\\":\\"2012-10-17\\",\\n\\"Statement\\":[{\\n\\"Effect\\":\\"Allow\\",\\n\\"Principal\\":{\\n\\"AWS\\":\\"arn:aws:iam::123456789012:root\\"\\n},\\n\\"Action\\":\\"secretsmanager:GetSecretValue\\",\\n\\"Resource\\":\\"*\\"\\n}]\\n}",
SecretId = "MyTestDatabaseSecret"
)}
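# Added sketch (hedged): building the same policy with 'jsonlite' instead of
# escaping the JSON by hand; the use of 'jsonlite' is an assumption made for
# illustration, it is not a documented dependency of this function.
\donttest{
policy <- jsonlite::toJSON(
  list(
    Version = "2012-10-17",
    Statement = list(list(
      Effect = "Allow",
      Principal = list(AWS = "arn:aws:iam::123456789012:root"),
      Action = "secretsmanager:GetSecretValue",
      Resource = "*"
    ))
  ),
  auto_unbox = TRUE
)
put_resource_policy(
  ResourcePolicy = as.character(policy),
  SecretId = "MyTestDatabaseSecret"
)}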
}
|
/service/paws.secretsmanager/man/put_resource_policy.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false | true | 3,698 |
rd
|
## chunked queries for large number of records:
# https://github.com/ncss-tech/soilDB/issues/71
## TODO items summarized here
# https://github.com/ncss-tech/soilDB/issues/81
# helper function for processing WKT returned by SDA_query()
# result is an SPDF
# d: data frame
# g: column containing WKT
# p4s: PROJ4 CRS defs
## TODO: test with geom appearing in different positions within query results
## TODO: geometry collections are not allowed in sp objects..
## TODO: consider moving to sf
#' @title Post-process WKT returned from SDA.
#'
#' @description This is a helper function, commonly used with \code{SDA_query} to extract WKT (well-known text) representation of geometry to an sp-class object.
#'
#' @param d \code{data.frame} returned by \code{SDA_query}, containing WKT representation of geometry
#' @param g name of column in \code{d} containing WKT geometry
#' @param p4s PROJ4 CRS definition, typically GCS WGS84
#'
#' @details The SDA website can be found at \url{http://sdmdataaccess.nrcs.usda.gov}. See the \href{http://ncss-tech.github.io/AQP/soilDB/SDA-tutorial.html}{SDA Tutorial} for detailed examples.
#'
#' @note This function requires the `httr`, `jsonlite`, `XML`, and `rgeos` packages.
#'
#' @author D.E. Beaudette
#'
#' @return A \code{Spatial*} object.
#'
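#' @examples
#' \dontrun{
#' # Illustrative sketch (added, not from the original source): request a couple
#' # of map unit polygons as WKT and convert them into a SpatialPolygonsDataFrame.
#' # The query below is only an example of the expected input for `d`.
#' d <- SDA_query("SELECT TOP 2 mukey, mupolygongeo.STAsText() AS geom FROM mupolygon")
#' s <- processSDA_WKT(d, g = 'geom')
#' class(s)
#' }
#'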
processSDA_WKT <- function(d, g='geom', p4s='+proj=longlat +datum=WGS84') {
# iterate over features (rows) and convert into list of SPDF
p <- list()
n <- nrow(d)
# points or polygons?
# looking at the first feature for efficiency, all others should be the same
g.type <- class(rgeos::readWKT(d[1, g]))
for(i in seq(1, n)) {
# extract the current row in the DF
d.i <- d[i, ]
# extract the current feature from WKT
p.i <- rgeos::readWKT(d.i[[g]], id = i, p4s = p4s)
# remove geom from current row of DF
d.i[[g]] <- NULL
# compose SpatialPointsDataFrame, with other attributes
if(g.type == 'SpatialPoints')
s.i <- SpatialPointsDataFrame(p.i, data=cbind(data.frame(gid=i, stringsAsFactors = FALSE), d.i), match.ID = FALSE)
# compose SpatialPolygonsDataFrame, with other attributes
if(g.type == 'SpatialPolygons')
s.i <- SpatialPolygonsDataFrame(p.i, data=cbind(data.frame(gid=i, stringsAsFactors = FALSE), d.i), match.ID = FALSE)
# fix column names
names(s.i) <- c('gid', names(d.i))
# save to list
p[[i]] <- s.i
}
# reduce list to single SPDF
spdf <- do.call('rbind', p)
return(spdf)
}
## TODO: this will replace *most* of the functionality in:
## SDA_make_spatial_query
## SDA_query_features
#
# get a SPDF of intersecting MU polygons and mukey, in a single query
# row-order and number of rows won't always match input
# that is fine as we can use sp::over to connect
# 10-20x speed improvement over SDA_query_features
#' @title SDA Spatial Query
#'
#' @description Query SDA (SSURGO / STATSGO) records via spatial intersection with supplied geometries. Input can be SpatialPoints, SpatialLines, or SpatialPolygons objects with a valid CRS. Map unit keys, overlapping polygons, or the spatial intersection of \code{geom} + SSURGO / STATSGO polygons can be returned. See details.
#'
#' @param geom a Spatial* object, with valid CRS. May contain multiple features.
#' @param what a character vector specifying what to return. 'mukey': \code{data.frame} with intersecting map unit keys and names; 'geom': overlapping or intersecting map unit polygons
#' @param geomIntersection logical; \code{FALSE}: overlapping map unit polygons returned, \code{TRUE}: intersection of \code{geom} + map unit polygons is returned.
#' @param db a character vector identifying the Soil Geographic Databases
#' ('SSURGO' or 'STATSGO') to query. Option \var{STATSGO} currently works
#' only in combination with \code{what = "geom"}.
#'
#' @return A \code{data.frame} if \code{what = 'mukey'}, otherwise \code{SpatialPolygonsDataFrame} object.
#'
#' @author D.E. Beaudette, A.G. Brown, D.R. Schlaepfer
#' @seealso \code{\link{SDA_query}}
#' @keywords manip
#'
#' @aliases SDA_make_spatial_query,SDA_query_features
#'
#' @note Row-order is not preserved across features in \code{geom} and returned object. Use \code{sp::over()} or similar functionality to extract from results. Polygon area in acres is computed server-side when \code{what = 'geom'} and \code{geomIntersection = TRUE}.
#'
#'
#' @details Queries for map unit keys are always more efficient vs. queries for overlapping or intersecting (i.e. least efficient) features. \code{geom} is converted to GCS / WGS84 as needed. Map unit keys are always returned when using \code{what = "geom"}.
#'
#' There is a 100,000 record limit and 32Mb JSON serializer limit, per query.
#'
#' SSURGO (detailed soil survey, typically 1:24,000 scale) and STATSGO (generalized soil survey, 1:250,000 scale) data are stored together within SDA. This means that queries that don't specify an area symbol may result in a mixture of SSURGO and STATSGO records. See the examples below and the \href{http://ncss-tech.github.io/AQP/soilDB/SDA-tutorial.html}{SDA Tutorial} for details.
#'
#'
#' @examples
#' \donttest{
#' if(requireNamespace("curl") &
#' curl::has_internet() &
#' requireNamespace("sp") &
#' requireNamespace("raster")
#' ) {
#'
#' library(aqp)
#' library(sp)
#' library(raster)
#'
#' ## query at a point
#'
#' # example point
#' p <- SpatialPoints(cbind(x = -119.72330, y = 36.92204),
#' proj4string = CRS('+proj=longlat +datum=WGS84'))
#'
#' # query map unit records at this point
#' res <- SDA_spatialQuery(p, what = 'mukey')
#'
#' # convert results into an SQL "IN" statement
#' # useful when there are multiple intersecting records
#' mu.is <- format_SQL_in_statement(res$mukey)
#'
#' # composite SQL WHERE clause
#' sql <- sprintf("mukey IN %s", mu.is)
#'
#' # get commonly used map unit / component / chorizon records
#' # as a SoilProfileCollection object
#' # confusing but essential: request that results contain `mukey`
#' # with `duplicates = TRUE`
#' x <- fetchSDA(sql, duplicates = TRUE)
#'
#' # safely set texture class factor levels
#' # by making a copy of this column
#' # this will save in lieu of textures in the original
#' # `texture` column
#' horizons(x)$texture.class <- factor(x$texture, levels = SoilTextureLevels())
#'
#' # graphical depiction of the result
#' plotSPC(x, color='texture.class', label='compname',
#' name='hzname', cex.names = 1, width=0.25,
#' plot.depth.axis=FALSE, hz.depths=TRUE,
#' name.style='center-center'
#' )
#'
#'
#'
#' ## query mukey + geometry that intersect with a bounding box
#'
#' # define a bounding box: xmin, xmax, ymin, ymax
#' #
#' # +-------------------(ymax, xmax)
#' # | |
#' # | |
#' # (ymin, xmin) ----------------+
#' b <- c(-119.747629, -119.67935, 36.912019, 36.944987)
#'
#' # convert bounding box to WKT
#' bbox.sp <-as(extent(b), 'SpatialPolygons')
#' proj4string(bbox.sp) <- '+proj=longlat +datum=WGS84'
#'
#' # results contain associated map unit keys (mukey)
#' # return SSURGO polygons, after intersection with provided BBOX
#' ssurgo.geom <- SDA_spatialQuery(
#' bbox.sp,
#' what = 'geom',
#' db = 'SSURGO',
#' geomIntersection = TRUE
#' )
#'
#' # return STATSGO polygons, after intersection with provided BBOX
#' statsgo.geom <- SDA_spatialQuery(
#' bbox.sp,
#' what = 'geom',
#' db = 'STATSGO',
#' geomIntersection = TRUE
#' )
#'
#' # inspect results
#' par(mar = c(0,0,3,1))
#' plot(ssurgo.geom, border = 'royalblue')
#' plot(statsgo.geom, lwd = 2, border = 'firebrick', add = TRUE)
#' plot(bbox.sp, lwd = 3, add = TRUE)
#' legend(
#' x = 'top',
#' legend = c('BBOX', 'STATSGO', 'SSURGO'),
#' lwd = c(3, 2, 1),
#' col = c('black', 'firebrick', 'royalblue'),
#' horiz = TRUE,
#' bty = 'n'
#' )
#'
#' # quick reminder that STATSGO map units often contain many components
#' # format an SQL IN statement using the first STATSGO mukey
#' mu.is <- format_SQL_in_statement(statsgo.geom$mukey[1])
#'
#' # composite SQL WHERE clause
#' sql <- sprintf("mukey IN %s", mu.is)
#'
#' # get commonly used map unit / component / chorizon records
#' # as a SoilProfileCollection object
#' x <- fetchSDA(sql)
#'
#' # tighter figure margins
#' par(mar = c(0,0,3,1))
#'
#'
#' # organize component sketches by national map unit symbol
#' # color horizons via awc
#' # adjust legend title
#' # add alternate label (vertical text) containing component percent
#' # move horizon names into the profile sketches
#' # make profiles wider
#' groupedProfilePlot(
#' x,
#' groups = 'nationalmusym',
#' label = 'compname',
#' color = 'awc_r',
#' col.label = 'Available Water Holding Capacity (cm / cm)',
#' alt.label = 'comppct_r',
#' name.style = 'center-center',
#' width = 0.3
#' )
#'
#'
#' mtext(
#' 'STATSGO (1:250,000) map units contain a lot of components!',
#' side = 1,
#' adj = 0,
#' line = -1.5,
#' at = 0.25,
#' font = 4
#' )
#' }
#' }
#'
SDA_spatialQuery <- function(geom, what='mukey', geomIntersection=FALSE,
db = c("SSURGO", "STATSGO")) {
# check for required packages
if(!requireNamespace('rgeos', quietly = TRUE))
stop('please install the `rgeos` package', call.=FALSE)
# sanity checks
if(! what %in% c('mukey', 'geom')) {
stop("query type must be either 'mukey' or 'geom'",call. = FALSE)
}
db <- match.arg(db)
if (what == "mukey" && db == "STATSGO") {
stop("query type 'mukey' for 'STATSGO' is not supported", call. = FALSE)
}
# geom must be an sp object
if(! inherits(geom, 'Spatial')) {
stop('`geom` must be a Spatial* object', call. = FALSE)
}
# geom must have a valid CRS
if(is.na(proj4string(geom))) {
stop('`geom` must have a valid CRS', call. = FALSE)
}
# CRS conversion if needed
target.prj <- "+proj=longlat +datum=WGS84"
if(proj4string(geom) != target.prj) {
geom <- spTransform(geom, CRS(target.prj))
}
# WKT encoding
# use a geometry collection
wkt <- rgeos::writeWKT(geom, byid = FALSE)
## dang it! STATSGO geometry is duplicated: US and state versions stored together
# https://github.com/ncss-tech/soilDB/issues/143
# slower query, returning geom + mukey
  # replacement for deprecated SDA_query_features()
# 10-30x faster than spatial-returning query by input feature
# TODO: this is 15x slower than non-spatial-returning-query in SDA_query_features()
if(what == 'geom') {
db_table <- switch(db, SSURGO = "mupolygon", STATSGO = "gsmmupolygon")
# return intersection + area
if(geomIntersection) {
q <- sprintf("
WITH geom_data (geom, mukey) AS (
SELECT
mupolygongeo.STIntersection( geometry::STGeomFromText('%s', 4326) ) AS geom, P.mukey
FROM %s AS P
WHERE mupolygongeo.STIntersects( geometry::STGeomFromText('%s', 4326) ) = 1
)
SELECT
geom.STAsText() AS geom, mukey,
GEOGRAPHY::STGeomFromWKB(
geom.STUnion(geom.STStartPoint()).STAsBinary(), 4326).STArea() * 0.000247105 AS area_ac
FROM geom_data;
",
wkt, db_table, wkt
)
} else {
# return overlapping
q <- sprintf("
SELECT
mupolygongeo.STAsText() AS geom, P.mukey
FROM %s AS P
WHERE mupolygongeo.STIntersects( geometry::STGeomFromText('%s', 4326) ) = 1;",
db_table, wkt
)
}
# single query for all of the features
# note that row-order / number of rows in results may not match geom
res <- suppressMessages(SDA_query(q))
res <- processSDA_WKT(res)
}
# SSURGO only
# faster query, returning mukey + muname
  # replacement for deprecated SDA_make_spatial_query()
# ~ 3x faster than SDA_query_features()
# TODO: how can we link these back with the source data?
if(what == 'mukey') {
q <- sprintf("SELECT mukey, muname
FROM mapunit
WHERE mukey IN (
SELECT DISTINCT mukey from SDA_Get_Mukey_from_intersection_with_WktWgs84('%s')
)", wkt)
# single query for all of the features
# note that row-order / number of rows in results may not match geom
res <- suppressMessages(SDA_query(q))
}
return(res)
}
## now deprecated
SDA_make_spatial_query <- function(i) {
.Deprecated(new = 'SDA_spatialQuery')
# check for required packages
if(!requireNamespace('rgeos', quietly = TRUE))
stop('please install the `rgeos` package', call.=FALSE)
# convert single feature to WKT
i.wkt <- rgeos::writeWKT(i, byid = FALSE)
  # programmatically generate query
q <- paste0("SELECT mukey, muname
FROM mapunit
WHERE mukey IN (
SELECT DISTINCT mukey from SDA_Get_Mukey_from_intersection_with_WktWgs84('", i.wkt, "')
)")
# send query, messages aren't useful here
res <- suppressMessages(SDA_query(q))
# check for no data
if(is.null(res))
res <- NA
# done
return(res)
}
# this is a safe way to query features while preserving IDs
# note that it is a very slow for large collections
# x is a Spatial* object with more than 1 feature
# id is the name of an attribute that contains a unique ID for each feature
## now deprecated
SDA_query_features <- function(x, id='pedon_id') {
.Deprecated(new = 'SDA_spatialQuery')
# sanity check: ensure that the ID is unique
if(length(x[[id]]) != length(unique(x[[id]])))
stop('id is not unique')
  # transform to GCS WGS84 if needed
  target.prj <- "+proj=longlat +datum=WGS84"
  geom <- x
  if(proj4string(geom) != target.prj) {
    geom <- spTransform(geom, CRS(target.prj))
  }
# iterate over features and save to list
l <- list()
n <- length(geom)
# setup a progress bar for timing
pb <- txtProgressBar(max=n, style=3)
for(i in 1:n) {
# make query
res <- SDA_make_spatial_query(geom[i, ])
# save results along with an ID
res <- cbind(id=geom[[id]][i], res, stringsAsFactors = FALSE)
names(res) <- c(id, 'mukey', 'muname')
l[[i]] <- res
setTxtProgressBar(pb, i)
}
close(pb)
# convert to data.frame, there may be > 1 row / feature when using lines / polygons
d <- ldply(l)
return(d)
}
|
/R/SDA-spatial.R
|
no_license
|
MarconiS/soilDB
|
R
| false | false | 14,309 |
r
|
## chunked queries for large number of records:
# https://github.com/ncss-tech/soilDB/issues/71
## TODO items summarized here
# https://github.com/ncss-tech/soilDB/issues/81
# helper function for processing WKT returned by SDA_query()
# result is an SPDF
# d: data frame
# g: column containing WKT
# p4s: PROJ4 CRS defs
## TODO: test with geom appearing in different positions within query results
## TODO: geometry collections are not allowed in sp objects..
## TODO: consider moving to sf
#' @title Post-process WKT returned from SDA.
#'
#' @description This is a helper function, commonly used with \code{SDA_query} to extract WKT (well-known text) representation of geometry to an sp-class object.
#'
#' @param d \code{data.frame} returned by \code{SDA_query}, containing WKT representation of geometry
#' @param g name of column in \code{d} containing WKT geometry
#' @param p4s PROJ4 CRS defintion, typically GCS WGS84
#'
#' @details The SDA website can be found at \url{http://sdmdataaccess.nrcs.usda.gov}. See the \href{http://ncss-tech.github.io/AQP/soilDB/SDA-tutorial.html}{SDA Tutorial} for detailed examples.
#'
#' @note This function requires the `httr`, `jsonlite`, `XML`, and `rgeos` packages.
#'
#' @author D.E. Beaudette
#'
#' @return A \code{Spatial*} object.
#'
processSDA_WKT <- function(d, g='geom', p4s='+proj=longlat +datum=WGS84') {
# iterate over features (rows) and convert into list of SPDF
p <- list()
n <- nrow(d)
# points or polygons?
# looking at the first feature for efficiency, all others should be the same
g.type <- class(rgeos::readWKT(d[1, g]))
for(i in seq(1, n)) {
# extract the current row in the DF
d.i <- d[i, ]
# extract the current feature from WKT
p.i <- rgeos::readWKT(d.i[[g]], id = i, p4s = p4s)
# remove geom from current row of DF
d.i[[g]] <- NULL
# compose SpatialPointsDataFrame, with other attributes
if(g.type == 'SpatialPoints')
s.i <- SpatialPointsDataFrame(p.i, data=cbind(data.frame(gid=i, stringsAsFactors = FALSE), d.i), match.ID = FALSE)
# compose SpatialPolygonsDataFrame, with other attributes
if(g.type == 'SpatialPolygons')
s.i <- SpatialPolygonsDataFrame(p.i, data=cbind(data.frame(gid=i, stringsAsFactors = FALSE), d.i), match.ID = FALSE)
# fix column names
names(s.i) <- c('gid', names(d.i))
# save to list
p[[i]] <- s.i
}
# reduce list to single SPDF
spdf <- do.call('rbind', p)
return(spdf)
}
## TODO: this will replace *most* of the functionality in:
## SDA_make_spatial_query
## SDA_query_features
#
# get a SPDF of intersecting MU polygons and mukey, in a single query
# row-order and number of rows won't always match input
# that is fine as we can use sp::over to connect
# 10-20x speed improvement over SDA_query_features
#' @title SDA Spatial Query
#'
#' @description Query SDA (SSURGO / STATSGO) records via spatial intersection with supplied geometries. Input can be SpatialPoints, SpatialLines, or SpatialPolygons objects with a valid CRS. Map unit keys, overlapping polygons, or the spatial intersectionion of \code{geom} + SSURGO / STATSGO polygons can be returned. See details.
#'
#' @param geom a Spatial* object, with valid CRS. May contain multiple features.
#' @param what a character vector specifying what to return. 'mukey': \code{data.frame} with intersecting map unit keys and names; 'geom': overlapping or intersecting map unit polygons
#' @param geomIntersection logical; \code{FALSE}: overlapping map unit polygons returned, \code{TRUE}: intersection of \code{geom} + map unit polygons is returned.
#' @param db a character vector identifying the Soil Geographic Databases
#' ('SSURGO' or 'STATSGO') to query. Option \var{STATSGO} currently works
#' only in combination with \code{what = "geom"}.
#'
#' @return A \code{data.frame} if \code{what = 'mukey'}, otherwise \code{SpatialPolygonsDataFrame} object.
#'
#' @author D.E. Beaudette, A.G. Brown, D.R. Schlaepfer
#' @seealso \code{\link{SDA_query}}
#' @keywords manip
#'
#' @aliases SDA_make_spatial_query,SDA_query_features
#'
#' @note Row-order is not preserved across features in \code{geom} and returned object. Use \code{sp::over()} or similar functionality to extract from results. Polygon area in acres is computed server-side when \code{what = 'geom'} and \code{geomIntersection = TRUE}.
#'
#'
#' @details Queries for map unit keys are always more efficient vs. queries for overlapping or intersecting (i.e. least efficient) features. \code{geom} is converted to GCS / WGS84 as needed. Map unit keys are always returned when using \code{what = "geom"}.
#'
#' There is a 100,000 record limit and 32Mb JSON serializer limit, per query.
#'
#' SSURGO (detailed soil survey, typically 1:24,000 scale) and STATSGO (generalized soil survey, 1:250,000 scale) data are stored together within SDA. This means that queries that don't specify an area symbol may result in a mixture of SSURGO and STATSGO records. See the examples below and the \href{http://ncss-tech.github.io/AQP/soilDB/SDA-tutorial.html}{SDA Tutorial} for details.
#'
#'
#' @examples
#' \donttest{
#' if(requireNamespace("curl") &
#' curl::has_internet() &
#' requireNamespace("sp") &
#' requireNamespace("raster")
#' ) {
#'
#' library(aqp)
#' library(sp)
#' library(raster)
#'
#' ## query at a point
#'
#' # example point
#' p <- SpatialPoints(cbind(x = -119.72330, y = 36.92204),
#' proj4string = CRS('+proj=longlat +datum=WGS84'))
#'
#' # query map unit records at this point
#' res <- SDA_spatialQuery(p, what = 'mukey')
#'
#' # convert results into an SQL "IN" statement
#' # useful when there are multiple intersecting records
#' mu.is <- format_SQL_in_statement(res$mukey)
#'
#' # composite SQL WHERE clause
#' sql <- sprintf("mukey IN %s", mu.is)
#'
#' # get commonly used map unit / component / chorizon records
#' # as a SoilProfileCollection object
#' # confusing but essential: request that results contain `mukey`
#' # with `duplicates = TRUE`
#' x <- fetchSDA(sql, duplicates = TRUE)
#'
#' # safely set texture class factor levels
#' # by making a copy of this column
#' # this will save in lieu of textures in the original
#' # `texture` column
#' horizons(x)$texture.class <- factor(x$texture, levels = SoilTextureLevels())
#'
#' # graphical depiction of the result
#' plotSPC(x, color='texture.class', label='compname',
#' name='hzname', cex.names = 1, width=0.25,
#' plot.depth.axis=FALSE, hz.depths=TRUE,
#' name.style='center-center'
#' )
#'
#'
#'
#' ## query mukey + geometry that intersect with a bounding box
#'
#' # define a bounding box: xmin, xmax, ymin, ymax
#' #
#' # +-------------------(ymax, xmax)
#' # | |
#' # | |
#' # (ymin, xmin) ----------------+
#' b <- c(-119.747629, -119.67935, 36.912019, 36.944987)
#'
#' # convert bounding box to WKT
#' bbox.sp <- as(extent(b), 'SpatialPolygons')
#' proj4string(bbox.sp) <- '+proj=longlat +datum=WGS84'
#'
#' # results contain associated map unit keys (mukey)
#' # return SSURGO polygons, after intersection with provided BBOX
#' ssurgo.geom <- SDA_spatialQuery(
#' bbox.sp,
#' what = 'geom',
#' db = 'SSURGO',
#' geomIntersection = TRUE
#' )
#'
#' # return STATSGO polygons, after intersection with provided BBOX
#' statsgo.geom <- SDA_spatialQuery(
#' bbox.sp,
#' what = 'geom',
#' db = 'STATSGO',
#' geomIntersection = TRUE
#' )
#'
#' # inspect results
#' par(mar = c(0,0,3,1))
#' plot(ssurgo.geom, border = 'royalblue')
#' plot(statsgo.geom, lwd = 2, border = 'firebrick', add = TRUE)
#' plot(bbox.sp, lwd = 3, add = TRUE)
#' legend(
#' x = 'top',
#' legend = c('BBOX', 'STATSGO', 'SSURGO'),
#' lwd = c(3, 2, 1),
#' col = c('black', 'firebrick', 'royalblue'),
#' horiz = TRUE,
#' bty = 'n'
#' )
#'
#' # quick reminder that STATSGO map units often contain many components
#' # format an SQL IN statement using the first STATSGO mukey
#' mu.is <- format_SQL_in_statement(statsgo.geom$mukey[1])
#'
#' # composite SQL WHERE clause
#' sql <- sprintf("mukey IN %s", mu.is)
#'
#' # get commonly used map unit / component / chorizon records
#' # as a SoilProfileCollection object
#' x <- fetchSDA(sql)
#'
#' # tighter figure margins
#' par(mar = c(0,0,3,1))
#'
#'
#' # organize component sketches by national map unit symbol
#' # color horizons via awc
#' # adjust legend title
#' # add alternate label (vertical text) containing component percent
#' # move horizon names into the profile sketches
#' # make profiles wider
#' groupedProfilePlot(
#' x,
#' groups = 'nationalmusym',
#' label = 'compname',
#' color = 'awc_r',
#' col.label = 'Available Water Holding Capacity (cm / cm)',
#' alt.label = 'comppct_r',
#' name.style = 'center-center',
#' width = 0.3
#' )
#'
#'
#' mtext(
#' 'STATSGO (1:250,000) map units contain a lot of components!',
#' side = 1,
#' adj = 0,
#' line = -1.5,
#' at = 0.25,
#' font = 4
#' )
#' }
#' }
#'
SDA_spatialQuery <- function(geom, what='mukey', geomIntersection=FALSE,
db = c("SSURGO", "STATSGO")) {
# check for required packages
if(!requireNamespace('rgeos', quietly = TRUE))
stop('please install the `rgeos` package', call.=FALSE)
# sanity checks
if(! what %in% c('mukey', 'geom')) {
stop("query type must be either 'mukey' or 'geom'",call. = FALSE)
}
db <- match.arg(db)
if (what == "mukey" && db == "STATSGO") {
stop("query type 'mukey' for 'STATSGO' is not supported", call. = FALSE)
}
# geom must be an sp object
if(! inherits(geom, 'Spatial')) {
stop('`geom` must be a Spatial* object', call. = FALSE)
}
# geom must have a valid CRS
if(is.na(proj4string(geom))) {
stop('`geom` must have a valid CRS', call. = FALSE)
}
# CRS conversion if needed
target.prj <- "+proj=longlat +datum=WGS84"
if(proj4string(geom) != target.prj) {
geom <- spTransform(geom, CRS(target.prj))
}
# WKT encoding
# use a geometry collection
wkt <- rgeos::writeWKT(geom, byid = FALSE)
## dang it! STATSGO geometry is duplicated: US and state versions stored together
# https://github.com/ncss-tech/soilDB/issues/143
# slower query, returning geom + mukey
  # replacement for deprecated SDA_query_features()
# 10-30x faster than spatial-returning query by input feature
# TODO: this is 15x slower than non-spatial-returning-query in SDA_query_features()
if(what == 'geom') {
db_table <- switch(db, SSURGO = "mupolygon", STATSGO = "gsmmupolygon")
# return intersection + area
if(geomIntersection) {
q <- sprintf("
WITH geom_data (geom, mukey) AS (
SELECT
mupolygongeo.STIntersection( geometry::STGeomFromText('%s', 4326) ) AS geom, P.mukey
FROM %s AS P
WHERE mupolygongeo.STIntersects( geometry::STGeomFromText('%s', 4326) ) = 1
)
SELECT
geom.STAsText() AS geom, mukey,
GEOGRAPHY::STGeomFromWKB(
geom.STUnion(geom.STStartPoint()).STAsBinary(), 4326).STArea() * 0.000247105 AS area_ac
FROM geom_data;
",
wkt, db_table, wkt
)
} else {
# return overlapping
q <- sprintf("
SELECT
mupolygongeo.STAsText() AS geom, P.mukey
FROM %s AS P
WHERE mupolygongeo.STIntersects( geometry::STGeomFromText('%s', 4326) ) = 1;",
db_table, wkt
)
}
# single query for all of the features
# note that row-order / number of rows in results may not match geom
res <- suppressMessages(SDA_query(q))
res <- processSDA_WKT(res)
}
# SSURGO only
# faster query, returning mukey + muname
  # replacement for deprecated SDA_make_spatial_query()
# ~ 3x faster than SDA_query_features()
# TODO: how can we link these back with the source data?
if(what == 'mukey') {
q <- sprintf("SELECT mukey, muname
FROM mapunit
WHERE mukey IN (
SELECT DISTINCT mukey from SDA_Get_Mukey_from_intersection_with_WktWgs84('%s')
)", wkt)
# single query for all of the features
# note that row-order / number of rows in results may not match geom
res <- suppressMessages(SDA_query(q))
}
return(res)
}
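## Usage sketch (kept as comments): linking 'geom' results back to the source
## features, per the @note above. `p` and `res` are illustrative names, assuming
## `p` is the Spatial* object passed as `geom` and `res` the returned SPDF.
# res <- SDA_spatialQuery(p, what = 'geom')
# hits <- sp::over(p, res) # one row of `res` per feature of `p` (NA when no overlap)
# p$mukey <- hits$mukey # attach map unit keys back onto the input features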
## now deprecated
SDA_make_spatial_query <- function(i) {
.Deprecated(new = 'SDA_spatialQuery')
# check for required packages
if(!requireNamespace('rgeos', quietly = TRUE))
stop('please install the `rgeos` package', call.=FALSE)
# convert single feature to WKT
i.wkt <- rgeos::writeWKT(i, byid = FALSE)
  # programmatically generate query
q <- paste0("SELECT mukey, muname
FROM mapunit
WHERE mukey IN (
SELECT DISTINCT mukey from SDA_Get_Mukey_from_intersection_with_WktWgs84('", i.wkt, "')
)")
# send query, messages aren't useful here
res <- suppressMessages(SDA_query(q))
# check for no data
if(is.null(res))
res <- NA
# done
return(res)
}
# this is a safe way to query features while preserving IDs
# note that it is a very slow for large collections
# x is a Spatial* object with more than 1 feature
# id is the name of an attribute that contains a unique ID for each feature
## now deprecated
SDA_query_features <- function(x, id='pedon_id') {
.Deprecated(new = 'SDA_spatialQuery')
# sanity check: ensure that the ID is unique
if(length(x[[id]]) != length(unique(x[[id]])))
stop('id is not unique')
  # transform to GCS WGS84 if needed
  # (assign `geom` first so it is defined even when no reprojection is required)
  target.prj <- "+proj=longlat +datum=WGS84"
  geom <- x
  if(proj4string(x) != target.prj) {
    geom <- spTransform(x, CRS(target.prj))
  }
# iterate over features and save to list
l <- list()
n <- length(geom)
# setup a progress bar for timing
pb <- txtProgressBar(max=n, style=3)
for(i in 1:n) {
# make query
res <- SDA_make_spatial_query(geom[i, ])
# save results along with an ID
res <- cbind(id=geom[[id]][i], res, stringsAsFactors = FALSE)
names(res) <- c(id, 'mukey', 'muname')
l[[i]] <- res
setTxtProgressBar(pb, i)
}
close(pb)
# convert to data.frame, there may be > 1 row / feature when using lines / polygons
d <- ldply(l)
return(d)
}
|
# ============================================================
# By: Weersma Group, UMCG (2020)
#
# DMP Variance explained analysis,
#
# script does univariate ADONIS analysis on one phenotype for DMP data
# it is intended to be run once per phenotype in parallel
#
# Note: must be invoked with 2 CL params:
# (1) = number of phenotype (column in phenotype file)
# (2) = number of permutations
# ============================================================
# load libraries
library(vegan)
library(parallel)
# SETTINGS
# ==========================================
outFolder <- "output_adonis_taxa"
# ==========================================
# ==========================================
# >> MAIN <<
# ==========================================
# === COLLECT COMMAND LINE PARAMS ===
args = commandArgs(trailingOnly=TRUE)
# (1) = number of phenotype
# (2) = number of permutations
phenoNR <- as.numeric(args[1])
permNR <- as.numeric(args[2])
# ===================================
# set WD
setwd('/groups/umcg-lifelines/tmp03/projects/dag3_fecal_mgs/DAG3_statistics/codes/adonis_taxa_big_v26')
# load helper scripts
source('/groups/umcg-lifelines/tmp03/projects/dag3_fecal_mgs/DAG3_statistics/codes/myLibs_v3/R_Microbiome_scripts.R')
# load phenotypes
print(' >> LOADING PHENOTYPES')
inPhenos <- read.table('/groups/umcg-lifelines/tmp03/projects/dag3_fecal_mgs/DAG3_data_ready/phenotypes/DAG3_metadata_merged_ready_v26.csv',sep=',',header=T,quote='"')
rownames(inPhenos) <- inPhenos$DAG3_sampleID
# load data (microbiome)
print(' >> LOADING MICROBIOME')
inDF <- read.table('/groups/umcg-lifelines/tmp03/projects/dag3_fecal_mgs/DAG3_data_ready/microbiome/processed/DAG3_metaphlan_bacteria_archaea_nofiltering.txt',sep='\t',header=T)
# grab species only
inMB <- filterMetaGenomeDF(inDF,presPerc = -1,minMRelAb = -1,minMedRelAb = -1,rescaleTaxa = T,verbose = T,keepLevels = c("S"))
# prepare output folder
# ==========================================
if (!dir.exists(outFolder)) {
dir.create(outFolder)
}
# select phenotypes to test
# ==========================================
# > remove IDs and other (irrelevant) phenotypes
adonisVarsTouse <- colnames(inPhenos)
toExclude = c("DAG3_sampleID","ID")
adonisVarsTouse <- adonisVarsTouse[!adonisVarsTouse %in% toExclude]
# fix names of columns and check for errors
# ================================================================
if ( ("ID" %in% colnames(inPhenos)) & !("DAG3_sampleID" %in% colnames(inPhenos)) ) {
colnames(inPhenos)[colnames(inPhenos)=="ID"] <- "DAG3_sampleID"
}
if ( ("ID" %in% colnames(inMB)) & !("DAG3_sampleID" %in% colnames(inMB)) ) {
colnames(inMB)[colnames(inMB)=="ID"] <- "DAG3_sampleID"
}
if (!("DAG3_sampleID" %in% colnames(inMB))) {
stop('ERROR: DAG3_sampleID not in columns of microbiome data')
}
if (!("DAG3_sampleID" %in% colnames(inPhenos))) {
stop('ERROR: DAG3_sampleID not in columns of phenotype data')
}
# =================================================================
# do adonis
# =================================================================
print (' >> STARTING ADONIS CALCULATIONS')
print (paste0(' >> phenotype NR ', phenoNR))
print (paste0(' >> NR of permutations ', permNR))
print (' ============================================ ')
print(timestamp())
adonisResults <- NULL
for (i in adonisVarsTouse[phenoNR]) {
print (paste(' >>> ANALYSING VARIABLE <',i,'> <<<'))
print(timestamp())
print (' >> collecting complete cases')
inPhenosOneVarID <- inPhenos[,colnames(inPhenos) %in% c(i,"DAG3_sampleID")]
allDF <- merge(x=inPhenosOneVarID,by.x="DAG3_sampleID",y=inMB,by.y="DAG3_sampleID")
rownames(allDF) <- allDF$DAG3_sampleID
allDF$DAG3_sampleID <- NULL
allDF <- allDF[complete.cases(allDF),]
av <- allDF[[i]]
allDF[[i]] <- NULL
print (' >> calculating B/C distance')
inBC <- vegdist(allDF,method = "bray",parallel=4)
print(timestamp())
print (' >> doing adonis')
nrRows <- length(av)
if (length(av) < 3 | length(unique(av)) < 2) {
print(paste0(' >> WARNING: ',i,' has no useful data!!'))
} else {
#print(paste0(' NR NAs: ',sum(is.na(av))))
ad <- adonis(inBC ~ av,permutations=permNR)
aov_table <- ad$aov.tab
# accumulate results
oneRow <- data.frame(Var=i,
NR_nonNA=nrRows,
DF=aov_table[1,1],
SumsOfSqs=aov_table[1,2],
MeanSqs=aov_table[1,3],
FModel=aov_table[1,4],
R2=aov_table[1,5],
pval=aov_table[1,6],
FDR.BH=NA,
Significant=NA)
print(oneRow)
adonisResults <- rbind.data.frame(adonisResults,oneRow)
write.table(adonisResults,paste0(outFolder,"/adonis_results_table_",phenoNR,".csv"),sep=",",row.names=F)
print (paste0('--- ',i,' DONE! ---'))
}
}
print (' >> DONE WITH CALCULATIONS')
#print(timestamp())
print (' >> saving output')
rownames(adonisResults) = adonisResults$Var
adonisResults$FDR.BH=p.adjust(adonisResults$pval, method = "BH")
adonisResults$Significant="No"
adonisResults$Significant[adonisResults$FDR.BH<0.05]="Yes"
adonisResults <- adonisResults[order(adonisResults$pval),]
write.table(adonisResults,paste0(outFolder,"/adonis_Taxa_results_table_",phenoNR,".csv"),sep=",",row.names=F)
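# ============================================================
# Usage / downstream sketch (commented out so it does not run as part of this
# per-phenotype job). The invocation line, file pattern and combined output name
# below are assumptions, not part of the original pipeline:
#
# example invocation (phenotype column 12, 20000 permutations):
# Rscript DMP_do_univar_adonis_taxa.R 12 20000
#
# combining the per-phenotype tables and recomputing BH-FDR across all phenotypes:
# allFiles <- list.files(outFolder, pattern="^adonis_Taxa_results_table_.*\\.csv$", full.names=TRUE)
# allRes <- do.call(rbind, lapply(allFiles, read.table, sep=",", header=TRUE, stringsAsFactors=FALSE))
# allRes$FDR.BH <- p.adjust(allRes$pval, method="BH")
# allRes$Significant <- ifelse(allRes$FDR.BH < 0.05, "Yes", "No")
# write.table(allRes, paste0(outFolder,"/adonis_Taxa_results_combined.csv"), sep=",", row.names=FALSE)
# ============================================================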
|
/Projects/DMP/microbiome_variance/DMP_do_univar_adonis_taxa.R
|
no_license
|
GRONINGEN-MICROBIOME-CENTRE/Groningen-Microbiome
|
R
| false | false | 5,458 |
r
|
|
\name{EstimateProbability}
\alias{EstimateProbability}
\title{
Estimate oligomeric state score of coiled-coil sequences
}
\description{
Sub-function used in scorer2.R in order to compute the oligomeric state score
of input coiled-coil sequences.
}
\usage{
EstimateProbability(id, seq, reg, pssm, var, delta=1)
}
\arguments{
\item{id}{A string that represents the id name of the test sequence}
\item{seq}{ A character string of the amino-acid sequence to be predicted. Valid characters are all uppercase letters except \sQuote{B}, \sQuote{J}, \sQuote{O}, \sQuote{U}, \sQuote{X}, and \sQuote{Z}; }
\item{reg}{A character string of register assignments. Valid characters are the lowercase letters \sQuote{a} to \sQuote{g}. Register characters are not required to be in proper order and may start with any of the seven letters. It must always have the same length as the matching amino-acid sequence.}
\item{pssm}{
A profile scoring matrix generated from the SCORER 2.0 training data. You can either use the default one
or create your own PSSM using the pssm.R function
}
\item{var}{
A list of two elements containing all valid amino-acid and register characters.
}
\item{delta}{
The pseudocount parameter introduced in the PSSM used for the estimation of oligomeric state scores. This helps avoid cases with zero count. Empirical analysis has shown that a default delta score of 1 is optimal.
}
}
\value{
The oligomeric state score of the input coiled-coil sequence, as used by the
SCORER 2.0 prediction algorithm.
By default the final classification is computed on the basis of
the discriminant function value: if \eqn{f(x)>=0}, \eqn{x} is
predicted as a dimer, otherwise as a trimer.
}
\references{
Craig T. Armstrong, Thomas L. Vincent, Peter J. Green and Dek N. Woolfson.
(2011) SCORER 2.0: an algorithm for distinguishing parallel dimeric and trimeric
coiled-coil sequences. Bioinformatics.
DOI: 10.1093/bioinformatics/btr299
}
\author{
Thomas L. Vincent \email{tlfvincent@gmail.com}
}
\examples{
# load pssm data
data(pssm)
# define allowed amino and register characters
var <- list(
amino = c("A","C","D","E","F","G","H","I","K","L",
"M","N","P","Q","R","S","T","V","W","Y","X"),
register = letters[1:7])
# run SCORER 2.0 on GCN4 wild-type
GCN4wt.score <- EstimateProbability("GCN4wt",
"MKQLEDKVEELLSKNYHLENEVARLKKLV",
"abcdefgabcdefgabcdefgabcdefga",
pssm,
var,
delta=1)
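# illustrative follow-up (assumes the returned value is the numeric discriminant
# score described in the Value section): f(x) >= 0 -> dimer, otherwise trimer
ifelse(GCN4wt.score >= 0, "dimer", "trimer")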
}
|
/SCORER2.Rcheck/00_pkg_src/SCORER2/man/EstimateProbability.Rd
|
no_license
|
tlfvincent/scorer
|
R
| false | false | 2,431 |
rd
|
|
summary.latent.regression <- function( object , ... ){
cat("\nRegression Parameters\n\n")
.prnum(object$summary.coef,4) # print results
cat( paste( "\nResidual Variance =" , round( object$sigma^2 , 4 ) ) , "\n" )
cat( paste( "Explained Variance =" , round( object$explvar , 4 ) ) , "\n" )
cat( paste( "Total Variance =" , round( object$totalvar , 4 ) ) , "\n" )
cat( paste( " R2 =" , round( object$rsquared , 4 ) ) , "\n" )
}
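# Usage sketch (comments only; assumes a fitted latent regression object, e.g.
# from latent.regression.em.raschtype() elsewhere in this package -- the call is
# shown schematically, arguments omitted):
# mod <- latent.regression.em.raschtype( ... )
# summary(mod) # dispatches to summary.latent.regression()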
|
/R/summary.latent.regression.R
|
no_license
|
daniloap/sirt
|
R
| false | false | 530 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dSimer-package.R
\docType{data}
\name{HumanNet_sample}
\alias{HumanNet_sample}
\title{HumanNet_sample}
\value{
HumanNet_sample is a data.frame with 22708 rows and 3 columns. Each row
indicates a pair of genes and their normalized likelihood score in HumanNet.
HumanNet_sample will be used in method FunSim after being converted to list
by method LLSn2List.
The entire data of HumanNet can be downloaded from
the website http://www.functionalnet.org/humannet/ .
}
\description{
a sample of HumanNet likelihood score data which will be used in method FunSim.
}
\examples{
data(HumanNet_sample)
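# convert to the list format expected by FunSim (sketch; see LLSn2List)
llsn_list <- LLSn2List(HumanNet_sample)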
}
\references{
Cheng L, Li J, Ju P, et al. SemFunSim: a new method for measuring
disease similarity by integrating semantic and gene functional association[J].
PloS one, 2014, 9(6): e99415.
}
\seealso{
\code{\link{FunSim}}, \code{\link{LLSn2List}}
}
\keyword{dataset}
|
/man/HumanNet_sample.Rd
|
no_license
|
PengNi/dSimer
|
R
| false | true | 944 |
rd
|
|
make_expandedRhombus = function (parent_nodes, child_nodes, Probs = NULL, nodename = NULL, cardinality = NULL)
{
nodes = parent_nodes + child_nodes;
if (parent_nodes < 1) {
stop("Need More Parent Nodes!");
} else if (child_nodes < 1) {
stop("Need More Child Nodes!");
}
arcs_mat = matrix(0, nodes, nodes)
for (i in 1:parent_nodes)
{
for (j in (1+parent_nodes):nodes)
{
arcs_mat[i,j] = 1
}
}
# Check Input Probs & Cardinalities
checker = check_cardinality(arcs_mat = arcs_mat, nodename = nodename, cardinality = cardinality);
cardinality = checker$cardinality;
num_of_probs = checker$num_of_probs;
nodename = checker$nodename;
#####
if (is.null(Probs) & is.null(cardinality))
{
Probs = list()
for (i in 1:parent_nodes)
{
Probs[[i]] = runif(1);
}
for (i in (1+parent_nodes):nodes)
{
Probs[[i]] = runif(2^2)
}
} else if (is.null(Probs)) {
Probs = list()
for (i in 1:length(num_of_probs))
{
Probs[[i]] = runif(num_of_probs[i])
}
}
#####
result = list(arcs_mat = arcs_mat, Probs = Probs, nodename = nodename, cardinality = cardinality, num_of_nodes = nodes);
return(result)
}
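# Usage sketch (comments only; requires check_cardinality() to be available, and
# the node counts below are illustrative):
# net <- make_expandedRhombus(parent_nodes = 2, child_nodes = 3)
# net$arcs_mat # 5 x 5 adjacency matrix with an arc from every parent to every child
# net$num_of_nodes # 5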
|
/make_expandedRhombus.R
|
no_license
|
JaeseongYoo/PatternRecognition
|
R
| false | false | 1,154 |
r
|
|
\name{distx}
\alias{distx}
\alias{distx.matrix}
\alias{distx.biom}
\title{
Calculate distances with optional grouping and other features
}
\description{
Calculate several distances and dissimilarities with optional grouping,
by default columnwise and pairwise,
or from an optionally specified common point.
}
\usage{
distx(x, ...)
\method{distx}{matrix}(x, method=c("euclidean", "bray-curtis", "jaccard", "mahalanobis",
"sorensen", "difference", "maximum", "manhattan", "canberra", "binary", "minkowski"),
groups=NULL, p=NULL, ..., bycol=TRUE)
\method{distx}{biom}(x, method="euclidean", groups=NULL, ..., bycol=TRUE)
}
\arguments{
\item{x}{a matrix-like object (\code{matrix} or \code{biom})}
\item{method}{name of distance or dissimilarity measure (\code{character})}
\item{groups}{a grouping of columns/rows (\code{character} or \code{factor})}
\item{p}{a single column/row (\code{numeric})}
\item{\dots}{unused}
\item{bycol}{compute columnwise rather than rowwise? (\code{logical})}
}
\details{
Complete technical documentation is forthcoming.
For the current preliminary release, please refer
to the examples provided.
}
\value{
Complete technical documentation is forthcoming.
For the current preliminary release, please refer
to the examples provided.
}
\author{Daniel T. Braithwaite and Kevin P. Keegan}
\seealso{
\code{stats::\link{dist}},
\code{ecodist::\link[ecodist]{distance}}
}
\examples{
#### Euclidean distance between samples based on raw counts
distx (xx1)
#### alternate dissimilarity measure
distx (xx1, method="bray-curtis")
#### distance in log-transformed data
distx (transform (xx2, t_Log))
#### mean pairwise distance between biomes
distx (xx3, groups="$$biome", method="bray-curtis")
}
|
/man/distx.Rd
|
no_license
|
braithwaite/matR
|
R
| false | false | 1,754 |
rd
|
|
#' Sample QC
#'
#' Generate QC metrics for sample. Can use mutation calls in MAF file.
#'
#' @param facets_output Output from \code{run_facets}.
#' @param genome Genome build.
#' @param algorithm Choose whether to assess the fit from the \code{em} or \code{cncf} algorithm.
#' @param maf Optional: mutation calls for assessed samples, should only include mutations for given sample.
#'
#' @return A list object with the following items:
#' \itemize{
#' \item{\code{dipLogR_flag}:} {Boolean indicating extreme dipLogR value.}
#' \item{\code{n_alternative_dipLogR}:} {Number of alternative dipLogR values.}
#' \item{\code{n_dip_bal_segs}, \code{frac_dip_bal_segs}:} {Number of balanced segments at dipLogR and the fraction of genome they represent.}
#' \item{\code{n_dip_imbal_segs}, \code{frac_dip_imbal_segs}:} {Number of imbalanced segments at dipLogR and the fraction of genome they represent.}
#' \item{\code{n_amp}:} {Number of segments at total copy number >= 10.}
#' \item{\code{n_homdels}:} {Number of homozygously deleted segments (total copy number = 0).}
#' \item{\code{n_homdels_clonal}, \code{frac_homdels_clonal}:} {Number of clonal homdel segments and the fraction of the genome they represent.}
#' \item{\code{n_cn_states}:} {Number of unique copy-number states (i.e. combinations of major and minor copy number).}
#' \item{\code{n_segs}:} {Number of segments.}
#' \item{\code{n_cnlr_clusters}:} {Number of copy-number log-ratio clusters}
#' \item{\code{n_lcn_na}:} {Number of segments where no minor copy number was inferred (lcn is NA).}
#' \item{\code{n_loh}, \code{frac_loh}:} {Number of segments where the minor copy number is 0 and the fraction of the genome they represent.}
#' \item{\code{n_snps}:} {Number of SNPs used for segmentation.}
#' \item{\code{n_het_snps}, \code{frac_het_snps}:} {Number of heterozygous SNPs used for segmentation and their fraction of the total.}
#' \item{\code{n_het_snps_hom_in_tumor_1pct}, \code{frac_het_snps_hom_in_tumor_1pct}:} {Number of heterozygous SNPs where the tumor allele frequency is <0.01/>0.99 and their fraction of the total.}
#' \item{\code{n_het_snps_hom_in_tumor_5pct}, \code{frac_het_snps_hom_in_tumor_5pct}:} {Number of heterozygous SNPs where the tumor allele frequency is <0.05/>0.95 and their fraction of the total.}
#' \item{\code{mean_cnlr_residual}, \code{sd_cnlr_residual}:} {Mean and standard deviation of SNPs' log-ratio from their segments copy-number log-ratio.}
#' \item{\code{n_segs_discordant_tcn}, \code{frac_segs_discordant_tcn}:} {Number of segments where the naïve and EM algorithm estimates of the total copy number are discordant and the fraction of the genome they represent.}
#' \item{\code{n_segs_discordant_lcn}, \code{frac_segs_discordant_lcn}:} {Number of segments where the naïve and EM algorithm estimates of the minor copy number are discordant and the fraction of the genome they represent.}
#' \item{\code{n_segs_discordant_both}, \code{frac_segs_discordant_both}:} {Number of segments where the naïve and EM algorithm estimates of the both copy numbers are discordant and the fraction of the genome they represent.}
#' \item{\code{n_segs_icn_cnlor_discordant}, \code{frac_icn_cnlor_discordant}:} {Number of clonal segments where the log-ratio shows balance but the copy-number solution does not, and the reverse, and the fraction of the genome they represent.}
#' \item{\code{dip_median_vaf}:} {If MAF input: median tumor VAF of somatic mutations on clonal segments with total copy number 2 and allelic balance.}
#' \item{\code{n_homdel_muts}:} {If MAF input: number of somatic mutations in homozygously deleted segments.}
#' \item{\code{median_vaf_homdel_muts}:} {If MAF input: Median tumor VAF of somatic mutations homozygously deleted segments.}
#' }
#'
#' @importFrom dplyr distinct
#' @importFrom purrr map_if
#' @import data.table
#' @export
check_fit = function(facets_output,
genome = c('hg19', 'hg18', 'hg38'),
algorithm = c('em', 'cncf'),
maf = NULL) {
algorithm = match.arg(algorithm, c('em', 'cncf'), several.ok = F)
genome_choice = get(match.arg(genome, c('hg19', 'hg18', 'hg38'), several.ok = F))
# Set variables
segs = as.data.table(facets_output$segs)
snps = facets_output$snps
dipLogR = facets_output$dipLogR
alballogr = as.numeric(facets_output$alballogr[, 1])
purity = facets_output$purity
fcna_output = calculate_fraction_cna(segs, facets_output$ploidy, genome, algorithm)
wgd = fcna_output$genome_doubled
fga = fcna_output$fraction_cna
segs[, `:=` (tcn.original = tcn, lcn.original = lcn, cf.original = cf,
tcn.em.original = tcn.em, lcn.em.original = lcn.em, cf.em.original = cf.em)] # these are retained since the parse_segs function consolidates cncf/em solutions
segs = parse_segs(segs, algorithm)
segs = as.data.table(segs)
setkey(segs, chrom, start, end)
snps = as.data.table(snps)
setkey(snps, chrom, maploc)
# Label clonal segments
segs[, clonal := cf >= (purity * 0.8)]
# Subset on autosomes
auto_segs = segs[chrom < 23]
    # Check for extreme dipLogR values
dipLogR_flag = abs(facets_output$dipLogR) > 1
# Check for alternative dipLogR values
n_alt_dipLogR = length(setdiff(alballogr, dipLogR))
# Check for balance/imbalance of copy-number at segments at dipLogR
# cnlr.median.clust == dipLogR for purity runs, for others, find the closest one
cnlr_clusts = unique(segs$cnlr.median.clust)
cnlr_clust_value = cnlr_clusts[which.min(abs(cnlr_clusts - dipLogR))]
dip_bal_segs = auto_segs[cnlr.median.clust == cnlr_clust_value & mcn == lcn, ]
n_dip_bal_segs = nrow(dip_bal_segs)
frac_dip_bal_segs = sum(dip_bal_segs$length)/sum(auto_segs$length)
dip_imbal_segs = auto_segs[cnlr.median.clust == cnlr_clust_value & mcn != lcn & !is.na(lcn), ]
n_dip_imbal_segs = nrow(dip_imbal_segs)
frac_dip_imbal_segs = sum(dip_imbal_segs$length)/sum(auto_segs$length)
#############################
### Other metrics
#############################
## fraction genome unaltered (2-1) -- too high --> low purity?
segs_unaltered = auto_segs[tcn == 2 & lcn == 1, ]
n_segs_unaltered = nrow(segs_unaltered)
frac_genome_unaltered = sum(segs_unaltered$length)/sum(auto_segs$length)
segs_below_dipLogR = auto_segs[cnlr.median.clust < dipLogR] # part of ploidy filter
n_segs_below_dipLogR = nrow(segs_below_dipLogR) # part of ploidy filter
frac_below_dipLogR = sum(segs_below_dipLogR$length)/sum(auto_segs$length)
segs_balanced_odd_tcn = auto_segs[mafR <= 0.025 & (tcn %% 2) != 0 & clonal ==T & tcn < 8,]
n_segs_balanced_odd_tcn = nrow(segs_balanced_odd_tcn)
frac_balanced_odd_tcn = sum(segs_balanced_odd_tcn$length)/sum(auto_segs$length)
segs_imbalanced_diploid_cn = auto_segs[clonal==T & mafR > 0.1 & !is.na(lcn) & lcn != 0 & (as.double(tcn) / lcn) == 2, ]
n_segs_imbalanced_diploid_cn = nrow(segs_imbalanced_diploid_cn)
frac_imbalanced_diploid_cn = sum(segs_imbalanced_diploid_cn$length)/sum(auto_segs$length)
segs_lcn_greater_mcn = auto_segs[clonal==T & lcn < mcn,]
n_segs_lcn_greater_mcn = nrow(segs_lcn_greater_mcn)
frac_lcn_greater_mcn = sum(segs_lcn_greater_mcn$length)/sum(auto_segs$length)
mafr_median_all = median(auto_segs$mafR, na.rm = T)
mafr_median_clonal = median(auto_segs[clonal==T, ]$mafR, na.rm = T)
mafr_n_gt_1 = nrow(auto_segs[mafR > 1, ])
# Number of high-level amplifications and homozygous deletions
# Clonal homdels, how much of the genome do they represent
n_amps = nrow(segs[tcn >= 10])
homdels = auto_segs[tcn == 0]
n_homdels = nrow(homdels)
clonal_homdels = auto_segs[tcn == 0 & clonal == TRUE]
n_homdels_clonal = nrow(clonal_homdels)
frac_homdels = sum(homdels$length)/sum(auto_segs$length)
frac_homdels_clonal = sum(clonal_homdels$length)/sum(auto_segs$length)
# Count number of unique copy-number states and total number of segments
n_cn_states = nrow(distinct(segs, tcn, lcn))
n_segs = nrow(segs)
n_cnlr_clusters = length(unique(segs$cnlr.median.clust))
# Number of segments with lcn==NA
n_lcn_na = nrow(auto_segs[is.na(lcn)])
frac_lcn_na = sum(auto_segs[is.na(lcn)]$length)/sum(auto_segs$length)
# Number of segments with LOH
loh_segs = auto_segs[lcn == 0,]
n_loh = nrow(loh_segs)
frac_loh = sum(loh_segs$length)/sum(auto_segs$length)
# Check fraction of subclonal events within autosomal chromosomes
subclonal_segs = auto_segs[clonal == F, ]
n_segs_subclonal = nrow(subclonal_segs)
frac_segs_subclonal = sum(subclonal_segs$length)/sum(auto_segs$length)
# Compile SNP statistics
n_snps = nrow(snps)
het_snps = snps[het == 1]
n_het_snps = nrow(het_snps)
frac_het_snps = n_het_snps/n_snps
n_snps_with_300x_in_tumor = nrow(snps[rCountT > 300, ])
n_het_snps_with_300x_in_tumor = nrow(het_snps[rCountT > 300, ])
n_het_snps_hom_in_tumor_1pct = nrow(het_snps[ (vafT < 0.01 | vafT > 0.99), ])
n_het_snps_hom_in_tumor_5pct = nrow(het_snps[ (rCountN > 35 & rCountT > 35) & (vafT < 0.05 | vafT > 0.95), ])
frac_het_snps_hom_in_tumor_1pct = n_het_snps_hom_in_tumor_1pct/n_het_snps
frac_het_snps_hom_in_tumor_5pct = n_het_snps_hom_in_tumor_5pct/n_het_snps
# Check the mean/standard deviation of the cnlr
snps[, cnlr_residual := cnlr - median(cnlr), by = seg]
mean_cnlr_residual = mean(snps$cnlr_residual)
sd_cnlr_residual = sd(snps$cnlr_residual)
# Check concordance between CNCF and EM fits
discordant_segs = auto_segs[, `:=` (
discordant_tcn = (tcn.em.original != tcn.original | (is.na(tcn.em.original) | is.na(tcn.original))) & !(is.na(tcn.em.original) & is.na(tcn.original)),
discordant_lcn = (lcn.em.original != lcn.original | (is.na(lcn.em.original) | is.na(lcn.original))) & !(is.na(lcn.em.original) & is.na(lcn.original))
)][(discordant_tcn == TRUE | discordant_lcn == TRUE) & tcn < 10]
discordant_stats = discordant_segs[, list(
        n_discordant_tcn = sum(discordant_tcn),
        length_discordant_tcn = sum(length[discordant_tcn]),
        n_discordant_lcn = sum(discordant_lcn),
        length_discordant_lcn = sum(length[discordant_lcn]),
n_discordant_both = sum(discordant_tcn & discordant_lcn),
length_discordant_both = sum(length[discordant_tcn & discordant_lcn])
)]
evaluable_length = segs[chrom <= 22 & tcn < 10][, list(
tcn = sum(length[!(is.na(tcn.em.original) & is.na(tcn.original))]),
lcn = sum(length[!(is.na(lcn.em.original) & is.na(lcn.original))]),
both = sum(length[!(is.na(tcn.em.original) & is.na(tcn.original)) & !(is.na(lcn.em.original) & is.na(lcn.original))])
)]
# Check concordance between log odds-ratio and integer copy-number with regards to balance
# Count segments where logOR shows balanced but tcn/lcn is imbalanced and vice-versa.
clonal_segs = auto_segs[clonal == TRUE & !is.na(lcn)]
clonal_segs_disc_icn = clonal_segs[, `:=` (
icn_bal_mafr_high = lcn == mcn & mafR > 0.05,
icn_imbal_mafr_low = lcn != mcn & mafR < 0.05
)][icn_bal_mafr_high == TRUE | icn_imbal_mafr_low == TRUE]
n_icn_cnlor_discordant = nrow(clonal_segs_disc_icn)
frac_icn_cnlor_discordant = sum(clonal_segs_disc_icn$length)/sum(auto_segs$length)
# Output all values
# n: denotes number
    # frac: denotes fraction of assessed genome
output = list(
dipLogR_flag = dipLogR_flag,
n_alternative_dipLogR = n_alt_dipLogR,
wgd = wgd,
fga = fga,
n_dip_bal_segs = n_dip_bal_segs,
frac_dip_bal_segs = frac_dip_bal_segs,
n_dip_imbal_segs = n_dip_imbal_segs,
frac_dip_imbal_segs = frac_dip_imbal_segs,
n_amps = n_amps,
n_homdels = n_homdels,
frac_homdels = frac_homdels,
n_homdels_clonal = n_homdels_clonal,
frac_homdels_clonal = frac_homdels_clonal,
n_cn_states = n_cn_states,
n_segs = n_segs,
n_cnlr_clusters = n_cnlr_clusters,
n_lcn_na = n_lcn_na,
n_loh = n_loh,
frac_loh = frac_loh,
n_segs_subclonal = n_segs_subclonal,
frac_segs_subclonal = frac_segs_subclonal,
n_segs_below_dipLogR = n_segs_below_dipLogR,
frac_below_dipLogR = frac_below_dipLogR,
n_segs_balanced_odd_tcn = n_segs_balanced_odd_tcn,
frac_balanced_odd_tcn = frac_balanced_odd_tcn,
n_segs_imbalanced_diploid_cn = n_segs_imbalanced_diploid_cn,
frac_imbalanced_diploid_cn = frac_imbalanced_diploid_cn,
n_segs_lcn_greater_mcn = n_segs_lcn_greater_mcn,
frac_lcn_greater_mcn = frac_lcn_greater_mcn,
n_snps = n_snps,
n_het_snps = n_het_snps,
frac_het_snps = frac_het_snps,
n_snps_with_300x_in_tumor = n_snps_with_300x_in_tumor,
n_het_snps_with_300x_in_tumor = n_het_snps_with_300x_in_tumor,
n_het_snps_hom_in_tumor_1pct = n_het_snps_hom_in_tumor_1pct,
n_het_snps_hom_in_tumor_5pct = n_het_snps_hom_in_tumor_5pct,
frac_het_snps_hom_in_tumor_1pct = frac_het_snps_hom_in_tumor_1pct,
frac_het_snps_hom_in_tumor_5pct = frac_het_snps_hom_in_tumor_5pct,
mean_cnlr_residual = mean_cnlr_residual,
sd_cnlr_residual = sd_cnlr_residual,
n_segs_discordant_tcn = discordant_stats$n_discordant_tcn,
frac_discordant_tcn = discordant_stats$length_discordant_tcn/evaluable_length$tcn,
n_segs_discordant_lcn = discordant_stats$n_discordant_lcn,
frac_discordant_lcn = discordant_stats$length_discordant_lcn/evaluable_length$lcn,
n_segs_discordant_both = discordant_stats$n_discordant_both,
frac_discordant_both = discordant_stats$length_discordant_both/evaluable_length$both,
n_segs_icn_cnlor_discordant = n_icn_cnlor_discordant,
frac_icn_cnlor_discordant = frac_icn_cnlor_discordant,
mafr_median_all = mafr_median_all,
mafr_median_clonal = mafr_median_clonal,
mafr_n_gt_1 = mafr_n_gt_1
)
# If input MAF is provided add some stats based on this
if (!is.null(maf)) {
maf = as.data.table(maf)
maf[, `:=` (
Chromosome = ifelse(Chromosome == 'X', 23, Chromosome),
t_var_freq = t_alt_count/(t_alt_count+t_ref_count)
)][, Chromosome := as.integer(Chromosome)]
setkey(maf, Chromosome, Start_Position, End_Position)
maf = foverlaps(maf, segs, mult = 'first', nomatch = NA,
by.x = c('Chromosome', 'Start_Position', 'End_Position'),
by.y = c('chrom', 'start', 'end'))
        # Median mutation VAF at clonal, balanced segments with total copy number 2 (mcn = lcn = 1)
output$dip_median_vaf = maf[clonal == TRUE & mcn == 1 & lcn == 1][, median(t_var_freq)]
# Mutations at homdels
homdel_muts = maf[tcn == 0]
output$n_homdel_muts = nrow(homdel_muts)
output$median_vaf_homdel_muts = median(homdel_muts$t_var_freq)
}
# Return rounded values for fractions
map_if(output, is.double, .f = function(x) signif(x, 2))
}
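## Usage sketch (comments only; `read_snp_matrix()` and `run_facets()` are assumed
## to be this package's own helpers, and the file name / cval are placeholders):
# counts = read_snp_matrix('tumor_normal.snp_pileup.gz')
# fit = run_facets(counts, cval = 500, genome = 'hg19')
# qc = check_fit(fit, genome = 'hg19', algorithm = 'em')
# str(qc[c('frac_loh', 'n_segs_discordant_tcn')])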
|
/R/check-fit.R
|
permissive
|
mskcc/facets-suite
|
R
| false | false | 15,475 |
r
|
#' Sample QC
#'
#' Generate QC metrics for sample. Can use mutation calls in MAF file.
#'
#' @param facets_output Output from \code{run_facets}.
#' @param genome Genome build.
#' @param algorithm Choose whether to assess the fit from the \code{em} or \code{cncf} algorithm.
#' @param maf Optional: mutation calls for assessed samples, should only include mutations for given sample.
#'
#' @return A list object with the following items:
#' \itemize{
#' \item{\code{dipLogR_flag}:} {Boolean indicating extreme dipLogR value.}
#' \item{\code{n_alternative_dipLogR}:} {Number of alternative dipLogR values.}
#' \item{\code{n_dip_bal_segs}, \code{frac_dip_bal_segs}:} {Number of balanced segments at dipLogR and the fraction of genome they represent.}
#' \item{\code{n_dip_imbal_segs}, \code{frac_dip_imbal_segs}:} {Number of imbalanced segments at dipLogR and the fraction of genome they represent.}
#' \item{\code{n_amp}:} {Number of segments at total copy number >= 10.}
#' \item{\code{n_homdels}:} {Number of homozygously deleted segments (total copy number = 0).}
#' \item{\code{n_homdels_clonal}, \code{frac_homdels_clonal}:} {Number of clonal homdel segments and the fraction of the genome they represent.}
#' \item{\code{n_cn_states}:} {Number of unique copy-number states (i.e. combinations of major and minor copy number).}
#' \item{\code{n_segs}:} {Number of segments.}
#' \item{\code{n_cnlr_clusters}:} {Number of copy-number log-ratio clusters}
#' \item{\code{n_lcn_na}:} {Number of segments where no minor copy number was inferred (lcn is NA).}
#' \item{\code{n_loh}, \code{frac_loh}:} {Number of segments where the minor copy number is 0 and the fraction of the genome they represent.}
#' \item{\code{n_snps}:} {Number of SNPs used for segmentation.}
#' \item{\code{n_het_snps}, \code{frac_het_snps}:} {Number of heterozygous SNPs used for segmentation and their fraction of the total.}
#' \item{\code{n_het_snps_hom_in_tumor_1pct}, \code{frac_het_snps_hom_in_tumor_1pct}:} {Number of heterozygous SNPs where the tumor allele frequency is <0.01/>0.99 and their fraction of the total.}
#' \item{\code{n_het_snps_hom_in_tumor_5pct}, \code{frac_het_snps_hom_in_tumor_5pct}:} {Number of heterozygous SNPs where the tumor allele frequency is <0.05/>0.95 and their fraction of the total.}
#' \item{\code{mean_cnlr_residual}, \code{sd_cnlr_residual}:} {Mean and standard deviation of SNPs' log-ratio from their segments copy-number log-ratio.}
#' \item{\code{n_segs_discordant_tcn}, \code{frac_segs_discordant_tcn}:} {Number of segments where the naïve and EM algorithm estimates of the total copy number are discordant and the fraction of the genome they represent.}
#' \item{\code{n_segs_discordant_lcn}, \code{frac_segs_discordant_lcn}:} {Number of segments where the naïve and EM algorithm estimates of the minor copy number are discordant and the fraction of the genome they represent.}
#' \item{\code{n_segs_discordant_both}, \code{frac_segs_discordant_both}:} {Number of segments where the naïve and EM algorithm estimates of the both copy numbers are discordant and the fraction of the genome they represent.}
#' \item{\code{n_segs_icn_cnlor_discordant}, \code{frac_icn_cnlor_discordant}:} {Number of clonal segments where the log-ratio shows balance but the copy-number solution does not, and the reverse, and the fraction of the genome they represent.}
#' \item{\code{dip_median_vaf}:} {If MAF input: median tumor VAF of somatic mutations on clonal segments with total copy number 2 and allelic balance.}
#' \item{\code{n_homdel_muts}:} {If MAF input: number of somatic mutations in homozygously deleted segments.}
#' \item{\code{median_vaf_homdel_muts}:} {If MAF input: Median tumor VAF of somatic mutations homozygously deleted segments.}
#' }
#'
#' @importFrom dplyr distinct
#' @importFrom purrr map_if
#' @import data.table
#' @export
check_fit = function(facets_output,
genome = c('hg19', 'hg18', 'hg38'),
algorithm = c('em', 'cncf'),
maf = NULL) {
algorithm = match.arg(algorithm, c('em', 'cncf'), several.ok = F)
genome_choice = get(match.arg(genome, c('hg19', 'hg18', 'hg38'), several.ok = F))
# Set variables
segs = as.data.table(facets_output$segs)
snps = facets_output$snps
dipLogR = facets_output$dipLogR
alballogr = as.numeric(facets_output$alballogr[, 1])
purity = facets_output$purity
fcna_output = calculate_fraction_cna(segs, facets_output$ploidy, genome, algorithm)
wgd = fcna_output$genome_doubled
fga = fcna_output$fraction_cna
segs[, `:=` (tcn.original = tcn, lcn.original = lcn, cf.original = cf,
tcn.em.original = tcn.em, lcn.em.original = lcn.em, cf.em.original = cf.em)] # these are retained since the parse_segs function consolidates cncf/em solutions
segs = parse_segs(segs, algorithm)
segs = as.data.table(segs)
setkey(segs, chrom, start, end)
snps = as.data.table(snps)
setkey(snps, chrom, maploc)
# Label clonal segments
segs[, clonal := cf >= (purity * 0.8)]
# Subset on autosomes
auto_segs = segs[chrom < 23]
    # Check for extreme dipLogR values
dipLogR_flag = abs(facets_output$dipLogR) > 1
# Check for alternative dipLogR values
n_alt_dipLogR = length(setdiff(alballogr, dipLogR))
# Check for balance/imbalance of copy-number at segments at dipLogR
# cnlr.median.clust == dipLogR for purity runs, for others, find the closest one
cnlr_clusts = unique(segs$cnlr.median.clust)
cnlr_clust_value = cnlr_clusts[which.min(abs(cnlr_clusts - dipLogR))]
dip_bal_segs = auto_segs[cnlr.median.clust == cnlr_clust_value & mcn == lcn, ]
n_dip_bal_segs = nrow(dip_bal_segs)
frac_dip_bal_segs = sum(dip_bal_segs$length)/sum(auto_segs$length)
dip_imbal_segs = auto_segs[cnlr.median.clust == cnlr_clust_value & mcn != lcn & !is.na(lcn), ]
n_dip_imbal_segs = nrow(dip_imbal_segs)
frac_dip_imbal_segs = sum(dip_imbal_segs$length)/sum(auto_segs$length)
#############################
### Other metrics
#############################
## fraction genome unaltered (2-1) -- too high --> low purity?
segs_unaltered = auto_segs[tcn == 2 & lcn == 1, ]
n_segs_unaltered = nrow(segs_unaltered)
frac_genome_unaltered = sum(segs_unaltered$length)/sum(auto_segs$length)
segs_below_dipLogR = auto_segs[cnlr.median.clust < dipLogR] # part of ploidy filter
n_segs_below_dipLogR = nrow(segs_below_dipLogR) # part of ploidy filter
frac_below_dipLogR = sum(segs_below_dipLogR$length)/sum(auto_segs$length)
segs_balanced_odd_tcn = auto_segs[mafR <= 0.025 & (tcn %% 2) != 0 & clonal ==T & tcn < 8,]
n_segs_balanced_odd_tcn = nrow(segs_balanced_odd_tcn)
frac_balanced_odd_tcn = sum(segs_balanced_odd_tcn$length)/sum(auto_segs$length)
segs_imbalanced_diploid_cn = auto_segs[clonal==T & mafR > 0.1 & !is.na(lcn) & lcn != 0 & (as.double(tcn) / lcn) == 2, ]
n_segs_imbalanced_diploid_cn = nrow(segs_imbalanced_diploid_cn)
frac_imbalanced_diploid_cn = sum(segs_imbalanced_diploid_cn$length)/sum(auto_segs$length)
segs_lcn_greater_mcn = auto_segs[clonal==T & lcn < mcn,]
n_segs_lcn_greater_mcn = nrow(segs_lcn_greater_mcn)
frac_lcn_greater_mcn = sum(segs_lcn_greater_mcn$length)/sum(auto_segs$length)
mafr_median_all = median(auto_segs$mafR, na.rm = T)
mafr_median_clonal = median(auto_segs[clonal==T, ]$mafR, na.rm = T)
mafr_n_gt_1 = nrow(auto_segs[mafR > 1, ])
# Number of high-level amplifications and homozygous deletions
# Clonal homdels, how much of the genome do they represent
n_amps = nrow(segs[tcn >= 10])
homdels = auto_segs[tcn == 0]
n_homdels = nrow(homdels)
clonal_homdels = auto_segs[tcn == 0 & clonal == TRUE]
n_homdels_clonal = nrow(clonal_homdels)
frac_homdels = sum(homdels$length)/sum(auto_segs$length)
frac_homdels_clonal = sum(clonal_homdels$length)/sum(auto_segs$length)
# Count number of unique copy-number states and total number of segments
n_cn_states = nrow(distinct(segs, tcn, lcn))
n_segs = nrow(segs)
n_cnlr_clusters = length(unique(segs$cnlr.median.clust))
# Number of segments with lcn==NA
n_lcn_na = nrow(auto_segs[is.na(lcn)])
frac_lcn_na = sum(auto_segs[is.na(lcn)]$length)/sum(auto_segs$length)
# Number of segments with LOH
loh_segs = auto_segs[lcn == 0,]
n_loh = nrow(loh_segs)
frac_loh = sum(loh_segs$length)/sum(auto_segs$length)
# Check fraction of subclonal events within autosomal chromosomes
subclonal_segs = auto_segs[clonal == F, ]
n_segs_subclonal = nrow(subclonal_segs)
frac_segs_subclonal = sum(subclonal_segs$length)/sum(auto_segs$length)
# Compile SNP statistics
n_snps = nrow(snps)
het_snps = snps[het == 1]
n_het_snps = nrow(het_snps)
frac_het_snps = n_het_snps/n_snps
n_snps_with_300x_in_tumor = nrow(snps[rCountT > 300, ])
n_het_snps_with_300x_in_tumor = nrow(het_snps[rCountT > 300, ])
n_het_snps_hom_in_tumor_1pct = nrow(het_snps[ (vafT < 0.01 | vafT > 0.99), ])
n_het_snps_hom_in_tumor_5pct = nrow(het_snps[ (rCountN > 35 & rCountT > 35) & (vafT < 0.05 | vafT > 0.95), ])
frac_het_snps_hom_in_tumor_1pct = n_het_snps_hom_in_tumor_1pct/n_het_snps
frac_het_snps_hom_in_tumor_5pct = n_het_snps_hom_in_tumor_5pct/n_het_snps
# Check the mean/standard deviation of the cnlr
snps[, cnlr_residual := cnlr - median(cnlr), by = seg]
mean_cnlr_residual = mean(snps$cnlr_residual)
sd_cnlr_residual = sd(snps$cnlr_residual)
# Check concordance between CNCF and EM fits
discordant_segs = auto_segs[, `:=` (
discordant_tcn = (tcn.em.original != tcn.original | (is.na(tcn.em.original) | is.na(tcn.original))) & !(is.na(tcn.em.original) & is.na(tcn.original)),
discordant_lcn = (lcn.em.original != lcn.original | (is.na(lcn.em.original) | is.na(lcn.original))) & !(is.na(lcn.em.original) & is.na(lcn.original))
)][(discordant_tcn == TRUE | discordant_lcn == TRUE) & tcn < 10]
discordant_stats = discordant_segs[, list(
n_discordant_tcn = sum(discordant_tcn),
length_discordant_tcn = sum(length[discordant_tcn ]),
n_discordant_lcn = sum(discordant_lcn),
length_discordant_lcn = sum(length[discordant_lcn]),
n_discordant_both = sum(discordant_tcn & discordant_lcn),
length_discordant_both = sum(length[discordant_tcn & discordant_lcn])
)]
evaluable_length = segs[chrom <= 22 & tcn < 10][, list(
tcn = sum(length[!(is.na(tcn.em.original) & is.na(tcn.original))]),
lcn = sum(length[!(is.na(lcn.em.original) & is.na(lcn.original))]),
both = sum(length[!(is.na(tcn.em.original) & is.na(tcn.original)) & !(is.na(lcn.em.original) & is.na(lcn.original))])
)]
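# Note: the discordance fractions reported in the output list below are normalised
# by this evaluable length (autosomal segments with tcn < 10 where the value is
# available in at least one of the CNCF and EM fits), not by the total autosomal
# length used for the other fractions.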
# Check concordance between log odds-ratio and integer copy-number with regards to balance
# Count segments where logOR shows balanced but tcn/lcn is imbalanced and vice-versa.
clonal_segs = auto_segs[clonal == TRUE & !is.na(lcn)]
clonal_segs_disc_icn = clonal_segs[, `:=` (
icn_bal_mafr_high = lcn == mcn & mafR > 0.05,
icn_imbal_mafr_low = lcn != mcn & mafR < 0.05
)][icn_bal_mafr_high == TRUE | icn_imbal_mafr_low == TRUE]
n_icn_cnlor_discordant = nrow(clonal_segs_disc_icn)
frac_icn_cnlor_discordant = sum(clonal_segs_disc_icn$length)/sum(auto_segs$length)
# Output all values
# n: denotes number
# frac: denotes fraction of the assessed genome
output = list(
dipLogR_flag = dipLogR_flag,
n_alternative_dipLogR = n_alt_dipLogR,
wgd = wgd,
fga = fga,
n_dip_bal_segs = n_dip_bal_segs,
frac_dip_bal_segs = frac_dip_bal_segs,
n_dip_imbal_segs = n_dip_imbal_segs,
frac_dip_imbal_segs = frac_dip_imbal_segs,
n_amps = n_amps,
n_homdels = n_homdels,
frac_homdels = frac_homdels,
n_homdels_clonal = n_homdels_clonal,
frac_homdels_clonal = frac_homdels_clonal,
n_cn_states = n_cn_states,
n_segs = n_segs,
n_cnlr_clusters = n_cnlr_clusters,
n_lcn_na = n_lcn_na,
n_loh = n_loh,
frac_loh = frac_loh,
n_segs_subclonal = n_segs_subclonal,
frac_segs_subclonal = frac_segs_subclonal,
n_segs_below_dipLogR = n_segs_below_dipLogR,
frac_below_dipLogR = frac_below_dipLogR,
n_segs_balanced_odd_tcn = n_segs_balanced_odd_tcn,
frac_balanced_odd_tcn = frac_balanced_odd_tcn,
n_segs_imbalanced_diploid_cn = n_segs_imbalanced_diploid_cn,
frac_imbalanced_diploid_cn = frac_imbalanced_diploid_cn,
n_segs_lcn_greater_mcn = n_segs_lcn_greater_mcn,
frac_lcn_greater_mcn = frac_lcn_greater_mcn,
n_snps = n_snps,
n_het_snps = n_het_snps,
frac_het_snps = frac_het_snps,
n_snps_with_300x_in_tumor = n_snps_with_300x_in_tumor,
n_het_snps_with_300x_in_tumor = n_het_snps_with_300x_in_tumor,
n_het_snps_hom_in_tumor_1pct = n_het_snps_hom_in_tumor_1pct,
n_het_snps_hom_in_tumor_5pct = n_het_snps_hom_in_tumor_5pct,
frac_het_snps_hom_in_tumor_1pct = frac_het_snps_hom_in_tumor_1pct,
frac_het_snps_hom_in_tumor_5pct = frac_het_snps_hom_in_tumor_5pct,
mean_cnlr_residual = mean_cnlr_residual,
sd_cnlr_residual = sd_cnlr_residual,
n_segs_discordant_tcn = discordant_stats$n_discordant_tcn,
frac_discordant_tcn = discordant_stats$length_discordant_tcn/evaluable_length$tcn,
n_segs_discordant_lcn = discordant_stats$n_discordant_lcn,
frac_discordant_lcn = discordant_stats$length_discordant_lcn/evaluable_length$lcn,
n_segs_discordant_both = discordant_stats$n_discordant_both,
frac_discordant_both = discordant_stats$length_discordant_both/evaluable_length$both,
n_segs_icn_cnlor_discordant = n_icn_cnlor_discordant,
frac_icn_cnlor_discordant = frac_icn_cnlor_discordant,
mafr_median_all = mafr_median_all,
mafr_median_clonal = mafr_median_clonal,
mafr_n_gt_1 = mafr_n_gt_1
)
# If input MAF is provided add some stats based on this
if (!is.null(maf)) {
maf = as.data.table(maf)
maf[, `:=` (
Chromosome = ifelse(Chromosome == 'X', 23, Chromosome),
t_var_freq = t_alt_count/(t_alt_count+t_ref_count)
)][, Chromosome := as.integer(Chromosome)]
setkey(maf, Chromosome, Start_Position, End_Position)
maf = foverlaps(maf, segs, mult = 'first', nomatch = NA,
by.x = c('Chromosome', 'Start_Position', 'End_Position'),
by.y = c('chrom', 'start', 'end'))
# Median mutation VAF at clonal 2:1 segments
output$dip_median_vaf = maf[clonal == TRUE & mcn == 1 & lcn == 1][, median(t_var_freq)]
# Mutations at homdels
homdel_muts = maf[tcn == 0]
output$n_homdel_muts = nrow(homdel_muts)
output$median_vaf_homdel_muts = median(homdel_muts$t_var_freq)
}
# Return rounded values for fractions
map_if(output, is.double, .f = function(x) signif(x, 2))
}
|
#' Redact columns from a dataframe with the default redactors
#'
#' This function redacts the columns specified in `columns` in the data given in
#' `data` using dittodb's standard redactors.
#'
#' The column names given in the `columns` argument are treated as regular
#' expressions, however they always have `^` and `$` added to the beginning and
#' end of the strings. So if you would like to match any column that starts with
#' the string `sensitive` (e.g. `sensitive_name`, `sensitive_date`) you could
#' use `"sensitive.*"` and this would catch all of those columns (though it would
#' not catch a column called `most_sensitive_name`).
#'
#' The standard redactors replace all values in the column with the following
#' values based on the columns type:
#'
#' * integer -- `9L`
#' * numeric -- `9`
#' * character -- `"[redacted]"`
#' * `POSIXct` (date times) -- `as.POSIXct("1988-10-11T17:00:00", tz = tzone)`
#'
#' @param data a dataframe to redact
#' @param columns character, the columns to redact
#' @param ignore.case should case be ignored? (default: `TRUE`)
#' @param ... additional options to pass on to `grep` when matching the column
#' names
#'
#' @return data, with the columns specified in `columns` duly redacted
#' @export
#'
#' @examples
#' small_flights <- head(nycflights13::flights)
#'
#' # with no columns specified, redacting does nothing
#' redact_columns(small_flights, columns = NULL)
#'
#' # integer
#' redact_columns(small_flights, columns = c("arr_time"))
#'
#' # numeric
#' redact_columns(small_flights, columns = c("arr_delay"))
#'
#' # characters
#' redact_columns(small_flights, columns = c("origin", "dest"))
#'
#' # datetimes
#' redact_columns(small_flights, columns = c("time_hour"))
redact_columns <- function(data, columns, ignore.case = TRUE, ...) { # nolint
columns <- unlist(lapply(
glue("^{columns}$"),
grep,
x = colnames(data),
value = TRUE,
ignore.case = ignore.case,
...
))
# remove non-matches
columns <- columns[lapply(columns, length) > 0]
return(redact(data, standard_redactors(data, columns)))
}
redact <- function(data, redactors) {
columns <- names(redactors)
data[, columns] <- lapply(columns, function(x) redactors[[x]](data[, x]))
return(data)
}
standard_redactors <- function(data, columns) {
out <- lapply(columns, function(x) {
col <- data[[x]]
if (inherits(col, "integer")) {
return(function(data) return(rep(9L, length(data))))
} else if (inherits(col, "numeric")) {
return(function(data) return(rep(9, length(data))))
} else if (inherits(col, "character")) {
return(function(data) return(rep("[redacted]", length(data))))
} else if (inherits(col, "POSIXct")) {
# should this actually be POSIXt? or have a separate POSIXlt?
return(function(data) {
tzone <- attributes(data)$tzone %||% "EST"
return(rep(as.POSIXct("1988-10-11T17:00:00", tz = tzone), length(data)))
})
}
})
names(out) <- columns
return(out)
}
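# Minimal usage sketch (not part of the package sources; assumes the package
# namespace, including glue(), is loaded): redact one character and one numeric
# column of a small data.frame.
#
#   df <- data.frame(id = 1:3,
#                    name = c("ann", "bob", "cat"),
#                    score = c(1.2, 3.4, 5.6),
#                    stringsAsFactors = FALSE)
#   redact_columns(df, columns = c("name", "score"))
#   # every value in `name` becomes "[redacted]", every value in `score` becomes 9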
|
/R/redact.R
|
permissive
|
etiennebr/dittodb
|
R
| false | false | 3,002 |
r
|
### SOLUTIONS OF LINEAR SYSTEM OF EQUATION
## EXAMPLE 1: Forward Substitution
forwardSub<-function(L,b){
x=c(0)
n=nrow(L)
for(i in (1:n)){
x[i]=b[i]
if(i>1){
for(j in (1:(i-1))){
x[i]=x[i]-L[i,j]*x[j]
}
}
x[i]=x[i]/L[i,i]
}
return(cbind(x))
}
```
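```{r}
## Quick check of forwardSub() (illustrative chunk added here; assumes the
## corrected divisor L[i,i] above). For this lower-triangular system the exact
## solution is x = (1, 2, 3).
L <- matrix(c(2, 0, 0,
              1, 3, 0,
              1, 1, 1), nrow = 3, byrow = TRUE)
b <- c(2, 7, 6) # equals L %*% c(1, 2, 3)
forwardSub(L, b)
```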
```{r}
## EXAMPLE 2: Backward Substitution
backwardSub<-function(U,b){
x=c(0)
n=nrow(U)
for(i in (n:1)){
x[i]=b[i]
if(i<n){
for(j in ((i+1):n)){
x[i]=x[i]-U[i,j]*x[j]
}
}
x[i]=x[i]/U[i,i]
}
return(cbind(x))
}
```
```{r}
## EXAMPLE 3: Gaussian Elimination
gaussianElimination<-function(Ab){
n=nrow(Ab)
for(k in (1:(n-1))){
for (i in ((k+1):n)){
mik=Ab[i,k]/Ab[k,k]
Ab[i,k]=0
for(j in ((k+1):(n+1))){
Ab[i,j]=Ab[i,j]-mik*Ab[k,j]
}
}
}
return(Ab)
}
```
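```{r}
## Worked check combining gaussianElimination() and backwardSub() (illustrative
## chunk, not from the original notes). The system
##   2x + y - z = 8, -3x - y + 2z = -11, -2x + y + 2z = -3
## has the solution (2, 3, -1).
Ab <- matrix(c( 2,  1, -1,   8,
               -3, -1,  2, -11,
               -2,  1,  2,  -3), nrow = 3, byrow = TRUE)
U <- gaussianElimination(Ab)
backwardSub(U[, 1:3], U[, 4]) # returns (2, 3, -1)
```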
```{r}
## EXAMPLE 4: LU FACTORIZATION
luFactorization<-function(A){
n=nrow(A)
L=matrix(0,nrow=n,ncol=n)
for(k in (1:(n-1))){
for(i in ((k+1):n)){
L[i,k]=A[i,k]/A[k,k]
A[i,k]=0
for(j in ((k+1):n)){
A[i,j]=A[i,j]-L[i,k]*A[k,j]
}
}
}
for(k in (1:n)){
L[k,k]=1
}
return(cbind(L,A))
}
```
```{r}
## EXAMPLE 5: Gaussian Elimination With Partial Pivoting (ISSUE WITH THE CODE)
gaussianEliminationPartial<-function(Ab){
n=nrow(Ab)
for(k in (1:(n-1))){
pivotIndex=k
for(i in ((k+1):n)){
if(abs(Ab[i,k])>abs(Ab[pivotIndex,k])){
pivotIndex=i
}
}
if (pivotIndex !=k){
for(j in (k:(n+1))){
buffer=Ab[k,j]
Ab[k,j]=Ab[pivotIndex,j]
Ab[pivotIndex,j]=buffer
}
}
for(i in ((k+1):n)){
mik=Ab[i,k]/Ab[k,k]
Ab[i,k]=0
for(j in ((k+1):(n+1))){
Ab[i,j]=Ab[i,j]-mik*Ab[k,j]
}
}
}
return(Ab)
}
```
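```{r}
## Check of the partial-pivoting routine (illustrative chunk; relies on the
## corrected abs(Ab[i,k]) indexing and lower-case if above). Elimination without
## pivoting would divide by the zero pivot in row 1; swapping rows avoids it.
Ab <- matrix(c(0, 2, 4,
               3, 1, 5), nrow = 2, byrow = TRUE)
U <- gaussianEliminationPartial(Ab)
backwardSub(U[, 1:2], U[, 3]) # returns (1, 2)
```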
```{r}
## EXAMPLE 6: Gaussian Elimination for Tridiagonal Matrices
gaussianEliminationTridigonal<-function(Ab){
n=nrow(Ab)
for(k in (1:(n-1))){
multiplier=Ab[k+1,k]/Ab[k,k]
Ab[k+1,k]=0
Ab[k+1,k+1]=Ab[k+1,k+1]-multiplier*Ab[k,k+1]
Ab[k+1,n+1]=Ab[k+1,n+1]-multiplier*Ab[k,n+1]
}
return(Ab)
}
```
```{r}
## EXAMPLE 7: Backward Substitution for Tridiagonal Matrices
backwardSubTridiagonal<-function(U,b){
x=c(0)
n=nrow(U)
for(i in (n:1)){
x[i]=b[i]
if(i<n){
x[i]=x[i]-U[i,i+1]*x[i+1]
}
x[i]=x[i]/U[i,i]
}
return(cbind(x))
}
```
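```{r}
## Illustrative chunk: solve a small tridiagonal system with the two specialised
## routines above. For x = (1, 1, 1) the right-hand side is b = (3, 4, 3).
Ab <- matrix(c(2, 1, 0, 3,
               1, 2, 1, 4,
               0, 1, 2, 3), nrow = 3, byrow = TRUE)
U <- gaussianEliminationTridigonal(Ab)
backwardSubTridiagonal(U[, 1:3], U[, 4]) # returns (1, 1, 1)
```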
```{r}
# EXAMPLE 8: Cholesky Factorization
choleskyfactorization = function(A){
n = nrow(A)
L = matrix(0,nrow=n,ncol=n)
for (i in (1:n)){
L[i,i] = A[i,i]
if (i > 1){
for (k in (1:(i-1))){
L[i,i] = L[i,i] - L[i,k]*L[i,k]
}
}
L[i,i] = (L[i,i])^(1/2)
if (i < n){
for (j in ((i+1):n)){
L[j,i] = A[j,i]
if (i > 1){
for (k in (1:(i-1))){
L[j,i] = L[j,i] - L[j,k]*L[i,k]
}
}
L[j,i] = L[j,i]/L[i,i]
}
}
}
return(L)
}
```
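```{r}
## Illustrative chunk: verify choleskyfactorization() on a small symmetric
## positive-definite matrix; the factor L should satisfy L %*% t(L) == A.
A <- matrix(c(4, 2, 2,
              2, 5, 3,
              2, 3, 6), nrow = 3, byrow = TRUE)
L <- choleskyfactorization(A)
L
L %*% t(L) # recovers A
```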
```{r}
### MISSED AN EXAMPLE ###
```
```{r}
## PROBLEM 1:
|
/Chapter 7 Solutions of Linear System of Equations.R
|
no_license
|
Aarijit/programming_and_algo_using_R
|
R
| false | false | 3,095 |
r
|
### SOLUTIONS OF LINEAR SYSTEM OF EQUATION
## EXAMPLE 1: Forward Substitution
forwardSub<-function(L,b){
x=c(0)
n=nrow(L)
for(i in (1:n)){
x[i]=b[i]
if(i>1){
for(j in (1:(i-1))){
x[i]=x[i]-L[i,j]*x[j]
}
}
x[i]=x[i]/L[i,j]
}
return(cbind(x))
}
```
```{r}
## EXAMPLE 2: Backward Substitution
backwardSub<-function(U,b){
x=c(0)
n=nrow(U)
for(i in (n:1)){
x[i]=b[i]
if(i<n){
for(j in ((i+1):n)){
x[i]=x[i]-U[i,j]*x[j]
}
}
x[i]=x[i]/U[i,i]
}
return(cbind(x))
}
```
```{r}
## ## EXAMPLE 3: Gaussian Substitution
gaussianElimination<-function(Ab){
n=nrow(Ab)
for(k in (1:(n-1))){
for (i in ((k+1):n)){
mik=Ab[i,k]/Ab[k,k]
Ab[i,k]=0
for(j in ((k+1):(n+1))){
Ab[i,j]=Ab[i,j]-mik*Ab[k,j]
}
}
}
return(Ab)
}
```
```{r}
## EXAMPLE 4: LU FACTORIZATION
luFactorization<-function(A){
n=nrow(A)
L=matrix(0,nrow=n,ncol=n)
for(k in (1:(n-1))){
for(i in ((k+1):n)){
L[i,k]=A[i,k]/A[k,k]
A[i,k]=0
for(j in (k+1):n)){
A[i,j]=A[i,j]-L[i,k]*Ab[k,j]
}
}
}
for(k in (1:n)){
L[k,k]=1
}
return(cbind(L,A))
}
```
```{r}
## EXAMPLE 5: Gaussian Elimination With Partial Pivoting (ISSUE WITH THE CODE)
gaussianEliminationPartial<-function(Ab){
n=nrow(Ab)
for(k in (1:(n-1))){
pivotIndex=k
for(i in ((k+1):n)){
if((abs[Ab(i,k)])>abs(Ab[pivotIndex,k])){
pivotIndex=i
}
}
If (pivotIndex !=k){
for(j in (k:(n+1))){
buffer=Ab[k,j]
Ab[k,j]=Ab[pivotIndex,j]
Ab[pivotIndex,j]=buffer
}
}
for(i in ((k+1):n)){
mik=Ab[i,k]/Ab[k,k]
Ab[i,k]=0
for(j in ((k+1):(n+1))){
Ab[i,j]=Ab[i,j]-mik*Ab[k,j]
}
}
}
return(Ab)
}
```
```{r}
## EXAMPLE 6: Gaussian Elimination for Tridiagonal Matrices
gaussianEliminationTridigonal<-function(Ab){
n=nrow(Ab)
for(k in (1:(n-1))){
multiplier=Ab[k+1,k]/Ab[k,k]
Ab[k+1,k]=0
Ab[k+1,k+1]=Ab[k+1,k+1]-multiplier*Ab[k,k+1]
Ab[k+1,n+1]=Ab[k+1,n+1]-multiplier*Ab[k,n+1]
}
return(Ab)
}
```
```{r}
## EXAMPLE 7: Backward Substitution for Tridiagonal Matrices
backwardSubTridiagonal<-function(U,b){
x=c(0)
n=nrow(U)
for(i in (n:1)){
x[i]=b[i]
if(i<n){
x[i]=x[i]-U[i,i+1]*x[i+1]
}
x[i]=x[i]/U[i,i]
}
return(cbind(x))
}
```
```{r}
# EXAMPLE 8: Cholesky Factorization
choleskyfactorization = function(A){
n = nrow(A)
L = matrix(0,nrow=n,ncol=n)
for (i in (1:n)){
L[i,i] = A[i,i]
if (i > 1){
for (k in (1:(i-1))){
L[i,i] = L[i,i] - L[i,k]*L[i,k]
}
}
L[i,i] = (L[i,i])^(1/2)
if (i < n){
for (j in ((i+1):n)){
L[j,i] = A[j,i]
if (i > 1){
for (k in (1:(i-1))){
L[j,i] = L[j,i] - L[j,k]*L[i,k]
}
}
L[j,i] = L[j,i]/L[i,i]
}
}
}
return(L)
}
```
```{r}
### MISSED AN EXAMPLE ###
```
```{r}
## PROBLEM 1:
|
#Testing whether the modified quasi-binomial family function works
set.seed(123)
library(bamlss)
d <- GAMart()
head(d)
library(frmselection)
###logit link
b <- bamlss(bnum ~ x1 + x2 + x3, data = d, family = frm_bamlss(link = "logit"),
sampler = FALSE, multiple = FALSE)
coef(b)
###Using frm() to check, get the same result
x <- d[, 9:11]
f <- frm::frm(d$bnum, x, linkfrac = "logit", table = FALSE)
f$p
###probit link
b1 <- bamlss(bnum ~ x1 + x2 + x3, data = d, family = frm_bamlss(link = "probit"),
sampler = FALSE, multiple = FALSE)
coef(b1)
###Using frm() to check, get a very similar result
f1 <- frm::frm(d$bnum, x, linkfrac = "probit", table = FALSE)
f1$p
|
/data-raw/Testing.R
|
no_license
|
Lydia2kkx/frmselection
|
R
| false | false | 690 |
r
|
#Testing whether the modified quasi-binomial family function works
set.seed(123)
library(bamlss)
d <- GAMart()
head(d)
library(frmselection)
###logit link
b <- bamlss(bnum ~ x1 + x2 + x3, data = d, family = frm_bamlss(link = "logit"),
sampler = FALSE, multiple = FALSE)
coef(b)
###Using frm() to check, get the same result
x <- d[, 9:11]
f <- frm::frm(d$bnum, x, linkfrac = "logit", table = FALSE)
f$p
###probit link
b1 <- bamlss(bnum ~ x1 + x2 + x3, data = d, family = frm_bamlss(link = "probit"),
sampler = FALSE, multiple = FALSE)
coef(b1)
###Using frm() to check, get the very similar result
f1 <- frm::frm(d$bnum, x, linkfrac = "probit", table = FALSE)
f1$p
|
## Exploratory Data Analysis
## Assignment #1: Course Project 1
## By. Francisco J. Chavez
## Plot3.R
getwd()
##setwd("CD:/OneDrive/School, Training and Education/CourseRA/Data Science Certification/4. Exploratory Data Analysis/Week1/household_power_consumption")
setwd("c:\\Temp\\data")
getwd()
electricData <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?",
colClasses= c('character', 'character', 'numeric', 'numeric','numeric','numeric','numeric','numeric','numeric'))
electricData$Date <- as.Date(electricData$Date, "%d/%m/%Y")
electricData <- subset(electricData, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
electricData <- electricData[complete.cases(electricData),]
dateTime <- as.POSIXct(paste(electricData$Date, electricData$Time))
dateTime <- setNames(dateTime, "DateTime")
electricData <- electricData[ , !(names(electricData) %in% c("Date", "Time"))]
electricData <- cbind(dateTime, electricData)
## Create Plot 3
with(electricData, {
plot(Sub_metering_1~dateTime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~dateTime,col='Red')
lines(Sub_metering_3~dateTime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lwd=c(1,1,1),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
##Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/Plot3.R
|
no_license
|
FranciscoChavez/ExData_Plotting1
|
R
| false | false | 1,460 |
r
|
library(devtools)
library(usethis)
library(desc)
# Remove default DESC
unlink("DESCRIPTION")
# Create and clean desc
my_desc <- description$new("!new")
# Set your package name
my_desc$set("Package", "rFirestore")
#Set your name
my_desc$set("Authors@R", "person('Travis', 'Smith', email = 'trvs.smth.11@gmail.com.com', role = c('cre', 'aut'))")
# Remove some author fields
my_desc$del("Maintainer")
# Set the version
my_desc$set_version("0.0.0.9000")
# The title of your package
my_desc$set(Title = "Cloud Firestore API Wrapper")
# The description of your package
my_desc$set(Description = "A set of functions that wrap the Cloud Firestore REST API detailed at: https://firebase.google.com/docs/firestore/use-rest-api")
# The urls
my_desc$set("URL", "http://this")
my_desc$set("BugReports", "http://that")
# Save everything
my_desc$write(file = "DESCRIPTION")
# If you want to use the MIT licence, code of conduct, and lifecycle badge
use_mit_license(name = "Travis Smith")
use_code_of_conduct()
use_lifecycle_badge("Experimental")
use_news_md()
# Get the dependencies
use_package("httr")
use_package("jsonlite")
use_package("curl")
use_package("attempt")
use_package("purrr")
# Clean your description
use_tidy_description()
|
/data-raw/devstuffs.R
|
no_license
|
RightChain/gFirestoreR
|
R
| false | false | 1,232 |
r
|
################################################################
## Copyright 2014 Tracy Holsclaw.
## This file is part of NHMM.
## NHMM is free software: you can redistribute it and/or modify it under
## the terms of the GNU General Public License as published by the Free Software
## Foundation, either version 3 of the License, or any later version.
## NHMM is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
## A PARTICULAR PURPOSE. See the GNU General Public License for more details.
## You should have received a copy of the GNU General Public License along with
## NHMM. If not, see <http://www.gnu.org/licenses/>.
#############################################################
#' Most probable state (similar to Viterbi)
#'
#' \code{Oz} calculates the most probable state per time step (Viterbi like) with values from 1,...,K.
#' The histogram of this sequence is displayed in the GUI output.
#' If there are ties for a given day then the lowest number state is chosen.
#'
#' @param nhmmobj an object created from the NHMM function
#' @param outfile a directory to put the .png plot
#' @return zbest the most probable sequence from all iterations
#' @return output: a plot of a histogram of the distribution of
#' the most probable state sequence. If the number of states in
#' the histogram is less than K, it probably means you should
#' re-run the model with smaller K as some of the states have disappeared.
#' @export
#' @keywords Viterbi
#' @examples #Oz(my.nhmm)
Oz=function(nhmmobj, outfile=NULL)
{
T=nhmmobj$T
J=nhmmobj$J
K=nhmmobj$K
B=nhmmobj$B
A=nhmmobj$A
iters=nhmmobj$iters
burnin=nhmmobj$burnin
outboo=nhmmobj$outboo
outdir=nhmmobj$outdir
L=B+K
if(K==1){stop("K=1, there is no hidden state sequence")}
if(outboo==TRUE) ##zsave was written to a file
{ zsave=t(as.matrix(read.table(paste(outdir,"zsave.txt", sep=""))))
}else{ #zsave=matrix(0,T,iters)
zsave=nhmmobj$zsave
}
#### Most used z (like Viterbi)
mode=function(vector_name)
{ as.numeric(names(sort(table(vector_name),decreasing=TRUE))[1]) }
zbest=numeric(T)
for(t in 1:T){ zbest[t]=mode(zsave[t,]) }
if(!is.null(outfile)){ png(paste(outfile,"z.png",sep=""), width=300, height=300)}
hist(zbest, xlab="Most probable z")
if(!is.null(outfile)){ dev.off()}
zbest
}
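# Illustrative note (not part of the package): the mode() helper above returns the
# most frequent state per time step, e.g.
#   mode <- function(v) as.numeric(names(sort(table(v), decreasing = TRUE))[1])
#   mode(c(1, 2, 2, 3)) # 2
# and, as documented above, ties are resolved in favour of the lower state label.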
|
/NHMM/R/Oz.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 2,559 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 27646
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 27646
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#26.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9391
c no.of clauses 27646
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 27646
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#26.asp.qdimacs 9391 27646 E1 [] 0 128 9263 27646 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#26.asp/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#26.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 732 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 27646
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 27646
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#26.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9391
c no.of clauses 27646
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 27646
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#128.A#48.c#.w#9.s#26.asp.qdimacs 9391 27646 E1 [] 0 128 9263 27646 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main_misc.R
\name{modules}
\alias{modules}
\title{Extract the module colours}
\usage{
modules(CenTFinderObject)
}
\arguments{
\item{CenTFinderObject}{An instance of the CenTFinder class.}
}
\value{
A character vector containing the module colours in the CenTFinder analysis.
}
\description{
\code{modules} allows one to extract the module colours of a CenTFinder object.
}
|
/man/modules.Rd
|
no_license
|
jonathandesmedt92/CenTFinder
|
R
| false | true | 451 |
rd
|
library(MIIVsem)
### Name: bollen1989a
### Title: Industrialization-Democracy Data
### Aliases: bollen1989a
### Keywords: datasets
### ** Examples
## Not run:
##D model <- '
##D Eta1 =~ y1 + y2 + y3 + y4
##D Eta2 =~ y5 + y6 + y7 + y8
##D Xi1 =~ x1 + x2 + x3
##D Eta1 ~ Xi1
##D Eta2 ~ Xi1
##D Eta2 ~ Eta1
##D y1 ~~ y5
##D y2 ~~ y4
##D y2 ~~ y6
##D y3 ~~ y7
##D y4 ~~ y8
##D y6 ~~ y8
##D '
## End(Not run)
|
/data/genthat_extracted_code/MIIVsem/examples/bollen1989a.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 516 |
r
|
\alias{gtkActionSetShortLabel}
\name{gtkActionSetShortLabel}
\title{gtkActionSetShortLabel}
\description{Sets a shorter label text on \code{action}.}
\usage{gtkActionSetShortLabel(object, short.label)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkAction}}}
\item{\verb{short.label}}{the label text to set}
}
\details{Since 2.16}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/RGtk2/man/gtkActionSetShortLabel.Rd
|
no_license
|
lawremi/RGtk2
|
R
| false | false | 405 |
rd
|
library(assertive.base)
### Name: safe_deparse
### Title: Safe version of deparse
### Aliases: safe_deparse
### ** Examples
# safe_deparse only differs from deparse when the deparse string is longer
# than width.cutoff
deparse(CO2, width.cutoff = 500L) # has length 6
safe_deparse(CO2) # has length 1
|
/data/genthat_extracted_code/assertive.base/examples/safe_deparse.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 324 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{coef.blblm}
\alias{coef.blblm}
\title{Get the coefficient of fitted model}
\usage{
\method{coef}{blblm}(object, ...)
}
\arguments{
\item{object}{the fitted model}
\item{...}{further arguments passed to or from other methods}
}
\description{
Get the coefficient of fitted model
}
|
/man/coef.blblm.Rd
|
permissive
|
JZNeilZ/blblm
|
R
| false | true | 372 |
rd
|
makeCacheMatrix <- function(x) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function()
{
x
}
setsolve <- function(solve)
{
m <<- solve
}
getsolve <- function()
{
m
}
list(set = set,
get = get,
setsolve = setsolve,
getsolve = getsolve)
}
cacheSolve <- function(x, ...)
{
m <- x$getsolve()
if(!is.null(m))
{
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setsolve(m)
m
}
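# Example usage (illustrative sketch, not part of the original assignment script):
# the second cacheSolve() call returns the cached inverse instead of recomputing it.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm) # computes the inverse via solve() and caches it
cacheSolve(cm) # prints "getting cached data" and returns the cached inverse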
|
/course2_R/week3/week3_prog_ass2.R
|
no_license
|
proetman/datasciencecoursera
|
R
| false | false | 749 |
r
|
#######################################################################
# TSP - Traveling Salesperson Problem
# Copyright (C) 2011 Michael Hahsler and Kurt Hornik
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
## heuristic to improve a tour using exchanges of 2 edges.
tsp_two_opt <- function(x, control = NULL){
control <- .get_parameters(control, list(
tour = NULL,
rep = 1
))
## improve a given tour or create a random tour
## we use a function since for rep >1 we want several random
## initial tours
initial <- function() {
if(!is.null(control$tour)) as.integer(control$tour)
else sample(n_of_cities(x))
}
xx <- as.matrix(x)
if(control$rep > 1) {
tour <- replicate(control$rep, .Call(R_two_opt, xx, initial()), simplify = FALSE)
lengths <- sapply(tour, FUN = function(t) tour_length(x, t))
tour <- tour[[which.min(lengths)]]
}else tour <- .Call(R_two_opt, xx, initial())
tour
}
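# Usage note (sketch, not from the package sources): this internal routine is
# normally reached through the exported solve_TSP() interface, e.g.
#   library(TSP)
#   tsp <- TSP(dist(matrix(runif(20), ncol = 2)))
#   solve_TSP(tsp, method = "two_opt", control = list(rep = 5))
# The method and control names here are assumptions based on this file and may
# differ across package versions.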
|
/R/tsp_two_opt.R
|
no_license
|
amrrs/TSP
|
R
| false | false | 1,584 |
r
|
log(-1)
printmessage <- function(x){
if(x > 0)
print("x is greater than zero")
else
print("x is less than or equal to zero")
invisible(x)
}
printmessage(1)
printmessage(NA)
printmessage2 <- function(x){
if(is.na(x))
print("x is a missing value!")
else if(x > 0)
print("x is greater than 0")
else
print("x is less than or equal to zero")
invisible(x)
}
x <- log(-1)
printmessage2(x)
|
/drill/debugging_tools_diagnosing_the_problem.R
|
no_license
|
smstaneva/datasciencecoursera
|
R
| false | false | 521 |
r
|
library(dplyr)
library(ggplot2)
library(e1071)
library(mlr)
sick <- OpenML::getOMLDataSet(data.name = "sick")$data
sick %>% apply(2, table)
sick %>% select(-TBG, -TBG_measured, -FTI_measured, -T4U_measured, -TT4_measured, -T3_measured, -TSH_measured) %>% na.exclude() %>% apply(2, unique)
sick_tidy <- sick %>% select(-TBG, -TBG_measured, -FTI_measured, -T4U_measured, -TT4_measured, -T3_measured, -TSH_measured) %>% na.exclude()
sick_tidy
# Important in this data: these columns indicate whether the adjacent measurement was taken
nrow(sick)
nrow(sick_tidy) # Not too bad
# Random Forest
library(randomForest)
sick_tidy %>% apply(2, typeof)
sick_tidy %>% mutate(age = as.numeric(age))
rf <- randomForest(as.factor(Class) ~., data=sick_tidy)
rf # A very nice model
1- mean(predict(rf) != sick_tidy$Class)
# GLM
g <- glm(Class ~., data=sick_tidy, family="binomial")
summary(g)
1 - mean(abs(round(predict(g, type="response")) - (as.numeric(sick_tidy$Class)-1)))
table(paste(round(predict(g, type="response")), (as.numeric(sick_tidy$Class)-1)))
# Plots
sick_t <- sick_tidy
pairs(sick_tidy[, 17:21], col=sick_tidy$Class)
# 17
hist(log(sick_tidy[, 17]))
hist(sick_tidy[, 17])
i <- 17
skewness(sick_tidy[, i])
skewness(sqrt(sick_tidy[, i]))
skewness(log(sick_tidy[, i])) #
sick_t[,17] <- log(sick_tidy[,17])
# 18
hist(sick_tidy[, 18])
hist(sqrt(sick_tidy[, 18]))
shapiro.test(sqrt(sick_tidy[, 18])) #
shapiro.test(log(sick_tidy[, 18]))
i <- 18
skewness(sick_tidy[, i])
skewness(sqrt(sick_tidy[, i])) #
skewness(log(sick_tidy[, i]))
sick_t[,i] <- sqrt(sick_tidy[,i])
# 19
hist(sick_tidy[, 19])
hist(sqrt(sick_tidy[, 19]))
hist(log(sick_tidy[, 19]))
shapiro.test(sick_tidy[, 19])
shapiro.test(sqrt(sick_tidy[, 19])) #
shapiro.test(log(sick_tidy[, 19]))
i <- 19
skewness(sick_tidy[, i])
skewness(sqrt(sick_tidy[, i])) #
skewness(log(sick_tidy[, i]))
sick_t[,i] <- sqrt(sick_tidy[,i])
# 20
shapiro.test(sick_tidy[, 20])
shapiro.test(sqrt(sick_tidy[, 20]))
shapiro.test(log(sick_tidy[, 20]))
i <- 20
skewness(sick_tidy[, i])
skewness(sqrt(sick_tidy[, i]))
skewness(log(sick_tidy[, i])) #
hist(sick_tidy[, 20])
hist(sqrt(sick_tidy[, 20]))
hist(log(sick_tidy[, 20])) #
sick_t[,i] <- log(sick_tidy[,i])
# 21
hist(sick_tidy[, 21])
skewness(sick_tidy[,21])
skewness(sqrt(sick_tidy[,21]))
skewness(log(sick_tidy[,21]))
sick_t[,21] <- sqrt(sick_tidy[,21])
pairs(sick_t[, 17:21])
pairs(sick_tidy[, 17:21])
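# Compact alternative (illustrative sketch, not part of the original script) to the
# manual per-column checks above: pick, for each column, the transform with the
# smallest absolute skewness. Assumes strictly positive values so that log() is defined.
best_transform <- function(x) {
  cand <- list(identity = x, sqrt = sqrt(x), log = log(x))
  names(which.min(sapply(cand, function(v) abs(e1071::skewness(v)))))
}
sapply(sick_tidy[, 17:21], best_transform)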
# Finally, the categorical variable (referral_source)
# GLM handles this automatically
unique(sick_tidy$referral_source)
summary(g)
names(sick_tidy)
# New model
g2 <- glm(Class ~., data=sick_t, family="binomial")
summary(g2)
1 - mean(abs(round(predict(g2, type="response")) - (as.numeric(sick_t$Class)-1)))
table(paste(round(predict(g2, type="response")), (as.numeric(sick_t$Class)-1)))
# Decision tree
library(rpart)
library(rpart.plot)
?rpart
rp <- rpart(Class ~., data=sick_t)
summary(rp)
rpart.plot(rp)
1 - mean(abs(round(predict(rp)[,2]) - (sick_t$Class == "sick")))
s<- predict(rp)
s[,2]
# PLSRGLM
|
/PracaDomowa1/TotallySickProject.R
|
no_license
|
bogdankjastrzebski/WarsztatyBadawcze2
|
R
| false | false | 3,016 |
r
|
bprobgHsDiscr1ROY <- function(params, respvec, VC, ps, AT = FALSE){
p1 <- p2 <- pdf1 <- pdf2 <- c.copula.be2 <- c.copula.be1 <- l.par <- dl.dbe1 <- d2l.be1.be1 <- NA
eta1 <- VC$X1%*%params[1:VC$X1.d2]
eta2 <- eta.tr( VC$X2%*%params[(VC$X1.d2+1):(VC$X1.d2+VC$X2.d2)], VC$margins[2])
eta3 <- eta.tr( VC$X3%*%params[(VC$X1.d2+VC$X2.d2+1):(VC$X1.d2+VC$X2.d2+VC$X3.d2)], VC$margins[3])
etad1 <- etad2 <- etas1 <- etas2 <- etan1 <- etan2 <- l.ln <- NULL
teta.st1 <- etad1 <- VC$X4%*%params[(VC$X1.d2+VC$X2.d2+VC$X3.d2+1):(VC$X1.d2+VC$X2.d2+VC$X3.d2+VC$X4.d2)]
teta.st2 <- etad2 <- VC$X5%*%params[(VC$X1.d2+VC$X2.d2+VC$X3.d2+VC$X4.d2+1):(VC$X1.d2+VC$X2.d2+VC$X3.d2+VC$X4.d2+VC$X5.d2)]
pd1 <- probm(eta1, VC$margins[1], bc = TRUE, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr)
p1 <- pd1$pr #
p0 <- 1 - p1 #
derp1.dereta1 <- pd1$derp1.dereta1 # recall that these derivs are wrt to 1 - p1
der2p1.dereta1eta1 <- pd1$der2p1.dereta1eta1
dHs <- distrHsDiscr(respvec$y2, eta2, 1, 1, nu = 1, nu.st = 1, margin2=VC$margins[2], naive = FALSE, y2m = VC$y2m, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr)
pdf2.M2 <- dHs$pdf2
p2.M2 <- dHs$p2
derpdf2.dereta2.M2 <- dHs$derpdf2.dereta2
derp2.dereta2.M2 <- dHs$derp2.dereta2
der2p2.dereta2eta2.M2 <- dHs$der2p2.dereta2eta2
der2pdf2.dereta2.M2 <- dHs$der2pdf2.dereta2
#******** SET UP y3m ************ DONE
dHs <- distrHsDiscr(respvec$y3, eta3, 1, 1, nu = 1, nu.st = 1, margin2=VC$margins[3], naive = FALSE, y2m = VC$y3m, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr)
pdf2.M3 <- dHs$pdf2
p2.M3 <- dHs$p2
derpdf2.dereta2.M3 <- dHs$derpdf2.dereta2
derp2.dereta2.M3 <- dHs$derp2.dereta2
der2p2.dereta2eta2.M3 <- dHs$der2p2.dereta2eta2
der2pdf2.dereta2.M3 <- dHs$der2pdf2.dereta2
########################################################################################################
VC1 <- list(BivD = VC$BivD1, BivD2 = NULL)
resT1 <- teta.tr(VC1, teta.st1)
VC2 <- list(BivD = VC$BivD2, BivD2 = NULL)
resT2 <- teta.tr(VC2, teta.st2) # ********** careful here the correct copula is selected DONE
teta.st1 <- resT1$teta.st
teta.st2 <- resT2$teta.st
teta1 <- resT1$teta
teta2 <- resT2$teta
##################
Cop1 <- VC$BivD1
Cop2 <- VC$BivD2
nC1 <- VC$nC1
nC2 <- VC$nC2
########################################################################################################
C1.M2 <- mm(BiCDF(p0[VC$inde0], p2.M2, nC1, teta1, VC$dof1), min.pr = VC$min.pr, max.pr = VC$max.pr )
C2.M2 <- mm(BiCDF(p0[VC$inde0], mm(p2.M2 - pdf2.M2, min.pr = VC$min.pr, max.pr = VC$max.pr), nC1, teta1, VC$dof1), min.pr = VC$min.pr, max.pr = VC$max.pr )
A.M2 <- mm(C1.M2 - C2.M2, min.pr = VC$min.pr, max.pr = VC$max.pr)
C1.M3 <- mm(BiCDF(p0[VC$inde1], p2.M3, nC2, teta2, VC$dof2), min.pr = VC$min.pr, max.pr = VC$max.pr )
C2.M3 <- mm(BiCDF(p0[VC$inde1], mm(p2.M3 - pdf2.M3, min.pr = VC$min.pr, max.pr = VC$max.pr), nC2, teta2, VC$dof2), min.pr = VC$min.pr, max.pr = VC$max.pr )
A.M3 <- mm(C1.M3 - C2.M3, min.pr = VC$min.pr, max.pr = VC$max.pr)
B.M3 <- mm( pdf2.M3 - A.M3, min.pr = VC$min.pr, max.pr = VC$max.pr)
l.par[VC$inde0] <- log( A.M2 )
l.par[VC$inde1] <- log( B.M3 )
l.par <- VC$weights*l.par
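# Descriptive note: observations with the binary outcome in regime 0 (VC$inde0)
# contribute the log of the copula probability mass A.M2, while those in regime 1
# (VC$inde1) contribute the log of B.M3 = pdf2.M3 - A.M3, i.e. the remaining
# discrete mass.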
########################################################################################################
dH1F.M2 <- copgHs(p0[VC$inde0], p2.M2, eta1=NULL, eta2=NULL, teta1, teta.st1, Cop1, VC$dof1, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr)
dH2F.M2 <- copgHs(p0[VC$inde0], mm(p2.M2 - pdf2.M2, min.pr = VC$min.pr, max.pr = VC$max.pr), eta1=NULL, eta2=NULL, teta1, teta.st1, Cop1, VC$dof1, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr)
c.copula.be1.C1.M2 <- dH1F.M2$c.copula.be1
c.copula.be1.C2.M2 <- dH2F.M2$c.copula.be1
c.copula.be2.C1.M2 <- dH1F.M2$c.copula.be2
c.copula.be2.C2.M2 <- dH2F.M2$c.copula.be2
c.copula.theta.C1.M2 <- dH1F.M2$c.copula.theta
c.copula.theta.C2.M2 <- dH2F.M2$c.copula.theta
derp2m1.dereta2.M2 <- derp2.dereta2.M2 - derpdf2.dereta2.M2
Cc.M2 <- c.copula.be1.C1.M2 - c.copula.be1.C2.M2 # mm(c.copula.be1.C1.M2 - c.copula.be1.C2.M2, min.pr = VC$min.pr, max.pr = VC$max.pr)
C.M2 <- Cc.M2*derp1.dereta1[VC$inde0] # contains already -
Cs.M2 <- c.copula.theta.C1.M2 - c.copula.theta.C2.M2
Ceta2 <- (c.copula.be2.C1.M2 - c.copula.be2.C2.M2)*derp2.dereta2.M2 + c.copula.be2.C2.M2*derpdf2.dereta2.M2 # mm(c.copula.be2.C1.M2 - c.copula.be2.C2.M2, min.pr = VC$min.pr, max.pr = VC$max.pr)
dH1F.M3 <- copgHs(p0[VC$inde1], p2.M3, eta1=NULL, eta2=NULL, teta2, teta.st2, Cop2, VC$dof2, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr)
dH2F.M3 <- copgHs(p0[VC$inde1], mm(p2.M3 - pdf2.M3, min.pr = VC$min.pr, max.pr = VC$max.pr), eta1=NULL, eta2=NULL, teta2, teta.st2, Cop2, VC$dof2, min.dn = VC$min.dn, min.pr = VC$min.pr, max.pr = VC$max.pr)
c.copula.be1.C1.M3 <- dH1F.M3$c.copula.be1
c.copula.be1.C2.M3 <- dH2F.M3$c.copula.be1
c.copula.be2.C1.M3 <- dH1F.M3$c.copula.be2
c.copula.be2.C2.M3 <- dH2F.M3$c.copula.be2
c.copula.theta.C1.M3 <- dH1F.M3$c.copula.theta
c.copula.theta.C2.M3 <- dH2F.M3$c.copula.theta
derp2m1.dereta2.M3 <- derp2.dereta2.M3 - derpdf2.dereta2.M3
Cc.M3 <- c.copula.be1.C1.M3 - c.copula.be1.C2.M3 # mm(c.copula.be1.C1.M3 - c.copula.be1.C2.M3, min.pr = VC$min.pr, max.pr = VC$max.pr)
C.M3 <- Cc.M3*derp1.dereta1[VC$inde1] # contains already -
Cs.M3 <- c.copula.theta.C1.M3 - c.copula.theta.C2.M3
Ceta3 <- derpdf2.dereta2.M3 - ((c.copula.be2.C1.M3 - c.copula.be2.C2.M3)*derp2.dereta2.M3 + c.copula.be2.C2.M3*derpdf2.dereta2.M3)
# ************ check from above what is really needed and not
dl.dbe1[VC$inde0] <- C.M2/A.M2
dl.dbe1[VC$inde1] <- -C.M3/B.M3
dl.dbe1 <- VC$weights*dl.dbe1
dl.dbe2 <- VC$weights[VC$inde0]*( Ceta2/A.M2)
dl.dbe3 <- VC$weights[VC$inde1]*( Ceta3/B.M3)
dl.dteta1.st <- VC$weights[VC$inde0]*( Cs.M2/A.M2)
dl.dteta2.st <- VC$weights[VC$inde1]*(-Cs.M3/B.M3)
########################################################################################################
c.copula2.be1.C1.M2 <- dH1F.M2$c.copula2.be1
c.copula2.be1.C2.M2 <- dH2F.M2$c.copula2.be1
c.copula2.be2.C1.M2 <- dH1F.M2$c.copula2.be2
c.copula2.be2.C2.M2 <- dH2F.M2$c.copula2.be2
c.copula2.be1be2.C1.M2 <- dH1F.M2$c.copula2.be1be2
c.copula2.be1be2.C2.M2 <- dH2F.M2$c.copula2.be1be2
c.copula2.be2th.C1.M2 <- dH1F.M2$c.copula2.be2th # with star
c.copula2.be2th.C2.M2 <- dH2F.M2$c.copula2.be2th
c.copula2.theta.C1.M2 <- dH1F.M2$bit1.th2ATE # no start
c.copula2.theta.C2.M2 <- dH2F.M2$bit1.th2ATE
c.copula.thet.C1.M2 <- dH1F.M2$c.copula.thet # NO star
c.copula.thet.C2.M2 <- dH2F.M2$c.copula.thet
derteta.derteta.st.M2 <- dH1F.M2$derteta.derteta.st # does not matter dH1 or dH2
der2teta.derteta.stteta.st.M2 <- dH1F.M2$der2teta.derteta.stteta.st
c.copula2.be1th.C1.M2 <- dH1F.M2$c.copula2.be1th
c.copula2.be1th.C2.M2 <- dH2F.M2$c.copula2.be1th
c.copula2.be1.C1.M3 <- dH1F.M3$c.copula2.be1
c.copula2.be1.C2.M3 <- dH2F.M3$c.copula2.be1
c.copula2.be2.C1.M3 <- dH1F.M3$c.copula2.be2
c.copula2.be2.C2.M3 <- dH2F.M3$c.copula2.be2
c.copula2.be1be2.C1.M3 <- dH1F.M3$c.copula2.be1be2
c.copula2.be1be2.C2.M3 <- dH2F.M3$c.copula2.be1be2
c.copula2.be2th.C1.M3 <- dH1F.M3$c.copula2.be2th
c.copula2.be2th.C2.M3 <- dH2F.M3$c.copula2.be2th
c.copula2.theta.C1.M3 <- dH1F.M3$bit1.th2ATE
c.copula2.theta.C2.M3 <- dH2F.M3$bit1.th2ATE
c.copula.thet.C1.M3 <- dH1F.M3$c.copula.thet # NO star
c.copula.thet.C2.M3 <- dH2F.M3$c.copula.thet
derteta.derteta.st.M3 <- dH1F.M3$derteta.derteta.st # does not matter dH1 or dH2
der2teta.derteta.stteta.st.M3 <- dH1F.M3$der2teta.derteta.stteta.st
c.copula2.be1th.C1.M3 <- dH1F.M3$c.copula2.be1th
c.copula2.be1th.C2.M3 <- dH2F.M3$c.copula2.be1th
der2p2m1.dereta2eta2.M2 <- der2p2.dereta2eta2.M2 - der2pdf2.dereta2.M2
der2p2m1.dereta2eta2.M3 <- der2p2.dereta2eta2.M3 - der2pdf2.dereta2.M3
##########################
b1b1CY <- (c.copula2.be1.C1.M2 - c.copula2.be1.C2.M2)*derp1.dereta1[VC$inde0]^2 + Cc.M2*der2p1.dereta1eta1[VC$inde0]
b1b1Y <- (c.copula2.be1.C1.M3 - c.copula2.be1.C2.M3)*derp1.dereta1[VC$inde1]^2 + Cc.M3*der2p1.dereta1eta1[VC$inde1]
d2l.be1.be1[VC$inde0] <- b1b1CY/A.M2 - C.M2^2/A.M2^2
d2l.be1.be1[VC$inde1] <- -b1b1Y/B.M3 - C.M3^2/B.M3^2
d2l.be1.be1 <- -VC$weights*d2l.be1.be1 # ok verified
b3b3Y <- der2pdf2.dereta2.M3 - ( c.copula2.be2.C1.M3*derp2.dereta2.M3^2 + c.copula.be2.C1.M3*der2p2.dereta2eta2.M3 - (c.copula2.be2.C2.M3*derp2m1.dereta2.M3^2 + c.copula.be2.C2.M3*der2p2m1.dereta2eta2.M3) )
d2l.be3.be3 <- -VC$weights[VC$inde1]*( b3b3Y/B.M3 - Ceta3^2/B.M3^2 ) # ok verified
b2b2CY <- c.copula2.be2.C1.M2*derp2.dereta2.M2^2 + c.copula.be2.C1.M2*der2p2.dereta2eta2.M2 - (c.copula2.be2.C2.M2*derp2m1.dereta2.M2^2 + c.copula.be2.C2.M2*der2p2m1.dereta2eta2.M2)
d2l.be2.be2 <- -VC$weights[VC$inde0]*( b2b2CY/A.M2 - Ceta2^2/A.M2^2 ) # ok verified
b1b3 <- -(c.copula2.be1be2.C1.M3*derp2.dereta2.M3 - c.copula2.be1be2.C2.M3*derp2m1.dereta2.M3)*derp1.dereta1[VC$inde1]
d2l.be1.be3 <- -VC$weights[VC$inde1]*(b1b3/B.M3 - -C.M3*Ceta3/B.M3^2 ) # ok verified
b1t2 <- -(c.copula2.be1th.C1.M3 - c.copula2.be1th.C2.M3)*derp1.dereta1[VC$inde1]
d2l.be1.th2 <- -VC$weights[VC$inde1]*( b1t2/B.M3 - -C.M3*-Cs.M3/B.M3^2 ) # ok verified
t1t1CY <- c.copula2.theta.C1.M2*derteta.derteta.st.M2^2 + c.copula.thet.C1.M2*der2teta.derteta.stteta.st.M2 - (c.copula2.theta.C2.M2*derteta.derteta.st.M2^2 + c.copula.thet.C2.M2*der2teta.derteta.stteta.st.M2 )
d2l.th1.th1 <- -VC$weights[VC$inde0]*( t1t1CY/A.M2 - Cs.M2^2/A.M2^2 ) # ok verified
t2t2Y <- -(c.copula2.theta.C1.M3*derteta.derteta.st.M3^2 + c.copula.thet.C1.M3*der2teta.derteta.stteta.st.M3 - (c.copula2.theta.C2.M3*derteta.derteta.st.M3^2 + c.copula.thet.C2.M3*der2teta.derteta.stteta.st.M3) )
d2l.th2.th2 <- -VC$weights[VC$inde1]*(t2t2Y/B.M3 - Cs.M3^2/B.M3^2 ) # ok but then results are not good
b1t1 <- (c.copula2.be1th.C1.M2 - c.copula2.be1th.C2.M2)*derp1.dereta1[VC$inde0]
d2l.be1.th1 <- -VC$weights[VC$inde0]*( b1t1/A.M2 - C.M2*Cs.M2/A.M2^2 ) # looks ok
b1b2 <- (c.copula2.be1be2.C1.M2*derp2.dereta2.M2 - c.copula2.be1be2.C2.M2*derp2m1.dereta2.M2)*derp1.dereta1[VC$inde0]
d2l.be1.be2 <- -VC$weights[VC$inde0]*( b1b2/A.M2 - C.M2*Ceta2/A.M2^2 ) # looks ok
b2t1 <- c.copula2.be2th.C1.M2*derp2.dereta2.M2 - c.copula2.be2th.C2.M2*derp2m1.dereta2.M2
d2l.be2.th1 <- -VC$weights[VC$inde0]*( b2t1/A.M2 - Ceta2*Cs.M2/A.M2^2 ) # looks ok
b3t2 <- -(c.copula2.be2th.C1.M3*derp2.dereta2.M3 - c.copula2.be2th.C2.M3*derp2m1.dereta2.M3)
d2l.be3.th2 <- -VC$weights[VC$inde1]*( b3t2/B.M3 - -Ceta3*Cs.M3/B.M3^2 ) # looks ok
d2l.th1.th2 <- 0
d2l.be2.be3 <- 0
d2l.be2.th2 <- 0
d2l.be3.th1 <- 0
be1.be1 <- crossprod(VC$X1*c(d2l.be1.be1),VC$X1)
be2.be2 <- crossprod(VC$X2*c(d2l.be2.be2),VC$X2)
be3.be3 <- crossprod(VC$X3*c(d2l.be3.be3),VC$X3)
th1.th1 <- crossprod(VC$X4*c(d2l.th1.th1),VC$X4)
th2.th2 <- crossprod(VC$X5*c(d2l.th2.th2),VC$X5)
be1.be2 <- crossprod(VC$X1[VC$inde0,]*c(d2l.be1.be2),VC$X2)
be1.be3 <- crossprod(VC$X1[VC$inde1,]*c(d2l.be1.be3),VC$X3)
be1.th1 <- crossprod(VC$X1[VC$inde0,]*c(d2l.be1.th1),VC$X4)
be1.th2 <- crossprod(VC$X1[VC$inde1,]*c(d2l.be1.th2),VC$X5)
be2.th1 <- crossprod(VC$X2*c(d2l.be2.th1),VC$X4)
be3.th2 <- crossprod(VC$X3*c(d2l.be3.th2),VC$X5)
th1.th2 <- matrix(0, dim(VC$X4)[2], dim(VC$X5)[2])
be2.be3 <- matrix(0, dim(VC$X2)[2], dim(VC$X3)[2])
be2.th2 <- matrix(0, dim(VC$X2)[2], dim(VC$X5)[2])
be3.th1 <- matrix(0, dim(VC$X3)[2], dim(VC$X4)[2])
H <- rbind( cbind( be1.be1 , be1.be2 , be1.be3 , be1.th1, be1.th2 ),
cbind( t(be1.be2), be2.be2 , be2.be3 , be2.th1, be2.th2 ),
cbind( t(be1.be3), t(be2.be3), be3.be3 , be3.th1, be3.th2 ),
cbind( t(be1.th1), t(be2.th1), t(be3.th1), th1.th1, th1.th2 ),
cbind( t(be1.th2), t(be2.th2), t(be3.th2), t(th1.th2), th2.th2 )
)
G <- -c( colSums( c(dl.dbe1)*VC$X1),
colSums( c(dl.dbe2)*VC$X2),
colSums( c(dl.dbe3)*VC$X3),
colSums(c(dl.dteta1.st)*VC$X4),
colSums(c(dl.dteta2.st)*VC$X5)
)
res <- -sum(l.par)
if(VC$extra.regI == "pC" && VC$hess==FALSE) H <- regH(H, type = 1)
S.h <- ps$S.h
if( length(S.h) != 1){
S.h1 <- 0.5*crossprod(params,S.h)%*%params
S.h2 <- S.h%*%params
} else S.h <- S.h1 <- S.h2 <- 0
S.res <- res
res <- S.res + S.h1
G <- G + S.h2
H <- H + S.h
if(VC$extra.regI == "sED") H <- regH(H, type = 2)
list(value = res, gradient = G, hessian = H, S.h = S.h, S.h1 = S.h1, S.h2 = S.h2, l = S.res, l.par = l.par, ps = ps, l.ln = NULL,
eta1 = eta1, eta2 = eta2, eta3 = eta3, etad1 = etad1,
etad2 = etad2, etas1 = etas1, etas2 = etas2, etan1 = etan1, etan2 = etan2,
dl.dbe1 = dl.dbe1, dl.dbe2 = dl.dbe2, dl.dbe3 = dl.dbe3, dl.dteta1.st = dl.dteta1.st, dl.dteta2.st = dl.dteta2.st,
BivD1 = VC$BivD1, BivD2 = VC$BivD2,
p1 = p1, p0 = p0, pdf1 = pdf2.M2, pdf2 = pdf2.M3, c.copula2.be1be2 = c(c.copula2.be1be2.C1.M2, c.copula2.be1be2.C2.M2, c.copula2.be1be2.C1.M3, c.copula2.be1be2.C2.M3),
teta.st1 = teta.st1, teta.st2 = teta.st2,
Cop1 = Cop1, Cop2 = Cop2, teta1 = teta1, teta2 = teta2)
}
|
/R/bprobgHsDiscr1ROY.r
|
no_license
|
cran/GJRM
|
R
| false | false | 15,859 |
r
|
# PROBLEM SET 1 -----
# Use "which" and "letters" to find the position in the alphabet of the vowels
which(letters %in% c("a", "e", "i", "o", "u", "y"))
# Generate the sequence 1^2, 2^2, ... 20^2
(1:20)^2
# Write code that does the same thing as the "which" function for:
x <- c(2,4,3,1,0,3)
which(x==3)
seq_along(x)[x==3]
# How many english colornames does R recognize that are longer than 8 characters? (use "nchar")
sum(nchar(colors()) > 8)
# Thanksgiving is the 4th Thursday of November. Professor Rossi's birthday is Nov 25.
# Find all years between (and including) 1950-2050 in which his birthday is on Thanksgiving.
# (use "seq" and "as.Date")
# get all november 25ths
twofives <- seq(from=as.Date("1950-11-25"), to=as.Date("2050-11-25"), by="year")
bdays <- which(weekdays(twofives) == "Thursday")
length(bdays)
twofives[bdays]
# PROBLEM SET 2 -----
# Write your own code to extract the diagonal of an nxn matrix
n <- 8
mat <- matrix(1:(n^2), nrow=n, ncol=n); mat
mat[cbind(1:n, 1:n)]
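# Quick cross-check (not part of the assignment): base R's diag() gives the same values
all(mat[cbind(1:n, 1:n)] == diag(mat))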
# Write code to extract the first lower off-diagonal band of a matrix
# i.e., for the matrix below, extract values (2, 8, 14, 20)
n <- 5
mat <- matrix(1:(n^2), nrow=n, ncol=n); mat
mat[cbind(2:n, 1:(n-1))]
# Write code to generate a general matrix (i.e., of any dimension n × n) that follows this pattern
#      [,1] [,2] [,3] [,4] [,5] [,6]
# [1,]    2    3    4    5    6    7
# [2,]    3    4    5    6    7    8
# [3,]    4    5    6    7    8    9
# [4,]    5    6    7    8    9   10
# [5,]    6    7    8    9   10   11
# [6,]    7    8    9   10   11   12
n <- 6
matrix(1:n, nrow=n, ncol=n) +
matrix(1:n, nrow=n, ncol=n, byrow=T)
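# An equivalent one-liner (illustrative) using base R's outer()
outer(1:n, 1:n, "+")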
# use the "rnorm()" function to take 10,000 draws from a normal distribution with mean 3, and sdev 5.
# store these 10,000 draws in a 100x100 matrix and calculate the mean and stdev of each row;
# then take the average of those 100 means and the average of those 100 sd's
draws <- rnorm(10000, mean=3, sd=5)
mat <- matrix(draws, nrow=100, ncol=100)
means <- apply(mat, 1, mean)
sds <- apply(mat, 1, sd)
mean(means)
mean(sds)
## PROBLEM SET 3
# Consider two families: the SSS family and the YYY family. Here's a list that has two
# elements (SSS and YYY), each is a (sub)list. These sublists each have two vectors.
# Write code to "extract" the 'Alison' element (bonus points if you do this multiple ways).
fams <- list(
SSS = list(parents = c("Ron", "Su"), kids = c("Alison", "Elisabeth")),
YYY = list(parents = c("Lorie", "Bill"), children = c("Dan", "Nick", "Matt"))
)
fams$SSS$kids[1]
fams[["SSS"]][["kids"]][1]
fams[[1]][[2]][1]
# I use split to make a list below.
# Use an `apply` function to find the mean of each list element of xx
xx <- split(matrix(1:100, 10, 10), 1:10)
lapply(xx, mean)
sapply(xx, mean)
# PROBLEM SET 4
# 1. load the "mtcars" dataset like this:
data(mtcars)
# 1a) Are any variables factors?
str(mtcars) #no
# 1b) calculate the average miles per gallon of cars in the mtcars dataset
mean(mtcars$mpg)
# 1c) how many cars (ie, observations or rows) have each number of cylinders? (use "table")
table(mtcars$cyl)
# 1d) make a new variable called "carname". Populate that column with the rownames of the mtcars dataframe
mtcars$carname <- rownames(mtcars)
# 1e) create a dummy (0/1) variable to indicate which cars are Mercedes:
# hint: use grepl("Merc", ...) where you fill in the ...
# aside: the name grep comes from a command line tool
# g/re/p (globally search for a regular expression and print matching lines)
# the "l" in grepl is because this R function returns logical values
mtcars$is_merc_log <- grepl("Merc", mtcars$carname) # for a col of TRUE/FALSE
mtcars$is_merc_int <- as.integer(grepl("Merc", mtcars$carname)) # for a col of 0/1
# 1f) calculate the correlation between the miles per gallon and the weight of the Mercedes cars
# base R approach
cor(mtcars[mtcars$is_merc_log, c("mpg", "wt")])
# another base R approach
cor(mtcars[mtcars$is_merc_log == T, "mpg"], mtcars[mtcars$is_merc_log == T, "wt"])
# using "with"
with(mtcars[mtcars$is_merc_log==T,], cor(mpg, wt))
# using "data table"
library(data.table)
setDT(mtcars)
mtcars[is_merc_log==T, cor(mpg, wt)]
# using "tidyverse"
library(tidyverse)
mtcars %>% filter(is_merc_log) %>% summarize(cor(mpg, wt))
# 2. Load the "iris" dataset
data(iris)
# 2a) use "aggregate" to find the maximum of each of the numeric values by "Species",
# then store this aggregated dataset as a dataframe named "iris2"
iris2 <- aggregate(. ~ Species, data=iris, FUN="max")
# let's change the names of iris2 to indicate they are maximums
names(iris2)[-1] <- paste0(names(iris2)[-1], ".species.max")
# 2b) merge iris2 onto iris by species
merge(iris, iris2, by="Species")
# 2c) take the original iris dataset and reshape it to be long; the resulting dataset
# should have 3 columns: species, measurement_name, and measurement_value
tidyr::pivot_longer(data=iris, !Species, names_to="m_name", values_to="m_value")
## PROBLEM SET 5
# This function should replace NAs with zeros. Finish it by replacing the ...'s
na_to_zero <- function(vec_with_nas) {
stopifnot(is.vector(...))
fixed_vec <- ifelse(is.na(...), ..., ...)
return(fixed_vec)
}
na_to_zero <- function(vec_with_nas) {
stopifnot(is.vector(vec_with_nas))
fixed_vec <- ifelse(is.na(vec_with_nas), 0, vec_with_nas)
return(fixed_vec)
}
na_to_zero(c(1, 2, NA, NA, 5))
# Write a function that takes in a dataframe and calculates the correlation
# between the first column and every other column using a loop.
set.seed(234)
example_df <- mapply(function (x) sample(1:100, 10, T), 1:5)
example_df <- as.data.frame(example_df)
corr_with_col1 <- function(df) {
N <- ncol(df)
result <- vector(length=N)
for(i in 1:N) {
result[i] <- cor(df[,1], df[,i])
}
return(result)
}
corr_with_col1(example_df)
cor(example_df)
# Use an apply function and an anonymous function to calculate sum(x^2 - 10)
# for each vector in the following list
testlist <- list(
x1 = 1:10,
x2 = 11:20,
x3 = 21:30
)
sapply(testlist, function(x) sum(x^2 - 10))
## PROBLEM SET 6
# Use the 'diamonds' dataset in the 'ggplot2' package to make the following plot:
# x is 'log(carat)'
# y is 'log(price)'
# color the points with a different color for each level of 'clarity'
# (you may find that transparency is nice addition here)
# use pch to change the point character to a filled-in circle
# use cex to make the points small
# be sure to label the x-axis and y-axis, and add a title
data(diamonds, package="ggplot2")
colvec <- RColorBrewer::brewer.pal(n=8, name="Dark2")
plot(x=log(diamonds$carat),
y=log(diamonds$price),
col=colvec[diamonds$clarity],
pch=19,
cex=0.5,
xlab="Log Carat",
ylab="Log Price",
main="My Plot")
# Run a regression of log(price) on log(carat) using the diamonds dataset and plot
# the fitted regression line on the plot. Use lwd to make the line thicker and more noticeable.
reg <- lm(log(price) ~ log(carat), data=diamonds)
abline(reg, col="black", lwd=3)
## PROBLEM SET 7
# Use the 'diamonds' dataset in the 'ggplot2' to make the following plot:
# x is 'log(carat)'
# y is 'log(price)'
# color the points with a different color for each level of 'clarity'
# add a title
ggplot(diamonds) +
geom_point(aes(x=log(carat), y=log(price), color=clarity)) +
ggtitle("Look at my fancy plot!")
## PROBLEM SET 8
# use the flights data to answer these questions
install.packages("nycflights13")
data(flights, package="nycflights13")
# Convert the flights data.frame to a tibble.
library(tidyverse)
flights <- as_tibble(flights)
# What is the average airtime per destination?
flights %>% group_by(dest) %>% summarize(mean(air_time, na.rm=T))
# Calculate the monthly average departure delay and arrival delay by NY airport ("origin")
flights %>%
group_by(year, month, origin) %>%
summarize(avg_dd = mean(dep_delay, na.rm=T),
avg_ar = mean(arr_delay, na.rm=T))
# Add the variable "tdf" to the data.frame, where "tdf" is the total number of daily flights.
# That is, "tdf" is a count of the number of flights that departed each day
# As a check, Jan 1, 2013 had 842 flights that day (you should get the same number for Jan 1)
flights <- flights %>%
group_by(year, month, day) %>%
mutate(tdf = n())
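# Quick check (illustrative): Jan 1, 2013 should show tdf == 842
flights %>% filter(year == 2013, month == 1, day == 1) %>% distinct(tdf)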
# Delete the variable time_hour from the data frame
flights <- flights %>% select(-time_hour)
## PROBLEM SET 9
# use the flights data to answer these questions with data.table syntax
install.packages("nycflights13")
data(flights, package="nycflights13")
# Convert the flights data.frame to a data.table.
library(data.table)
setDT(flights)
# What is the average airtime per destination?
flights[ , .(avg_airtime = mean(air_time, na.rm=T)), by=.(dest)]
# Calculate the monthly average departure delay and arrival delay by NY airport ("origin")
# Do this *without* .SDcols
flights[ , .(avg_ad = mean(arr_delay, na.rm=T),
avg_dd = mean(dep_delay, na.rm=T)), by=.(month, year, origin)]
# Do this *with* .SDcols
cols <- grep("delay", names(flights), value=T)
flights[ , lapply(.SD, mean, na.rm=T), by=.(month, year, origin), .SDcols=cols]
# Add the variable "tdf" to the data.table, where "tdf" is the total number of daily flights.
# That is, "tdf" is a count of the number of flights that departed each day
# As a check, Jan 1, 2013 had 842 flights that day (you should get the same number for Jan 1)
flights[ , tdf := .N, by=.(year, month, day)]
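# Quick check (illustrative): Jan 1, 2013 should show tdf == 842
flights[year == 2013 & month == 1 & day == 1, unique(tdf)]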
# Delete the variable time_hour from the data.table
flights[ , time_hour := NULL]
|
/Code/pset_solutions.R
|
no_license
|
309406882/MFE-R-Workshop-2021
|
R
| false | false | 11,683 |
r
|
# Species Richness Analysis
# Testing composition of sensitive species among zones and habitats
# Julie Pristed
# April 2021
rm(list=ls()) #clear environment
#install.packages("DescTools")
# Working Directory
# setwd("/Users/JuliePristed/Documents/Dokumenter/AU/A. Kandidat/A. Speciale/Data")
# Softcoding working directory
Dir.Base <- getwd() # to find the project folder
Dir.Data.Arter <- file.path(Dir.Base, "UnikkeArter.csv") # index the species list file
Dir.Data.ArterStat <- file.path(Dir.Base, "ArtsrigdomR.csv") # index the richness data file
# Loading datasets
Arter <- read.csv("UnikkeArter.csv", header = TRUE, sep = ",", stringsAsFactors = TRUE)
str(Arter)
levels(Arter$Zone)
ArterStat <- read.csv("ArtsrigdomR.csv", header = TRUE, sep = ",")
ArterStat$Zone_orto <- as.factor(ArterStat$Zone_orto)
str(ArterStat)
levels(ArterStat$Zone_orto)
# Loading packages
library(dplyr) # Data manipulation
library(DescTools) # Computing Dunn's post hoc test
# Zone subsets
ArterNaturlig <- filter(Arter, Zone == "Naturlig Vegetation")
Arter2014 <- filter(Arter, Zone == "2014")
Arter2017 <- filter(Arter, Zone == "2017")
Arter2018 <- filter(Arter, Zone == "2018")
Arter2019 <- filter(Arter, Zone == "2019")
ArterPlantage <- filter(Arter, Zone == "Plantage")
ArterRydning <- filter(Arter, Zone != "Naturlig Vegetation")
ArterRydning <- filter(ArterRydning, Zone != "Plantage")
# Habitat subsets
Arter2100 <- filter(Arter, Naturtype == "2100")
Arter2120 <- filter(Arter, Naturtype == "2120")
Arter2130 <- filter(Arter, Naturtype == "2130")
Arter2140 <- filter(Arter, Naturtype == "2140")
Arter2170 <- filter(Arter, Naturtype == "2170")
Arter2190 <- filter(Arter, Naturtype == "2190")
#----------------------------------------------------------------------------------------
# Counting unique species
#----------------------------------------------------------------------------------------
# Total richness
length(unique(Arter$ArtLatin))
Bidragsart <-filter(Arter, Bidragsart == 1)
length(unique(Bidragsart$ArtLatin))
Stjernearter <- filter(Arter, Artsscore > 3)
length(unique(Stjernearter$ArtLatin))
Tostjernearter <-filter(Arter, Artsscore >5)
length(unique(Tostjernearter$ArtLatin))
#----------------------------------------------------------------------------------------
# Counting unique species in zones
#----------------------------------------------------------------------------------------
# Natural Vegetation
length(unique(ArterNaturlig$ArtLatin))
BidragsartNaturlig <-filter(ArterNaturlig, Bidragsart == 1)
length(unique(BidragsartNaturlig$ArtLatin))
# 2014
length(unique(Arter2014$ArtLatin))
Bidragsart2014 <-filter(Arter2014, Bidragsart == 1)
length(unique(Bidragsart2014$ArtLatin))
# 2017
length(unique(Arter2017$ArtLatin))
Bidragsart2017 <-filter(Arter2017, Bidragsart == 1)
length(unique(Bidragsart2017$ArtLatin))
# 2018
length(unique(Arter2018$ArtLatin))
Bidragsart2018 <-filter(Arter2018, Bidragsart == 1)
length(unique(Bidragsart2018$ArtLatin))
# 2019
length(unique(Arter2019$ArtLatin))
Bidragsart2019 <-filter(Arter2019, Bidragsart == 1)
length(unique(Bidragsart2019$ArtLatin))
# Plantation Vegetation
length(unique(ArterPlantage$ArtLatin))
BidragsartPlantage <-filter(ArterPlantage, Bidragsart == 1)
length(unique(BidragsartPlantage$ArtLatin))
#----------------------------------------------------------------------------------------
# Counting unique STAR species in zones
#----------------------------------------------------------------------------------------
# Natural Vegetation
StjernearterNaturlig <- filter(ArterNaturlig, Artsscore > 3)
length(unique(StjernearterNaturlig$ArtLatin))
TostjernearterNaturlig <-filter(ArterNaturlig, Artsscore >5)
length(unique(TostjernearterNaturlig$ArtLatin))
# 2014
Stjernearter2014 <- filter(Arter2014, Artsscore > 3)
length(unique(Stjernearter2014$ArtLatin))
Tostjernearter2014 <-filter(Arter2014, Artsscore >5)
length(unique(Tostjernearter2014$ArtLatin))
# 2017
Stjernearter2017 <- filter(Arter2017, Artsscore > 3)
length(unique(Stjernearter2017$ArtLatin))
Tostjernearter2017 <-filter(Arter2017, Artsscore >5)
length(unique(Tostjernearter2017$ArtLatin))
# 2018
Stjernearter2018 <- filter(Arter2018, Artsscore > 3)
length(unique(Stjernearter2018$ArtLatin))
Tostjernearter2018 <-filter(Arter2018, Artsscore >5)
length(unique(Tostjernearter2018$ArtLatin))
# 2019
Stjernearter2019 <- filter(Arter2019, Artsscore > 3)
length(unique(Stjernearter2019$ArtLatin))
Tostjernearter2019 <-filter(Arter2019, Artsscore >5)
length(unique(Tostjernearter2019$ArtLatin))
# Plantation Vegetation
StjernearterPlantage <- filter(ArterPlantage, Artsscore > 3)
length(unique(StjernearterPlantage$ArtLatin))
TostjernearterPlantage <-filter(ArterPlantage, Artsscore >5)
length(unique(TostjernearterPlantage$ArtLatin))
# Cleared zones
StjernearterRydning <- filter(ArterRydning, Artsscore > 3)
length(unique(StjernearterRydning$ArtLatin))
TostjernearterRydning <-filter(ArterRydning, Artsscore >5)
length(unique(TostjernearterRydning$ArtLatin))
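# A more compact (hypothetical) alternative to the repeated filtering above, using dplyr;
# it assumes the same column names (Zone, ArtLatin, Bidragsart, Artsscore) in 'Arter'.
Arter %>%
  group_by(Zone) %>%
  summarise(n_arter = n_distinct(ArtLatin),
            n_bidragsarter = n_distinct(ArtLatin[Bidragsart == 1]),
            n_stjerne = n_distinct(ArtLatin[Artsscore > 3]),
            n_tostjerne = n_distinct(ArtLatin[Artsscore > 5]))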
#----------------------------------------------------------------------------------------
# Counting unique species in habitats
#----------------------------------------------------------------------------------------
# 2100
length(unique(Arter2100$ArtLatin))
Bidragsart2100 <-filter(Arter2100, Bidragsart == 1)
length(unique(Bidragsart2100$ArtLatin))
# 2120
length(unique(Arter2120$ArtLatin))
Bidragsart2120 <-filter(Arter2120, Bidragsart == 1)
length(unique(Bidragsart2120$ArtLatin))
# 2130
length(unique(Arter2130$ArtLatin))
Bidragsart2130 <-filter(Arter2130, Bidragsart == 1)
length(unique(Bidragsart2130$ArtLatin))
# 2140
length(unique(Arter2140$ArtLatin))
Bidragsart2140 <-filter(Arter2140, Bidragsart == 1)
length(unique(Bidragsart2140$ArtLatin))
# 2170
length(unique(Arter2170$ArtLatin))
Bidragsart2170 <-filter(Arter2170, Bidragsart == 1)
length(unique(Bidragsart2170$ArtLatin))
# 2190
length(unique(Arter2190$ArtLatin))
Bidragsart2190 <-filter(Arter2190, Bidragsart == 1)
length(unique(Bidragsart2190$ArtLatin))
#----------------------------------------------------------------------------------------
# Counting unique STAR species in habitats
#----------------------------------------------------------------------------------------
# 2100
Stjernearter2100 <- filter(Arter2100, Artsscore > 3)
length(unique(Stjernearter2100$ArtLatin))
Tostjernearter2100 <-filter(Arter2100, Artsscore >5)
length(unique(Tostjernearter2100$ArtLatin))
# 2120
Stjernearter2120 <- filter(Arter2120, Artsscore > 3)
length(unique(Stjernearter2120$ArtLatin))
Tostjernearter2120 <-filter(Arter2120, Artsscore >5)
length(unique(Tostjernearter2120$ArtLatin))
# 2130
Stjernearter2130 <- filter(Arter2130, Artsscore > 3)
length(unique(Stjernearter2130$ArtLatin))
Tostjernearter2130 <-filter(Arter2130, Artsscore >5)
length(unique(Tostjernearter2130$ArtLatin))
# 2140
Stjernearter2140 <- filter(Arter2140, Artsscore > 3)
length(unique(Stjernearter2140$ArtLatin))
Tostjernearter2140 <-filter(Arter2140, Artsscore >5)
length(unique(Tostjernearter2140$ArtLatin))
# 2170
Stjernearter2170 <- filter(Arter2170, Artsscore > 3)
length(unique(Stjernearter2170$ArtLatin))
Tostjernearter2170 <-filter(Arter2170, Artsscore >5)
length(unique(Tostjernearter2170$ArtLatin))
# 2190
Stjernearter2190 <- filter(Arter2190, Artsscore > 3)
length(unique(Stjernearter2190$ArtLatin))
Tostjernearter2190 <-filter(Arter2190, Artsscore >5)
length(unique(Tostjernearter2190$ArtLatin))
#----------------------------------------------------------------------------------------
# Analysis of variance
#----------------------------------------------------------------------------------------
# Testing normality
hist(ArterStat$Antal.arter)
shapiro.test(ArterStat$Antal.arter) # normality not OK
hist(log10(ArterStat$Antal.arter))
shapiro.test(log10(ArterStat$Antal.arter)) # didn't help
hist(ArterStat$Antal.bidragsarter)
shapiro.test(ArterStat$Antal.bidragsarter)
hist(log10(ArterStat$Antal.bidragsarter))
shapiro.test(log10(ArterStat$Antal.bidragsarter))
hist(ArterStat$Antal.stjernearter)
shapiro.test(ArterStat$Antal.stjernearter)
hist(log10(ArterStat$Antal.stjernearter))
shapiro.test(log10(ArterStat$Antal.stjernearter))
hist(ArterStat$Antal.tostjernearter)
shapiro.test(ArterStat$Antal.tostjernearter)
hist(log10(ArterStat$Antal.tostjernearter))
shapiro.test(log10(ArterStat$Antal.tostjernearter))
# Computing Kruskal-Wallis test - we cannot use anova when data are not normally distributed
# Resource: http://www.r-tutor.com/elementary-statistics/non-parametric-methods/kruskal-wallis-test
# star species in zones
starKW <- kruskal.test(Antal.stjernearter ~ Zone_orto, data = ArterStat)
starKW # the number of star species differs significantly among the zones
star2KW <- kruskal.test(Antal.tostjernearter ~ Zone_orto, data = ArterStat)
star2KW
# star species in habitats
starKWhab <- kruskal.test(Antal.stjernearter ~ Habitat.vurdering, data = ArterStat)
starKWhab
star2KWhab <- kruskal.test(Antal.tostjernearter ~ Habitat.vurdering, data = ArterStat)
star2KWhab
# Running Dunn's test
# Resource: http://www.r-tutor.com/elementary-statistics/non-parametric-methods/mann-whitney-wilcoxon-test
# https://rdrr.io/cran/DescTools/man/DunnTest.html
# https://stackoverflow.com/questions/58472408/how-to-apply-dunn-test-for-dataframes-in-r
# https://rcompanion.org/handbook/F_08.html
# Star species in zones
starDunn <- DunnTest(Antal.stjernearter ~ Zone_orto, ArterStat)
starDunn
star2Dunn <- DunnTest(Antal.tostjernearter ~ Zone_orto, ArterStat)
star2Dunn
# Star species in habitats
starDunnhab <- DunnTest(Antal.stjernearter ~ Habitat.vurdering, ArterStat)
starDunnhab
star2Dunnhab <- DunnTest(Antal.tostjernearter ~ Habitat.vurdering, ArterStat)
star2Dunnhab
# End of script ---------------
|
/Julie/Species Richness.R
|
no_license
|
JuliePristed/HusbyKlitplantage
|
R
| false | false | 9,755 |
r
|
#' @rdname chart.RollingRegression
#' @export
charts.RollingRegression = function (Ra, Rb, width = 12, Rf = 0, main = NULL, legend.loc = NULL, event.labels=NULL, ...)
{ # @author Peter Carl
# DESCRIPTION:
# A wrapper to create a panel of RollingRegression charts that demonstrates
# how the attributes change through time.
# Inputs:
# Ra: a matrix, data frame, or timeSeries, usually a set of monthly returns.
# The first column is assumed to be the returns of interest, the next
# columns are assumed to be relevant benchmarks for comparison.
# Rb: a matrix, data frame, or timeSeries that is a set of returns of the
# same scale and periodicity as R.
# Rf: the risk free rate. Remember to set this to the same periodicity
# as the data being passed in.
# attribute: Used to select the regression parameter to use in the chart. May
# be any of:
# Alpha - shows the y-intercept
# Beta - shows the slope of the regression line
# R-Squared - shows the fit of the regression to the data
#
# Outputs:
# A stack of three related timeseries line charts
# FUNCTION:
columns.a = ncol(Ra)
columns.b = ncol(Rb)
# if(columns.a > 1 | columns.b > 1)
# legend.loc = "topleft"
# else
# legend.loc = NULL
# plot.new()
op <- par(no.readonly=TRUE)
layout(matrix(c(1,2,3)),heights=c(1.3,1,1.3),widths=1)
par(mar=c(1,4,4,2))
if(is.null(main)){
freq = periodicity(Ra)
switch(freq$scale,
minute = {freq.lab = "minute"},
hourly = {freq.lab = "hour"},
daily = {freq.lab = "day"},
weekly = {freq.lab = "week"},
monthly = {freq.lab = "month"},
quarterly = {freq.lab = "quarter"},
yearly = {freq.lab = "year"}
)
main = paste("Rolling ",width,"-",freq.lab," Regressions", sep="")
}
chart.RollingRegression(Ra, Rb, width = width, Rf = Rf, attribute = "Alpha", xaxis = FALSE, main = main, ylab = "Alpha", legend.loc=legend.loc, event.labels = event.labels, ...)
par(mar=c(1,4,0,2))
chart.RollingRegression(Ra, Rb, width = width, Rf = Rf, attribute = "Beta", main = "", ylab = "Beta", xaxis = FALSE, event.labels = NULL, ...)
par(mar=c(5,4,0,2))
chart.RollingRegression(Ra, Rb, width = width, Rf = Rf, attribute = "R-Squared", main = "", ylab = "R-Squared", event.labels = NULL, ...)
par(op)
}
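# Example (illustrative, not run; assumes the 'managers' dataset shipped with PerformanceAnalytics):
# data(managers)
# charts.RollingRegression(managers[, 1:2, drop = FALSE], managers[, 8, drop = FALSE],
#                          Rf = managers[, 10, drop = FALSE], width = 24)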
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2012 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id: charts.RollingRegression.R 2163 2012-07-16 00:30:19Z braverock $
#
###############################################################################
|
/R/charts.RollingRegression.R
|
no_license
|
sanjivkv/PerformanceAnalytics
|
R
| false | false | 2,952 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a4aLAK.R
\name{a4aLAK}
\alias{a4aLAK}
\title{Convert length frequency to age composition data}
\usage{
a4aLAK(
Ldat,
linf = 100,
k = 0.2,
t0 = -0.5,
cvL = 0.15,
ages = 0:5,
sd.fix = FALSE,
plot = FALSE,
aW = NULL,
bW = NULL
)
}
\arguments{
\item{Ldat}{a matrix with two columns, where each row specifies the lower and upper length for a given length bin (the lowest should be -Inf, and highest Inf)}
\item{linf}{asymptotic (infinite) length}
\item{k}{Brody growth coefficient}
\item{t0}{theoretical age at zero length}
\item{cvL}{CV of mean length-at-age}
\item{ages}{desired age range}
\item{sd.fix}{if TRUE, a fixed sd of length-at-age corresponding to 0.5*Linf is assumed}
\item{aW}{parameter of the length-weight relationship (length in mm)}
\item{bW}{parameter of the length-weight relationship}
}
\value{
AgeComp_at, a matrix of expected age-composition data, where columns are samples in year t, and cells are the count of samples with a given age and year
}
\description{
Converts a data.frame of multi-annual length frequency into age composition data
}
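# Background sketch (not from the package): the von Bertalanffy growth curve
# implied by the linf, k and t0 arguments above -- mean length at age a is
# linf * (1 - exp(-k * (a - t0))) -- with cvL controlling the spread of
# length-at-age around that mean. The function name below is hypothetical.
vbgf_mean_length <- function(a, linf = 100, k = 0.2, t0 = -0.5) {
  linf * (1 - exp(-k * (a - t0)))
}
vbgf_mean_length(0:5)  # expected mean lengths for ages 0-5 under the defaults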
|
/man/a4aLAK.Rd
|
no_license
|
iagomosqueira/a4adiags
|
R
| false | true | 1,126 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a4aLAK.R
\name{a4aLAK}
\alias{a4aLAK}
\title{Convert length frequency to age composition data}
\usage{
a4aLAK(
Ldat,
linf = 100,
k = 0.2,
t0 = -0.5,
cvL = 0.15,
ages = 0:5,
sd.fix = FALSE,
plot = FALSE,
aW = NULL,
bW = NULL
)
}
\arguments{
\item{Ldat}{a matrix with two columns, where each row specifies the lower and upper length for a given length bin (the lowest should be -Inf, and highest Inf)}
\item{linf}{asymptotic (infinite) length}
\item{k}{Brody growth coefficient}
\item{t0}{theoretical age at zero length}
\item{cvL}{CV of mean length-at-age}
\item{ages}{desired age range}
\item{sd.fix}{if TRUE a fixed sd of length-at-age is assumed, corresponding to 0.5*Linf}
\item{aW}{parameter of the length-weight relationship (length in mm)}
\item{bW}{parameter of the length-weight relationship}
}
\value{
AgeComp_at, a matrix of expected age-composition data, where columns are samples in year t, and cells are the count of samples with a given age and year
}
\description{
Converts a data.frame of multi-annual length frequency into age composition data
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LAngle.R
\name{LAngle}
\alias{LAngle}
\title{LAngle}
\usage{
LAngle(
Disc_1 = -5,
Disc_2 = -33,
Rad_1 = 1.4375,
Rad_2 = 3,
Ins_1 = 2.375,
Fing_1 = 4.8125,
Knuc_1 = 0.610865,
Rot_1 = 0.785398,
Interval = 0.2
)
}
\arguments{
\item{Disc_1}{relative angle (in degrees) of larger disc (disc 1) in chamber door vs. gravity}
\item{Disc_2}{relative angle (in degrees) of smaller disc (disc 2) in chamber door vs. gravity}
\item{Rad_1}{fixed value describing the distance between the center of larger disc (disc 1) and the center of the smaller disc (disc 2)}
\item{Rad_2}{fixed value describing the distance between the center of disc 2 and the position of each cathode (cathodes are symmetric)}
\item{Ins_1}{distance the gun has been inserted into chamber, measured from inside of disc 2 to the center of the gun knuckle}
\item{Fing_1}{distance from the knuckle to the gun tip}
\item{Knuc_1}{angle (in radians) at which the gun is set at the knuckle. Min is 0, max is 1.18}
\item{Rot_1}{angle (in radians) at which the gun is rotated, min is 0, max is 2*pi}
\item{Interval}{the size of each step in a numeric solution. Larger intervals will produce coarser solutions, but will be faster. Smaller intervals will produce more detailed solutions but will be more computationally expensive}
}
\description{
calculates the angle between L and the platen normal
}
\author{
Greg Pilgrim \email{gpilgrim@vergason.com}
}
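# Illustrative call (a sketch, not from the package documentation): every
# argument has a default, so the geometry can be explored by overriding only
# the quantities of interest; the values below are arbitrary.
library(SputterCalc)  # assumes the package is installed (e.g. from GitHub)
LAngle(Knuc_1 = 1.0, Rot_1 = pi / 2, Interval = 0.05)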
|
/man/LAngle.Rd
|
permissive
|
gpilgrim2670/SputterCalc
|
R
| false | true | 1,513 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LAngle.R
\name{LAngle}
\alias{LAngle}
\title{LAngle}
\usage{
LAngle(
Disc_1 = -5,
Disc_2 = -33,
Rad_1 = 1.4375,
Rad_2 = 3,
Ins_1 = 2.375,
Fing_1 = 4.8125,
Knuc_1 = 0.610865,
Rot_1 = 0.785398,
Interval = 0.2
)
}
\arguments{
\item{Disc_1}{relative angle (in degrees) of larger disc (disc 1) in chamber door vs. gravity}
\item{Disc_2}{relative angle (in degrees) of smaller disc (disc 2) in chamber door vs. gravity}
\item{Rad_1}{fixed value describing the distance between the center of larger disc (disc 1) and the center of the smaller disc (disc 2)}
\item{Rad_2}{fixed value describing the distance between the center of disc 2 and the position of each cathode (cathodes are symmetric)}
\item{Ins_1}{distance the gun has been inserted into chamber, measured from inside of disc 2 to the center of the gun knuckle}
\item{Fing_1}{distance from the knuckle to the gun tip}
\item{Knuc_1}{angle (in radians) at which the gun is set at the knuckle. Min is 0, max is 1.18}
\item{Rot_1}{angle (in radians) at which the gun is rotated, min is 0, max is 2*pi}
\item{Interval}{the size of each step in a numeric solution. Larger intervals will produce coarser solutions, but will be faster. Smaller intervals will produce more detailed solutions but will be more computationally expensive}
}
\description{
calculates the angle between L and the platen normal
}
\author{
Greg Pilgrim \email{gpilgrim@vergason.com}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power.R
\name{power}
\alias{power}
\title{General power calculation method for PLR.}
\usage{
power(power = NULL, p = NULL, n3 = NULL, s2 = NULL, tau = NULL)
}
\arguments{
\item{p}{significance level at which to estimate power}
\item{s2}{underlying parameter for which power is to be estimated. Marginal standard deviation for \eqn{Z_a}{Z_{a}} in category 3}
\item{tau}{underlying parameter for which power is to be estimated. Marginal standard deviation for \eqn{Z_d}{Z_{d}} in category 3}
\item{ll}{object of class \code{\link{lm}}. Given simulated values, \code{\link{ll}} should be generated by a call of the form \code{ll = lm(I(log(1 + plr)) ~ pi2 + poly(s2, 2) + poly(tau, 2) + I(pi2*s2) + I(pi2*tau))}. Simulated values should be from uncorrelated observations on unweighted SNPs, so the asymptotic distribution of PLR is mixture-chi^2.
Power also depends on parameters \code{s1}, the marginal standard deviation for \eqn{Z_{a}}{Z_a} in category 2, \code{pi1}, the proportion of SNPs in category 2, and \code{rho}, the correlation between \eqn{Z_{a}}{Z_a} and \eqn{Z_{d}}{Z_d} in category 3, but the influence on power from these variables is small.}
\item{pi2}{underlying parameter for which power is to be estimated. Corresponds to number of SNPs in category 3}
\item{rho}{underlying parameter for which power is to be estimated. Correlation between \eqn{Z_a}{Z_{a}} and \eqn{Z_d}{Z_{d}} in category 3}
}
\description{
Power of the PLR method depends on the number of SNPs in category 3 (\code{\link{n3}}), and the marginal variances in category 3 of \eqn{Z_{a}}{Z_a} (\code{\link{s2}}) and \eqn{Z_{d}}{Z_d} (\code{\link{tau}}). Given a required power and p-value, the remaining unspecified quantity can be determined.
}
\details{
Function to estimate power to reject the null hypothesis at significance \code{\link{p}} given underlying parameters \code{\link{pi2}}, \code{\link{s2}}, \code{\link{tau}} and \code{\link{rho}}.
}
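# Generic illustration (not the package's own code): estimating power by
# simulation when, as described above, the null distribution of the test
# statistic is mixture-chi^2 -- here assumed to be a 50:50 mixture of a point
# mass at zero and chi-squared(1); the alternative is an arbitrary non-central
# chi-squared, used only to show the mechanics.
set.seed(1)
null_stat <- ifelse(runif(1e5) < 0.5, 0, rchisq(1e5, df = 1))
crit      <- quantile(null_stat, 1 - 0.05)   # critical value at p = 0.05
alt_stat  <- rchisq(1e5, df = 1, ncp = 4)    # hypothetical alternative
mean(alt_stat > crit)                        # Monte-Carlo power estimate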
|
/man/power.Rd
|
no_license
|
mja/subtest
|
R
| false | true | 1,962 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/power.R
\name{power}
\alias{power}
\title{General power calculation method for PLR.}
\usage{
power(power = NULL, p = NULL, n3 = NULL, s2 = NULL, tau = NULL)
}
\arguments{
\item{p}{significance level at which to estimate power}
\item{s2}{underlying parameter for which power is to be estimated. Marginal standard deviation for \eqn{Z_a}{Z_{a}} in category 3}
\item{tau}{underlying parameter for which power is to be estimated. Marginal standard deviation for \eqn{Z_d}{Z_{d}} in category 3}
\item{ll}{object of class \code{\link{lm}}. Given simulated values, \code{\link{ll}} should be generated by a call of the form \code{ll = lm(I(log(1 + plr)) ~ pi2 + poly(s2, 2) + poly(tau, 2) + I(pi2*s2) + I(pi2*tau))}. Simulated values should be from uncorrelated observations on unweighted SNPs, so the asymptotic distribution of PLR is mixture-chi^2.
Power also depends on parameters \code{s1}, the marginal standard deviation for \eqn{Z_{a}}{Z_a} in category 2, \code{pi1}, the proportion of SNPs in category 2, and \code{rho}, the correlation between \eqn{Z_{a}}{Z_a} and \eqn{Z_{d}}{Z_d} in category 3, but the influence on power from these variables is small.}
\item{pi2}{underlying parameter for which power is to be estimated. Corresponds to number of SNPs in category 3}
\item{rho}{underlying parameter for which power is to be estimated. Correlation between \eqn{Z_a}{Z_{a}} and \eqn{Z_d}{Z_{d}} in category 3}
}
\description{
Power of the PLR method depends on the number of SNPs in category 3 (\code{\link{n3}}), and the marginal variances in category 3 of \eqn{Z_{a}}{Z_a} (\code{\link{s2}}) and \eqn{Z_{d}}{Z_d} (\code{\link{tau}}). Given a required power and p-value, the remaining unspecified quantity can be determined.
}
\details{
Function to estimate power to reject the null hypothesis at significance \code{\link{p}} given underlying parameters \code{\link{pi2}}, \code{\link{s2}}, \code{\link{tau}} and \code{\link{rho}}.
}
|
context("test-x_tbl")
# compute_x_tbl -----------------------------------------------------------
# Tested in `new_*()` functions
# compute_x_tbl_dis -------------------------------------------------------
# Tested in `new_*()` functions
# compute_x_tbl_con -------------------------------------------------------
# Tested in `new_*()` functions
# dirac_x_tbl -------------------------------------------------------------
# Main functionality is tested in `new_*()` functions
test_that("dirac_x_tbl ensures that total integral is 1", {
d_dirac <- new_d(1e8, "continuous")
x_tbl <- meta_x_tbl(d_dirac)
expect_equal(trapez_part_integral(x_tbl[["x"]], x_tbl[["y"]]), c(0, 0.5, 1))
# This was the indicator of problem: error was returned because total integral
# wasn't equal to 1.
expect_silent(assert_pdqr_fun(d_dirac))
})
# impute_x_tbl ------------------------------------------------------------
# Tested in `new_*()` functions
# impute_x_tbl_impl -------------------------------------------------------
# Main tests are in `new_*()` functions
test_that("impute_x_tbl_impl throws error", {
expect_error(impute_x_tbl_impl(x_dis_x_tbl, "a"), "type")
})
# impute_x_tbl_impl_dis ---------------------------------------------------
# Main functionality is tested in `impute_x_tbl_impl()`
test_that("impute_x_tbl_impl_dis correctly collapses duplicate 'x'", {
expect_equal(
impute_x_tbl_impl_dis(data.frame(x = c(1, 2, 1), prob = c(0.3, 0.2, 0.5))),
data.frame(x = c(1, 2), prob = c(0.8, 0.2), cumprob = c(0.8, 1))
)
})
# impute_x_tbl_impl_con ---------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# impute_prob -------------------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# impute_y ----------------------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# impute_vec --------------------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# get_x_tbl_sec_col -------------------------------------------------------
test_that("get_x_tbl_sec_col works", {
expect_equal(get_x_tbl_sec_col(x_dis_x_tbl), "prob")
expect_equal(get_x_tbl_sec_col(x_con_x_tbl), "y")
})
# get_type_from_x_tbl -----------------------------------------------------
test_that("get_type_from_x_tbl works", {
expect_equal(get_type_from_x_tbl(x_dis_x_tbl), "discrete")
expect_equal(get_type_from_x_tbl(x_con_x_tbl), "continuous")
})
# filter_x_tbl ------------------------------------------------------------
test_that("filter_x_tbl works", {
x_tbl_dis <- data.frame(x = 1:5, prob = (1:5) / 15)
expect_equal(
filter_x_tbl(x_tbl_dis, c(-10, 1))[, c("x", "prob")],
x_tbl_dis[1, ]
)
x_tbl_con <- data.frame(x = 1:5, y = (1:5) / 12)
expect_equal(
filter_x_tbl(x_tbl_con, c(2.5, 5))[, c("x", "y")],
x_tbl_con[3:5, ]
)
})
# union_inside_x_tbl ------------------------------------------------------
test_that("union_inside_x_tbl works", {
x_tbl_dis_1 <- data.frame(x = 1:3, prob = c(0, 0.3, 0.7))
x_tbl_dis_2 <- data.frame(x = c(0, 1, 1.5, 3.1), prob = rep(0.5, 4))
expect_equal(
union_inside_x_tbl(x_tbl_dis_1, x_tbl_dis_2),
data.frame(x = c(1, 1.5, 2, 3), prob = c(0, 0.5, 0.3, 0.7))
)
x_tbl_con_1 <- data.frame(x = 1:3, y = c(0, 1, 0))
x_tbl_con_2 <- data.frame(x = c(0, 1, 1.5, 3.1), y = rep(0.5, 4))
expect_equal(
union_inside_x_tbl(x_tbl_con_1, x_tbl_con_2),
data.frame(x = c(1, 1.5, 2, 3), y = c(0, 0.5, 1, 0))
)
})
# reflect_x_tbl -----------------------------------------------------------
test_that("reflect_x_tbl works with 'discrete' type", {
x_tbl_dis <- data.frame(
x = c(1, 2, 4), prob = c(0.1, 0, 0.9), cumprob = c(0.1, 0.1, 1)
)
expect_equal(
reflect_x_tbl(x_tbl_dis, 0),
data.frame(
x = c(-4, -2, -1), prob = c(0.9, 0, 0.1), cumprob = c(0.9, 0.9, 1)
)
)
expect_equal(
reflect_x_tbl(x_tbl_dis, 2),
data.frame(
x = c(0, 2, 3), prob = c(0.9, 0, 0.1), cumprob = c(0.9, 0.9, 1)
)
)
})
test_that("reflect_x_tbl works with 'continuous' type", {
x_tbl_con <- data.frame(
x = c( -2, -1, 0, 0.5, 4),
y = c(0.5, 0, 1, 0, 0),
cumprob = c(0, 0.25, 0.75, 1, 1)
)
expect_equal(
reflect_x_tbl(x_tbl_con, 0),
data.frame(
x = c(-4, -0.5, 0, 1, 2),
y = c( 0, 0, 1, 0, 0.5),
cumprob = c(0, 0, 0.25, 0.75, 1)
)
)
expect_equal(
reflect_x_tbl(x_tbl_con, 10),
data.frame(
x = c(16, 19.5, 20, 21, 22),
y = c( 0, 0, 1, 0, 0.5),
cumprob = c(0, 0, 0.25, 0.75, 1)
)
)
})
# ground_x_tbl ------------------------------------------------------------
test_that("ground_x_tbl works", {
expect_equal(ground_x_tbl(x_dis_x_tbl), x_dis_x_tbl)
x_tbl <- data.frame(
x = c(-1, 0.25, 2), y = c(1/1.25, 0, 1/1.75), cumprob = c(0, 0.5, 1)
)
x <- x_tbl[["x"]]
y <- x_tbl[["y"]]
n <- nrow(x_tbl)
out_left <- ground_x_tbl(x_tbl, "left")
expect_equal(out_left[["x"]], c(x[1]-1e-8, x))
expect_equal(out_left[["y"]], c( 0, y))
out_right <- ground_x_tbl(x_tbl, "right")
expect_equal(out_right[["x"]], c(x, x[n]+1e-8))
expect_equal(out_right[["y"]], c(y, 0))
out_both <- ground_x_tbl(x_tbl, "both")
expect_equal(out_both[["x"]], c(x[1]-1e-8, x, x[n]+1e-8))
expect_equal(out_both[["y"]], c( 0, y, 0))
})
test_that("ground_x_tbl doesn't add new zeros to 'y'", {
x_tbl <- data.frame(x = c(1, 2, 3), y = c(0, 1, 0), cumprob = c(0, 0.5, 1))
expect_equal(ground_x_tbl(x_tbl, "left"), x_tbl)
expect_equal(ground_x_tbl(x_tbl, "right"), x_tbl)
expect_equal(ground_x_tbl(x_tbl, "both"), x_tbl)
})
test_that("ground_x_tbl works without column 'cumprob' present", {
output <- ground_x_tbl(data.frame(x = 0:1, y = c(1, 1)), "both")
expect_named(output, c("x", "y"))
})
# add_x_tbl_knots ---------------------------------------------------------
test_that("add_x_tbl_knots works", {
x_tbl <- data.frame(x = 1:3, y = c(1, 2, 1))
expect_equal(
add_x_tbl_knots(x_tbl, c(1.5, 1, -1, 10), only_inside = TRUE),
data.frame(x = c(1, 1.5, 2, 3), y = c(1, 1.5, 2, 1))
)
expect_equal(
add_x_tbl_knots(x_tbl, c(1.5, 1, -1, 10), only_inside = FALSE),
data.frame(x = c(-1, 1, 1.5, 2, 3, 10), y = c(0, 1, 1.5, 2, 1, 0))
)
# `only_inside` is `TRUE` by default
expect_equal(add_x_tbl_knots(x_tbl, c(-100, 100)), x_tbl)
  # Present knots don't get duplicated
expect_equal(add_x_tbl_knots(x_tbl, x_tbl[["x"]]), x_tbl)
})
# enfun_x_tbl -------------------------------------------------------------
test_that("enfun_x_tbl works", {
out_f <- enfun_x_tbl(data.frame(x = c(1, 2, 5), y = c(0, 10, 2)))
expect_equal(
out_f(c(0, 1, 1.5, 2.75, 5, 1000)), c(0, 0, 5, 8, 2, 0)
)
})
# stack_x_tbl -------------------------------------------------------------
test_that("stack_x_tbl works with 'discrete' type", {
x_tbl_dis_1 <- data.frame(x = 1, prob = 1)
x_tbl_dis_2 <- data.frame(x = 2:4, prob = c(0.2, 0.5, 0.3))
x_tbl_dis_3 <- data.frame(x = c(-1, 1, 4, 5), prob = c(0.1, 0.2, 0.3, 0.4))
expect_equal(
stack_x_tbl(list(x_tbl_dis_1, x_tbl_dis_2, x_tbl_dis_3)),
data.frame(x = c(-1, 1, 2, 3, 4, 5), prob = c(0.1, 1.2, 0.2, 0.5, 0.6, 0.4))
)
expect_equal(stack_x_tbl(list(x_tbl_dis_3)), x_tbl_dis_3)
})
test_that("stack_x_tbl works with 'continuous' type", {
x_tbl_con_1 <- data.frame(x = c(1, 3), y = c(0.5, 0.5))
x_tbl_con_2 <- data.frame(x = c(2, 6), y = c(0.25, 0.25))
x_tbl_con_3 <- data.frame(x = c(7, 8), y = c(1, 1))
expect_equal(
data.frame(
x = c( 1, 2-1e-8, 2, 3, 3+1e-8, 6, 6+1e-8, 7-1e-8, 7, 8),
y = c(0.5, 0.5, 0.75, 0.75, 0.25, 0.25, 0, 0, 1, 1)
),
stack_x_tbl(list(x_tbl_con_1, x_tbl_con_2, x_tbl_con_3))
)
})
test_that("stack_x_tbl handles zero density edges", {
x_tbl_1 <- data.frame(x = 1:3, y = c(0, 1, 0))
x_tbl_2 <- data.frame(x = 2:4, y = c(0, 1, 0))
expect_equal(
stack_x_tbl(list(x_tbl_1, x_tbl_2)), data.frame(x = 1:4, y = c(0, 1, 1, 0))
)
})
# stack_x_tbl_dis ---------------------------------------------------------
# Tested in `stack_x_tbl()`
# stack_x_tbl_con ---------------------------------------------------------
# Tested in `stack_x_tbl()`
# remove_extra_edges ------------------------------------------------------
# Tested in `stack_x_tbl()`
# is_x_extra --------------------------------------------------------------
# Tested in `stack_x_tbl()`
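# Context sketch (not part of the test suite): the "x_tbl" exercised above is
# the internal representation behind pdqr objects, and it can be inspected
# through the exported API.
library(pdqr)
d_dis <- new_d(c(1, 2, 2, 3), type = "discrete")
meta_x_tbl(d_dis)          # data frame with columns x, prob, cumprob
d_con <- new_d(rnorm(100), type = "continuous")
head(meta_x_tbl(d_con))    # data frame with columns x, y, cumprob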
|
/tests/testthat/test-x_tbl.R
|
permissive
|
ismayc/pdqr
|
R
| false | false | 8,512 |
r
|
context("test-x_tbl")
# compute_x_tbl -----------------------------------------------------------
# Tested in `new_*()` functions
# compute_x_tbl_dis -------------------------------------------------------
# Tested in `new_*()` functions
# compute_x_tbl_con -------------------------------------------------------
# Tested in `new_*()` functions
# dirac_x_tbl -------------------------------------------------------------
# Main functionality is tested in `new_*()` functions
test_that("dirac_x_tbl ensures that total integral is 1", {
d_dirac <- new_d(1e8, "continuous")
x_tbl <- meta_x_tbl(d_dirac)
expect_equal(trapez_part_integral(x_tbl[["x"]], x_tbl[["y"]]), c(0, 0.5, 1))
# This was the indicator of problem: error was returned because total integral
# wasn't equal to 1.
expect_silent(assert_pdqr_fun(d_dirac))
})
# impute_x_tbl ------------------------------------------------------------
# Tested in `new_*()` functions
# impute_x_tbl_impl -------------------------------------------------------
# Main tests are in `new_*()` functions
test_that("impute_x_tbl_impl throws error", {
expect_error(impute_x_tbl_impl(x_dis_x_tbl, "a"), "type")
})
# impute_x_tbl_impl_dis ---------------------------------------------------
# Main functionality is tested in `impute_x_tbl_impl()`
test_that("impute_x_tbl_impl_dis correctly collapses duplicate 'x'", {
expect_equal(
impute_x_tbl_impl_dis(data.frame(x = c(1, 2, 1), prob = c(0.3, 0.2, 0.5))),
data.frame(x = c(1, 2), prob = c(0.8, 0.2), cumprob = c(0.8, 1))
)
})
# impute_x_tbl_impl_con ---------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# impute_prob -------------------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# impute_y ----------------------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# impute_vec --------------------------------------------------------------
# Tested in `impute_x_tbl_impl()`
# get_x_tbl_sec_col -------------------------------------------------------
test_that("get_x_tbl_sec_col works", {
expect_equal(get_x_tbl_sec_col(x_dis_x_tbl), "prob")
expect_equal(get_x_tbl_sec_col(x_con_x_tbl), "y")
})
# get_type_from_x_tbl -----------------------------------------------------
test_that("get_type_from_x_tbl works", {
expect_equal(get_type_from_x_tbl(x_dis_x_tbl), "discrete")
expect_equal(get_type_from_x_tbl(x_con_x_tbl), "continuous")
})
# filter_x_tbl ------------------------------------------------------------
test_that("filter_x_tbl works", {
x_tbl_dis <- data.frame(x = 1:5, prob = (1:5) / 15)
expect_equal(
filter_x_tbl(x_tbl_dis, c(-10, 1))[, c("x", "prob")],
x_tbl_dis[1, ]
)
x_tbl_con <- data.frame(x = 1:5, y = (1:5) / 12)
expect_equal(
filter_x_tbl(x_tbl_con, c(2.5, 5))[, c("x", "y")],
x_tbl_con[3:5, ]
)
})
# union_inside_x_tbl ------------------------------------------------------
test_that("union_inside_x_tbl works", {
x_tbl_dis_1 <- data.frame(x = 1:3, prob = c(0, 0.3, 0.7))
x_tbl_dis_2 <- data.frame(x = c(0, 1, 1.5, 3.1), prob = rep(0.5, 4))
expect_equal(
union_inside_x_tbl(x_tbl_dis_1, x_tbl_dis_2),
data.frame(x = c(1, 1.5, 2, 3), prob = c(0, 0.5, 0.3, 0.7))
)
x_tbl_con_1 <- data.frame(x = 1:3, y = c(0, 1, 0))
x_tbl_con_2 <- data.frame(x = c(0, 1, 1.5, 3.1), y = rep(0.5, 4))
expect_equal(
union_inside_x_tbl(x_tbl_con_1, x_tbl_con_2),
data.frame(x = c(1, 1.5, 2, 3), y = c(0, 0.5, 1, 0))
)
})
# reflect_x_tbl -----------------------------------------------------------
test_that("reflect_x_tbl works with 'discrete' type", {
x_tbl_dis <- data.frame(
x = c(1, 2, 4), prob = c(0.1, 0, 0.9), cumprob = c(0.1, 0.1, 1)
)
expect_equal(
reflect_x_tbl(x_tbl_dis, 0),
data.frame(
x = c(-4, -2, -1), prob = c(0.9, 0, 0.1), cumprob = c(0.9, 0.9, 1)
)
)
expect_equal(
reflect_x_tbl(x_tbl_dis, 2),
data.frame(
x = c(0, 2, 3), prob = c(0.9, 0, 0.1), cumprob = c(0.9, 0.9, 1)
)
)
})
test_that("reflect_x_tbl works with 'continuous' type", {
x_tbl_con <- data.frame(
x = c( -2, -1, 0, 0.5, 4),
y = c(0.5, 0, 1, 0, 0),
cumprob = c(0, 0.25, 0.75, 1, 1)
)
expect_equal(
reflect_x_tbl(x_tbl_con, 0),
data.frame(
x = c(-4, -0.5, 0, 1, 2),
y = c( 0, 0, 1, 0, 0.5),
cumprob = c(0, 0, 0.25, 0.75, 1)
)
)
expect_equal(
reflect_x_tbl(x_tbl_con, 10),
data.frame(
x = c(16, 19.5, 20, 21, 22),
y = c( 0, 0, 1, 0, 0.5),
cumprob = c(0, 0, 0.25, 0.75, 1)
)
)
})
# ground_x_tbl ------------------------------------------------------------
test_that("ground_x_tbl works", {
expect_equal(ground_x_tbl(x_dis_x_tbl), x_dis_x_tbl)
x_tbl <- data.frame(
x = c(-1, 0.25, 2), y = c(1/1.25, 0, 1/1.75), cumprob = c(0, 0.5, 1)
)
x <- x_tbl[["x"]]
y <- x_tbl[["y"]]
n <- nrow(x_tbl)
out_left <- ground_x_tbl(x_tbl, "left")
expect_equal(out_left[["x"]], c(x[1]-1e-8, x))
expect_equal(out_left[["y"]], c( 0, y))
out_right <- ground_x_tbl(x_tbl, "right")
expect_equal(out_right[["x"]], c(x, x[n]+1e-8))
expect_equal(out_right[["y"]], c(y, 0))
out_both <- ground_x_tbl(x_tbl, "both")
expect_equal(out_both[["x"]], c(x[1]-1e-8, x, x[n]+1e-8))
expect_equal(out_both[["y"]], c( 0, y, 0))
})
test_that("ground_x_tbl doesn't add new zeros to 'y'", {
x_tbl <- data.frame(x = c(1, 2, 3), y = c(0, 1, 0), cumprob = c(0, 0.5, 1))
expect_equal(ground_x_tbl(x_tbl, "left"), x_tbl)
expect_equal(ground_x_tbl(x_tbl, "right"), x_tbl)
expect_equal(ground_x_tbl(x_tbl, "both"), x_tbl)
})
test_that("ground_x_tbl works without column 'cumprob' present", {
output <- ground_x_tbl(data.frame(x = 0:1, y = c(1, 1)), "both")
expect_named(output, c("x", "y"))
})
# add_x_tbl_knots ---------------------------------------------------------
test_that("add_x_tbl_knots works", {
x_tbl <- data.frame(x = 1:3, y = c(1, 2, 1))
expect_equal(
add_x_tbl_knots(x_tbl, c(1.5, 1, -1, 10), only_inside = TRUE),
data.frame(x = c(1, 1.5, 2, 3), y = c(1, 1.5, 2, 1))
)
expect_equal(
add_x_tbl_knots(x_tbl, c(1.5, 1, -1, 10), only_inside = FALSE),
data.frame(x = c(-1, 1, 1.5, 2, 3, 10), y = c(0, 1, 1.5, 2, 1, 0))
)
# `only_inside` is `TRUE` by default
expect_equal(add_x_tbl_knots(x_tbl, c(-100, 100)), x_tbl)
  # Present knots don't get duplicated
expect_equal(add_x_tbl_knots(x_tbl, x_tbl[["x"]]), x_tbl)
})
# enfun_x_tbl -------------------------------------------------------------
test_that("enfun_x_tbl works", {
out_f <- enfun_x_tbl(data.frame(x = c(1, 2, 5), y = c(0, 10, 2)))
expect_equal(
out_f(c(0, 1, 1.5, 2.75, 5, 1000)), c(0, 0, 5, 8, 2, 0)
)
})
# stack_x_tbl -------------------------------------------------------------
test_that("stack_x_tbl works with 'discrete' type", {
x_tbl_dis_1 <- data.frame(x = 1, prob = 1)
x_tbl_dis_2 <- data.frame(x = 2:4, prob = c(0.2, 0.5, 0.3))
x_tbl_dis_3 <- data.frame(x = c(-1, 1, 4, 5), prob = c(0.1, 0.2, 0.3, 0.4))
expect_equal(
stack_x_tbl(list(x_tbl_dis_1, x_tbl_dis_2, x_tbl_dis_3)),
data.frame(x = c(-1, 1, 2, 3, 4, 5), prob = c(0.1, 1.2, 0.2, 0.5, 0.6, 0.4))
)
expect_equal(stack_x_tbl(list(x_tbl_dis_3)), x_tbl_dis_3)
})
test_that("stack_x_tbl works with 'continuous' type", {
x_tbl_con_1 <- data.frame(x = c(1, 3), y = c(0.5, 0.5))
x_tbl_con_2 <- data.frame(x = c(2, 6), y = c(0.25, 0.25))
x_tbl_con_3 <- data.frame(x = c(7, 8), y = c(1, 1))
expect_equal(
data.frame(
x = c( 1, 2-1e-8, 2, 3, 3+1e-8, 6, 6+1e-8, 7-1e-8, 7, 8),
y = c(0.5, 0.5, 0.75, 0.75, 0.25, 0.25, 0, 0, 1, 1)
),
stack_x_tbl(list(x_tbl_con_1, x_tbl_con_2, x_tbl_con_3))
)
})
test_that("stack_x_tbl handles zero density edges", {
x_tbl_1 <- data.frame(x = 1:3, y = c(0, 1, 0))
x_tbl_2 <- data.frame(x = 2:4, y = c(0, 1, 0))
expect_equal(
stack_x_tbl(list(x_tbl_1, x_tbl_2)), data.frame(x = 1:4, y = c(0, 1, 1, 0))
)
})
# stack_x_tbl_dis ---------------------------------------------------------
# Tested in `stack_x_tbl()`
# stack_x_tbl_con ---------------------------------------------------------
# Tested in `stack_x_tbl()`
# remove_extra_edges ------------------------------------------------------
# Tested in `stack_x_tbl()`
# is_x_extra --------------------------------------------------------------
# Tested in `stack_x_tbl()`
|
library(ggplot2)
library(tidyverse)
NEI <- readRDS("./data/summarySCC_PM25.rds")%>% janitor::clean_names()
SCC <- readRDS("./data/Source_Classification_Code.rds") %>% janitor::clean_names()
vehicle_scc = SCC$scc[grep("Vehicle",SCC$scc_level_two)]
vehicle_emi_b = NEI %>% filter(scc %in% vehicle_scc) %>%
filter(fips=="24510") %>% mutate( fips = as.factor(fips)) %>%
mutate(city = rep("Baltimore City"))
vehicle_emi_l = NEI %>% filter(scc %in% vehicle_scc) %>%
filter(fips=="06037") %>% mutate( fips = as.factor(fips)) %>%
mutate(city = rep("Los Angeles County"))
vehicle_emi_bl = rbind(vehicle_emi_b,vehicle_emi_l)%>%
mutate(city = as.factor(city))
vehicle_emi_year = aggregate(vehicle_emi_bl$emissions, list(vehicle_emi_bl$year,vehicle_emi_bl$city), sum)%>%
rename(years = Group.1, city = Group.2, emissions = x) %>%
mutate(years = as.factor(years))
png("plot6.png", width=480, height=480)
ggplot(data = vehicle_emi_year,aes(years)) +
geom_bar(aes(weight = emissions), fill = "chartreuse4",width = 0.75) +
facet_grid(.~city) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
dev.off()
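# Equivalent aggregation sketch using dplyr (attached above via tidyverse;
# dplyr >= 1.0 assumed for the .groups argument), shown only as an
# alternative to the aggregate()/rename() chain above.
vehicle_emi_year2 <- vehicle_emi_bl %>%
  group_by(years = as.factor(year), city) %>%
  summarise(emissions = sum(emissions), .groups = "drop")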
|
/plot6.R
|
no_license
|
YinengChen/Exploratory_DA_Course_Project_2
|
R
| false | false | 1,253 |
r
|
library(ggplot2)
library(tidyverse)
NEI <- readRDS("./data/summarySCC_PM25.rds")%>% janitor::clean_names()
SCC <- readRDS("./data/Source_Classification_Code.rds") %>% janitor::clean_names()
vehicle_scc = SCC$scc[grep("Vehicle",SCC$scc_level_two)]
vehicle_emi_b = NEI %>% filter(scc %in% vehicle_scc) %>%
filter(fips=="24510") %>% mutate( fips = as.factor(fips)) %>%
mutate(city = rep("Baltimore City"))
vehicle_emi_l = NEI %>% filter(scc %in% vehicle_scc) %>%
filter(fips=="06037") %>% mutate( fips = as.factor(fips)) %>%
mutate(city = rep("Los Angeles County"))
vehicle_emi_bl = rbind(vehicle_emi_b,vehicle_emi_l)%>%
mutate(city = as.factor(city))
vehicle_emi_year = aggregate(vehicle_emi_bl$emissions, list(vehicle_emi_bl$year,vehicle_emi_bl$city), sum)%>%
rename(years = Group.1, city = Group.2, emissions = x) %>%
mutate(years = as.factor(years))
png("plot6.png", width=480, height=480)
ggplot(data = vehicle_emi_year,aes(years)) +
geom_bar(aes(weight = emissions), fill = "chartreuse4",width = 0.75) +
facet_grid(.~city) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Kilo-Tons)")) +
labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore & LA, 1999-2008"))
dev.off()
|
rm(list = ls())
library(data.table)
library(caret)
library(reshape2)
d1 <- fread("project/volume/data/raw/NCAATourneyDetailedResults.csv")
d2 <- fread("project/volume/data/raw/RegularSeasonDetailedResults.csv")
All_Games_Table <- rbind(d1,d2)
W_stats <- All_Games_Table[,.(Season, DayNum, WTeamID, WScore, WLoc, NumOT, WFGM, WFGA, WFGM3, WFGA3, WFTM, WFTA ,WOR ,WDR , WAst, WTO, WStl, WBlk ,WPF)]
L_stats <- All_Games_Table[,.(Season, DayNum, LTeamID, LScore, WLoc, NumOT, LFGM, LFGA, LFGM3, LFGA3, LFTM, LFTA ,LOR ,LDR , LAst, LTO, LStl, LBlk ,LPF)]
colnames(W_stats) <- c("Season", "DayNum", "TeamID", "Score", "Loc", "NumOT", "FGM", "FGA", "FGM3", "FGA3", "FTM", "FTA" ,"OR" ,"DR" , "Ast", "TO", "Stl", "Blk" ,"PF")
colnames(L_stats) <- c("Season", "DayNum", "TeamID", "Score", "Loc", "NumOT", "FGM", "FGA", "FGM3", "FGA3", "FTM", "FTA" ,"OR" ,"DR" , "Ast", "TO", "Stl", "Blk" ,"PF")
master_stats <- rbind(W_stats,L_stats)
stats_by_day <- NULL
for (i in 1:max(master_stats$DayNum)){
sub_master_stats <- master_stats[DayNum < i]
team_stats_by_day <- dcast(sub_master_stats, TeamID+Season~., mean , value.var = c("FGM"))
  team_stats_by_day$DayNum <- i  # tag the running averages with the day they are "as of"
stats_by_day <- rbind(stats_by_day, team_stats_by_day)
}
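# Sketch of the same computation with data.table, avoiding the growing rbind
# inside the for loop (uses only objects defined above): for each day i, the
# mean FGM over all games a team played before that day.
stats_by_day_dt <- rbindlist(lapply(seq_len(max(master_stats$DayNum)), function(i) {
  agg <- master_stats[DayNum < i, .(FGM = mean(FGM)), by = .(TeamID, Season)]
  agg[, DayNum := i]
  agg
}))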
|
/NCAA/project/volume/data/raw/Initial_tables.R
|
permissive
|
paridhi1603/NCAA-Prediction
|
R
| false | false | 1,237 |
r
|
rm(list = ls())
library(data.table)
library(caret)
library(reshape2)
d1 <- fread("project/volume/data/raw/NCAATourneyDetailedResults.csv")
d2 <- fread("project/volume/data/raw/RegularSeasonDetailedResults.csv")
All_Games_Table <- rbind(d1,d2)
W_stats <- All_Games_Table[,.(Season, DayNum, WTeamID, WScore, WLoc, NumOT, WFGM, WFGA, WFGM3, WFGA3, WFTM, WFTA ,WOR ,WDR , WAst, WTO, WStl, WBlk ,WPF)]
L_stats <- All_Games_Table[,.(Season, DayNum, LTeamID, LScore, WLoc, NumOT, LFGM, LFGA, LFGM3, LFGA3, LFTM, LFTA ,LOR ,LDR , LAst, LTO, LStl, LBlk ,LPF)]
colnames(W_stats) <- c("Season", "DayNum", "TeamID", "Score", "Loc", "NumOT", "FGM", "FGA", "FGM3", "FGA3", "FTM", "FTA" ,"OR" ,"DR" , "Ast", "TO", "Stl", "Blk" ,"PF")
colnames(L_stats) <- c("Season", "DayNum", "TeamID", "Score", "Loc", "NumOT", "FGM", "FGA", "FGM3", "FGA3", "FTM", "FTA" ,"OR" ,"DR" , "Ast", "TO", "Stl", "Blk" ,"PF")
master_stats <- rbind(W_stats,L_stats)
stats_by_day <- NULL
for (i in 1:max(master_stats$DayNum)){
sub_master_stats <- master_stats[DayNum < i]
team_stats_by_day <- dcast(sub_master_stats, TeamID+Season~., mean , value.var = c("FGM"))
  team_stats_by_day$DayNum <- i  # tag the running averages with the day they are "as of"
stats_by_day <- rbind(stats_by_day, team_stats_by_day)
}
|
#' One-sample Test for Covariance Matrix by Wu and Li (2015)
#'
#' Given a multivariate sample \eqn{X} and hypothesized covariance matrix \eqn{\Sigma_0}, it tests
#' \deqn{H_0 : \Sigma_x = \Sigma_0\quad vs\quad H_1 : \Sigma_x \neq \Sigma_0}
#' using the procedure by Wu and Li (2015). They proposed to use \eqn{m} number of multiple random projections
#' since only a single operation might attenuate the efficacy of the test.
#'
#' @param X an \eqn{(n\times p)} data matrix where each row is an observation.
#' @param Sigma0 a \eqn{(p\times p)} given covariance matrix.
#' @param m the number of random projections to be applied.
#'
#' @return a (list) object of \code{S3} class \code{htest} containing: \describe{
#' \item{statistic}{a test statistic.}
#' \item{p.value}{\eqn{p}-value under \eqn{H_0}.}
#' \item{alternative}{alternative hypothesis.}
#' \item{method}{name of the test.}
#' \item{data.name}{name(s) of provided sample data.}
#' }
#'
#' @examples
#' ## CRAN-purpose small example
#' smallX = matrix(rnorm(10*3),ncol=3)
#' cov1.2015WL(smallX) # run the test
#'
#' \donttest{
#' ## empirical Type 1 error
#' ## compare effects of m=5, 10, 50
#' niter = 1000
#' rec1 = rep(0,niter) # for m=5
#' rec2 = rep(0,niter) # m=10
#' rec3 = rep(0,niter) # m=50
#' for (i in 1:niter){
#' X = matrix(rnorm(50*10), ncol=50) # (n,p) = (10,50)
#' rec1[i] = ifelse(cov1.2015WL(X, m=5)$p.value < 0.05, 1, 0)
#' rec2[i] = ifelse(cov1.2015WL(X, m=10)$p.value < 0.05, 1, 0)
#' rec3[i] = ifelse(cov1.2015WL(X, m=50)$p.value < 0.05, 1, 0)
#' }
#'
#' ## print the result
#' cat(paste("\n* Example for 'cov1.2015WL'\n","*\n",
#' "* Type 1 error with m=5 : ",round(sum(rec1/niter),5),"\n",
#' "* Type 1 error with m=10 : ",round(sum(rec2/niter),5),"\n",
#' "* Type 1 error with m=50 : ",round(sum(rec3/niter),5),"\n",sep=""))
#' }
#'
#' @references
#' \insertRef{wu_tests_2015}{SHT}
#'
#' @export
cov1.2015WL <- function(X, Sigma0=diag(ncol(X)), m=25){
##############################################################
# PREPROCESSING
check_nd(X)
n = nrow(X)
p = ncol(X)
m = as.integer(m)
##############################################################
# CENTER AND SCALE PROPERLY
X.centered = as.matrix(scale(X, center=TRUE, scale=FALSE))
scaler = aux_getinvroot(Sigma0)
X.processed = X.centered%*%scaler
##############################################################
# LET'S RUN MULTIPLE ITERATIONS
rec.stat = rep(0,m)
for (i in 1:m){
projvec = rnorm(p)
projvec = projvec/sqrt(sum(projvec*projvec))
Y = as.vector(X.processed%*%projvec)
rec.stat[i] = sqrt(2*sum(Y^2)) - sqrt((2*n)-1)
}
thestat = max(rec.stat)
pvalue = 1-(pnorm(thestat, lower.tail=TRUE)^m)
##############################################################
# COMPUTATION : DETERMINATION
hname = "One-sample Test for Covariance Matrix by Wu and Li (2015)."
Ha = "true covariance is different from Sigma0."
DNAME = deparse(substitute(X)) # borrowed from HDtest
names(thestat) = "T1m"
res = list(statistic=thestat, p.value=pvalue, alternative = Ha, method=hname, data.name = DNAME)
class(res) = "htest"
return(res)
}
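# Sketch (an assumption, not the package's internal code) of what an inverse
# matrix square-root helper such as aux_getinvroot() typically computes via
# eigendecomposition: Sigma0^(-1/2), so that the transformed data have
# identity covariance under H0.
invroot_sketch <- function(S) {
  eig <- eigen(S, symmetric = TRUE)
  eig$vectors %*% diag(1 / sqrt(eig$values), nrow(S)) %*% t(eig$vectors)
}
round(invroot_sketch(diag(3)), 10)  # the identity matrix maps to itself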
|
/SHT/R/cov1.2015WL.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false | false | 3,198 |
r
|
#' One-sample Test for Covariance Matrix by Wu and Li (2015)
#'
#' Given a multivariate sample \eqn{X} and hypothesized covariance matrix \eqn{\Sigma_0}, it tests
#' \deqn{H_0 : \Sigma_x = \Sigma_0\quad vs\quad H_1 : \Sigma_x \neq \Sigma_0}
#' using the procedure by Wu and Li (2015). They proposed to use \eqn{m} number of multiple random projections
#' since only a single operation might attenuate the efficacy of the test.
#'
#' @param X an \eqn{(n\times p)} data matrix where each row is an observation.
#' @param Sigma0 a \eqn{(p\times p)} given covariance matrix.
#' @param m the number of random projections to be applied.
#'
#' @return a (list) object of \code{S3} class \code{htest} containing: \describe{
#' \item{statistic}{a test statistic.}
#' \item{p.value}{\eqn{p}-value under \eqn{H_0}.}
#' \item{alternative}{alternative hypothesis.}
#' \item{method}{name of the test.}
#' \item{data.name}{name(s) of provided sample data.}
#' }
#'
#' @examples
#' ## CRAN-purpose small example
#' smallX = matrix(rnorm(10*3),ncol=3)
#' cov1.2015WL(smallX) # run the test
#'
#' \donttest{
#' ## empirical Type 1 error
#' ## compare effects of m=5, 10, 50
#' niter = 1000
#' rec1 = rep(0,niter) # for m=5
#' rec2 = rep(0,niter) # m=10
#' rec3 = rep(0,niter) # m=50
#' for (i in 1:niter){
#' X = matrix(rnorm(50*10), ncol=50) # (n,p) = (10,50)
#' rec1[i] = ifelse(cov1.2015WL(X, m=5)$p.value < 0.05, 1, 0)
#' rec2[i] = ifelse(cov1.2015WL(X, m=10)$p.value < 0.05, 1, 0)
#' rec3[i] = ifelse(cov1.2015WL(X, m=50)$p.value < 0.05, 1, 0)
#' }
#'
#' ## print the result
#' cat(paste("\n* Example for 'cov1.2015WL'\n","*\n",
#' "* Type 1 error with m=5 : ",round(sum(rec1/niter),5),"\n",
#' "* Type 1 error with m=10 : ",round(sum(rec2/niter),5),"\n",
#' "* Type 1 error with m=50 : ",round(sum(rec3/niter),5),"\n",sep=""))
#' }
#'
#' @references
#' \insertRef{wu_tests_2015}{SHT}
#'
#' @export
cov1.2015WL <- function(X, Sigma0=diag(ncol(X)), m=25){
##############################################################
# PREPROCESSING
check_nd(X)
n = nrow(X)
p = ncol(X)
m = as.integer(m)
##############################################################
# CENTER AND SCALE PROPERLY
X.centered = as.matrix(scale(X, center=TRUE, scale=FALSE))
scaler = aux_getinvroot(Sigma0)
X.processed = X.centered%*%scaler
##############################################################
# LET'S RUN MULTIPLE ITERATIONS
rec.stat = rep(0,m)
for (i in 1:m){
projvec = rnorm(p)
projvec = projvec/sqrt(sum(projvec*projvec))
Y = as.vector(X.processed%*%projvec)
rec.stat[i] = sqrt(2*sum(Y^2)) - sqrt((2*n)-1)
}
thestat = max(rec.stat)
pvalue = 1-(pnorm(thestat, lower.tail=TRUE)^m)
##############################################################
# COMPUTATION : DETERMINATION
hname = "One-sample Test for Covariance Matrix by Wu and Li (2015)."
Ha = "true covariance is different from Sigma0."
DNAME = deparse(substitute(X)) # borrowed from HDtest
names(thestat) = "T1m"
res = list(statistic=thestat, p.value=pvalue, alternative = Ha, method=hname, data.name = DNAME)
class(res) = "htest"
return(res)
}
|
# fail-data-frame.R --- Fail if argument is not a data frame
library(usl)
data(AirPassengers)
try(usl(Jan ~ Feb + Mar, data=AirPassengers))
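# Companion sketch (not part of the test): a call that should succeed, using
# the specsdm91 data frame shipped with the usl package; the column names are
# assumed from the package's examples.
data(specsdm91)
usl(throughput ~ load, data = specsdm91)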
|
/tests/fail-data-frame.R
|
no_license
|
smoeding/usl
|
R
| false | false | 143 |
r
|
# fail-data-frame.R --- Fail if argument is not a data frame
library(usl)
data(AirPassengers)
try(usl(Jan ~ Feb + Mar, data=AirPassengers))
|
#' Unicode groups
#'
#' Match ranges of unicode characters.
#' @param lo A non-negative integer. Minimum number of repeats, when grouped.
#' @param hi positive integer. Maximum number of repeats, when grouped.
#' @param char_class \code{TRUE} or \code{FALSE}. Should the values be wrapped
#' into a character class?
#' @return A character vector representing part or all of a regular expression.
#' @note Windows currently doesn't handle Unicode points with more than four
#' digits correctly. See
#' \url{https://bugs.r-project.org/bugzilla3/show_bug.cgi?id=16098}
#' @references \url{http://www.unicode.org/charts}
#' @seealso \code{\link{ClassGroups}}
#' @name Unicode
NULL
#' @rdname Unicode
#' @export
armenian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARMENIAN, lo, hi, char_class)
}
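# Rough sketch (an assumption, not the package's internals) of the pattern
# every wrapper in this file follows: each block constant is a Unicode
# code-point range, and repeat_in_class() turns it into a character class
# with an optional {lo,hi} quantifier. The *_sketch names are hypothetical.
armenian_range_sketch <- "\\x{0530}-\\x{058F}"  # Armenian block U+0530..U+058F
repeat_in_class_sketch <- function(x, lo, hi, char_class = TRUE) {
  rx <- if (char_class) paste0("[", x, "]") else x
  if (!missing(lo) && !missing(hi)) paste0(rx, "{", lo, ",", hi, "}") else rx
}
repeat_in_class_sketch(armenian_range_sketch, 1, 3)  # -> "[\x{0530}-\x{058F}]{1,3}"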
#' @rdname Unicode
#' @export
armenian_ligatures <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARMENIAN_LIGATURES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
caucasian_albanian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CAUCASIAN_ALBANIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cypriot_syllabary <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CYPRIOT_SYLLABARY, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cyrillic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CYRILLIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cyrillic_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CYRILLIC_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cyrillic_extended_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CYRILLIC_EXTENDED_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cyrillic_extended_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CYRILLIC_EXTENDED_B, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
elbasan <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ELBASAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
georgian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GEORGIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
georgian_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GEORGIAN_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
glagolitic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GLAGOLITIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
gothic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GOTHIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
greek_and_coptic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GREEK_AND_COPTIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
greek_extended <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GREEK_EXTENDED, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_1_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_1_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_extended_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_EXTENDED_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_extended_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_EXTENDED_B, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_extended_c <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_EXTENDED_C, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_extended_d <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_EXTENDED_D, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_extended_e <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_EXTENDED_E, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_extended_additional <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_EXTENDED_ADDITIONAL, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_ligatures <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_LIGATURES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
linear_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LINEAR_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
linear_b_syllabary <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LINEAR_B_SYLLABARY, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
linear_b_ideograms <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LINEAR_B_IDEOGRAMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ogham <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OGHAM, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
old_italic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OLD_ITALIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
old_permic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OLD_PERMIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
phaistos_disc <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PHAISTOS_DISC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
runic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(RUNIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
shavian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SHAVIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
duployan <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(DUPLOYAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
shorthand_format_controls <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SHORTHAND_FORMAT_CONTROLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ipa_extensions <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(IPA_EXTENSIONS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
phonetic_extensions <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PHONETIC_EXTENSIONS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
phonetic_extensions_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PHONETIC_EXTENSIONS_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
modifier_tone_letters <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MODIFIER_TONE_LETTERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
spacing_modifier_letters <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SPACING_MODIFIER_LETTERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
superscripts_and_subscripts <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUPERSCRIPTS_AND_SUBSCRIPTS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
combining_diacritic_marks <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COMBINING_DIACRITIC_MARKS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
combining_diacritic_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COMBINING_DIACRITIC_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
combining_diacritic_extended <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COMBINING_DIACRITIC_EXTENDED, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
combining_half_marks <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COMBINING_HALF_MARKS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
bamun <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BAMUN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
bamun_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BAMUN_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
bassa_vah <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BASSA_VAH, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
coptic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COPTIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
coptic_epact_numbers <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COPTIC_EPACT_NUMBERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
egyptian_hieroglyphs <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(EGYPTIAN_HIEROGLYPHS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ethiopic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ETHIOPIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ethiopic_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ETHIOPIC_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ethiopic_extended <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ETHIOPIC_EXTENDED, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ethiopic_extended_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ETHIOPIC_EXTENDED_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
mende_kikakui <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MENDE_KIKAKUI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
meroitic_cursive <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MEROITIC_CURSIVE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
meroitic_hieroglyphs <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MEROITIC_HIEROGLYPHS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
nko <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(NKO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
osmanya <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OSMANYA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tifinagh <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TIFINAGH, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
vai <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(VAI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
arabic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARABIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
arabic_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARABIC_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
arabic_extended_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARABIC_EXTENDED_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
arabic_presentation_forms_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARABIC_PRESENTATION_FORMS_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
arabic_presentation_forms_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARABIC_PRESENTATION_FORMS_B, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
imperial_aramaic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(IMPERIAL_ARAMAIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
avestan <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(AVESTAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
carian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CARIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cuneiform <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CUNEIFORM, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cuneiform_numbers_and_punctuation <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CUNEIFORM_NUMBERS_AND_PUNCTUATION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
old_persian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OLD_PERSIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ugaritic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(UGARITIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hebrew <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HEBREW, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
lycian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LYCIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
lydian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LYDIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
mandaic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MANDAIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
nabataean <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(NABATAEAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
old_north_arabian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OLD_NORTH_ARABIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
old_south_arabian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OLD_SOUTH_ARABIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
pahlavi_inscriptional <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PAHLAVI_INSCRIPTIONAL, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
pahlavi_psalter <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PAHLAVI_PSALTER, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
palmyrene <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PALMYRENE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
phoenician <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PHOENICIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
samaritan <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SAMARITAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
syriac <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SYRIAC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
manichaean <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MANICHAEAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
mongolian <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MONGOLIAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
old_turkic <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OLD_TURKIC, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
phags_pa <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PHAGS_PA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tibetan <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TIBETAN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
bengali_and_assamese <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BENGALI_AND_ASSAMESE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
brahmi <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BRAHMI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
chakma <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CHAKMA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
devanagari <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(DEVANAGARI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
devanagari_extended <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(DEVANAGARI_EXTENDED, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
grantha <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GRANTHA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
gujarati <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GUJARATI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
gurmukhi <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GURMUKHI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kaithi <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KAITHI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kannada <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KANNADA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kharoshthi <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KHAROSHTHI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
khojki <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KHOJKI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
khudawadi <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KHUDAWADI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
lepcha <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LEPCHA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
limbu <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LIMBU, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
mahajani <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MAHAJANI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
malayalam <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MALAYALAM, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
meetei_mayek <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MEETEI_MAYEK, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
meetei_mayek_extensions <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MEETEI_MAYEK_EXTENSIONS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
modi <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MODI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
mro <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MRO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ol_chiki <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OL_CHIKI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
oriya <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ORIYA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
saurashtra <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SAURASHTRA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
sharada <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SHARADA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
siddham <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SIDDHAM, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
sinhala <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SINHALA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
sinhala_archaic_numbers <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SINHALA_ARCHAIC_NUMBERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
sora_sompeng <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SORA_SOMPENG, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
syloti_nagri <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SYLOTI_NAGRI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
takri <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAKRI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tamil <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAMIL, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
telugu <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TELUGU, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
thaana <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(THAANA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tirhuta <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TIRHUTA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
vedic_extensions <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(VEDIC_EXTENSIONS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
warang_citi <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(WARANG_CITI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cham <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CHAM, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kayah_li <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KAYAH_LI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
khmer <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KHMER, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
khmer_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KHMER_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
lao <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LAO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
myanmar <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MYANMAR, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
myanmar_extended_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MYANMAR_EXTENDED_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
myanmar_extended_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MYANMAR_EXTENDED_B, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
new_tai_lue <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(NEW_TAI_LUE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
pahawh_hmong <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PAHAWH_HMONG, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
pau_cin_hau <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PAU_CIN_HAU, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tai_le <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAI_LE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tai_tham <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAI_THAM, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tai_viet <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAI_VIET, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
thai <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(THAI, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
balinese <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BALINESE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
batak <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BATAK, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
buginese <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BUGINESE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
buhid <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BUHID, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hanunoo <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HANUNOO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
javanese <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(JAVANESE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
rejang <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(REJANG, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
sundanese <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUNDANESE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
sundanese_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUNDANESE_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tagalog <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAGALOG, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tagbanwa <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAGBANWA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
bopomofo <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BOPOMOFO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
bopomofo_extended <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BOPOMOFO_EXTENDED, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_unified_ideographs <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_UNIFIED_IDEOGRAPHS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_unified_ideographs_extension_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_unified_ideographs_extension_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_unified_ideographs_extension_c <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_UNIFIED_IDEOGRAPHS_EXTENSION_C, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_unified_ideographs_extension_d <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_UNIFIED_IDEOGRAPHS_EXTENSION_D, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_compatibility_ideographs <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_COMPATIBILITY_IDEOGRAPHS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_compatibility_ideographs_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kangxi_radicals <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KANGXI_RADICALS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kangxi_radicals_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KANGXI_RADICALS_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_strokes <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_STROKES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_ideographic_description_characters <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_IDEOGRAPHIC_DESCRIPTION_CHARACTERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hangul_jamo <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HANGUL_JAMO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hangul_jamo_extended_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HANGUL_JAMO_EXTENDED_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hangul_jamo_extended_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HANGUL_JAMO_EXTENDED_B, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hangul_compatibility_jamo <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HANGUL_COMPATIBILITY_JAMO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hangul_syllables <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HANGUL_SYLLABLES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
hiragana <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HIRAGANA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
katakana <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KATAKANA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
katakana_phonetic_extensions <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KATAKANA_PHONETIC_EXTENSIONS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kana_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KANA_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
kanbun <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(KANBUN, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
lisu <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LISU, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
miao <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MIAO, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
yi_syllables <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(YI_SYLLABLES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
yi_radicals <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(YI_RADICALS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cherokee <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CHEROKEE, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
deseret <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(DESERET, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
unified_canadian_aboriginal_syllabics <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
unified_canadian_aboriginal_syllabics_extended <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
alphabetic_presentation_forms <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ALPHABETIC_PRESENTATION_FORMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
halfwidth_and_fullwidth_forms <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(HALFWIDTH_AND_FULLWIDTH_FORMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
general_punctuation <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GENERAL_PUNCTUATION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
latin_1_punctuation <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LATIN_1_PUNCTUATION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
small_form_variants <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SMALL_FORM_VARIANTS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
supplemental_punctuation <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUPPLEMENTAL_PUNCTUATION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_symbols_and_punctuation <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_SYMBOLS_AND_PUNCTUATION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_compatibility_forms <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_COMPATIBILITY_FORMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
fullwidth_ascii_punctuation <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(FULLWIDTH_ASCII_PUNCTUATION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
vertical_forms <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(VERTICAL_FORMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
letterlike_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(LETTERLIKE_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ancient_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ANCIENT_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
mathematical_alphanumeric_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MATHEMATICAL_ALPHANUMERIC_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
arabic_mathematical_alphanumeric_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ARABIC_MATHEMATICAL_ALPHANUMERIC_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
enclosed_alphanumerics <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ENCLOSED_ALPHANUMERICS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
enclosed_alphanumeric_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ENCLOSED_ALPHANUMERIC_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
enclosed_cjk_letters_and_months <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ENCLOSED_CJK_LETTERS_AND_MONTHS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
enclosed_ideographic_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ENCLOSED_IDEOGRAPHIC_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
cjk_compatibility <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CJK_COMPATIBILITY, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
miscellaneous_technical <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MISCELLANEOUS_TECHNICAL, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
control_pictures <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CONTROL_PICTURES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
optical_character_recognition <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(OPTICAL_CHARACTER_RECOGNITION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
combining_diacritic_marks_for_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COMBINING_DIACRITIC_MARKS_FOR_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
aegean_numbers <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(AEGEAN_NUMBERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ancient_greek_numbers <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ANCIENT_GREEK_NUMBERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
fullwidth_ascii_digits <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(FULLWIDTH_ASCII_DIGITS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
common_indic_number_forms <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COMMON_INDIC_NUMBER_FORMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
coptic_epact_numbers <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COPTIC_EPACT_NUMBERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
counting_rod_numerals <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(COUNTING_ROD_NUMERALS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
number_forms <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(NUMBER_FORMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
rumi_numeral_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(RUMI_NUMERAL_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
sinhala_archaic_numbers <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SINHALA_ARCHAIC_NUMBERS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
math_arrows <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MATH_ARROWS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
supplemental_arrows_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUPPLEMENTAL_ARROWS_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
additional_arrows <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ADDITIONAL_ARROWS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
supplemental_mathematical_operators <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUPPLEMENTAL_MATHEMATICAL_OPERATORS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
miscellaneous_mathematical_symbols_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MISCELLANEOUS_MATHEMATICAL_SYMBOLS_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
miscellaneous_mathematical_symbols_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MISCELLANEOUS_MATHEMATICAL_SYMBOLS_B, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
floors_and_ceilings <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(FLOORS_AND_CEILINGS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
invisible_operators <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(INVISIBLE_OPERATORS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
geometric_shapes <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GEOMETRIC_SHAPES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
box_drawing <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BOX_DRAWING, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
block_elements <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BLOCK_ELEMENTS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
geometric_shapes_extended <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(GEOMETRIC_SHAPES_EXTENDED, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
alchemical_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ALCHEMICAL_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
braille_patterns <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BRAILLE_PATTERNS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
currency_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CURRENCY_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
dingbats <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(DINGBATS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ornamental_dingbats <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ORNAMENTAL_DINGBATS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
emoticons <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(EMOTICONS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
chess_checkers_draughts <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CHESS_CHECKERS_DRAUGHTS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
domino_tiles <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(DOMINO_TILES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
japanese_chess <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(JAPANESE_CHESS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
mahjong_tiles <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MAHJONG_TILES, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
playing_cards <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PLAYING_CARDS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
card_suits <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(CARD_SUITS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
miscellaneous_symbols_and_pictographs <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MISCELLANEOUS_SYMBOLS_AND_PICTOGRAPHS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
musical_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(MUSICAL_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
ancient_greek_musical_notation <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(ANCIENT_GREEK_MUSICAL_NOTATION, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
byzantine_musical_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(BYZANTINE_MUSICAL_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
transport_and_map_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TRANSPORT_AND_MAP_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
yijing_mono_di_and_trigrams <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(YIJING_MONO_DI_AND_TRIGRAMS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
yijing_hexagram_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(YIJING_HEXAGRAM_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tai_xuan_jing_symbols <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAI_XUAN_JING_SYMBOLS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
specials <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SPECIALS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
tags <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(TAGS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
variation_selectors <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(VARIATION_SELECTORS, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
variation_selectors_supplement <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(VARIATION_SELECTORS_SUPPLEMENT, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
private_use_area <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(PRIVATE_USE_AREA, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
supplementary_private_use_area_a <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUPPLEMENTARY_PRIVATE_USE_AREA_A, lo, hi, char_class)
}
#' @rdname Unicode
#' @export
supplementary_private_use_area_b <- function(lo, hi, char_class = TRUE)
{
repeat_in_class(SUPPLEMENTARY_PRIVATE_USE_AREA_B, lo, hi, char_class)
}
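# ---------------------------------------------------------------------------
# Illustrative usage sketch (added note, not part of the generated wrappers
# above). It assumes repeat_in_class(BLOCK, lo, hi, char_class) builds a regex
# fragment covering that Unicode block's code-point range, repeated between
# `lo` and `hi` times and optionally wrapped in a character class; the escape
# sequences shown in the comments are illustrative, not guaranteed output.
#
#   hiragana(1, 3)                          # a fragment like "[\u3041-\u309f]{1,3}"
#   rx <- paste0("^", katakana(2, 5), "$")  # embed the fragment in a larger pattern
#   grepl(hiragana(1, 3), c("\u3072\u3089\u304c\u306a", "abc"))  # TRUE FALSE, if the block range is as assumed
# ---------------------------------------------------------------------------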
|
/R/unicode-groups.R
|
no_license
|
trinker/regex
|
R
| false | false | 38,574 |
r
|
|
#!/usr/bin/env Rscript
# The goal of this exercise is to estimate a Gaussian distribution from data
## Preparation
rm(list=ls()) # clear out the memory from previous data
setwd("~/bayesian_data_analysis/C9/E9p2") # set current path
library(R2OpenBUGS)
# Specify the model in BUGS language, but save it as a string in R:
modelString = "
model{
# Likelihood
for (i in 1:N) {
obs[i] ~ dnorm(mu, precision) # In BUGS, dnorm uses precision
}
# Prior
precision <- 1.0/(std*std)
mu ~ dnorm(0,1)
std ~ dnorm(1,1)
}
"
writeLines(modelString, con="model.txt")
## Data (Ground truth)
N <- 1000 # number of observations
obs = rnorm(N, 2, 5) # Ground-truth observations drawn from Normal(mean = 2, sd = 5)
model.data <- list("N","obs")
## SPECIFY WHICH PARAMETERS TO TRACE:
parameters <- c("mu","std")
#### LOAD INITIAL VALUES
inits <- function()
{
#list(delta=0, taudelta=1)
list(mu=0, std=1)
}
#### =================================================
mcmc.simulation =
bugs(model.data,
inits,
model.file="model.txt",
parameters=parameters,
n.chains=1,
n.iter=20000,
n.burnin=500,
n.thin=1,
codaPkg=FALSE)
#### =================================================
print(mcmc.simulation)
png('mcmc_simulation.png')
plot(mcmc.simulation)
dev.off()
#### MCMC CHAIN IN ONE DATA FRAME
chain = mcmc.simulation$sims.list
#### PLOT THE MCMC CHAINS:
# Open the device first, then set the layout on it, so all traced parameters
# end up as panels in the same PNG (a fixed-name png() keeps only its last page).
png('mcmc_simulation3.png')
par(mfrow = c(2, 2))      # up to 4 plots per page
par(mar = c(3, 3, 4, 1))  # set the margins of the plots
for(p_ in parameters)
{
  plot(chain[[p_]][1:300], main = p_, type = "l",
       ylab = NA, xlab = NA, col = "red")
}
dev.off()
#### PLOT AUTOCORRELATIONS:
png('mcmc_simulation4.png')
par(mfrow = c(1, length(parameters)))  # one autocorrelation panel per parameter
for(p_ in parameters)
{
  acf(chain[[p_]], main = p_, lwd = 4, col = "red")
}
dev.off()
#### PLOT THE HISTOGRAMS OF THE SAMPLED VALUES
png('mcmc_simulation5.png')
for(p_ in parameters[1])
{
  hist(chain[[p_]], main = p_,
       ylab = NA, xlab = NA,
       nclass = 50, col = "red")
}
dev.off()
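#### OPTIONAL POSTERIOR SUMMARY
## A minimal sketch (not required by the exercise): posterior means and
## equal-tailed 95% intervals computed from the traced chains created above.
if (interactive())
{
  for(p_ in parameters)
  {
    cat(p_, ": posterior mean =", mean(chain[[p_]]),
        ", 95% interval =", quantile(chain[[p_]], c(0.025, 0.975)), "\n")
  }
}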
|
/C9/Gaussian/Gaussian_estimate.R
|
no_license
|
tianxiang84/bayesian_data_analysis
|
R
| false | false | 1,956 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/WUAction.R
\name{r2hpcc.ProtectWsWorkunits}
\alias{r2hpcc.ProtectWsWorkunits}
\title{Title}
\usage{
r2hpcc.ProtectWsWorkunits(conn, workunits)
}
\arguments{
\item{conn}{- HPCC connection information}
\item{workunits}{- list of workunits to set to protected}
}
\value{
- status of processed operation
}
\description{
Title
}
|
/man/r2hpcc.ProtectWsWorkunits.Rd
|
no_license
|
chuajoey/r2hpcc
|
R
| false | true | 422 |
rd
|
|
/plot4.R
|
no_license
|
guymartial/ExploratoryDataAnalysisWeek1
|
R
| false | false | 3,283 |
r
| ||
library(testthat)
library(canrecall)
test_check("canrecall")
|
/tests/testthat.R
|
permissive
|
derekreay/canrecall
|
R
| false | false | 62 |
r
|
|
###############################################
# Functions for the West Nile Virus challenge #
###############################################
# Generate predictions and submission file
gensubmission <- function(whichmodel, newdata, type = "response"){
# which model is the model to use for the prediction
# newdata is the test data (variable names have to coincide with train names)
# type is the prediction type (response, terms)
virus <- predict(whichmodel, newdata, type)
sub.dt <- cbind.data.frame(Id = newdata$Id, WnvPresent = virus)
if (identifyNA(sub.dt) == 0) {
write.csv(sub.dt, file = "submission.csv", row.names = FALSE, quote = FALSE)
message("Submission file created. Expected RMSE is:")
#testRMSLE(sub.dt, count, whichmodel)
} else {
message("There are NAs and/or negative values in the predicted set!")
    print(sub.dt[!complete.cases(sub.dt), ])   # show rows with missing values
    print(sub.dt[sub.dt$WnvPresent < 0, ])     # show rows with negative predictions
}
}
# Test RMSLE
testRMSLE <- function(dt, y, whichmodel){
# dt is the data table
# y is the variable to test against the predicted one
# which model is the model to use for the prediction
require(Metrics)
# ...with own data (not recommended)
myrmsle <- rmsle(dt[, y], fitted(whichmodel))
# todo: split data frame to do in-sample testing
return(myrmsle)
}
# Identify NAs and Negative values
identifyNA <- function(dt){
  # dt is the submission data frame (columns Id and WnvPresent)
  nas <- sum(!complete.cases(dt))              # rows with missing values
  neg <- sum(dt$WnvPresent < 0, na.rm = TRUE)  # rows with negative predictions
  unfit <- nas + neg
return(unfit)
}
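# Usage sketch (illustration only): 'train', 'test' and the predictor names are
# placeholders, not part of this file.
if (interactive()) {
  fit <- glm(WnvPresent ~ Species + Trap + WeekOfYear,
             family = binomial, data = train)
  gensubmission(fit, newdata = test, type = "response")
}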
|
/functions.R
|
no_license
|
kproductivity/predict-west-nile-virus
|
R
| false | false | 1,568 |
r
|
|
context("Testing function initialize_genotypeR_data")
source("test_data_generator.R")
genotype_table <- Ref_Alt_Table(make_marker_names(fake_markers))
fake_geno_data <- initialize_genotypeR_data(seq_data=genotypes_data, genotype_table=genotype_table, warning_allele="Ref", output="pass_through")
fake_geno_data_warn2NA <- initialize_genotypeR_data(seq_data=genotypes_data, genotype_table=genotype_table, warning_allele="Ref", output="warnings2NA")
##get expectation
##dcast(SAMPLE_NAME+WELL~MARKER, value.var = "GENOTYPE", data=genotypes(fake_geno_data))
##naive_CO <- list(c(0,1,0,1), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0))
##non_naive_CO <- list(c(0,1,0.5,0.5), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0))
fake_geno_binary <- binary_coding(fake_geno_data, genotype_table=genotype_table)
non_naive_fake_geno_CO <- count_CO(fake_geno_binary, naive=FALSE)
test_that("initialize_genotypeR produces the right class", {
expect_is(non_naive_fake_geno_CO, "genotypeR")
})
test_that("initialize_genotypeR accesor functions return expected result", {
##impossible_genotype
expect_equal(impossible_genotype(non_naive_fake_geno_CO), "Ref")
##genotypes
## library(reshape)
geno <- melt(genotypes_data, id.vars=c("SAMPLE_NAME", "WELL"))
geno[geno$value=="","value"] <- NA
colnames(geno) <- c("SAMPLE_NAME", "WELL", "MARKER", "GENOTYPE")
geno$GENOTYPE <- as.character(geno$GENOTYPE)
expect_identical(genotypes(non_naive_fake_geno_CO), geno)
##binary_genotypes
##turn warnings into 0s to match test data
test_binary <- binary_genotypes(non_naive_fake_geno_CO)
test_binary[is.na(test_binary)] <- FALSE
test_binary[test_binary=="G" | test_binary=="A"] <- 0
test_binary[test_binary==FALSE] <- NA
expect_equal(test_binary, test_data)
##counted_crossovers
non_naive_CO <- list(c(0,1,0.5,0.5), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0))
expect_equal(counted_crossovers(non_naive_fake_geno_CO)$crossovers, apply(do.call(rbind, non_naive_CO), 2, sum))
})
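## Development helper (sketch): run just this file, assuming the package code is
## already loaded (e.g. via devtools::load_all()).
if (interactive()) testthat::test_file("tests/testthat/test-6_initialize_genotypeR_data.R")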
|
/tests/testthat/test-6_initialize_genotypeR_data.R
|
no_license
|
StevisonLab/genotypeR
|
R
| false | false | 2,053 |
r
|
context("Testing function initialize_genotypeR_data")
source("test_data_generator.R")
genotype_table <- Ref_Alt_Table(make_marker_names(fake_markers))
fake_geno_data <- initialize_genotypeR_data(seq_data=genotypes_data, genotype_table=genotype_table, warning_allele="Ref", output="pass_through")
fake_geno_data_warn2NA <- initialize_genotypeR_data(seq_data=genotypes_data, genotype_table=genotype_table, warning_allele="Ref", output="warnings2NA")
##get expectation
##dcast(SAMPLE_NAME+WELL~MARKER, value.var = "GENOTYPE", data=genotypes(fake_geno_data))
##naive_CO <- list(c(0,1,0,1), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0))
##non_naive_CO <- list(c(0,1,0.5,0.5), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0))
fake_geno_binary <- binary_coding(fake_geno_data, genotype_table=genotype_table)
non_naive_fake_geno_CO <- count_CO(fake_geno_binary, naive=FALSE)
test_that("initialize_genotypeR produces the right class", {
expect_is(non_naive_fake_geno_CO, "genotypeR")
})
test_that("initialize_genotypeR accesor functions return expected result", {
##impossible_genotype
expect_equal(impossible_genotype(non_naive_fake_geno_CO), "Ref")
##genotypes
## library(reshape)
geno <- melt(genotypes_data, id.vars=c("SAMPLE_NAME", "WELL"))
geno[geno$value=="","value"] <- NA
colnames(geno) <- c("SAMPLE_NAME", "WELL", "MARKER", "GENOTYPE")
geno$GENOTYPE <- as.character(geno$GENOTYPE)
expect_identical(genotypes(non_naive_fake_geno_CO), geno)
##binary_genotypes
##turn warnings into 0s to match test data
test_binary <- binary_genotypes(non_naive_fake_geno_CO)
test_binary[is.na(test_binary)] <- FALSE
test_binary[test_binary=="G" | test_binary=="A"] <- 0
test_binary[test_binary==FALSE] <- NA
expect_equal(test_binary, test_data)
##counted_crossovers
non_naive_CO <- list(c(0,1,0.5,0.5), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0), c(0,0,0,0))
expect_equal(counted_crossovers(non_naive_fake_geno_CO)$crossovers, apply(do.call(rbind, non_naive_CO), 2, sum))
})
|
#*******************************************************************************
#
# Local Approximate Gaussian Process Regression
# Copyright (C) 2013, The University of Chicago
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Questions? Contact Robert B. Gramacy (rbg@vt.edu)
#
#*******************************************************************************
## laGPsep:
##
## C-version of sequential design loop for prediction at Xref
laGPsep <- function(Xref, start, end, X, Z, d=NULL, g=1/10000,
method=c("alc", "alcopt", "alcray", "nn"), Xi.ret=TRUE,
close=min((1000+end)*if(method[1] %in% c("alcray", "alcopt")) 10 else 1, nrow(X)),
alc.gpu=FALSE, numstart=if(method[1] == "alcray") ncol(X) else 1,
rect=NULL, lite=TRUE, verb=0)
{
## argument matching and numerifying
method <- match.arg(method)
if(method == "alc") imethod <- 1
else if(method == "alcopt") imethod <- 2
else if(method == "alcray") imethod <- 3
## else if(method == "mspe") imethod <- 4
## else if(method == "fish") imethod <- 5
else imethod <- 6
## massage Xref
m <- ncol(X)
if(!is.matrix(Xref)) Xref <- data.matrix(Xref)
nref <- nrow(Xref)
## calculate rectangle if using alcray
if(method == "alcray" || method == "alcopt") {
if(is.null(rect)) rect <- matrix(0, nrow=2, ncol=m);
if(method == "alcray" && nref != 1)
stop("alcray only implemented for nrow(Xref) = 1")
if(nrow(rect) != 2 || ncol(rect) != m)
stop("bad rect dimensions, must be 2 x ncol(X)")
if(length(numstart) != 1 || numstart < 1)
stop("numstart should be an integer scalar >= 1")
} else {
if(!is.null(rect)) warning("rect only used by alcray and alcopt methods");
rect <- 0
}
## sanity checks on input dims
n <- nrow(X)
if(start < 6 || end <= start) stop("must have 6 <= start < end")
if(ncol(Xref) != m) stop("bad dims")
if(length(Z) != n) stop("bad dims")
if(start >= end || n <= end)
stop("start >= end or nrow(X) <= end, so nothing to do")
if(close <= end || close > n) stop("must have end < close <= n")
if(!lite) {
if(nref == 1) {
warning("lite = FALSE only allowed for nref > 1")
lite <- TRUE
}
else s2dim <- nref*nref
} else s2dim <- nref
## process the d argument
d <- darg(d, X)
if(length(d$start) == 1) d$start <- rep(d$start, ncol(X))
else if(length(d$start) != ncol(X))
stop("d$start should be scalar or length ncol(X)")
## process the g argument
g <- garg(g, Z)
if(length(g$start) != 1) stop("g$start should be scalar")
## convert to doubles
m <- ncol(X)
dd <- c(d$start, d$mle, rep(d$min, m), rep(d$max, m), d$ab)
dg <- c(g$start, g$mle, g$min, g$max, g$ab)
## sanity checks on controls
if(!(is.logical(Xi.ret) && length(Xi.ret) == 1))
stop("Xi.ret not a scalar logical")
if(length(alc.gpu) > 1 || alc.gpu < 0)
stop("alc.gpu should be a scalar logical or scalar non-negative integer")
## for timing
tic <- proc.time()[3]
out <- .C("laGPsep_R",
m = as.integer(ncol(Xref)),
start = as.integer(start),
end = as.integer(end),
Xref = as.double(t(Xref)),
nref = as.integer(nref),
n = as.integer(n),
X = as.double(t(X)),
Z = as.double(Z),
d = as.double(dd),
g = as.double(dg),
imethod = as.integer(imethod),
close = as.integer(close),
numstart = as.integer(numstart),
rect = as.double(t(rect)),
lite = as.integer(lite),
verb = as.integer(verb),
Xi.ret = as.integer(Xi.ret),
Xi = integer(end*Xi.ret),
mean = double(nref),
s2 = double(s2dim),
df = double(1),
dmle = double(m * d$mle),
dits = integer(1 * d$mle),
gmle = double(1 * g$mle),
gits = integer(1 * g$mle),
llik = double(1),
PACKAGE = "laGP")
## put timing in
toc <- proc.time()[3]
## assemble output and return
outp <- list(mean=out$mean, s2=out$s2, df=out$df, llik=out$llik,
time=toc-tic, method=method, d=d, g=g, close=close)
## possibly add mle and Xi info
mle <- NULL
if(d$mle) mle <- data.frame(d=matrix(out$dmle, nrow=1), dits=out$dits)
if(g$mle) mle <- cbind(mle, data.frame(g=out$gmle, gits=out$gits))
outp$mle <- mle
if(Xi.ret) outp$Xi <- out$Xi + 1
## check for lite and possibly make s2 into Sigma
if(!lite) {
outp$Sigma <- matrix(out$s2, ncol=nref)
outp$s2 <- NULL
}
## add ray info?
if(method == "alcray" || method == "alcopt")
outp$numstart <- numstart
##return
return(outp)
}
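## Usage sketch for laGPsep() (illustration only; the data below are simulated
## here and are not part of the package):
if (interactive()) {
  Xsim <- matrix(runif(2000), ncol = 2)
  Zsim <- sin(2 * pi * Xsim[, 1]) + rnorm(nrow(Xsim), sd = 0.1)
  fit <- laGPsep(Xref = matrix(c(0.5, 0.5), nrow = 1), start = 6, end = 50,
                 X = Xsim, Z = Zsim, method = "alc")
  print(c(mean = fit$mean, s2 = fit$s2))
}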
## laGPsep.R:
##
## an R-loop version of the laGPsep function; the main reason this is
## much slower than the C-version (laGPsep) is that it must pass/copy
## a big X-matrix each time it is called
laGPsep.R <- function(Xref, start, end, X, Z, d=NULL, g=1/10000,
method=c("alc", "alcopt", "alcray", "nn"),
Xi.ret=TRUE, pall=FALSE,
close=min((1000+end)*if(method[1] %in% c("alcray", "alcopt")) 10 else 1, nrow(X)),
parallel=c("none", "omp", "gpu"),
numstart=if(method[1] == "alcray") ncol(X) else 1,
rect=NULL, lite=TRUE, verb=0)
{
## argument matching
method <- match.arg(method)
parallel <- match.arg(parallel)
## massage Xref
m <- ncol(X)
if(!is.matrix(Xref)) Xref <- data.matrix(Xref)
## sanity checks
n <- nrow(X)
if(start < 6 || end <= start) stop("must have 6 <= start < end")
if(ncol(Xref) != m) stop("bad dims")
if(length(Z) != n) stop("bad dims")
if(start >= end || n <= end)
stop("start >= end or nrow(X) <= end, so nothing to do")
if(close <= end || close > n) stop("must have end < close <= n")
if(!lite && nrow(Xref) == 1)
warning("lite = TRUE only allowed for nrow(Xref) > 1")
## calculate rectangle if using alcray
if(method %in% c("alcray", "alcopt")) {
if(method == "alcray" && nrow(Xref) != 1)
stop("alcray only implemented for nrow(Xref) = 1")
if(length(numstart) != 1 || numstart < 1)
stop("numstart should be an integer scalar >= 1")
}
## process the d argument
d <- darg(d, X)
if(length(d$start) == 1) d$start <- rep(d$start, ncol(X))
else if(length(d$start) != ncol(X))
stop("d$start should be scalar or length ncol(X)")
## process the g argument
g <- garg(g, Z)
if(length(g$start) != 1) stop("g$start should be scalar")
## check Xi.ret argument
if(!( is.logical(Xi.ret) && length(Xi.ret) == 1))
stop("Xi.ret not a scalar logical")
if(Xi.ret) Xi.ret <- rep(NA, end)
else Xi.ret <- NULL
## for timing
tic <- proc.time()[3]
## sorting to Xref location
dst <- drop(distance(Xref, X))
if(is.matrix(dst)) dst <- apply(dst, 2, min)
cands <- order(dst)
Xi <- cands[1:start]
## building a new GP with closest Xs to Xref
gpsepi <- newGPsep(X[Xi,,drop=FALSE], Z[Xi], d=d$start, g=g$start,
dK=!(method %in% c("alc", "alcray", "alcopt", "nn")))
## for the output object
if(!is.null(Xi.ret)) Xi.ret[1:start] <- Xi
## if pall, then predict after every iteration
## ONLY AVAILABLE IN THE R VERSION
if(pall) {
nav <- rep(NA, end-start)
pall <- data.frame(mean=nav, s2=nav, df=nav, llik=nav)
} else pall <- NULL
## determine remaining candidates
if(close >= n) close <- 0
if(close > 0) {
if(close >= n-start)
stop("close not less than remaining cands")
cands <- cands[(start+1):close]
} else cands <- cands[-(1:start)]
# set up rect from cands if not specified
if(method %in% c("alcray", "alcopt")) {
if(is.null(rect)) rect <- apply(X[cands,,drop=FALSE], 2, range)
else if(nrow(rect) != 2 || ncol(rect) != m)
stop("bad rect dimensions, must be 2 x ncol(X)")
} else if(!is.null(rect))
warning("rect only used by alcray and alcopt methods")
  ## sequential design loop: greedily add points from start+1 up to end
for(t in (start+1):end) {
## if pall then predict after each iteration
if(!is.null(pall))
pall[t-start,] <- predGPsep(gpsepi, Xref, lite=TRUE)
## calc ALC to reference
if(method == "alcray") {
offset <- ((t-start) %% floor(sqrt(t-start))) + 1
w <- lalcrayGPsep(gpsepi, Xref, X[cands,,drop=FALSE], rect, offset, numstart,
verb=verb-2)
} else if(method == "alcopt") {
offset <- ((t-start)) # %% floor(sqrt(t-start))) + 1
w <- lalcoptGPsep.R(gpsepi, Xref, X[cands,,drop=FALSE], rect, offset, numstart,
verb=verb-2)
} else {
if(method == "alc")
als <- alcGPsep(gpsepi, X[cands,,drop=FALSE], Xref, parallel=parallel,
verb=verb-2)
else als <- c(1, rep(0, length(cands)-1)) ## nearest neighbor
als[!is.finite(als)] <- NA
w <- which.max(als)
}
## add the chosen point to the GP fit
updateGPsep(gpsepi, matrix(X[cands[w],], nrow=1), Z[cands[w]], verb=verb-1)
if(!is.null(Xi.ret)) Xi.ret[t] <- cands[w]
cands <- cands[-w]
}
## maybe do post-MLE calculation
mle <- mleGPsep.switch(gpsepi, method, d, g, verb)
## Obtain final prediction
outp <- predGPsep(gpsepi, Xref, lite=lite)
if(!is.null(pall)) outp <- as.list(rbind(pall, outp))
## put timing and X info in
toc <- proc.time()[3]
outp$time <- toc - tic
outp$Xi <- Xi.ret
outp$method <- method
outp$close <- close
## assign d & g
outp$d <- d
## assign g
outp$g <- g
## assign mle
outp$mle <- mle
## add ray info?
if(method == "alcray" || method == "alcopt")
outp$numstart <- numstart
## clean up
deleteGPsep(gpsepi)
return(outp)
}
## aGPsep.R:
##
## loops over all predictive locations XX and obtains adaptive approx
## kriging equations for each based on localized subsets of (X,Z);
## the main reason this is much slower than the C-version (aGPsep) is
## that it must pass/copy a big X-matrix each time it is called
aGPsep.R <- function(X, Z, XX, start=6, end=50, d=NULL, g=1/10000,
method=c("alc", "alcray", "nn"), Xi.ret=TRUE,
close=min((1000+end)*if(method[1] == "alcray") 10 else 1, nrow(X)),
numrays=ncol(X), laGPsep=laGPsep.R, verb=1)
{
## sanity checks
nn <- nrow(XX)
m <- ncol(X)
if(ncol(XX) != ncol(X)) stop("mismatch XX and X cols")
if(nrow(X) != length(Z)) stop("length(Z) != nrow(X)")
if(end-start <= 0) stop("nothing to do")
## check method argument
method <- match.arg(method)
## calculate rectangle if using alcray
if(method == "alcray") {
rect <- apply(X, 2, range)
if(nrow(rect) != 2 || ncol(rect) != ncol(X))
stop("bad rect dimensions, must be 2 x ncol(X)")
if(length(numrays) != 1 || numrays < 1)
stop("numrays should be an integer scalar >= 1")
} else rect <- NULL
## memory for each set of approx kriging equations
ZZ.var <- ZZ.mean <- rep(NA, nrow(XX))
## other args checked in laGP.R; allocate Xi space (?)
N <- length(ZZ.mean)
if(Xi.ret) Xi <- matrix(NA, nrow=N, ncol=end)
else Xi <- NULL
## get d and g arguments
d <- darg(d, X)
g <- garg(g, Z)
## check d$start
ds.norep <- d$start
if(length(d$start) == 1)
d$start <- matrix(rep(d$start, m), ncol=m, nrow=nn, byrow=TRUE)
else if(length(d$start) == m)
d$start <- matrix(d$start, nrow=nn, byrow=TRUE)
else if(nrow(d$start) != nn || ncol(d$start) != m)
stop("d$start must be a scalar, or a vector of length ncol(X), or an nrow(XX) x ncol(X) matrix")
## check gstart
if(length(g$start) > 1 && length(g$start) != nn)
stop("g$start must be a scalar or a vector of length nrow(XX)")
gs.norep <- g$start
if(length(g$start) != nrow(XX)) g$start <- rep(g$start, nrow(XX))
## check mle
if(d$mle) {
dits <- ZZ.var
dmle <- matrix(NA, nrow=nrow(XX), ncol=ncol(X))
} else dits <- dmle <- NULL
if(g$mle) gits <- gmle <- ZZ.var
else gits <- gmle <- NULL
## for timing
tic <- proc.time()[3]
## now do copies and local updates for each reference location
for(i in 1:N) {
## local calculation, (add/remove .R in laGP.R for R/C version)
di <- list(start=d$start[i,], mle=d$mle, min=d$min, max=d$max, ab=d$ab)
gi <- list(start=g$start[i], mle=g$mle, min=g$min, max=g$max, ab=g$ab)
outp <- laGPsep(XX[i,,drop=FALSE], start, end, X, Z, d=di, g=gi,
                method=method, Xi.ret=Xi.ret, close=close, numstart=numrays,
rect=rect, verb=verb-1)
## save MLE outputs and update gpi to use new dmle
if(!is.null(dmle)) { dmle[i,] <- as.numeric(outp$mle[1:ncol(X)]); dits[i] <- outp$mle$dits }
if(!is.null(gmle)) { gmle[i] <- outp$mle$g; gits[i] <- outp$mle$gits }
## extract predictive equations
ZZ.mean[i] <- outp$mean
ZZ.var[i] <- outp$s2 * outp$df / (outp$df-2)
## save Xi; Xi.ret checked in laGP.R
if(Xi.ret) Xi[i,] <- outp$Xi
## print progress
if(verb > 0) {
cat("i = ", i, " (of ", N, ")", sep="")
if(d$mle) cat(", d = (", paste(signif(dmle[i,], 5), collapse=", "), "), its = ", dits[i], sep="")
if(g$mle) cat(", g = ", gmle[i], ", its = ", gits[i], sep="")
cat("\n", sep="")
}
}
## for timing
toc <- proc.time()[3]
## assemble output
d$start <- ds.norep
g$start <- gs.norep
r <- list(Xi=Xi, mean=ZZ.mean, var=ZZ.var, d=d, g=g,
time=toc-tic, method=method, close=close)
## add mle info?
mle <- NULL
if(d$mle) mle <- data.frame(d=dmle, dits=dits)
if(g$mle) mle <- cbind(mle, data.frame(g=gmle, gits=gits))
r$mle <- mle
## add ray info?
if(method == "alcray") r$numrays <- numrays
## done
return(r)
}
## aGPsep:
##
## using C: loops over all predictive locations XX and obtains adaptive
## approx kriging equations for each based on localized subsets of (X,Z)
aGPsep <- function(X, Z, XX, start=6, end=50, d=NULL, g=1/10000,
method=c("alc", "alcray", "nn"), Xi.ret=TRUE,
close=min((1000+end)*if(method[1] == "alcray") 10 else 1, nrow(X)),
numrays=ncol(X), omp.threads=1, verb=1)
{
## sanity checks
nn <- nrow(XX)
m <- ncol(X)
n <- nrow(X)
if(ncol(XX) != m) stop("mismatch XX and X cols")
if(n != length(Z)) stop("length(Z) != nrow(X)")
if(end-start <= 0) stop("nothing to do")
if(close <= end || close > n) stop("must have end < close <= n")
## numerify method
method <- match.arg(method)
if(method == "alc") imethod <- 1
else if(method == "alcray") imethod <- 3
else imethod <- 6
## calculate rectangle if using alcray
if(method == "alcray") {
rect <- apply(X, 2, range)
if(nrow(rect) != 2 || ncol(rect) != m)
stop("bad rect dimensions, must be 2 x ncol(X)")
if(length(numrays) != 1 || numrays < 1)
stop("numrays should be an integer scalar >= 1")
} else rect <- 0
## check Xi.ret argument
if(!(is.logical(Xi.ret) && length(Xi.ret) == 1))
stop("Xi.ret not a scalar logical")
## get d and g arguments
d <- darg(d, X)
dd <- c(d$mle, rep(d$min, m), rep(d$max, m), d$ab)
g <- garg(g, Z)
dg <- c(g$mle, g$min, g$max, g$ab)
## check d$start
ds.norep <- d$start
if(length(d$start) == 1)
d$start <- matrix(rep(d$start, m), ncol=m, nrow=nn, byrow=TRUE)
else if(length(d$start) == m)
d$start <- matrix(rep(d$start, nn), ncol=m, byrow=TRUE)
else if(nrow(d$start) != nn || ncol(d$start) != m)
stop("d$start must be a scalar, or a vector of length ncol(X), or an nrow(XX) x ncol(X) matrix")
## check gstart
if(length(g$start) > 1 && length(g$start) != nn)
stop("g$start must be a scalar or a vector of length nrow(XX)")
gs.norep <- g$start
if(length(g$start) != nrow(XX)) g$start <- rep(g$start, nrow(XX))
## check OMP argument
if(length(omp.threads) != 1 || omp.threads < 1)
stop("omp.threads should be a positive scalar integer")
## for timing
tic <- proc.time()[3]
## calculate the kriging equations separately
out <- .C("aGPsep_R",
m = as.integer(m),
start = as.integer(start),
end = as.integer(end),
XX = as.double(t(XX)),
nn = as.integer(nn),
n = as.integer(n),
X = as.double(t(X)),
Z = as.double(Z),
dstart = as.double(t(d$start)),
darg = as.double(dd),
g = as.double(g$start),
garg = as.double(dg),
imethod = as.integer(imethod),
close = as.integer(close),
omp.threads = as.integer(omp.threads),
numrays = as.integer(numrays),
rect = as.double(t(rect)),
verb = as.integer(verb),
Xi.ret = as.integer(Xi.ret),
Xi = integer(end*Xi.ret*nn),
mean = double(nn),
var = double(nn),
dmle = double(nn * d$mle * m),
dits = integer(nn * d$mle),
gmle = double(nn * g$mle),
gits = integer(nn * g$mle),
llik = double(nn),
PACKAGE = "laGP")
## for timing
toc <- proc.time()[3]
## all done, return
d$start <- ds.norep
g$start <- gs.norep
outp <- list(mean=out$mean, var=out$var, llik=out$llik, d=d, g=g,
time=toc-tic, method=method, close=close)
## copy MLE outputs
outp$mle <- NULL
if(d$mle) {
outp$mle <- data.frame(d=matrix(out$dmle, ncol=m, byrow=TRUE),
dits=out$dits)
}
if(g$mle) {
if(d$mle) outp$mle <- cbind(outp$mle, data.frame(g=out$gmle, gits=out$gits))
else outp$mle <- data.frame(g=out$gmle, gits=out$gits)
}
## add ray info?
if(method == "alcray") outp$numrays <- numrays
## copy XI
if(Xi.ret) outp$Xi <- matrix(out$Xi+1, nrow=nn, byrow=TRUE)
return(outp)
}
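## Usage sketch for aGPsep() (illustration only; simulated data as above):
if (interactive()) {
  Xsim <- matrix(runif(2000), ncol = 2)
  Zsim <- sin(2 * pi * Xsim[, 1]) + rnorm(nrow(Xsim), sd = 0.1)
  XXsim <- matrix(runif(20), ncol = 2)
  out <- aGPsep(Xsim, Zsim, XXsim, end = 50, verb = 0)
  print(cbind(mean = out$mean, var = out$var))
}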
|
/R/laGP_sep.R
|
no_license
|
cran/laGP
|
R
| false | false | 18,841 |
r
|
|
#Question A
vec1 <- c(2, 1, 1, 3, 2, 1, 0)
vec2 <- c(3, 8, 2, 2, 0, 0, 0)
#i
if(any((vec1 + vec2) == 10)) { # any() reduces the element-wise sums to a single condition
cat("Print me!")
}
#ii
if(vec1[1] >= 2 && vec2[1] >= 2) {
cat("Print me!")
}
#iii
if(all((vec2-vec1)[c(2, 6)]<7)) {
cat("Print me!")
}
#iv
if(!is.na(vec2[3])){
cat("Print me!")
}
#Question B
ifelse(test = vec1 + vec2 > 3, yes = vec1 * vec2, no = vec1 + vec2)
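# Worked result (element-wise): vec1 + vec2 is c(5, 9, 3, 5, 2, 1, 0), the test is
# c(TRUE, TRUE, FALSE, TRUE, FALSE, FALSE, FALSE), so the value returned is c(6, 8, 3, 6, 2, 1, 0).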
#Question C
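# Each assignment below is an alternative test matrix for the check that follows;
# run them one at a time (only the most recent assignment is in effect).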
mymat <- matrix(c("Foo", "Bar", "Grand", "Good"), nrow = 2, ncol = 2)
mymat <- matrix(as.character(1:16), 4, 4)
mymat <- matrix(
c("DANDELION", "Hyacinthus", "Gerbera",
"MARIGOLD", "geranium", "liguarlia",
"Pachysandra", "SNAPDRAGON", "GLADIOLUS"),
3, 3
)
mymat <- matrix(c("GREAT", "exercises", "right", "here"), 2, 2, byrow = T)
if(any(substr(x=diag(mymat), 1, 1) == "G") || any(substr(x=diag(mymat), 1, 1) == "g")) {
indexes <- which(substr(diag(mymat), 1, 1) == "G" | substr(diag(mymat), 1, 1) == "g")
diag(mymat)[indexes] <- "HERE"
} else {
mymat <- diag(nrow(mymat))
}
|
/lesson_10_1_exs.R
|
no_license
|
synflyn28/r-lessons
|
R
| false | false | 988 |
r
|
|
# R implementation of model of proterozoic oxygen and nitrogen cycles
# For scientific description see Fennel, K, M. Follows and P.G. Falkowski, 2005, Am. j. Sci. 305, 526-545
library("deSolve")
# boxes
vol_high<-9.0e15 # volume of high lat box / m^3
vol_low<-9.0e15 # volume of low lat box / m^3
vol_deep<-8.1e17 # volume of deep ocean box / m^3
vol_shelf<-1.5e15 # volume of shelf box / m^3
# initialisation
init_P<-0.2 # initial phosphate concentration throughout ocean / uM
init_NO3<-0.0 # initial nitrate concentration throughout ocean / uM
init_NH4<-0.1 # initial ammonium concentration throughout ocean / uM
init_O2<-0.0 # initial oxygen concentration throughout ocean / uM
init_atm_O2<-0 # initial atmospheric oxygen fraction / %
# mixing parameters
k_hd<-50 # mixing between high lat and deep boxes / Sv
k_ld<-30 # low lat and deep / Sv
k_lh<-30 # low and high lat / Sv
k_ds<-5 # deep and shelf / Sv
k_ls<-5 # low lat and shelf / Sv
# biogeochemical parameters
u_1<-1 # maximum export production / yr^{-1}
k_P<-0.1 # half-saturation concentration for phosphate uptake / uM
k_N<-1.6 # half-sat concentration for nitrogen uptake / uM
u_2<-0.2 # max rate of N2 fixation / yr^{-1}
z_star<-300 # length scale for remineralisation of organic matter / m
k_anox<-10 # denitrification parameter / uM
u_nitri<-6 # max nitrification rate / yr^{-1}
k_O<-20 # half-sat concn of nitrification / uM
R_OtoP_NO3<-138 # O:P stoichiometry for the production of organic matter based on nitrate
R_OtoP_NH4<-102 # O:P stoichiometry for the production of organic matter based on ammonium
R_NtoP<-6.623 # N:P stoichiometry for the production of organic matter
R_NO3_denit<-84.8 # NO3:P stoichiometry for denitrification
Oeq<-0.2 # Present oxygen equilibrium concentration / mol m^{-3}
tau<-1 # timescale of ocean-atmosphere oxygen equilibration / yr
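# Sketch of how these parameters would be passed to deSolve (the derivative
# function below is a placeholder for the full Fennel et al. equations, which
# are not part of this file):
if (interactive()) {
  fennel_derivs <- function(t, state, parms) {
    # ... compute d(state)/dt for P, NO3, NH4 and O2 in each box here ...
    list(rep(0, length(state))) # placeholder: no change
  }
  state0 <- c(P = init_P, NO3 = init_NO3, NH4 = init_NH4, O2 = init_O2)
  times <- seq(0, 1e6, by = 1e4) # years
  out <- ode(y = state0, times = times, func = fennel_derivs, parms = NULL)
  print(head(out))
}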
|
/Fennel_model.R
|
no_license
|
martwine/Biogeochemical-box-modelling
|
R
| false | false | 2,036 |
r
|
|
#' Install the kernelspec to tell Jupyter about IRkernel.
#'
#' This can be called multiple times for different R interpreters, but you have to give a
#' different name (and displayname to see a difference in the notebook UI). If the same
#' name is given, it will overwrite older versions of the kernel spec with that name!
#'
#' @param user Install into user directory (\href{https://specifications.freedesktop.org/basedir-spec/latest/ar01s03.html}{\code{$XDG_DATA_HOME}}\code{/jupyter/kernels}) or globally?
#' @param name The name of the kernel (default "ir")
#' @param displayname The name which is displayed in the notebook (default: "R")
#'
#' @return Exit code of the \code{jupyter kernelspec install} call.
#'
#' @export
installspec <- function(user = TRUE, name = 'ir', displayname = 'R') {
exit_code <- system2('jupyter', c('kernelspec', '--version'), FALSE, FALSE)
if (exit_code != 0)
stop('jupyter-client has to be installed but ', dQuote('jupyter kernelspec --version'), ' exited with code ', exit_code, '.\n')
# make a kernelspec with the current interpreter's absolute path
srcdir <- system.file('kernelspec', package = 'IRkernel')
tmp_name <- tempfile()
dir.create(tmp_name)
file.copy(srcdir, tmp_name, recursive = TRUE)
spec_path <- file.path(tmp_name, 'kernelspec', 'kernel.json')
spec <- fromJSON(spec_path)
spec$argv[[1]] <- file.path(R.home('bin'), 'R')
spec$display_name <- displayname
write(toJSON(spec, pretty = TRUE, auto_unbox = TRUE), file = spec_path)
user_flag <- if (user) '--user' else character(0)
args <- c('kernelspec', 'install', '--replace', '--name', name, user_flag, file.path(tmp_name, 'kernelspec'))
exit_code <- system2('jupyter', args)
unlink(tmp_name, recursive = TRUE)
invisible(exit_code)
}
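# Example (sketch): register the current R interpreter as an additional kernel,
# e.g. so several R versions can coexist in Jupyter; the name and display name
# below are arbitrary examples.
if (interactive()) installspec(name = 'ir42', displayname = 'R 4.2')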
|
/R/installspec.r
|
no_license
|
deven2106/IRkernel
|
R
| false | false | 1,850 |
r
|
|
#-----------------------------------------------------------------------------#
#' Prints a summary of an interference object
#'
#' @param x object of class 'interference'
#' @param ... ignored
#' @method print interference
#' @export
#-----------------------------------------------------------------------------#
print.interference <- function(x, ...)
{
cols <- c('alpha1', 'trt1', 'alpha2', 'trt2', 'estimate', 'std.error', 'conf.low', 'conf.high')
est <- x$estimates
#Not defined for glmer class: form <- as.character(deparse(x$models$propensity_model$formula))
form <- x$summary$formula
allo <- x$summary$allocations
conf <- x$summary$conf.level
varm <- x$summary$variance_estimation
k <- x$summary$nallocations
N <- x$summary$ngroups
mina <- min(allo)
maxa <- max(allo)
de1 <- est[est$effect == "direct" & est$trt1 == 0 & est$trt2 == 1 & est$alpha1 == mina, cols ]
de2 <- est[est$effect == "direct" & est$trt1 == 0 & est$trt2 == 1 & est$alpha1 == maxa, cols ]
de <- rbind(de1, de2)
ie <- est[est$effect == "indirect" & est$trt1 == 0 & est$trt2 == 0 &
est$alpha1 == mina & est$alpha2 == maxa, cols ]
te <- est[est$effect == "total" & est$trt1 == 0 & est$trt2 == 1 &
est$alpha1 == mina & est$alpha2 == maxa, cols ]
oe <- est[est$effect == "overall" & est$alpha1 == mina & est$alpha2 == maxa, cols ]
if(length(allo) > 2){
meda <- stats::quantile(allo, probs = .5, type = 3)
de3 <- est[est$effect == "direct" & est$trt1 == 0 & est$trt2 == 1 & est$alpha1 == meda, cols ]
de <- rbind(de, de3)
ie2 <- est[est$effect == "indirect" & est$trt1 == 0 & est$trt2 == 0 &
est$alpha1 == mina & est$alpha2 == meda, cols ]
ie3 <- est[est$effect == "indirect" & est$trt1 == 0 & est$trt2 == 0 &
est$alpha1 == meda & est$alpha2 == maxa, cols ]
ie <- rbind(ie, ie2, ie3)
te2 <- est[est$effect == "total" & est$trt1 == 0 & est$trt2 == 1 &
est$alpha1 == mina & est$alpha2 == meda, cols ]
te3 <- est[est$effect == "total" & est$trt1 == 0 & est$trt2 == 1 &
est$alpha1 == meda & est$alpha2 == maxa, cols ]
te <- rbind(te, te2, te3)
oe2 <- est[est$effect == "overall" & est$alpha1 == mina & est$alpha2 == meda, cols ]
oe3 <- est[est$effect == "overall" & est$alpha1 == meda & est$alpha2 == maxa, cols ]
oe <- rbind(oe, oe2, oe3)
}
de <- format(de, digits = 4)
ie <- format(ie, digits = 4)
te <- format(te, digits = 4)
oe <- format(oe, digits = 4)
## Output ##
cat(" --------------------------------------------------------------------------\n",
" Model Summary \n",
"--------------------------------------------------------------------------\n",
"Formula:", form, '\n',
"Number of groups: ", N, '\n',
k, "allocations were used from", mina, '(min) to', maxa, '(max) \n',
# "Propensity model: ", form, '\n',
"--------------------------------------------------------------------------\n",
" Causal Effect Summary \n",
" Confidence level:", conf, " \n",
" Variance method:", varm, " \n",
"--------------------------------------------------------------------------\n\n",
"Direct Effects\n")
print(de, row.names = FALSE)
cat('\n', "Indirect Effects\n")
print(ie, row.names = FALSE)
cat('\n', "Total Effects \n")
print(te, row.names = FALSE)
cat('\n', 'Overall Effects \n')
print(oe, row.names = FALSE)
cat('\n',
"--------------------------------------------------------------------------\n")
}
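# Sketch: the same subsetting used above can be applied directly to a fitted
# object (assumed here to be called 'fit') to pull out a single contrast.
if (interactive()) {
  est <- fit$estimates
  mina <- min(fit$summary$allocations)
  est[est$effect == "direct" & est$trt1 == 0 & est$trt2 == 1 & est$alpha1 == mina,
      c("alpha1", "estimate", "conf.low", "conf.high")]
}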
|
/R/generics.R
|
no_license
|
bsaul/inferference
|
R
| false | false | 3,804 |
r
|
#-----------------------------------------------------------------------------#
#' Prints a summary of an interference object
#'
#' @param x object of class 'interference'
#' @param ... ignored
#' @method print interference
#' @export
#-----------------------------------------------------------------------------#
print.interference <- function(x, ...)
{
cols <- c('alpha1', 'trt1', 'alpha2', 'trt2', 'estimate', 'std.error', 'conf.low', 'conf.high')
est <- x$estimates
#Not defined for glmer class: form <- as.character(deparse(x$models$propensity_model$formula))
form <- x$summary$formula
allo <- x$summary$allocations
conf <- x$summary$conf.level
varm <- x$summary$variance_estimation
k <- x$summary$nallocations
N <- x$summary$ngroups
mina <- min(allo)
maxa <- max(allo)
de1 <- est[est$effect == "direct" & est$trt1 == 0 & est$trt2 == 1 & est$alpha1 == mina, cols ]
de2 <- est[est$effect == "direct" & est$trt1 == 0 & est$trt2 == 1 & est$alpha1 == maxa, cols ]
de <- rbind(de1, de2)
ie <- est[est$effect == "indirect" & est$trt1 == 0 & est$trt2 == 0 &
est$alpha1 == mina & est$alpha2 == maxa, cols ]
te <- est[est$effect == "total" & est$trt1 == 0 & est$trt2 == 1 &
est$alpha1 == mina & est$alpha2 == maxa, cols ]
oe <- est[est$effect == "overall" & est$alpha1 == mina & est$alpha2 == maxa, cols ]
if(length(allo) > 2){
meda <- stats::quantile(allo, probs = .5, type = 3)
de3 <- est[est$effect == "direct" & est$trt1 == 0 & est$trt2 == 1 & est$alpha1 == meda, cols ]
de <- rbind(de, de3)
ie2 <- est[est$effect == "indirect" & est$trt1 == 0 & est$trt2 == 0 &
est$alpha1 == mina & est$alpha2 == meda, cols ]
ie3 <- est[est$effect == "indirect" & est$trt1 == 0 & est$trt2 == 0 &
est$alpha1 == meda & est$alpha2 == maxa, cols ]
ie <- rbind(ie, ie2, ie3)
te2 <- est[est$effect == "total" & est$trt1 == 0 & est$trt2 == 1 &
est$alpha1 == mina & est$alpha2 == meda, cols ]
te3 <- est[est$effect == "total" & est$trt1 == 0 & est$trt2 == 1 &
est$alpha1 == meda & est$alpha2 == maxa, cols ]
te <- rbind(te, te2, te3)
oe2 <- est[est$effect == "overall" & est$alpha1 == mina & est$alpha2 == meda, cols ]
oe3 <- est[est$effect == "overall" & est$alpha1 == meda & est$alpha2 == maxa, cols ]
oe <- rbind(oe, oe2, oe3)
}
de <- format(de, digits = 4)
ie <- format(ie, digits = 4)
te <- format(te, digits = 4)
oe <- format(oe, digits = 4)
## Output ##
cat(" --------------------------------------------------------------------------\n",
" Model Summary \n",
"--------------------------------------------------------------------------\n",
"Formula:", form, '\n',
"Number of groups: ", N, '\n',
k, "allocations were used from", mina, '(min) to', maxa, '(max) \n',
# "Propensity model: ", form, '\n',
"--------------------------------------------------------------------------\n",
" Causal Effect Summary \n",
" Confidence level:", conf, " \n",
" Variance method:", varm, " \n",
"--------------------------------------------------------------------------\n\n",
"Direct Effects\n")
print(de, row.names = FALSE)
cat('\n', "Indirect Effects\n")
print(ie, row.names = FALSE)
cat('\n', "Total Effects \n")
print(te, row.names = FALSE)
cat('\n', 'Overall Effects \n')
print(oe, row.names = FALSE)
cat('\n',
"--------------------------------------------------------------------------\n")
}
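# --- Illustration (added, toy data): the min/max row selection used above -----
# A minimal sketch of how the printer pulls out the rows for the smallest and
# largest allocation from an `estimates`-style data.frame; all values invented.
est_demo <- data.frame(effect = "direct", trt1 = 0, trt2 = 1,
                       alpha1 = c(0.2, 0.35, 0.5),
                       estimate = c(-0.08, -0.05, -0.02))
mina_demo <- min(est_demo$alpha1)
maxa_demo <- max(est_demo$alpha1)
est_demo[est_demo$effect == "direct" &
           est_demo$alpha1 %in% c(mina_demo, maxa_demo), ]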
|
# ******************************************************************************
# Intraday Overreaction - CalcScores.R *
# ******************************************************************************
# This script reads index and stock csv files to generate a file with *
# overreaction scores and components for each stock *
# Process: *
# 1) load price files for index, risk-free rate, then for each stock in loop *
# 2) generate date-time aligned series *
# 3) calculate rolling robust betas stock vs. index *
# 4) calculate overreaction scores and save to file for each stock in loop *
# 5) clean-up for obsolete variables *
# ******************************************************************************
# ----- DATA -------------------------------------------------------------------
# get available csv data files
data.files <- list.files (path = data.path, pattern = "*[NQA].csv")
data.files.etf <- list.files (path = data.path.etf, pattern = "*[QN].csv")
# read index and risk-free-rate files into xts series and convert to daily
cur.path <- paste (data.path.etf, data.files.etf[1], sep="/")
sp.x <- Load1minDataXTS (cur.path)
sp.d.x <- to.daily (sp.x, drop.time=TRUE, name=NULL)
rf.d <- read.csv ("D:/Doktorarbeit/40 Data/Rf/FRB_H15.csv", header=TRUE,
stringsAsFactors=FALSE, col.names=c("Date", "Rf"), skip=6)
rf.d.x <- xts (as.numeric(rf.d[, 2]), as.Date(strptime(rf.d[, 1],"%m/%d/%Y")))
colnames (rf.d.x) <- "Rf"
rf.d.x <- na.locf (rf.d.x) # replace missing values with prior value
# ----- CORE LOOP --------------------------------------------------------------
# for (i in 1:100) {
for (i in 1:length(data.files)) {
# read current stock file into xts series and convert to daily
cur.path <- paste (data.path, data.files[i], sep="/")
print (paste("[CalcScores.R] Processing ", data.files[i], sep=""))
  cur.symbol <- sub("\\.csv$", "", data.files[i])  # strip extension (anchored regex)
cur.x <- Load1minDataXTS (cur.path)
# skip series with length < one year
if (nrow(cur.x) < daybars * period.oneyear ) {
print (paste("[CalcScores.R] Processing skipped for ", data.files[i],
" -- too short", sep=""))
next
}
cur.d.x <- to.daily (cur.x, drop.time=TRUE, name=NULL)
# create aligned daily time series' for stock, index and Rf
sp.d.x.aligned <- sp.d.x[index(sp.d.x) >= start(cur.d.x) & # cut excess dates
index(sp.d.x) <= end(cur.d.x)] # at boundaries
cur.d.x.aligned <- cur.d.x[index(cur.d.x) %in% index(sp.d.x.aligned)]
sp.d.x.aligned <- sp.d.x.aligned[index(sp.d.x.aligned) %in% index(cur.d.x)]
stopifnot (all.equal (index(sp.d.x.aligned), index(cur.d.x.aligned)))
rf.d.x.aligned <- rf.d.x[index(rf.d.x) %in% index(cur.d.x.aligned)]
stopifnot (all.equal (as.character.Date(index(rf.d.x.aligned)),
as.character.Date(index(cur.d.x.aligned))))
# cut 1-min series using cut.dates from daily cut procedure
cut.dates.sp <- index(sp.d.x)[!(index(sp.d.x) %in% index(sp.d.x.aligned))]
cut.dates.cur <- index(cur.d.x)[!(index(cur.d.x) %in% index(cur.d.x.aligned))]
sp.x.cut <- sp.x[as.character.Date(cut.dates.sp)] # making use of xts
cur.x.cut <- cur.x[as.character.Date(cut.dates.cur)] # by-date selection
sp.x.aligned <- sp.x[!(index(sp.x) %in% index(sp.x.cut))] # cut excess
cur.x.aligned <- cur.x[!(index(cur.x) %in% index(cur.x.cut))] # days
# align / fill missing minutes due to different session-cuts
cur.x.aligned <- AlignTimesAndFillXTS (cur.x.aligned, sp.x.aligned)
stopifnot (all.equal(index(sp.x.aligned), index(cur.x.aligned))) # assert ==
stopifnot (all(is.numeric(cur.x.aligned))) # and no NAs
# calculate robust one-year betas (t0 --> can be applied t+1)
cur.rbetas.d.x <- CalculateRollingBetaXTS ( cur.d.x.aligned, sp.d.x.aligned,
rf.d.x.aligned )
plot (cur.rbetas.d.x, ylab="Beta (Stock ~ S&P500)", xlab="Date", type="l",
main="One-year rolling robust CAPM betas of Stock vs. S&P500 ETF")
# calculate signal components
# ---------------------------
# get day-starts and -ends
day.ends <- endpoints (cur.x.aligned,on='days')[-1] # all elements except 1st
stopifnot (all(diff(day.ends) == 391)) # assert equal day length
# backfill 1st 10 minutes of opening data in case of zero-bars
cur.x.aligned <- BackfillOpeningData(cur.x.aligned, 10)
# calculate 1-minute returns and intraday returns (excluding inter-day return)
ret.x <- 100 * diff (log(cur.x.aligned$Close)) # 1-min returns
idret <- unclass (ret.x) # intraday returns variable
sp.ret.x <- 100 * diff (log(sp.x.aligned$Close)) # same for benchmark
sp.idret <- unclass (sp.ret.x)
idvol <- unclass (cur.x.aligned$Volume)
idvol[which(is.infinite(idvol))] <- 0
# remove inter-day returns from intraday return series
# by replacing daily 1st minute returns with intrabar returns from open
idret[day.ends[1:(length(day.ends))] - daybars+1, 1] <- 100 *
log (cur.x.aligned$Close[day.ends[1:(length(day.ends))] - daybars+1] /
cur.x.aligned$Open[day.ends[1:(length(day.ends))] - daybars+1])
sp.idret[day.ends[1:(length(day.ends))] - daybars+1, 1] <- 100 *
log (sp.x.aligned$Close[day.ends[1:(length(day.ends))] - daybars+1] /
sp.x.aligned$Open[day.ends[1:(length(day.ends))] - daybars+1])
  # clamp the synthetic open returns element-wise to [-1, 1]
  # (pmax/pmin, not max/min, which would collapse the whole vector to one scalar)
  idret[day.ends[1:(length(day.ends))] - daybars+1, 1] <-
    pmax(-1, pmin(1, idret[day.ends[1:(length(day.ends))] - daybars+1, 1]))
# prepare daily calculations - slicing and left-bounded window index
idret.daily <- array (idret, dim=c(daybars, length(day.ends)))
idrv.daily <- idret.daily^2
sp.idret.daily <- array (sp.idret, dim=c(daybars, length(day.ends)))
sp.idrv.daily <- sp.idret.daily^2
idvol.daily <- array (idvol, dim=c(daybars, length(day.ends)))
index.left <- c (rep(1,period.id.window), 2:(daybars-period.id.window+1))
# prepare result variables (all except volume, which remains unsmoothed)
id.daily.dsbmom <- array (NA, dim=c(daybars, length(day.ends)))
id.daily.rv.dsbavg <- array (NA, dim=c(daybars, length(day.ends)))
sp.id.daily.dsbmom <- array (NA, dim=c(daybars, length(day.ends)))
sp.id.daily.rv.dsbavg <- array (NA, dim=c(daybars, length(day.ends)))
# calculate day-start bounded mom and variance
rollsum <- rep (NA, length(day.ends)) # array with NA for each day
rollsum2 <- rollsum
rollsum.sp <- rollsum
rollsum <- idret.daily[1, ] # initialize rolling sums with
rollsum2 <- idrv.daily[1, ] # 1st bars of days
rollsum.sp <- sp.idret.daily[1, ]
rollsum2.sp <- sp.idrv.daily[1, ]
id.daily.dsbmom[1, ] <- rollsum
id.daily.rv.dsbavg[1, ] <- rollsum2
sp.id.daily.dsbmom[1, ] <- rollsum.sp
sp.id.daily.rv.dsbavg[1, ] <- rollsum2.sp
for (i in 2:daybars) {
# for each day, roll sums forward
if (i > period.id.window) {
rollsum <- rollsum - idret.daily[index.left[i-1], ]
rollsum2 <- rollsum2 - idrv.daily[index.left[i-1], ]
rollsum.sp <- rollsum.sp - sp.idret.daily[index.left[i-1], ]
rollsum2.sp <- rollsum2.sp - sp.idrv.daily[index.left[i-1], ]
}
rollsum <- rollsum + idret.daily[i, ]
rollsum2 <- rollsum2 + idrv.daily[i, ]
rollsum.sp <- rollsum.sp + sp.idret.daily[i, ]
rollsum2.sp <- rollsum2.sp + sp.idrv.daily[i, ]
id.daily.dsbmom[i, ] <- rollsum
id.daily.rv.dsbavg[i, ] <- rollsum2 / (i - index.left[i] + 1)
sp.id.daily.dsbmom[i, ] <- rollsum.sp
sp.id.daily.rv.dsbavg[i, ] <- rollsum2.sp / (i - index.left[i] + 1)
}
id.daily.rv.dsbavg[id.daily.rv.dsbavg < 0 & id.daily.rv.dsbavg > -eps ] <- 0
id.daily.vol.dsbavg <- sqrt(id.daily.rv.dsbavg) # vola: sqrt of rv
stopifnot (all(is.finite(id.daily.vol.dsbavg))) # assert integrity
sp.id.daily.rv.dsbavg[sp.id.daily.rv.dsbavg < 0 &
sp.id.daily.rv.dsbavg > -eps ] <- 0
sp.id.daily.vol.dsbavg <- sqrt(sp.id.daily.rv.dsbavg) # vola: sqrt of rv
stopifnot (all(is.finite(sp.id.daily.vol.dsbavg))) # assert integrity
# additional High/Low-based momentum
# using alternative calculation approach based on prices directly
id.daily.dsbmom.hi <- array (NA, dim=c(daybars, length(day.ends)))
id.daily.dsbmom.lo <- array (NA, dim=c(daybars, length(day.ends)))
cur.cl.daily <-
array (coredata(cur.x.aligned$Close), dim=c(daybars, length(day.ends)))
cur.hi.daily <-
array (coredata(cur.x.aligned$High), dim=c(daybars, length(day.ends)))
cur.lo.daily <-
array (coredata(cur.x.aligned$Low), dim=c(daybars, length(day.ends)))
rollref <- rep (NA, length(day.ends)) # array with NA for each day
rollref <- cur.cl.daily[1, ]
rollmin <- rollref
rollmax <- rollref
id.daily.dsbmom.hi[1:3, ] <- 0
id.daily.dsbmom.lo[1:3, ] <- 0
# drop first bar as reference point to filter out opening crap data
index.left2 <- index.left
index.left2[index.left2 == 1] <- 2
for (i in 4:daybars) {
# rollref <- cur.cl.daily[index.left[i], ]
# id.daily.dsbmom.hi[i, ] <- 100 * log (cur.hi.daily[i, ] / rollref)
# id.daily.dsbmom.lo[i, ] <- 100 * log (cur.lo.daily[i, ] / rollref)
rollmin <- apply (cur.lo.daily[(index.left[i]):(i-1), ], 2, min)
rollmax <- apply (cur.hi.daily[(index.left[i]):(i-1), ], 2, max)
id.daily.dsbmom.hi[i, ] <- 100 * log (cur.hi.daily[i, ] / rollmin)
id.daily.dsbmom.lo[i, ] <- 100 * log (cur.lo.daily[i, ] / rollmax)
}
# calculate day-time average volatility and volume
# 90? change of logic: calculate day-time average by looping through
# all days, calculating averages for all bars-in-day in parallel
# ATTENTION: id.daily.vol.alt.dta is a simple dta of 1-min squared returns
index.left2 <- c(rep(1,period.days), 2:(length(day.ends)-period.days+1))
rollsum <- id.daily.vol.dsbavg[, 1] # initialize rollsum with 1st day
rollsum.v <- idvol.daily[, 1] # initialize rollsum.v with 1st day
rollsum.alt <- idrv.daily[, 1]
rollsum.sp <- sp.id.daily.vol.dsbavg[, 1]
id.daily.vol.dsbavg.dta <- array (c(NA), # result arrays
dim=c(nrow(id.daily.vol.dsbavg),
ncol(id.daily.vol.dsbavg)))
id.daily.volume.dta <- id.daily.vol.dsbavg.dta
sp.id.daily.vol.dsbavg.dta <- id.daily.vol.dsbavg.dta
id.daily.vol.alt.dta <- id.daily.vol.dsbavg.dta
for (j in 2:length(day.ends)) {
rollsum <- rollsum + id.daily.vol.dsbavg[, j]
rollsum.v <- rollsum.v + idvol.daily[, j]
rollsum.sp <- rollsum.sp + sp.id.daily.vol.dsbavg[, j]
rollsum.alt <- rollsum.alt + idrv.daily[, j]
if (j > period.days) {
rollsum <- rollsum - id.daily.vol.dsbavg[, index.left2[j-1]]
rollsum.v <- rollsum.v - idvol.daily[, index.left2[j-1]]
rollsum.sp <- rollsum.sp - sp.id.daily.vol.dsbavg[, index.left2[j-1]]
rollsum.alt <- rollsum.alt - idrv.daily[, index.left2[j-1]]
}
if (j >= period.days) {
# setting result array here results in NA for 1st period.days
id.daily.vol.dsbavg.dta[, j] <- rollsum / period.days
id.daily.volume.dta[, j] <- rollsum.v / period.days
sp.id.daily.vol.dsbavg.dta[, j] <- rollsum.sp / period.days
id.daily.vol.alt.dta[, j] <- rollsum.alt / period.days
}
}
id.daily.vol.alt.dta[id.daily.vol.alt.dta < 0 & id.daily.vol.alt.dta > -eps ] <- 0
id.daily.vol.alt.dta <- sqrt(id.daily.vol.alt.dta)
stopifnot (all(is.finite(id.daily.vol.alt.dta[,(period.days+1):length(day.ends)])))
# calculate abnormal (beta-adjusted) stock momentum
cur.rbetas.d.x.shifted <- cur.rbetas.d.x
coredata(cur.rbetas.d.x.shifted) <-
append (cur.rbetas.d.x[-nrow(cur.rbetas.d.x)],1,0)
cur.rbetas.d.x.shifted <- na.locf (cur.rbetas.d.x.shifted)
# NAs at start replaced by 1s, and shifted back by 1 day
id.daily.dsbmom.e <- array(c(NA), dim=c(daybars, length(day.ends)))
for (j in 1:length(day.ends)) {
id.daily.dsbmom.e[, j] <- sp.id.daily.dsbmom[, j] *
cur.rbetas.d.x.shifted[j]
}
id.daily.dsbmom.a <- id.daily.dsbmom - id.daily.dsbmom.e
# calculate day-start bounded scaled version of rv
id.daily.vol.dsbavg.dta.scaled <- id.daily.vol.dsbavg.dta
sp.id.daily.vol.dsbavg.dta.scaled <- sp.id.daily.vol.dsbavg.dta
for (i in 2:daybars) {
id.daily.vol.dsbavg.dta.scaled[i,] <- sqrt(min(60, i)) *
id.daily.vol.dsbavg.dta[i,]
sp.id.daily.vol.dsbavg.dta.scaled[i,] <- sqrt(min(60, i)) *
sp.id.daily.vol.dsbavg.dta[i,]
}
# calculate daily smoothed version of 1-minute dta.rv (.alt calculation)
id.daily.vol.alt.dta2 <- id.daily.vol.alt.dta
for (j in (period.days+1):length(day.ends)) {
id.daily.vol.alt.dta2[,j] <- lowess(id.daily.vol.alt.dta[,j], f=.05)$y
}
# convert daily-slice arrays back to time series
id.dsbmom <- array (id.daily.dsbmom, dim=c(nrow(id.daily.dsbmom) *
ncol(id.daily.dsbmom),1))
id.dsbmom.a <- array (id.daily.dsbmom.a, dim=c(nrow(id.daily.dsbmom.a) *
ncol(id.daily.dsbmom.a),1))
id.rv.dta <- array (id.daily.vol.dsbavg.dta,
dim=c(nrow(id.daily.vol.dsbavg.dta) *
ncol(id.daily.vol.dsbavg.dta),1))
id.rv.dta.sc <- array (id.daily.vol.dsbavg.dta.scaled,
dim=c(nrow(id.daily.vol.dsbavg.dta.scaled) *
ncol(id.daily.vol.dsbavg.dta.scaled),1))
id.rv.1min.dta <- array (id.daily.vol.alt.dta2,
dim=c(nrow(id.daily.vol.alt.dta2) *
ncol(id.daily.vol.alt.dta2),1))
id.volume.dta <- array (id.daily.volume.dta,
dim=c(nrow(id.daily.volume.dta) *
ncol(id.daily.volume.dta),1))
sp.id.dsbmom <- array (sp.id.daily.dsbmom, dim=c(nrow(sp.id.daily.dsbmom) *
ncol(sp.id.daily.dsbmom),1))
sp.id.rv.dta <- array (sp.id.daily.vol.dsbavg.dta,
dim=c(nrow(sp.id.daily.vol.dsbavg.dta) *
ncol(sp.id.daily.vol.dsbavg.dta),1))
sp.id.rv.dta.sc <- array (sp.id.daily.vol.dsbavg.dta.scaled,
dim=c(nrow(sp.id.daily.vol.dsbavg.dta.scaled) *
ncol(sp.id.daily.vol.dsbavg.dta.scaled),1))
id.dsbmom.hi <- array (id.daily.dsbmom.hi, dim=c(nrow(id.daily.dsbmom.hi) *
ncol(id.daily.dsbmom.hi),1))
id.dsbmom.lo <- array (id.daily.dsbmom.lo, dim=c(nrow(id.daily.dsbmom.lo) *
ncol(id.daily.dsbmom.lo),1))
# shift dta vola and volume arrays back 1 day
# (cut last day and add 1 NA day at front)
length(id.rv.dta) <- length(id.rv.dta) - daybars
id.rv.dta <- append (id.rv.dta, rep(NA, daybars), 0)
length(id.rv.dta.sc) <- length(id.rv.dta.sc) - daybars
id.rv.dta.sc <- append (id.rv.dta.sc, rep(NA, daybars), 0)
length(id.volume.dta) <- length(id.volume.dta) - daybars
id.volume.dta <- append (id.volume.dta, rep(NA, daybars), 0)
length(sp.id.rv.dta) <- length(sp.id.rv.dta) - daybars
sp.id.rv.dta <- append (sp.id.rv.dta, rep(NA, daybars), 0)
length(sp.id.rv.dta.sc) <- length(sp.id.rv.dta.sc) - daybars
sp.id.rv.dta.sc <- append (sp.id.rv.dta.sc, rep(NA, daybars), 0)
# calculate score
id.score <- id.dsbmom / id.rv.dta.sc
id.score.a <- id.dsbmom.a / id.rv.dta.sc
id.score.hi <- id.dsbmom.hi / id.rv.dta.sc
id.score.lo <- id.dsbmom.lo / id.rv.dta.sc
  # ATTENTION: dividing by id.rv.dta.sc (instead of id.rv.dta * sqrt(60))
  #            allows detecting events right at the day-start; from roughly
  #            10 mins after the open the dta vola estimate is quite stable.
# save score file for current stock
# - aligned 1-min and daily prices
# - betas and id mom, score
# - volume & vola dtas (stock & spx)
save (cur.x.aligned, cur.d.x.aligned, sp.x.aligned, sp.d.x.aligned,
cur.rbetas.d.x.shifted, id.dsbmom, id.score, # id.dsbmom.a, id.score.a,
id.volume.dta, id.rv.dta, id.rv.dta.sc, id.rv.1min.dta, sp.id.rv.dta.sc,
id.score.hi, id.score.lo,
file=paste(data.path.out, cur.symbol, ".sc2", sep=""))
}
beep()
# ----- CLEAN-UP ---------------------------------------------------------------
gc()
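# --- Illustration (added, toy data): the day-start bounded rolling logic ------
# A minimal sketch of the window idea used inside the core loop above: within a
# day, sums stay anchored at the session open until the window is full, then
# roll forward; the score is momentum over a scaled vola estimate. All objects
# below are invented and independent of the loop variables.
set.seed(1)
demo.bars  <- 20                                   # bars per "day"
demo.win   <- 5                                    # analogue of period.id.window
demo.ret   <- rnorm(demo.bars, sd = 0.1)
demo.left  <- c(rep(1, demo.win), 2:(demo.bars - demo.win + 1))
demo.mom   <- sapply(1:demo.bars, function(b) sum(demo.ret[demo.left[b]:b]))
demo.rv    <- sapply(1:demo.bars, function(b) mean(demo.ret[demo.left[b]:b]^2))
demo.score <- demo.mom / (sqrt(demo.rv) * sqrt(pmin(demo.win, 1:demo.bars)))
round(demo.score, 2)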
|
/R/CalcScores.R
|
no_license
|
MartinNiemann/Model
|
R
| false | false | 16,849 |
r
|
# ******************************************************************************
# Intraday Overreaction - CalcScores.R *
# ******************************************************************************
# This script reads index and stock csv files to generate a file with *
# overreaction scores and components for each stock *
# Process: *
# 1) load price files for index, risk-free rate, then for each stock in loop *
# 2) generate date-time aligned series *
# 3) calculate rolling robust betas stock vs. index *
# 4) calculate overreaction scores and save to file for each stock in loop *
# 5) clean-up for obsolete variables *
# ******************************************************************************
# ----- DATA -------------------------------------------------------------------
# get available csv data files
data.files <- list.files (path = data.path, pattern = "*[NQA].csv")
data.files.etf <- list.files (path = data.path.etf, pattern = "*[QN].csv")
# read index and risk-free-rate files into xts series and convert to daily
cur.path <- paste (data.path.etf, data.files.etf[1], sep="/")
sp.x <- Load1minDataXTS (cur.path)
sp.d.x <- to.daily (sp.x, drop.time=TRUE, name=NULL)
rf.d <- read.csv ("D:/Doktorarbeit/40 Data/Rf/FRB_H15.csv", header=TRUE,
stringsAsFactors=FALSE, col.names=c("Date", "Rf"), skip=6)
rf.d.x <- xts (as.numeric(rf.d[, 2]), as.Date(strptime(rf.d[, 1],"%m/%d/%Y")))
colnames (rf.d.x) <- "Rf"
rf.d.x <- na.locf (rf.d.x) # replace missing values with prior value
# ----- CORE LOOP --------------------------------------------------------------
# for (i in 1:100) {
for (i in 1:length(data.files)) {
# read current stock file into xts series and convert to daily
cur.path <- paste (data.path, data.files[i], sep="/")
print (paste("[CalcScores.R] Processing ", data.files[i], sep=""))
  cur.symbol <- sub("\\.csv$", "", data.files[i])  # strip extension (anchored regex)
cur.x <- Load1minDataXTS (cur.path)
# skip series with length < one year
if (nrow(cur.x) < daybars * period.oneyear ) {
print (paste("[CalcScores.R] Processing skipped for ", data.files[i],
" -- too short", sep=""))
next
}
cur.d.x <- to.daily (cur.x, drop.time=TRUE, name=NULL)
# create aligned daily time series' for stock, index and Rf
sp.d.x.aligned <- sp.d.x[index(sp.d.x) >= start(cur.d.x) & # cut excess dates
index(sp.d.x) <= end(cur.d.x)] # at boundaries
cur.d.x.aligned <- cur.d.x[index(cur.d.x) %in% index(sp.d.x.aligned)]
sp.d.x.aligned <- sp.d.x.aligned[index(sp.d.x.aligned) %in% index(cur.d.x)]
stopifnot (all.equal (index(sp.d.x.aligned), index(cur.d.x.aligned)))
rf.d.x.aligned <- rf.d.x[index(rf.d.x) %in% index(cur.d.x.aligned)]
stopifnot (all.equal (as.character.Date(index(rf.d.x.aligned)),
as.character.Date(index(cur.d.x.aligned))))
# cut 1-min series using cut.dates from daily cut procedure
cut.dates.sp <- index(sp.d.x)[!(index(sp.d.x) %in% index(sp.d.x.aligned))]
cut.dates.cur <- index(cur.d.x)[!(index(cur.d.x) %in% index(cur.d.x.aligned))]
sp.x.cut <- sp.x[as.character.Date(cut.dates.sp)] # making use of xts
cur.x.cut <- cur.x[as.character.Date(cut.dates.cur)] # by-date selection
sp.x.aligned <- sp.x[!(index(sp.x) %in% index(sp.x.cut))] # cut excess
cur.x.aligned <- cur.x[!(index(cur.x) %in% index(cur.x.cut))] # days
# align / fill missing minutes due to different session-cuts
cur.x.aligned <- AlignTimesAndFillXTS (cur.x.aligned, sp.x.aligned)
stopifnot (all.equal(index(sp.x.aligned), index(cur.x.aligned))) # assert ==
stopifnot (all(is.numeric(cur.x.aligned))) # and no NAs
# calculate robust one-year betas (t0 --> can be applied t+1)
cur.rbetas.d.x <- CalculateRollingBetaXTS ( cur.d.x.aligned, sp.d.x.aligned,
rf.d.x.aligned )
plot (cur.rbetas.d.x, ylab="Beta (Stock ~ S&P500)", xlab="Date", type="l",
main="One-year rolling robust CAPM betas of Stock vs. S&P500 ETF")
# calculate signal components
# ---------------------------
# get day-starts and -ends
day.ends <- endpoints (cur.x.aligned,on='days')[-1] # all elements except 1st
stopifnot (all(diff(day.ends) == 391)) # assert equal day length
# backfill 1st 10 minutes of opening data in case of zero-bars
cur.x.aligned <- BackfillOpeningData(cur.x.aligned, 10)
# calculate 1-minute returns and intraday returns (excluding inter-day return)
ret.x <- 100 * diff (log(cur.x.aligned$Close)) # 1-min returns
idret <- unclass (ret.x) # intraday returns variable
sp.ret.x <- 100 * diff (log(sp.x.aligned$Close)) # same for benchmark
sp.idret <- unclass (sp.ret.x)
idvol <- unclass (cur.x.aligned$Volume)
idvol[which(is.infinite(idvol))] <- 0
# remove inter-day returns from intraday return series
# by replacing daily 1st minute returns with intrabar returns from open
idret[day.ends[1:(length(day.ends))] - daybars+1, 1] <- 100 *
log (cur.x.aligned$Close[day.ends[1:(length(day.ends))] - daybars+1] /
cur.x.aligned$Open[day.ends[1:(length(day.ends))] - daybars+1])
sp.idret[day.ends[1:(length(day.ends))] - daybars+1, 1] <- 100 *
log (sp.x.aligned$Close[day.ends[1:(length(day.ends))] - daybars+1] /
sp.x.aligned$Open[day.ends[1:(length(day.ends))] - daybars+1])
  # clamp the synthetic open returns element-wise to [-1, 1]
  # (pmax/pmin, not max/min, which would collapse the whole vector to one scalar)
  idret[day.ends[1:(length(day.ends))] - daybars+1, 1] <-
    pmax(-1, pmin(1, idret[day.ends[1:(length(day.ends))] - daybars+1, 1]))
# prepare daily calculations - slicing and left-bounded window index
idret.daily <- array (idret, dim=c(daybars, length(day.ends)))
idrv.daily <- idret.daily^2
sp.idret.daily <- array (sp.idret, dim=c(daybars, length(day.ends)))
sp.idrv.daily <- sp.idret.daily^2
idvol.daily <- array (idvol, dim=c(daybars, length(day.ends)))
index.left <- c (rep(1,period.id.window), 2:(daybars-period.id.window+1))
# prepare result variables (all except volume, which remains unsmoothed)
id.daily.dsbmom <- array (NA, dim=c(daybars, length(day.ends)))
id.daily.rv.dsbavg <- array (NA, dim=c(daybars, length(day.ends)))
sp.id.daily.dsbmom <- array (NA, dim=c(daybars, length(day.ends)))
sp.id.daily.rv.dsbavg <- array (NA, dim=c(daybars, length(day.ends)))
# calculate day-start bounded mom and variance
rollsum <- rep (NA, length(day.ends)) # array with NA for each day
rollsum2 <- rollsum
rollsum.sp <- rollsum
rollsum <- idret.daily[1, ] # initialize rolling sums with
rollsum2 <- idrv.daily[1, ] # 1st bars of days
rollsum.sp <- sp.idret.daily[1, ]
rollsum2.sp <- sp.idrv.daily[1, ]
id.daily.dsbmom[1, ] <- rollsum
id.daily.rv.dsbavg[1, ] <- rollsum2
sp.id.daily.dsbmom[1, ] <- rollsum.sp
sp.id.daily.rv.dsbavg[1, ] <- rollsum2.sp
for (i in 2:daybars) {
# for each day, roll sums forward
if (i > period.id.window) {
rollsum <- rollsum - idret.daily[index.left[i-1], ]
rollsum2 <- rollsum2 - idrv.daily[index.left[i-1], ]
rollsum.sp <- rollsum.sp - sp.idret.daily[index.left[i-1], ]
rollsum2.sp <- rollsum2.sp - sp.idrv.daily[index.left[i-1], ]
}
rollsum <- rollsum + idret.daily[i, ]
rollsum2 <- rollsum2 + idrv.daily[i, ]
rollsum.sp <- rollsum.sp + sp.idret.daily[i, ]
rollsum2.sp <- rollsum2.sp + sp.idrv.daily[i, ]
id.daily.dsbmom[i, ] <- rollsum
id.daily.rv.dsbavg[i, ] <- rollsum2 / (i - index.left[i] + 1)
sp.id.daily.dsbmom[i, ] <- rollsum.sp
sp.id.daily.rv.dsbavg[i, ] <- rollsum2.sp / (i - index.left[i] + 1)
}
id.daily.rv.dsbavg[id.daily.rv.dsbavg < 0 & id.daily.rv.dsbavg > -eps ] <- 0
id.daily.vol.dsbavg <- sqrt(id.daily.rv.dsbavg) # vola: sqrt of rv
stopifnot (all(is.finite(id.daily.vol.dsbavg))) # assert integrity
sp.id.daily.rv.dsbavg[sp.id.daily.rv.dsbavg < 0 &
sp.id.daily.rv.dsbavg > -eps ] <- 0
sp.id.daily.vol.dsbavg <- sqrt(sp.id.daily.rv.dsbavg) # vola: sqrt of rv
stopifnot (all(is.finite(sp.id.daily.vol.dsbavg))) # assert integrity
# additional High/Low-based momentum
# using alternative calculation approach based on prices directly
id.daily.dsbmom.hi <- array (NA, dim=c(daybars, length(day.ends)))
id.daily.dsbmom.lo <- array (NA, dim=c(daybars, length(day.ends)))
cur.cl.daily <-
array (coredata(cur.x.aligned$Close), dim=c(daybars, length(day.ends)))
cur.hi.daily <-
array (coredata(cur.x.aligned$High), dim=c(daybars, length(day.ends)))
cur.lo.daily <-
array (coredata(cur.x.aligned$Low), dim=c(daybars, length(day.ends)))
rollref <- rep (NA, length(day.ends)) # array with NA for each day
rollref <- cur.cl.daily[1, ]
rollmin <- rollref
rollmax <- rollref
id.daily.dsbmom.hi[1:3, ] <- 0
id.daily.dsbmom.lo[1:3, ] <- 0
# drop first bar as reference point to filter out opening crap data
index.left2 <- index.left
index.left2[index.left2 == 1] <- 2
for (i in 4:daybars) {
# rollref <- cur.cl.daily[index.left[i], ]
# id.daily.dsbmom.hi[i, ] <- 100 * log (cur.hi.daily[i, ] / rollref)
# id.daily.dsbmom.lo[i, ] <- 100 * log (cur.lo.daily[i, ] / rollref)
rollmin <- apply (cur.lo.daily[(index.left[i]):(i-1), ], 2, min)
rollmax <- apply (cur.hi.daily[(index.left[i]):(i-1), ], 2, max)
id.daily.dsbmom.hi[i, ] <- 100 * log (cur.hi.daily[i, ] / rollmin)
id.daily.dsbmom.lo[i, ] <- 100 * log (cur.lo.daily[i, ] / rollmax)
}
# calculate day-time average volatility and volume
# 90? change of logic: calculate day-time average by looping through
# all days, calculating averages for all bars-in-day in parallel
# ATTENTION: id.daily.vol.alt.dta is a simple dta of 1-min squared returns
index.left2 <- c(rep(1,period.days), 2:(length(day.ends)-period.days+1))
rollsum <- id.daily.vol.dsbavg[, 1] # initialize rollsum with 1st day
rollsum.v <- idvol.daily[, 1] # initialize rollsum.v with 1st day
rollsum.alt <- idrv.daily[, 1]
rollsum.sp <- sp.id.daily.vol.dsbavg[, 1]
id.daily.vol.dsbavg.dta <- array (c(NA), # result arrays
dim=c(nrow(id.daily.vol.dsbavg),
ncol(id.daily.vol.dsbavg)))
id.daily.volume.dta <- id.daily.vol.dsbavg.dta
sp.id.daily.vol.dsbavg.dta <- id.daily.vol.dsbavg.dta
id.daily.vol.alt.dta <- id.daily.vol.dsbavg.dta
for (j in 2:length(day.ends)) {
rollsum <- rollsum + id.daily.vol.dsbavg[, j]
rollsum.v <- rollsum.v + idvol.daily[, j]
rollsum.sp <- rollsum.sp + sp.id.daily.vol.dsbavg[, j]
rollsum.alt <- rollsum.alt + idrv.daily[, j]
if (j > period.days) {
rollsum <- rollsum - id.daily.vol.dsbavg[, index.left2[j-1]]
rollsum.v <- rollsum.v - idvol.daily[, index.left2[j-1]]
rollsum.sp <- rollsum.sp - sp.id.daily.vol.dsbavg[, index.left2[j-1]]
rollsum.alt <- rollsum.alt - idrv.daily[, index.left2[j-1]]
}
if (j >= period.days) {
# setting result array here results in NA for 1st period.days
id.daily.vol.dsbavg.dta[, j] <- rollsum / period.days
id.daily.volume.dta[, j] <- rollsum.v / period.days
sp.id.daily.vol.dsbavg.dta[, j] <- rollsum.sp / period.days
id.daily.vol.alt.dta[, j] <- rollsum.alt / period.days
}
}
id.daily.vol.alt.dta[id.daily.vol.alt.dta < 0 & id.daily.vol.alt.dta > -eps ] <- 0
id.daily.vol.alt.dta <- sqrt(id.daily.vol.alt.dta)
stopifnot (all(is.finite(id.daily.vol.alt.dta[,(period.days+1):length(day.ends)])))
# calculate abnormal (beta-adjusted) stock momentum
cur.rbetas.d.x.shifted <- cur.rbetas.d.x
coredata(cur.rbetas.d.x.shifted) <-
append (cur.rbetas.d.x[-nrow(cur.rbetas.d.x)],1,0)
cur.rbetas.d.x.shifted <- na.locf (cur.rbetas.d.x.shifted)
# NAs at start replaced by 1s, and shifted back by 1 day
id.daily.dsbmom.e <- array(c(NA), dim=c(daybars, length(day.ends)))
for (j in 1:length(day.ends)) {
id.daily.dsbmom.e[, j] <- sp.id.daily.dsbmom[, j] *
cur.rbetas.d.x.shifted[j]
}
id.daily.dsbmom.a <- id.daily.dsbmom - id.daily.dsbmom.e
# calculate day-start bounded scaled version of rv
id.daily.vol.dsbavg.dta.scaled <- id.daily.vol.dsbavg.dta
sp.id.daily.vol.dsbavg.dta.scaled <- sp.id.daily.vol.dsbavg.dta
for (i in 2:daybars) {
id.daily.vol.dsbavg.dta.scaled[i,] <- sqrt(min(60, i)) *
id.daily.vol.dsbavg.dta[i,]
sp.id.daily.vol.dsbavg.dta.scaled[i,] <- sqrt(min(60, i)) *
sp.id.daily.vol.dsbavg.dta[i,]
}
# calculate daily smoothed version of 1-minute dta.rv (.alt calculation)
id.daily.vol.alt.dta2 <- id.daily.vol.alt.dta
for (j in (period.days+1):length(day.ends)) {
id.daily.vol.alt.dta2[,j] <- lowess(id.daily.vol.alt.dta[,j], f=.05)$y
}
# convert daily-slice arrays back to time series
id.dsbmom <- array (id.daily.dsbmom, dim=c(nrow(id.daily.dsbmom) *
ncol(id.daily.dsbmom),1))
id.dsbmom.a <- array (id.daily.dsbmom.a, dim=c(nrow(id.daily.dsbmom.a) *
ncol(id.daily.dsbmom.a),1))
id.rv.dta <- array (id.daily.vol.dsbavg.dta,
dim=c(nrow(id.daily.vol.dsbavg.dta) *
ncol(id.daily.vol.dsbavg.dta),1))
id.rv.dta.sc <- array (id.daily.vol.dsbavg.dta.scaled,
dim=c(nrow(id.daily.vol.dsbavg.dta.scaled) *
ncol(id.daily.vol.dsbavg.dta.scaled),1))
id.rv.1min.dta <- array (id.daily.vol.alt.dta2,
dim=c(nrow(id.daily.vol.alt.dta2) *
ncol(id.daily.vol.alt.dta2),1))
id.volume.dta <- array (id.daily.volume.dta,
dim=c(nrow(id.daily.volume.dta) *
ncol(id.daily.volume.dta),1))
sp.id.dsbmom <- array (sp.id.daily.dsbmom, dim=c(nrow(sp.id.daily.dsbmom) *
ncol(sp.id.daily.dsbmom),1))
sp.id.rv.dta <- array (sp.id.daily.vol.dsbavg.dta,
dim=c(nrow(sp.id.daily.vol.dsbavg.dta) *
ncol(sp.id.daily.vol.dsbavg.dta),1))
sp.id.rv.dta.sc <- array (sp.id.daily.vol.dsbavg.dta.scaled,
dim=c(nrow(sp.id.daily.vol.dsbavg.dta.scaled) *
ncol(sp.id.daily.vol.dsbavg.dta.scaled),1))
id.dsbmom.hi <- array (id.daily.dsbmom.hi, dim=c(nrow(id.daily.dsbmom.hi) *
ncol(id.daily.dsbmom.hi),1))
id.dsbmom.lo <- array (id.daily.dsbmom.lo, dim=c(nrow(id.daily.dsbmom.lo) *
ncol(id.daily.dsbmom.lo),1))
# shift dta vola and volume arrays back 1 day
# (cut last day and add 1 NA day at front)
length(id.rv.dta) <- length(id.rv.dta) - daybars
id.rv.dta <- append (id.rv.dta, rep(NA, daybars), 0)
length(id.rv.dta.sc) <- length(id.rv.dta.sc) - daybars
id.rv.dta.sc <- append (id.rv.dta.sc, rep(NA, daybars), 0)
length(id.volume.dta) <- length(id.volume.dta) - daybars
id.volume.dta <- append (id.volume.dta, rep(NA, daybars), 0)
length(sp.id.rv.dta) <- length(sp.id.rv.dta) - daybars
sp.id.rv.dta <- append (sp.id.rv.dta, rep(NA, daybars), 0)
length(sp.id.rv.dta.sc) <- length(sp.id.rv.dta.sc) - daybars
sp.id.rv.dta.sc <- append (sp.id.rv.dta.sc, rep(NA, daybars), 0)
# calculate score
id.score <- id.dsbmom / id.rv.dta.sc
id.score.a <- id.dsbmom.a / id.rv.dta.sc
id.score.hi <- id.dsbmom.hi / id.rv.dta.sc
id.score.lo <- id.dsbmom.lo / id.rv.dta.sc
  # ATTENTION: dividing by id.rv.dta.sc (instead of id.rv.dta * sqrt(60))
  #            allows detecting events right at the day-start; from roughly
  #            10 mins after the open the dta vola estimate is quite stable.
# save score file for current stock
# - aligned 1-min and daily prices
# - betas and id mom, score
# - volume & vola dtas (stock & spx)
save (cur.x.aligned, cur.d.x.aligned, sp.x.aligned, sp.d.x.aligned,
cur.rbetas.d.x.shifted, id.dsbmom, id.score, # id.dsbmom.a, id.score.a,
id.volume.dta, id.rv.dta, id.rv.dta.sc, id.rv.1min.dta, sp.id.rv.dta.sc,
id.score.hi, id.score.lo,
file=paste(data.path.out, cur.symbol, ".sc2", sep=""))
}
beep()
# ----- CLEAN-UP ---------------------------------------------------------------
gc()
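# --- Illustration (added, hypothetical): one way to estimate a rolling robust beta
# CalculateRollingBetaXTS() is project-specific and not shown here; the sketch
# below only illustrates the general idea (robust regression of stock returns
# on index returns over a trailing window) on simulated data using MASS::rlm.
demo.n    <- 300
demo.mkt  <- rnorm(demo.n, sd = 0.01)
demo.stk  <- 1.2 * demo.mkt + rnorm(demo.n, sd = 0.01)
demo.win2 <- 250                                   # roughly one trading year
demo.beta <- rep(NA_real_, demo.n)
for (k in demo.win2:demo.n) {
  idx <- (k - demo.win2 + 1):k
  demo.beta[k] <- coef(MASS::rlm(demo.stk[idx] ~ demo.mkt[idx], maxit = 50))[2]
}
tail(round(demo.beta, 2), 3)                       # should hover around 1.2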
|
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.07095840081598e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result)
|
/epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926734-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 1,102 |
r
|
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.07095840081598e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result)
|
# code workspace - eric gagnon
# Setup ##############################################################################
#devtools::install_github("hadley/dplyr")
#devtools::install_github("mdsumner/spdplyr")
config <- quote({
Sys.setenv(MAKE = 'make -j 8')
library(MASS)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(stringr)
library(magrittr)
library(data.table)
library(lubridate)
library(RPostgreSQL)
library(plotly)
#library(rbokeh)
library(jsonlite)
library(htmltools)
library(glmnet)
library(epitools)
library(broom)
library(lme4)
library(sjPlot)
library(parallel)
library(car)
library(DescTools)
library(outliers)
  library(corrgram)
  library(grid)        # grid.newpage() used in the EDA / output sections
  library(gridExtra)   # marrangeGrob(), tableGrob(), grid.arrange() used below
})
eval( config )
# Load & Prep Data #######################################################################################
# Shot Dataset
d.shots <-
fread( 'data/shot_logs-1.csv') %>%
.[ , Scored := case_when( SHOT_RESULT == 'made' ~ TRUE ,
SHOT_RESULT == 'missed' ~ FALSE ) ]
# Update Game clock to represent number of seconds (numeric)
# bug with using some lubridate functions inside data.table - using tmp var as workaround
tmp <- d.shots$GAME_CLOCK %>% ms %>% as.numeric()
d.shots[ , GAME_CLOCK := { tmp } ]
rm(tmp)
# Player Dataset :
# create player_name column to join with shot data
# manually update player_name column for mismatches between two sources
d.players <-
fread('data/players.csv')%>%
.[ , player_name := Player ] %>%
.[ Player == 'allen crabbe' , player_name := 'alan crabbe' ] %>%
.[ Player == 'steven adams' , player_name := 'steve adams' ] %>%
.[ Player == 'dwyane wade' , player_name := 'dwayne wade' ] %>%
.[ Player == 'danilo gallinari' , player_name := 'danilo gallinai' ] %>%
.[ Player == 'dirk nowitzki' , player_name := 'dirk nowtizski' ] %>%
.[ Player == 'tim hardaway' , player_name := 'time hardaway jr' ] %>%
.[ Player == 'beno udrih' , player_name := 'beno urdih' ] %>%
.[ Player == 'al-farouq aminu' , player_name := 'al farouq aminu' ] %>%
.[ Player == 'jj barea' , player_name := 'jose juan barea' ] %>%
.[ Player == 'monta ellis' , player_name := 'mnta ellis' ] %>%
.[ Player == 'nerlens noel' , player_name := 'nerles noel' ] %>%
.[ Player == 'jimmer fredette' , player_name := 'jimmer dredette' ] %>%
.[ Player == 'joe ingles' , player_name := 'jon ingles' ] %>%
.[ Player == 'j.j. hickson' , player_name := 'jj hickson' ]
# Confirm no players from the shots data set are missing from the player data
# setdiff( { d[ , unique( player_name ) ] } , { d.players[ , unique( player_name )]})
# Create Working data set
d <-
d.shots[ d.players , , on = c( 'player_name' ) , nomatch = 0 ]
tmp.2pt.dist.clusters <-
d[ PTS_TYPE == 2 , { kmeans( SHOT_DIST , 10 , iter.max = 20 ) } ] %$%
centers[ cluster , 1] %>%
unname %>%
factor
levels(tmp.2pt.dist.clusters) <-
tmp.2pt.dist.clusters %>%
levels %>%
as.numeric %>%
signif( digits = 3 ) %>%
{ paste0( '2 pt: ' , . ) }
tmp.3pt.dist.clusters <-
d[ PTS_TYPE == 3 , { kmeans( SHOT_DIST , 10 , iter.max = 20 ) } ] %$%
centers[ cluster , 1] %>%
unname %>%
factor
levels(tmp.3pt.dist.clusters) <-
tmp.3pt.dist.clusters %>%
levels %>%
as.numeric %>%
signif( digits = 3 ) %>%
{ paste0( '3 pt: ' , . ) }
tmp.dist.clusters <-
d[ , { kmeans( SHOT_DIST , 10 , iter.max = 20 ) } ] %$%
centers[ cluster , 1] %>%
unname %>%
factor
levels(tmp.dist.clusters) <-
tmp.dist.clusters %>%
levels %>%
as.numeric %>%
signif( digits = 3 )
d %<>%
.[ , .(
Points = PTS ,
Scored ,
ShotResult = factor( SHOT_RESULT ) ,
Game = as.factor(GAME_ID) ,
Player = as.factor( player_name) ,
Position = as.factor(Pos) ,
ShotDistance = SHOT_DIST ,
ClosestDefenderDistance = CLOSE_DEF_DIST ,
ShotClock = SHOT_CLOCK ,
Dribbles = DRIBBLES ,
TouchTime = TOUCH_TIME ,
GameClock = GAME_CLOCK ,
ShotType = { as.factor( PTS_TYPE ) }
) ] %>%
.[ ShotType == "2" , ShotTypeDistFactor := tmp.2pt.dist.clusters ] %>%
.[ ShotType == "3" , ShotTypeDistFactor := tmp.3pt.dist.clusters ] %>%
.[ , ShotDistFactor := tmp.dist.clusters ] %>%
.[ ShotDistance < 15 , ShotDistanceClass := factor( 'Close (<15ft)')] %>%
.[ ShotDistance >= 15 , ShotDistanceClass := factor( 'Far (>=15ft)')] %>%
.[ , PositionDistClass := factor( paste0( Position , ' - ' , ShotDistanceClass ) ) ]
#Cleanup
rm( d.players , d.shots , tmp.dist.clusters , tmp.2pt.dist.clusters , tmp.3pt.dist.clusters )
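# --- Illustration (added, toy data): the kmeans-based distance binning above --
# A minimal sketch of the idea used for ShotDistFactor: cluster a 1-d distance
# variable with kmeans, then label each observation with its (rounded) cluster
# centre so the factor levels read as representative distances. Data invented.
set.seed(42)
demo.dist <- runif(500, 0, 28)
demo.km   <- kmeans(demo.dist, centers = 5, iter.max = 20)
demo.bin  <- factor(signif(demo.km$centers[demo.km$cluster, 1], 3))
table(demo.bin)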
## EDA Visuals ########################################################################
# Points per Shot vs Points
grid.newpage()
d %>%
select( Player , Game , Position , Points , Scored ) %>%
group_by( Player , Game , Position ) %>%
summarise( PointsPerGame = sum( Points ) , PointsPerShot = mean( Points ) ) %>% {
ggplot( . , aes( PointsPerGame , PointsPerShot , colour = Position ) ) +
geom_point() +
geom_density_2d( ) +
ggtitle( 'Points Per Shot vs Points Per Game' , subtitle = 'Per Game - By Position') } %>%
ggplotly
ggsave(
'eda-pps-ppg.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# Baskets per Shot vs Baskets
grid.newpage()
d %>%
select( Player , Game , Position , Points , Scored ) %>%
group_by( Player , Game , Position ) %>%
summarise( BasketsPerGame = sum( Scored ) , BasketsPerShot = mean( Scored ) ) %>% {
ggplot( . , aes( BasketsPerGame , BasketsPerShot , colour = Position ) ) +
geom_point() +
geom_density_2d( ) +
ggtitle( 'Baskets Per Shot vs Baskets Per Game' , subtitle = 'Per Game - By Position') } %>%
ggplotly
ggsave(
'eda-bps-bpg.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# Baskets vs Shots
grid.newpage()
d %>%
select( Player , Game , Position , Points , Scored ) %>%
group_by( Player , Game , Position ) %>%
summarise( Baskets = sum( Scored ) , Shots = n() ) %>% {
ggplot( . , aes( Shots , Baskets , colour = Position ) ) +
geom_point() +
geom_smooth( method = 'loess' ) +
ggtitle( 'Baskets vs Shots' , subtitle = 'Per Game - By Position') } %>%
ggplotly
ggsave(
'eda-baskets-shots.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
## Histograms
grid.newpage()
d %>%
.[ , .( Scored , ShotType , ShotDistance , ClosestDefenderDistance , ShotClock , Dribbles = as.numeric( Dribbles ) , TouchTime , GameClock ) ] %>%
.[ !is.na( ShotClock ) ] %>%
.[ TouchTime >= 0 ] %>%
melt( id.vars = c('Scored' , 'ShotType' ) ) %>%
.[ , variable := factor( variable ) ] %>%
{ ggplot( . , aes( value , colour = ShotType , group = ShotType ) ) + facet_wrap( ~ variable , scales = 'free' ) + geom_histogram( bins = 30 ) } %>%
ggplotly
ggsave(
'eda-hist-by-shot-type.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
grid.newpage()
d %>%
.[ , .( Scored , ShotType , ShotDistance , ClosestDefenderDistance , ShotClock , Dribbles = as.numeric( Dribbles ) , TouchTime , GameClock ) ] %>%
.[ !is.na( ShotClock ) ] %>%
.[ TouchTime >= 0 ] %>%
melt( id.vars = c('Scored' , 'ShotType' ) ) %>%
.[ , variable := factor( variable ) ] %>%
{ ggplot( . , aes( value , colour = Scored , group = Scored ) ) + facet_wrap( ~ variable , scales = 'free' ) + geom_histogram( bins = 30 ) } %>%
ggplotly
ggsave(
'eda-hist-by-scored.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
grid.newpage()
d %>%
.[ , .( Scored , ShotType , ShotDistance , ClosestDefenderDistance , ShotClock ,
Dribbles = as.numeric( Dribbles ) , TouchTime , GameClock , ShotDistanceClass , PositionDistClass ) ] %>%
.[ !is.na( ShotClock ) ] %>%
.[ TouchTime >= 0 ] %>%
melt( id.vars = c('Scored' , 'ShotType' , 'ShotDistanceClass' , 'PositionDistClass' ) ) %>%
.[ , variable := factor( variable ) ] %>%
{ ggplot( . , aes( value , colour = ShotDistanceClass , group = ShotDistanceClass ) ) + facet_wrap( ~ variable , scales = 'free' ) + geom_histogram( bins = 30 ) } %>%
ggplotly
ggsave(
'eda-hist-by-shot-dist-type.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# Model ############################################################################################
# https://rstudio-pubs-static.s3.amazonaws.com/33653_57fc7b8e5d484c909b615d8633c01d51.html
# https://cran.r-project.org/web/packages/lme4/vignettes/lmer.pdf
file.remove( 'working.RData')
save.image( file = 'working.RData' )
# glm
#
# cl <- makeCluster(getOption("cl.cores", { min( detectCores() , 10 ) } ) )
# l.m.glm <-
# parSapply(
# cl ,
# d[ , unique( PositionDistClass ) ] ,
# function(x) {
# # prep each environment
# load( 'working.RData')
# eval( config )
#
# d[ PositionDistClass == x , ] %>%
# .[ !is.na( ShotClock ) , ] %$%
# glm(
# Scored ~ ShotDistance * ClosestDefenderDistance ,
# family = binomial( link = 'logit' )
# )
# } , simplify = F , USE.NAMES = T )
# names( l.m.glm ) <- { d[ , unique( PositionDistClass ) ] }
# save.image( file = 'working.RData' )
# stopCluster( cl )
# rm( cl )
#
# glm with random effects
cl <- makeCluster(getOption("cl.cores", { min( detectCores() , 10 ) } ) )
l.m.glmer <-
parSapply(
cl ,
d[ , unique( PositionDistClass ) ] ,
function(x) {
# prep each environment
load( 'working.RData')
eval( config )
d[ PositionDistClass == x , ] %>%
.[ !is.na( ShotClock ) , ] %$%
glmer(
Scored ~ ShotDistance * ClosestDefenderDistance + ( ShotDistance | Player / Game ) + ( ClosestDefenderDistance | Player / Game ) ,
family = binomial( link = 'logit' ) ,
control = glmerControl(optimizer= c( "bobyqa" , "bobyqa" ) ,
optCtrl=list(maxfun=2e5) )
)
} , simplify = F )
names( l.m.glmer ) <- d[ , unique( PositionDistClass ) ]
save.image( file = 'working.RData' )
stopCluster( cl )
rm( cl )
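# --- Illustration (added, simulated data): the mixed-logit structure above ----
# A small sketch of a logistic mixed model with a random slope, in the spirit
# of the per-position fits: simulated shooters differ in how distance affects
# their make probability. Purely illustrative; names and sizes are made up.
set.seed(7)
demo.nplayer <- 20
demo.nshot   <- 60
demo.df <- data.frame(
  player = factor(rep(seq_len(demo.nplayer), each = demo.nshot)),
  dist   = runif(demo.nplayer * demo.nshot, 1, 25)
)
demo.slope   <- rnorm(demo.nplayer, mean = -0.12, sd = 0.04)
demo.eta     <- 1 + demo.slope[as.integer(demo.df$player)] * demo.df$dist
demo.df$made <- rbinom(nrow(demo.df), 1, plogis(demo.eta))
demo.fit <- glmer(made ~ dist + (dist | player),
                  data = demo.df, family = binomial(link = "logit"),
                  control = glmerControl(optimizer = "bobyqa"))
fixef(demo.fit)   # fixed-effect distance slope should be near -0.12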
# Results & Visuals ############################################################################
# output for each model
l.m.summaries <-
l.m.glmer %>% sapply( summary , simplify = F )
# anova for each model
l.m.anovas <-
l.m.glmer %>% sapply( anova , simplify = F )
# data.frame of model results
d.m.results <-
l.m.glmer %>% names %>% sapply( function(x) {
data.frame( PositionShotDistClass = x , tidy( l.m.glmer[[x]] ) )
} , simplify = F ) %>%
bind_rows() %>%
mutate(
    Position = { str_replace_all( PositionShotDistClass , '^([[:alpha:]]{1,2})(.*)$' , '\\1')} ,
    ShotDistClass = { str_replace_all( PositionShotDistClass , '^([[:alpha:]]{1,2}) - (.*)$' , '\\2')}
)
# data.frame of anova results
d.m.anovas <-
l.m.glmer %>% names %>% sapply( function(x) {
data.frame( PositionShotDistClass = x , tidy( anova( l.m.glmer[[x]] ) ) )
} , simplify = F ) %>%
bind_rows() %>%
mutate(
    Position = { str_replace_all( PositionShotDistClass , '^([[:alpha:]]{1,2})(.*)$' , '\\1')} ,
    ShotDistClass = { str_replace_all( PositionShotDistClass , '^([[:alpha:]]{1,2}) - (.*)$' , '\\2')}
)
save.image( file = 'working.RData' )
# Plot Fixed Effects
l.p.fe <-
l.m.glmer %>% names %>%
sapply( function(x){
sjp.glmer( l.m.glmer[[x]] , type = 'fe' , title = paste0( x , ': Fixed Effects' ) , prnt.plot = F )
} , simplify = F )
names(l.p.fe) <- paste0( names( l.p.fe ) , ': Fixed Effects' )
# Plot Fixed Effects Slopes
l.p.fe.slope <-
l.m.glmer %>% names %>%
sapply( function(x){
sjp.glmer( l.m.glmer[[x]] , type = 'fe.slope' , title = paste0( x , ': Fixed Effects Slopes' ) , prnt.plot = F )
} , simplify = F )
names(l.p.fe.slope) <- paste0( names( l.p.fe.slope ) , ': Fixed Effects Slopes' )
# Make combined list of ggplots
l.p <-
c(
l.p.fe %>% names %>% sapply( function(x){
l.p.fe[[x]][['plot']]
} , simplify = F )
,
l.p.fe.slope %>% names %>% sapply( function(x){
l.p.fe.slope[[x]][['plot']]
} , simplify = F )
)
d[ , unique( Position ) ] %>% as.character %>%
sapply( function(x) {
l <- list(
{ l.p[[ paste0( x , ' - Close (<15ft): Fixed Effects') ]] } ,
{ l.p[[ paste0( x , ' - Far (>=15ft): Fixed Effects') ]] +
theme( axis.text.y = element_blank() ,
axis.ticks.y = element_blank() )
}
)
grid.newpage()
p <- marrangeGrob( l , nrow = 1 , ncol = 2 , top = NULL )
ggsave( paste0( x , '-Fixed-Effects.png' ) ,
p ,
width = 6 ,
height = 3 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# FE Slopes
l <- list(
{ l.p[[ paste0( x , ' - Close (<15ft): Fixed Effects Slopes') ]] } ,
{ l.p[[ paste0( x , ' - Far (>=15ft): Fixed Effects Slopes') ]] +
theme( axis.text.y = element_blank() ,
axis.ticks.y = element_blank() )
}
)
grid.newpage()
p <- marrangeGrob( l , nrow = 1 , ncol = 2 , top = NULL )
ggsave( paste0( x , '-Fixed-Effects-Slopes.png' ) ,
p ,
width = 6 ,
height = 3 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
} , simplify = F )
save.image( file = 'working.RData' )
# Save Tabled Outputs to png
x <-
d.m.results %>%
filter( !is.na( p.value ) ) %>%
select( PositionShotDistClass , Term = term , Estimate = estimate , PValue = p.value , Position , DistanceGroup = ShotDistClass ) %>%
mutate( Estimate = sprintf( '%.3f' , Estimate ) ) %>%
dcast( DistanceGroup + Term ~ Position , value.var = 'Estimate' )
png("estimates.png" , width = 7.6 , height = 2.5 , units = 'in' , res = 100 , pointsize = 6 )
p<-tableGrob(x , rows = NULL )
grid.arrange(p)
dev.off()
x <-
d.m.results %>%
filter( !is.na( p.value ) ) %>%
select( PositionShotDistClass , Term = term , Estimate = estimate , PValue = p.value , Position , DistanceGroup = ShotDistClass ) %>%
mutate( PValue = sprintf( '%.3f' , PValue ) ) %>%
dcast( DistanceGroup + Term ~ Position , value.var = 'PValue' )
png("pvalues.png" , width = 7.6 , height = 2.5 , units = 'in' , res = 100 , pointsize = 6 )
p<-tableGrob(x , rows = NULL )
grid.arrange(p)
dev.off()
save( d , file = 'd.RData' , envir = .GlobalEnv )
save( d.m.anovas , file = 'd.m.anovas.RData' , envir = .GlobalEnv )
save( d.m.results , file = 'd.m.results.RData' , envir = .GlobalEnv )
save( l.m.anovas , file = 'l.m.anovas.RData' , envir = .GlobalEnv )
save( l.m.glmer , file = 'l.m.glmer.RData' , envir = .GlobalEnv )
save( l.m.summaries , file = 'l.m.summaries.RData' , envir = .GlobalEnv )
save( l.p , file = 'l.p.RData' , envir = .GlobalEnv)
save( d.m.results , file = 'd.m.results.RData' , envir = .GlobalEnv)
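# --- Illustration (added): reading the logit estimates as odds ratios ---------
# The glmer estimates above are on the log-odds scale; exponentiating gives the
# multiplicative change in the odds of scoring per one-unit increase. A small
# sketch on the results table built above (column names as produced by tidy()).
d.m.results %>%
  filter( !is.na( p.value ) ) %>%
  mutate( OddsRatio = exp( estimate ) ) %>%
  select( PositionShotDistClass , Term = term , Estimate = estimate , OddsRatio )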
|
/codeEricGagnon.R
|
permissive
|
ericsgagnon/psustat504groupproject
|
R
| false | false | 15,777 |
r
|
# code workspace - eric gagnon
# Setup ##############################################################################
#devtools::install_github("hadley/dplyr")
#devtools::install_github("mdsumner/spdplyr")
config <- quote({
Sys.setenv(MAKE = 'make -j 8')
library(MASS)
library(tidyverse)
library(dplyr)
library(ggplot2)
library(stringr)
library(magrittr)
library(data.table)
library(lubridate)
library(RPostgreSQL)
library(plotly)
#library(rbokeh)
library(jsonlite)
library(htmltools)
library(glmnet)
library(epitools)
library(broom)
library(lme4)
library(sjPlot)
library(parallel)
library(car)
library(DescTools)
library(outliers)
  library(corrgram)
  library(grid)        # grid.newpage() used in the EDA / output sections
  library(gridExtra)   # marrangeGrob(), tableGrob(), grid.arrange() used below
})
eval( config )
# Load & Prep Data #######################################################################################
# Shot Dataset
d.shots <-
fread( 'data/shot_logs-1.csv') %>%
.[ , Scored := case_when( SHOT_RESULT == 'made' ~ TRUE ,
SHOT_RESULT == 'missed' ~ FALSE ) ]
# Update Game clock to represent number of seconds (numeric)
# bug with using some lubridate functions inside data.table - using tmp var as workaround
tmp <- d.shots$GAME_CLOCK %>% ms %>% as.numeric()
d.shots[ , GAME_CLOCK := { tmp } ]
rm(tmp)
# Player Dataset :
# create player_name column to join with shot data
# manually update player_name column for mismatches between two sources
d.players <-
fread('data/players.csv')%>%
.[ , player_name := Player ] %>%
.[ Player == 'allen crabbe' , player_name := 'alan crabbe' ] %>%
.[ Player == 'steven adams' , player_name := 'steve adams' ] %>%
.[ Player == 'dwyane wade' , player_name := 'dwayne wade' ] %>%
.[ Player == 'danilo gallinari' , player_name := 'danilo gallinai' ] %>%
.[ Player == 'dirk nowitzki' , player_name := 'dirk nowtizski' ] %>%
.[ Player == 'tim hardaway' , player_name := 'time hardaway jr' ] %>%
.[ Player == 'beno udrih' , player_name := 'beno urdih' ] %>%
.[ Player == 'al-farouq aminu' , player_name := 'al farouq aminu' ] %>%
.[ Player == 'jj barea' , player_name := 'jose juan barea' ] %>%
.[ Player == 'monta ellis' , player_name := 'mnta ellis' ] %>%
.[ Player == 'nerlens noel' , player_name := 'nerles noel' ] %>%
.[ Player == 'jimmer fredette' , player_name := 'jimmer dredette' ] %>%
.[ Player == 'joe ingles' , player_name := 'jon ingles' ] %>%
.[ Player == 'j.j. hickson' , player_name := 'jj hickson' ]
# Confirm no players from the shots data set are missing from the player data
# setdiff( { d[ , unique( player_name ) ] } , { d.players[ , unique( player_name )]})
# Create Working data set
d <-
d.shots[ d.players , , on = c( 'player_name' ) , nomatch = 0 ]
tmp.2pt.dist.clusters <-
d[ PTS_TYPE == 2 , { kmeans( SHOT_DIST , 10 , iter.max = 20 ) } ] %$%
centers[ cluster , 1] %>%
unname %>%
factor
levels(tmp.2pt.dist.clusters) <-
tmp.2pt.dist.clusters %>%
levels %>%
as.numeric %>%
signif( digits = 3 ) %>%
{ paste0( '2 pt: ' , . ) }
tmp.3pt.dist.clusters <-
d[ PTS_TYPE == 3 , { kmeans( SHOT_DIST , 10 , iter.max = 20 ) } ] %$%
centers[ cluster , 1] %>%
unname %>%
factor
levels(tmp.3pt.dist.clusters) <-
tmp.3pt.dist.clusters %>%
levels %>%
as.numeric %>%
signif( digits = 3 ) %>%
{ paste0( '3 pt: ' , . ) }
tmp.dist.clusters <-
d[ , { kmeans( SHOT_DIST , 10 , iter.max = 20 ) } ] %$%
centers[ cluster , 1] %>%
unname %>%
factor
levels(tmp.dist.clusters) <-
tmp.dist.clusters %>%
levels %>%
as.numeric %>%
signif( digits = 3 )
d %<>%
.[ , .(
Points = PTS ,
Scored ,
ShotResult = factor( SHOT_RESULT ) ,
Game = as.factor(GAME_ID) ,
Player = as.factor( player_name) ,
Position = as.factor(Pos) ,
ShotDistance = SHOT_DIST ,
ClosestDefenderDistance = CLOSE_DEF_DIST ,
ShotClock = SHOT_CLOCK ,
Dribbles = DRIBBLES ,
TouchTime = TOUCH_TIME ,
GameClock = GAME_CLOCK ,
ShotType = { as.factor( PTS_TYPE ) }
) ] %>%
.[ ShotType == "2" , ShotTypeDistFactor := tmp.2pt.dist.clusters ] %>%
.[ ShotType == "3" , ShotTypeDistFactor := tmp.3pt.dist.clusters ] %>%
.[ , ShotDistFactor := tmp.dist.clusters ] %>%
.[ ShotDistance < 15 , ShotDistanceClass := factor( 'Close (<15ft)')] %>%
.[ ShotDistance >= 15 , ShotDistanceClass := factor( 'Far (>=15ft)')] %>%
.[ , PositionDistClass := factor( paste0( Position , ' - ' , ShotDistanceClass ) ) ]
#Cleanup
rm( d.players , d.shots , tmp.dist.clusters , tmp.2pt.dist.clusters , tmp.3pt.dist.clusters )
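# --- Illustration (added, toy values): the GAME_CLOCK conversion above --------
# lubridate::ms() parses "minutes:seconds" strings and as.numeric() then returns
# the remaining seconds, which is what the tmp-variable workaround stores back
# into the data.table. Clock strings below are invented.
demo.clock <- c("11:58", "0:07", "5:30")
as.numeric(ms(demo.clock))   # 718, 7, 330 seconds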
## EDA Visuals ########################################################################
# Points per Shot vs Points
grid.newpage()
d %>%
select( Player , Game , Position , Points , Scored ) %>%
group_by( Player , Game , Position ) %>%
summarise( PointsPerGame = sum( Points ) , PointsPerShot = mean( Points ) ) %>% {
ggplot( . , aes( PointsPerGame , PointsPerShot , colour = Position ) ) +
geom_point() +
geom_density_2d( ) +
ggtitle( 'Points Per Shot vs Points Per Game' , subtitle = 'Per Game - By Position') } %>%
ggplotly
ggsave(
'eda-pps-ppg.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# Baskets per Shot vs Baskets
grid.newpage()
d %>%
select( Player , Game , Position , Points , Scored ) %>%
group_by( Player , Game , Position ) %>%
summarise( BasketsPerGame = sum( Scored ) , BasketsPerShot = mean( Scored ) ) %>% {
ggplot( . , aes( BasketsPerGame , BasketsPerShot , colour = Position ) ) +
geom_point() +
geom_density_2d( ) +
ggtitle( 'Baskets Per Shot vs Baskets Per Game' , subtitle = 'Per Game - By Position') } %>%
ggplotly
ggsave(
'eda-bps-bpg.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# Baskets vs Shots
grid.newpage()
d %>%
select( Player , Game , Position , Points , Scored ) %>%
group_by( Player , Game , Position ) %>%
summarise( Baskets = sum( Scored ) , Shots = n() ) %>% {
ggplot( . , aes( Shots , Baskets , colour = Position ) ) +
geom_point() +
geom_smooth( method = 'loess' ) +
ggtitle( 'Baskets vs Shots' , subtitle = 'Per Game - By Position') } %>%
ggplotly
ggsave(
'eda-baskets-shots.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
## Histograms
grid.newpage()
d %>%
.[ , .( Scored , ShotType , ShotDistance , ClosestDefenderDistance , ShotClock , Dribbles = as.numeric( Dribbles ) , TouchTime , GameClock ) ] %>%
.[ !is.na( ShotClock ) ] %>%
.[ TouchTime >= 0 ] %>%
melt( id.vars = c('Scored' , 'ShotType' ) ) %>%
.[ , variable := factor( variable ) ] %>%
{ ggplot( . , aes( value , colour = ShotType , group = ShotType ) ) + facet_wrap( ~ variable , scales = 'free' ) + geom_histogram( bins = 30 ) } %>%
ggplotly
ggsave(
'eda-hist-by-shot-type.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
grid.newpage()
d %>%
.[ , .( Scored , ShotType , ShotDistance , ClosestDefenderDistance , ShotClock , Dribbles = as.numeric( Dribbles ) , TouchTime , GameClock ) ] %>%
.[ !is.na( ShotClock ) ] %>%
.[ TouchTime >= 0 ] %>%
melt( id.vars = c('Scored' , 'ShotType' ) ) %>%
.[ , variable := factor( variable ) ] %>%
{ ggplot( . , aes( value , colour = Scored , group = Scored ) ) + facet_wrap( ~ variable , scales = 'free' ) + geom_histogram( bins = 30 ) } %>%
ggplotly
ggsave(
'eda-hist-by-scored.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
grid.newpage()
d %>%
.[ , .( Scored , ShotType , ShotDistance , ClosestDefenderDistance , ShotClock ,
Dribbles = as.numeric( Dribbles ) , TouchTime , GameClock , ShotDistanceClass , PositionDistClass ) ] %>%
.[ !is.na( ShotClock ) ] %>%
.[ TouchTime >= 0 ] %>%
melt( id.vars = c('Scored' , 'ShotType' , 'ShotDistanceClass' , 'PositionDistClass' ) ) %>%
.[ , variable := factor( variable ) ] %>%
{ ggplot( . , aes( value , colour = ShotDistanceClass , group = ShotDistanceClass ) ) + facet_wrap( ~ variable , scales = 'free' ) + geom_histogram( bins = 30 ) } %>%
ggplotly
ggsave(
'eda-hist-by-shot-dist-type.png' ,
width = 4 ,
height = 2.5 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# Model ############################################################################################
# https://rstudio-pubs-static.s3.amazonaws.com/33653_57fc7b8e5d484c909b615d8633c01d51.html
# https://cran.r-project.org/web/packages/lme4/vignettes/lmer.pdf
file.remove( 'working.RData')
save.image( file = 'working.RData' )
# glm
#
# cl <- makeCluster(getOption("cl.cores", { min( detectCores() , 10 ) } ) )
# l.m.glm <-
# parSapply(
# cl ,
# d[ , unique( PositionDistClass ) ] ,
# function(x) {
# # prep each environment
# load( 'working.RData')
# eval( config )
#
# d[ PositionDistClass == x , ] %>%
# .[ !is.na( ShotClock ) , ] %$%
# glm(
# Scored ~ ShotDistance * ClosestDefenderDistance ,
# family = binomial( link = 'logit' )
# )
# } , simplify = F , USE.NAMES = T )
# names( l.m.glm ) <- { d[ , unique( PositionDistClass ) ] }
# save.image( file = 'working.RData' )
# stopCluster( cl )
# rm( cl )
#
# glm with random effects
cl <- makeCluster(getOption("cl.cores", { min( detectCores() , 10 ) } ) )
l.m.glmer <-
parSapply(
cl ,
d[ , unique( PositionDistClass ) ] ,
function(x) {
# prep each environment
load( 'working.RData')
eval( config )
d[ PositionDistClass == x , ] %>%
.[ !is.na( ShotClock ) , ] %$%
glmer(
Scored ~ ShotDistance * ClosestDefenderDistance + ( ShotDistance | Player / Game ) + ( ClosestDefenderDistance | Player / Game ) ,
family = binomial( link = 'logit' ) ,
control = glmerControl(optimizer= c( "bobyqa" , "bobyqa" ) ,
optCtrl=list(maxfun=2e5) )
)
} , simplify = F )
names( l.m.glmer ) <- d[ , unique( PositionDistClass ) ]
save.image( file = 'working.RData' )
stopCluster( cl )
rm( cl )
# Results & Visuals ############################################################################
# output for each model
l.m.summaries <-
l.m.glmer %>% sapply( summary , simplify = F )
# anova for each model
l.m.anovas <-
l.m.glmer %>% sapply( anova , simplify = F )
# data.frame of model results
d.m.results <-
l.m.glmer %>% names %>% sapply( function(x) {
data.frame( PositionShotDistClass = x , tidy( l.m.glmer[[x]] ) )
} , simplify = F ) %>%
bind_rows() %>%
mutate(
Position = { str_replace_all( PositionShotDistClass , '([:alpha:]{1,2})(.*)' , '\\1')} ,
ShotDistClass = { str_replace_all( PositionShotDistClass , '([:alpha:]{1,2}) - (.*)' , '\\2')}
)
# data.frame of anova results
d.m.anovas <-
l.m.glmer %>% names %>% sapply( function(x) {
data.frame( PositionShotDistClass = x , tidy( anova( l.m.glmer[[x]] ) ) )
} , simplify = F ) %>%
bind_rows() %>%
mutate(
Position = { str_replace_all( PositionShotDistClass , '([:alpha:]{1,2})(.*)' , '\\1')} ,
ShotDistClass = { str_replace_all( PositionShotDistClass , '([:alpha:]{1,2}) - (.*)' , '\\2')}
)
save.image( file = 'working.RData' )
# Plot Fixed Effects
l.p.fe <-
l.m.glmer %>% names %>%
sapply( function(x){
sjp.glmer( l.m.glmer[[x]] , type = 'fe' , title = paste0( x , ': Fixed Effects' ) , prnt.plot = F )
} , simplify = F )
names(l.p.fe) <- paste0( names( l.p.fe ) , ': Fixed Effects' )
# Plot Fixed Effects Slopes
l.p.fe.slope <-
l.m.glmer %>% names %>%
sapply( function(x){
sjp.glmer( l.m.glmer[[x]] , type = 'fe.slope' , title = paste0( x , ': Fixed Effects Slopes' ) , prnt.plot = F )
} , simplify = F )
names(l.p.fe.slope) <- paste0( names( l.p.fe.slope ) , ': Fixed Effects Slopes' )
# Make combined list of ggplots
l.p <-
c(
l.p.fe %>% names %>% sapply( function(x){
l.p.fe[[x]][['plot']]
} , simplify = F )
,
l.p.fe.slope %>% names %>% sapply( function(x){
l.p.fe.slope[[x]][['plot']]
} , simplify = F )
)
d[ , unique( Position ) ] %>% as.character %>%
sapply( function(x) {
l <- list(
{ l.p[[ paste0( x , ' - Close (<15ft): Fixed Effects') ]] } ,
{ l.p[[ paste0( x , ' - Far (>=15ft): Fixed Effects') ]] +
theme( axis.text.y = element_blank() ,
axis.ticks.y = element_blank() )
}
)
grid.newpage()
p <- marrangeGrob( l , nrow = 1 , ncol = 2 , top = NULL )
ggsave( paste0( x , '-Fixed-Effects.png' ) ,
p ,
width = 6 ,
height = 3 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
# FE Slopes
l <- list(
{ l.p[[ paste0( x , ' - Close (<15ft): Fixed Effects Slopes') ]] } ,
{ l.p[[ paste0( x , ' - Far (>=15ft): Fixed Effects Slopes') ]] +
theme( axis.text.y = element_blank() ,
axis.ticks.y = element_blank() )
}
)
grid.newpage()
p <- marrangeGrob( l , nrow = 1 , ncol = 2 , top = NULL )
ggsave( paste0( x , '-Fixed-Effects-Slopes.png' ) ,
p ,
width = 6 ,
height = 3 ,
scale = 2 ,
dpi = 300 ,
path = 'output/'
)
} , simplify = F )
save.image( file = 'working.RData' )
# Save Tabled Outputs to png
x <-
d.m.results %>%
filter( !is.na( p.value ) ) %>%
select( PositionShotDistClass , Term = term , Estimate = estimate , PValue = p.value , Position , DistanceGroup = ShotDistClass ) %>%
mutate( Estimate = sprintf( '%.3f' , Estimate ) ) %>%
dcast( DistanceGroup + Term ~ Position , value.var = 'Estimate' )
png("estimates.png" , width = 7.6 , height = 2.5 , units = 'in' , res = 100 , pointsize = 6 )
p<-tableGrob(x , rows = NULL )
grid.arrange(p)
dev.off()
x <-
d.m.results %>%
filter( !is.na( p.value ) ) %>%
select( PositionShotDistClass , Term = term , Estimate = estimate , PValue = p.value , Position , DistanceGroup = ShotDistClass ) %>%
mutate( PValue = sprintf( '%.3f' , PValue ) ) %>%
dcast( DistanceGroup + Term ~ Position , value.var = 'PValue' )
png("pvalues.png" , width = 7.6 , height = 2.5 , units = 'in' , res = 100 , pointsize = 6 )
p<-tableGrob(x , rows = NULL )
grid.arrange(p)
dev.off()
save( d , file = 'd.RData' , envir = .GlobalEnv )
save( d.m.anovas , file = 'd.m.anovas.RData' , envir = .GlobalEnv )
save( d.m.results , file = 'd.m.results.RData' , envir = .GlobalEnv )
save( l.m.anovas , file = 'l.m.anovas.RData' , envir = .GlobalEnv )
save( l.m.glmer , file = 'l.m.glmer.RData' , envir = .GlobalEnv )
save( l.m.summaries , file = 'l.m.summaries.RData' , envir = .GlobalEnv )
save( l.p , file = 'l.p.RData' , envir = .GlobalEnv)
save( d.m.results , file = 'd.m.results.RData' , envir = .GlobalEnv)
|
#################################################################################
# Author: Michael Burton
#
# Purpose: Get basic summaries of different outfitting packages
# + Overall average cost by group size and number of days
# + Average cost by outfitter, group size, and number of days
#
 # Notes:   Add additional summary statistics (min, q1, median, q3, max)
# Visualize information, boxplots, average cost over days plot
#################################################################################
# Overall average cost by group size and number of days
AVG_COST <- ELY_OUTFITTERS_03 %>%
group_by(PEOPLE_CAT, DAYS) %>%
summarise(avg = mean(COST)) %>%
mutate(avg = round(avg,digits=0))
OVERALL_AVG_COST_TABLE <- xtabs(avg~PEOPLE_CAT+DAYS, data = AVG_COST)
ftable(OVERALL_AVG_COST_TABLE)
# Avg cost by outfitter
AVG_COST_BY_OUTFITTER <- ELY_OUTFITTERS_03 %>%
group_by(OUTFITTER,PEOPLE_CAT, DAYS) %>%
summarise(avg = mean(COST)) %>%
mutate(avg = round(avg,digits=0))
OUTFITTER_AVG_COST_TABLE <- xtabs(avg~OUTFITTER+PEOPLE_CAT+DAYS, data = AVG_COST_BY_OUTFITTER)
ftable(OUTFITTER_AVG_COST_TABLE)
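#################################################################################
# Illustrative sketch for the Notes above (an addition, not original output):
# assuming the same ELY_OUTFITTERS_03 data frame and that dplyr/ggplot2 are
# attached, this adds the five-number summary and a boxplot of cost by days.
COST_SUMMARY <- ELY_OUTFITTERS_03 %>%
  group_by(PEOPLE_CAT, DAYS) %>%
  summarise(min = min(COST), q1 = quantile(COST, 0.25), median = median(COST),
            q3 = quantile(COST, 0.75), max = max(COST))
COST_SUMMARY
ggplot(ELY_OUTFITTERS_03, aes(x = factor(DAYS), y = COST)) +
  geom_boxplot() +
  labs(x = "Number of days", y = "Package cost")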
|
/02_summary_info.r
|
permissive
|
MichaelEBurton/RoaringLoon
|
R
| false | false | 1,270 |
r
|
|
library(pROC)
data(aSAH)
a.ndka <- auc(aSAH$outcome, aSAH$ndka)
test_that("can convert auc to numeric", {
expect_is(a.ndka, "auc") # a.ndka is not a numeric to start with
expect_equal(as.numeric(a.ndka), 0.611957994579946)
})
test_that("can do math on an AUC", {
expect_equal(sqrt(a.ndka), 0.782277440924859)
expect_equal(a.ndka * 2, 1.22391598915989)
expect_equal(a.ndka / 0.5, 1.22391598915989)
expect_equal(a.ndka + 5, 5.611957994579946)
expect_equal(a.ndka - 1, -0.388042005420054)
expect_equal(round(a.ndka, digits=1), 0.6)
})
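# Illustrative extra check (an addition, not from the original suite): the
# underlying AUC value is a plain double (~0.612), so comparisons against a
# numeric are expected to behave as usual.
test_that("can compare an AUC against a numeric", {
  expect_true(a.ndka > 0.5)
  expect_false(a.ndka > 0.7)
})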
|
/tests/testthat/test-Ops.R
|
no_license
|
xrobin/pROC
|
R
| false | false | 542 |
r
|
|
library(tidyverse)
#setwd("C:/Users/m1tws00/Desktop/CAPSTONE/")
setwd("C:/Users/Popeck Spiller/Desktop/Northwestern/CAPSTONE/")
minor_batting1 <- read.csv("data/minor_batting.csv",stringsAsFactors=FALSE)
minor_batting <- read.csv("data/minor_batting_altered.csv",stringsAsFactors=FALSE)
wOBA_FIP_constants <- read.csv("data/wOBA_FIP_constants.csv",stringsAsFactors=FALSE)
minor_batting1 <- minor_batting1 %>% arrange(playerid,year,teamName,League,Level,orgName)
minor_batting <- minor_batting %>% arrange(playerid,year,teamName,League,Level,orgName)
minor_batting$HT <-minor_batting1$HT
#Editing the data to get
# height in inches
# Corrected weights
# Flags for lefties
# Going to
minor_batting2 <- minor_batting %>%
dplyr::mutate(HTFT = as.numeric(substring(HT,1,1)),
HTIN = as.numeric(substring(HT,3,5)),
HT_INCHES = ifelse(HT == "51-",61,
ifelse(HT == "61-",73,
ifelse(HT == "51-1",71,
ifelse(HT == "6-09",81,
ifelse(HT == "-10",70,#Daniel Mann, weighs 182 I don't think a 4-10 or a 6-10 athlete could weigh 182.
ifelse(HT == "60-",72,
ifelse(HT == "5-91",69,#Cray Landon, Recent prospect
ifelse(HT == "",NA,
HTFT*12+HTIN
)))))))),
WT = ifelse(playerid==210610,170,WT),#Correcting the 70lbs identified for Enrique Castillo, playerid 210610
Bats = ifelse(Bats=="",NA,Bats),
BatsLeft = ifelse(Bats=="L",1,0),
Throws = ifelse(Throws=="",NA,Throws),
ThrowsLeft = ifelse(Throws=="L",1,0),
pitcher = ifelse(grepl("P",posit),1,0),
Outfielder = ifelse(grepl("F",posit),1,0),
HT_INCHES = ifelse(HT_INCHES==0,NA,HT_INCHES)
) %>%
dplyr::select(-c(we_wBB,we_w1B,we_w2B,we_w3B,we_wHR,we_wOBAScale,
we_runCS,we_R.PA,we_cFIP,we_wOBA,we_wHBP,we_runSB,
we_R.W))
numeric_batting <- minor_batting2 %>%
dplyr::select(-c(teamName,orgName,lastName,firstName,Bats,Throws,posit,borndate,cityName,
regionID,mlbid,HT))
leage.position.year <- numeric_batting %>%
dplyr::group_by(League,Level,year,pitcher) %>%
dplyr::summarise_all(funs(mean))
leage.level.pitcher.year <- data.table::setDT(numeric_batting)[, lapply(.SD, mean), by = c("League","Level","year","pitcher")] %>%
merge(numeric_batting %>% dplyr::group_by(League,Level,year,pitcher) %>% summarize(numPlayers=n_distinct(playerid)),by=c("League","Level","year","pitcher"))
leage.level.year <- data.table::setDT(numeric_batting)[, lapply(.SD, mean), by = c("League","Level","year")] %>%
merge(numeric_batting %>% dplyr::group_by(League,Level,year) %>% summarize(numPlayers=n_distinct(playerid)),by=c("League","Level","year"))
leage.level.pitcher.year %>% write_csv("leage_level_year_pitcher_averages.csv")
leage.level.year %>% write_csv("leage_level_year_averages.csv")
CombinedPlayerBats <- function(){
bioInfo <- minor_batting %>%
select(playerid,lastName,firstName,HT,WT,Bats,Throws,posit,borndate,cityName,regionID,mlbid,Made.it,Year_in_MLB) %>%
unique() %>%
mutate(HTFT = as.numeric(substring(HT,1,1)),
HTIN = as.numeric(substring(HT,3,5)),
HT_INCHES = ifelse(HT == "51-",61,
ifelse(HT == "61-",73,
ifelse(HT == "51-1",71,
ifelse(HT == "6-09",81,
ifelse(HT == "-10",70,#Daniel Mann, weighs 182 I don't think a 4-10 or a 6-10 athlete could weigh 182.
ifelse(HT == "60-",72,
ifelse(HT == "5-91",69,#Cray Landon, Recent prospect
ifelse(HT == "",NA,
HTFT*12+HTIN
                                               )))))))),
WT = ifelse(playerid==210610,170,WT),#Correcting the 70lbs identified for Enrique Castillo, playerid 210610
Bats = ifelse(Bats=="",NA,Bats),
BatsLeft = ifelse(Bats=="L",TRUE,FALSE),
Throws = ifelse(Throws=="",NA,Throws),
ThrowsLeft = ifelse(Throws=="L",TRUE,FALSE),
pitcher = ifelse(posit=="P",TRUE,FALSE),
HT_INCHES = ifelse(HT_INCHES==0,NA,HT_INCHES)
) %>% select(-c(HT,HTFT,HTIN,Bats,Throws,posit))
ds <- minor_batting %>% group_by(playerid) %>%
summarize(G=sum(G), AB=sum(AB), R=sum(R), H=sum(H),
Dbl=sum(Dbl), Tpl=sum(Tpl), HR=sum(HR), RBI=sum(RBI),
SB=sum(SB), CS=sum(CS), BB=sum(BB), IBB=sum(IBB),
SO=sum(SO), SH=sum(SH), SF=sum(SF), HBP=sum(HBP),
GDP=sum(GDP),
TB=H+2*Dbl+3*Tpl+4*HR,
XBH=2*Dbl+3*Tpl+4*HR,
PA = (AB+BB+HBP+SF+SH),
Bavg=ifelse(AB==0,NA,H/AB),
OBP =ifelse((AB+BB+HBP+SF)==0,NA,(H+BB+HBP)/(AB+BB+HBP+SF)),
SLG=ifelse(AB==0,NA,TB/AB),
OPS=OBP+SLG,
ISO=SLG-Bavg,
BABIP=ifelse((AB-SO-HR+SF)==0,NA,(H-HR)/(AB-SO-HR+SF)),
SecA=ifelse(AB==0,NA, (BB + (TB-H) + (SB-CS)) / (AB)),
BBpct=ifelse(PA==0,NA,BB/PA),
SOpct=ifelse(PA==0,NA,SO/PA),
HRpct=ifelse(PA==0,NA,HR/PA),
K_BB= ifelse(BB==0,0,SO/BB),
AB_HR=ifelse(HR==0,0,AB/HR),
XBHpct=ifelse(H==0,0,XBH/H),
wBB=mean(we_wBB),
wHBP=mean(we_wHBP),
w1B=mean(we_w1B),
w2B=mean(we_w2B),
w3B=mean(we_w3B),
wHR=mean(we_wHR),
wOBAScale=mean(we_wOBAScale),
            #wOBA=ifelse((AB+BB-IBB+SF+HBP)==0,NA,(wBB*BB+wHBP*HBP+w1B*H+w2B*Dbl+w3B*Tpl+wHR*HR)/(AB+BB-IBB+SF+HBP)),
wRAA=sum(wRAA),
teams = n_distinct(teamName),
orgs = n_distinct(orgName),
leagues = n_distinct(League),
levels = n_distinct(Level),
yearsInMinors = n_distinct(year)
) %>%
merge(bioInfo,by=c("playerid"))
return(ds)
}
# At the player level.
minor_bat_all <- CombinedPlayerBats()
# Replace missing values with 0 and flag the data that was missing.
minor_bat_all[ , paste0( "M_",names(minor_bat_all)[-1])] <-
lapply(minor_bat_all[-1], function(x) as.numeric(is.na(x)) )
minor_bat_all[is.na(minor_bat_all)] <- 0
#Drop the variables that are zero for the whole dataset:
# Specifically the M_ missing flags
allNames <- names(minor_bat_all)
minor_bat_all2 <- minor_bat_all[, colSums(minor_bat_all != 0) > 0]
nowNames <- names(minor_bat_all2)
droppedVars <- setdiff(allNames,nowNames)
#Replace 0 mlbid with NA to return to a better ID.
# Generates the minor_bat_cleaned.
minor_bat_cleaned_player <- minor_bat_all2 %>%
mutate(mlbid=ifelse(M_mlbid==1,NA,mlbid),
MLB=factor(ifelse(is.na(mlbid),0,1)),
WT = ifelse(WT==0,NA,WT), # 0 Weight
HT_INCHES = ifelse(HT_INCHES==0,NA,HT_INCHES) # 0 HT_INCHES
)
summary(minor_bat_cleaned_player)
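# Quick QA sketch (illustrative addition, not part of the original pipeline):
# confirm the height/weight cleaning behaved as expected and list the all-zero
# missing-value flags that were dropped above.
summary(minor_bat_cleaned_player[, c("WT", "HT_INCHES")])
print(droppedVars)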
|
/Code/R/BaseballCubeImportBatting.R
|
no_license
|
michaelpallante/mlb_talent_predictions
|
R
| false | false | 6,523 |
r
|
|
# qPCRs for infection intensity
library(tidyverse)
library(RCurl)
library(reshape2)
library(ggpubr)
library(ggplot2)
library(naniar)
library(data.table)
######### add infection intensity
qPCR1 <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_qPCRs/P3_112019_Eim_qPCR4.CSV"
qPCR1 <- read.csv(text = getURL(qPCR1))
# remove LM_0340 because it is taken from qPCR3 instead (added below)
qPCR1 <-qPCR1[!(qPCR1$Name=="LM_0340"),]
qPCR1.long <- dplyr::select(qPCR1, Name, Ct.Mean.SYBR, Target.SYBR)
qPCR1.long <- distinct(qPCR1.long)
# add qPCR 3 and use LM_340 from there
qPCR2 <- "https://raw.githubusercontent.com/derele/Eimeria_Lab/master/data/Experiment_results/P3_112019_Eim_qPCRs/P3_112019_Eim_qPCR3.CSV"
qPCR2 <- read.csv(text = getURL(qPCR2))
qPCR2.long <- dplyr::select(qPCR2, Name, Ct.Mean.SYBR, Target.SYBR)
qPCR2.long <- distinct(qPCR2.long)
# make into deltas (mouse-eimeria)
qPCR1.long <- qPCR1.long %>%
reshape2::dcast(Name ~ Target.SYBR, value.var = "Ct.Mean.SYBR", fill = 0) %>%
mutate(delta = mouse - eimeria) %>%
dplyr::select(Name,delta)
names(qPCR1.long)[names(qPCR1.long) == "Name"] <- "EH_ID"
qPCR2.long <- qPCR2.long %>%
reshape2::dcast(Name ~ Target.SYBR, value.var = "Ct.Mean.SYBR", fill = 0) %>%
mutate(delta = mouse - eimeria) %>%
dplyr::select(Name,delta)
names(qPCR2.long)[names(qPCR2.long) == "Name"] <- "EH_ID"
# check MCs one more time but pick qPCR1 predominantly and add 332, 333 and 340 from qPCR2 for now
qPCR2.long <- subset(qPCR2.long, EH_ID %in% c("LM_0332", "LM_0333", "LM_0340"))
# merge and write out
P3_qPCR <- rbind(qPCR1.long, qPCR2.long)
write.csv(P3_qPCR, "./Eimeria_Lab/data/Experiment_results/P3_112019_Eim_qPCRs/P3_112019_Eim_CEWE_qPCR.csv")
write.csv(P3_qPCR, "D:/Eimeria_Lab/data/Experiment_results/P3_112019_Eim_CEWE_qPCR.csv")
|
/Eimeria_Lab_code/P3_112019_Eim_qPCR.R
|
no_license
|
LubomirBednar/PhD
|
R
| false | false | 1,822 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilityfcns.R
\name{summary.arfima}
\alias{summary.arfima}
\title{Extensive Summary of an Object}
\usage{
\method{summary}{arfima}(object, digits = max(4, getOption("digits") - 3), ...)
}
\arguments{
\item{object}{A fitted \code{arfima} object}
\item{digits}{The number of digits to print}
\item{\dots}{Optional arguments, currently not used.}
}
\value{
A list of lists (one for each mode) of all relevant information
about the fit that can be passed to \code{print.summary.arfima}.
}
\description{
Provides a very comprehensive summary of a fitted \code{arfima} object.
Includes correlation and covariance matrices (observed and expected), the
Fisher Information matrix of those parameters for which it is defined, and
more, for each mode.
}
\examples{
data(tmpyr)
fit <- arfima(tmpyr, order = c(1, 0, 1), back=TRUE)
fit
summary(fit)
}
\references{
Veenstra, J.Q. Persistence and Antipersistence: Theory and
Software (PhD Thesis)
}
\seealso{
\code{\link{arfima}}, \code{\link{iARFIMA}},
\code{\link{vcov.arfima}}
}
\author{
JQ (Justin) Veenstra
}
\keyword{ts}
|
/man/summary.arfima.Rd
|
no_license
|
JQVeenstra/arfima
|
R
| false | true | 1,146 |
rd
|
|
\name{variableSelectorBatchP}
\alias{variableSelectorBatchP}
\title{ Variable Selection in Parallel Batch Mode BAMD }
\description{
This function runs \code{\link{variableSelector}} in parallel
in Batch mode.
}
\usage{
variableSelectorBatchP(fname, n, p, s, nsim, keep = 5, prop = 0.75,
codaOut = "CodaChain.txt", codaIndex = "CodaIndex.txt",
missingfile = "Imputed_missing_vals", SNPsubset, prefix,
pathToLog, outfile = "out1.rdt")
}
\arguments{
\item{fname}{ \code{fname} should be the name of a \code{.csv} file. This file should
contain the Y, X, Z and R matrices for the model, in that particular order. Hence it
should contain \eqn{n \times (1 + p + s + n)} values. There should be a header rown in the
input file as well. The Z matrix should use the values 1,2,3 for the SNPs and 0 for any missing SNPs.
The program will convert the SNP codings to -1,0,1 and work with those.}
\item{n}{ \code{n} refers to the length of the Y-vector; equivalent to the number of
observations in the dataset. }
\item{p}{ \code{p} is the number of columns of the X-matrix. }
\item{s}{ \code{s} is the number of columns of the Z-matrix. Note that this is the total number of original SNPs put through the Gibbs sampler.}
\item{nsim}{ \code{nsim} specifies the number of iterations of the Metropolis-Hastings
chain to carry out. }
\item{keep}{ \code{keep} specifies the number of models to store. The top
\code{keep} models will be retained. }
\item{prop}{ As the candidate distribution for the Metropolis-Hastings chain is a mixture, one
of whose components is a random walk, \code{prop} will determine the percentage of time that
the random walk distribution is chosen. }
\item{codaOut}{ This is the name of the file that was output from \code{\link{gibbsSampler}}. It contains the values obtained from the Gibbs sampler. }
\item{codaIndex}{ This is the name of the file that describes the format of the variables in \code{codaOut}. }
\item{missingfile}{ Contains the missing SNP values that were output from \code{\link{gibbsSampler}}. }
\item{SNPsubset}{ A 0-1 vector of length \code{s}, indicating the SNPs that should be considered as possible variables. }
\item{prefix}{ A prefix to name the log files from each processor. For example, if
prefix is specified as "rank" and there are 3 processors, then there will be 3 files
with names "rank00.log", "rank01.log" and "rank02.log"}
\item{pathToLog}{ A path to where the log files should be stored. }
\item{outfile}{ A character string - the file name to store the output table to. }
}
\details{
A Metropolis-Hastings algorithm is used to conduct a stochastic search through the model space
to find the best models. \code{nsim} steps of the chain will be run on each available
processor. Each of them will return the best \code{keep} models they found to the
master. The master will strip away the duplicates and return the top
\code{keep} models found.
See the scripts in demo/ directory for full examples.
}
\value{
No value is returned as it is run in Batch mode. The output object is stored in the binary output file.
}
\author{
Vik Gopal \email{viknesh@stat.ufl.edu}
}
\note{
Remember to copy the appropriate Rprofile that is provided in the inst/ directory to the
directory you are working in!
}
\seealso{
\code{\link{variableSelector}}, \code{\link{variableSelectorInteractP}}
}
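\examples{
\dontrun{
## Hypothetical call: the argument values below are illustrative only and
## assume a small SNP data set laid out as described under 'fname' above.
variableSelectorBatchP(fname = "snpdata.csv", n = 100, p = 2, s = 50,
                       nsim = 10000, keep = 5, prop = 0.75,
                       SNPsubset = rep(1, 50), prefix = "rank",
                       pathToLog = "logs/", outfile = "out1.rdt")
}
}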
\keyword{ htest }
|
/man/variableSelectorBatchP.Rd
|
no_license
|
cran/BAMD
|
R
| false | false | 3,410 |
rd
|
|
## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() {
x
}
    setInverse <- function(inverse) {
        i <<- inverse
    }
    getInverse <- function() {
        i
    }
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getInverse()
if( !is.null(m) ) {
message("getting cached data")
return(m)
}
data <- x$get()
    m <- solve(data, ...)  # compute the matrix inverse (solve(data) %*% data would just give the identity)
x$setInverse(m)
m
}
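## Illustrative usage (added example, not part of the original assignment code):
## the second cacheSolve() call should hit the cache and print the message.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m)   # computes the inverse and stores it in the cache
cacheSolve(m)   # returns the cached inverse, printing "getting cached data"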
|
/assignment2.R
|
no_license
|
Jessica0816/ProgrammingAssignment2
|
R
| false | false | 889 |
r
|
|
source("D:/Course/BIOS7695/Final projects/programs/7. Dispersion summary.R")
tiff("D:/Course/BIOS7695/Final projects/Figures/Boxplot.tiff",height = 30, width = 20, units="cm",
compression = "lzw", res = 600)
par(mfrow=c(4,1))
boxplot(seq.counts,xlab ="314 subjects", xaxt = "n",main="Raw counts")
boxplot(counts.between.median,xlab ="314 subjects",main="Normalized counts, method=median", xaxt = "n")
boxplot(counts.between.upper,xlab ="314 subjects",main="Normalized counts, method=upper", xaxt = "n")
boxplot(counts.between.full,xlab ="314 subjects",main="Normalized counts, method=full", xaxt = "n")
dev.off()
tiff("D:/Course/BIOS7695/Final projects/Figures/MAplot.tiff",height = 9, width = 9, units="cm",
compression = "lzw", res = 400)
meanVarPlot(seq.counts,log=TRUE)
dev.off()
setwd("D:/Course/BIOS7695/Final projects/Figures/Log2change")
tiff(height = 12, width = 8, units="cm",
compression = "lzw", res = 400)
p1
p2
p3
p4
dev.off()
setwd("D:/Course/BIOS7695/Final projects/Figures/Dispersion")
tiff(height = 8, width = 12, units="cm",
compression = "lzw", res = 400)
p5
p6
p7
p8
p9
p10
p11
p12
dev.off()
|
/programs/Final_Figures.R
|
no_license
|
zhwr7125/Final-projects
|
R
| false | false | 1,178 |
r
|
source("D:/Course/BIOS7695/Final projects/programs/7. Dispersion summary.R")
tiff("D:/Course/BIOS7695/Final projects/Figures/Boxplot.tiff",height = 30, width = 20, units="cm",
compression = "lzw", res = 600)
par(mfrow=c(4,1))
boxplot(seq.counts,xlab ="314 subjects", xaxt = "n",main="Raw counts")
boxplot(counts.between.median,xlab ="314 subjects",main="Normalized counts, method=median", xaxt = "n")
boxplot(counts.between.upper,xlab ="314 subjects",main="Normalized counts, method=upper", xaxt = "n")
boxplot(counts.between.full,xlab ="314 subjects",main="Normalized counts, method=full", xaxt = "n")
dev.off()
tiff("D:/Course/BIOS7695/Final projects/Figures/MAplot.tiff",height = 9, width = 9, units="cm",
compression = "lzw", res = 400)
meanVarPlot(seq.counts,log=TRUE)
dev.off()
setwd("D:/Course/BIOS7695/Final projects/Figures/Log2change")
tiff(height = 12, width = 8, units="cm",
compression = "lzw", res = 400)
p1
p2
p3
p4
dev.off()
setwd("D:/Course/BIOS7695/Final projects/Figures/Dispersion")
tiff(height = 8, width = 12, units="cm",
compression = "lzw", res = 400)
p5
p6
p7
p8
p9
p10
p11
p12
dev.off()
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% File.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{getCanonicalFile.File}
\alias{getCanonicalFile.File}
\alias{File.getCanonicalFile}
\alias{getCanonicalFile.File}
\alias{getCanonicalFile,File-method}
\title{Gets the canonical form of this pathname}
\usage{\method{getCanonicalFile}{File}(this, ...)}
\description{
 Gets the canonical form of this pathname.
}
\value{
Returns a \code{File} object.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\seealso{
For more information see \code{\link{File}}.
}
\keyword{internal}
\keyword{methods}
|
/man/getCanonicalFile.File.Rd
|
no_license
|
HenrikBengtsson/R.io
|
R
| false | false | 856 |
rd
|
|
# Script for exporting run data from an h5 to CSV
# Script will have 3 inputs: h5_file_path, output_file_path, and data_source_table
suppressPackageStartupMessages(library(rhdf5))
suppressPackageStartupMessages(library(R.utils))
# Accepting command arguments:
argst <- commandArgs(trailingOnly = T)
h5_file_path <- argst[1] # arguments default as class 'character' ; name of the .h5
output_file_path <- argst[2] #name of csv being created
data_source_table <- argst[3] # Path to data table within h5 that begins with /[groupname]
# Reading in table from h5
fid = H5Fopen(h5_file_path) # Opens the h5 file, fid is a h5 identifier
did = H5Dopen(fid, data_source_table) # Opens the data table of interest using the path provided
data <- H5Dread(did, bit64conversion = "double")
origin <- "1970-01-01"
data$index <- as.POSIXct((data$index)/10^9, origin = origin, tz = "UTC")
# Exporting to a csv
write.table(data,file = output_file_path, sep = ",", row.names = FALSE) # Maybe .csv should be added to the end of file argument?
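# Example invocation from a shell (script path from this repo; the file names and
# the h5 table path are illustrative placeholders):
#   Rscript run/export/export_hsp_h5.R run01.h5 run01_export.csv /RESULTS/PERLND/P001/HYDR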
|
/run/export/export_hsp_h5.R
|
no_license
|
HARPgroup/cbp_wsm
|
R
| false | false | 1,029 |
r
|
|
#### SETUP #####################################################################
cat("\014"); rm(list=ls())
setwd(here::here())
source("code/ExtAn_Helper_PrecisionData.R")
nm <- "gallagher_comparison_2016" ## Name of study (actually name of file)
df <- read.csv(paste0("data/raw_ageing/",nm,".csv"))
str(df)
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "whole"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("whoto_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~whoto_RW_1+whoto_RW_2+whoto_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "sectioned"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("sectoto_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~sectoto_RW_1+sectoto_RW_2+sectoto_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pectoral"
proc <- "sectioned"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("pectoral_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pectoral_RW_1+pectoral_RW_2+pectoral_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pelvic"
proc <- "sectioned"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("pelvic_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pelvic_RW_1+pelvic_RW_2+pelvic_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "whole"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("whoto_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~whoto_NSC_1+whoto_NSC_2+whoto_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "sectioned"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("sectoto_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~sectoto_NSC_1+sectoto_NSC_2+sectoto_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pectoral"
proc <- "sectioned"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("pectoral_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pectoral_NSC_1+pectoral_NSC_2+pectoral_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pelvic"
proc <- "sectioned"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("pelvic_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pelvic_NSC_1+pelvic_NSC_2+pelvic_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### BETWEEN RW AND NSC READER #################################################
## This uses all six reads (three from each reader) ... not sure about this so
## I did not output these results.
species <- "Dolly Varden"
atype <- "between"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "whole"
extra_suffix <- ""
df1 <- df %>%
select(contains("whoto")) %>%
filter(complete.cases(.)) %>%
mutate(whoto_RW=rowMeans(select(.,contains("RW"))),
whoto_NSC=rowMeans(select(.,contains("NSC"))))
ap1 <- agePrecision(~whoto_RW_1+whoto_RW_2+whoto_RW_3+whoto_NSC_1+whoto_NSC_2+whoto_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
#saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
# ifelse(strux2=="","","_"),strux2,
# ifelse(proc=="","","_"),proc,
# ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
|
/data/raw_ageing/gallagher_comparison_2016.R
|
no_license
|
droglenc/AgePrecision
|
R
| false | false | 10,383 |
r
|
#### SETUP #####################################################################
cat("\014"); rm(list=ls())
setwd(here::here())
source("code/ExtAn_Helper_PrecisionData.R")
nm <- "gallagher_comparison_2016" ## Name of study (actually name of file)
df <- read.csv(paste0("data/raw_ageing/",nm,".csv"))
str(df)
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "whole"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("whoto_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~whoto_RW_1+whoto_RW_2+whoto_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "sectioned"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("sectoto_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~sectoto_RW_1+sectoto_RW_2+sectoto_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pectoral"
proc <- "sectioned"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("pectoral_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pectoral_RW_1+pectoral_RW_2+pectoral_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN RW READER ##########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pelvic"
proc <- "sectioned"
extra_suffix <- "RW"
df1 <- df %>%
select(contains("pelvic_RW")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pelvic_RW_1+pelvic_RW_2+pelvic_RW_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "whole"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("whoto_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~whoto_NSC_1+whoto_NSC_2+whoto_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "sectioned"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("sectoto_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~sectoto_NSC_1+sectoto_NSC_2+sectoto_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pectoral"
proc <- "sectioned"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("pectoral_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pectoral_NSC_1+pectoral_NSC_2+pectoral_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### WITHIN NSC READER #########################################################
species <- "Dolly Varden"
atype <- "within"
strux <- "finrays"
strux2 <- "pelvic"
proc <- "sectioned"
extra_suffix <- "NSC"
df1 <- df %>%
select(contains("pelvic_NSC")) %>%
filter(complete.cases(.))
ap1 <- agePrecision(~pelvic_NSC_1+pelvic_NSC_2+pelvic_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
ifelse(strux2=="","","_"),strux2,
ifelse(proc=="","","_"),proc,
ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
#### BETWEEN RW AND NSC READER #################################################
## This uses all six reads (three from each reader) ... not sure about this so
## I did not output these results.
species <- "Dolly Varden"
atype <- "between"
strux <- "otoliths"
strux2 <- "saggitae"
proc <- "whole"
extra_suffix <- ""
df1 <- df %>%
select(contains("whoto")) %>%
filter(complete.cases(.)) %>%
mutate(whoto_RW=rowMeans(select(.,contains("RW"))),
whoto_NSC=rowMeans(select(.,contains("NSC"))))
ap1 <- agePrecision(~whoto_RW_1+whoto_RW_2+whoto_RW_3+whoto_NSC_1+whoto_NSC_2+whoto_NSC_3,data=df1)
pt1SD <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="SD")
plot(pt1SD)
summary(pt1SD,what="tests")
pt1CV <- precisionData(ap1,studyID=nm,species=species,
structure=strux,structure2=strux2,process=proc,
type=atype,var="CV")
plot(pt1CV)
summary(pt1CV,what="tests")
res <- list(sum=pt1SD$sum,tests=rbind(pt1SD$tests,pt1CV$tests))
#saveRDS(res,paste0("data/results_precision/",nm,"_",species,"_",strux,
# ifelse(strux2=="","","_"),strux2,
# ifelse(proc=="","","_"),proc,
# ifelse(extra_suffix=="","","_"),extra_suffix,".rds"))
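## Alternative sketch (added for illustration; not part of the original analysis):
## a single between-reader comparison can also be run on each reader's mean age,
## using the whoto_RW and whoto_NSC columns computed above, instead of all six
## individual reads. This assumes FSA::agePrecision() is the function used for the
## analyses above and that it accepts the two derived columns like any other pair
## of age assignments.
ap_mean <- agePrecision(~whoto_RW+whoto_NSC, data=df1)
summary(ap_mean, what="precision")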
|
#Packages for Random Forest (pROC is also needed below for roc()/auc())
install.packages("randomForest")
install.packages("pROC")
library("randomForest")
library("pROC")
# split the data into training and test
set.seed(1234) # for reproducibility: fixes the random stream so the train/test split is repeatable
titanic_final$rand <- runif(nrow(titanic_final))
titanic_train <- titanic_final[titanic_final$rand <= 0.7,]
titanic_test <- titanic_final[titanic_final$rand > 0.7,]
nrow(titanic_train)
nrow(titanic_test)
View(titanic_train)
titanic_train$Age[is.na(titanic_train$Age)] <- 29.7 # impute missing ages (29.7 is roughly the overall mean age)
# Survived is left numeric (0/1), so randomForest fits a regression forest whose
# predictions can be thresholded at 0.5 below
rf = randomForest(Survived ~ Pclass+Sex+Age, data = titanic_train, mtry = 2, importance = TRUE)
print(rf)
summary(rf)
importance(rf) # variable importance (randomForest's own accessor; varImp() is from caret, which is not loaded)
# training probabilities and roc
titanic_train$randomprob = predict(rf, type=c("response"))
titanic_train$randompred = ifelse(titanic_train$randomprob >=.5,'pred_yes','pred_no') #not required if Survived is as.factor
View(titanic_train)
q <- roc(Survived ~ randomprob, data = titanic_train) # pROC; use the probability column created above
plot(q)
auc(q)
# confusion matrix for Train data
titanic_train$Survivedpred = ifelse(titanic_train$randomprob>=.5,'pred_yes','pred_no')
table(titanic_train$Survivedpred,titanic_train$Survived)
#Accuracy, Precision, Recall and F1 for Train
accuracy_train <- (322+179)/626 #80.03
#########Support Vector Machine ###################
install.packages("e1071")
library("e1071")
SVM_FINAL = svm(formula = Survived ~ Pclass + Sex + Age + SibSp, data = titanic_train) # mtry/importance are randomForest arguments, not svm() arguments
print(SVM_FINAL)
titanic_train$svmprob = predict(SVM_FINAL, type=c("response"))
titanic_train$svmpred = ifelse(titanic_train$svmprob >=.5,'pred_yes','pred_no') #not required if Survived is as.factor
View(titanic_train)
table(titanic_train$svmpred, titanic_train$Survived) # confusion matrix uses the thresholded predictions
accuracy_svm <- (352+163)/(352+163+76+31) #82.79%
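# --- Hold-out evaluation sketch (added; not in the original script) -----------
# The split above creates titanic_test but never scores it. A minimal sketch,
# assuming titanic_test has the same columns and that missing ages are imputed
# the same way as in the training data:
titanic_test$Age[is.na(titanic_test$Age)] <- 29.7
test_prob <- predict(rf, newdata = titanic_test, type = "response")
test_pred <- ifelse(test_prob >= .5, 'pred_yes', 'pred_no')
table(test_pred, titanic_test$Survived)                  # test-set confusion matrix
mean((test_prob >= .5) == (titanic_test$Survived == 1))  # test-set accuracy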
|
/SVM_In class.R
|
no_license
|
bhavyabishnoi/AMMA--ALL-Codes
|
R
| false | false | 1,800 |
r
|
make.formulastepNLL.formula <- function(formula, data,
response=".fail",
baseline= "NLL(.t, Spline = \"b-spline\", Knots = NULL, Degree = 2, Log = FALSE, Intercept = TRUE)",
tik="tik",
...){
# make formula for glm() at step NLL
special <- c("NPH","NLL", "NPHNLL")
Terms <- if (missing(data)){
terms(formula, special, keep.order = TRUE)
} else {
terms(formula, special, data = data, keep.order = TRUE)
}
NamesNPHNLLVars<- all_specials_vars( Terms,
specials="NPHNLL",
unique = TRUE,
order="formula")
modified <- 0
newtermlabels <- labels(Terms)
# add offset(log(tik))
offset <- paste("offset(log(", tik, "))", sep="")
	# replace NPHNLL() calls by NLLbeta() calls with y = betaT<x>
if(length(NamesNPHNLLVars) >0){
for (i in attr(Terms, "specials")[["NPHNLL"]]){
for (k in 1:length(i)){
thecall <- match.call(NPHNLL, attr(Terms,"variables")[[i[k]+1]])
namebetaTx <- paste("betaT", thecall[["x"]], sep="")
modified <- modified + 1
thecall[[1]] <- as.name("NLLbeta")
thecall[["y"]] <- as.name(namebetaTx)
indxterm <- variable2term(i, Terms)
charcall<-deparse(thecall, 500)
oldtermlabel <- newtermlabels[indxterm[k]]
newtermlabels <- gsub(oldtermlabel, charcall, newtermlabels, fixed=TRUE)
if(thecall[["Spline"]]=="b-spline"){
          # add offset(betaT<x> * first Boundary.knot) to anchor the b-spline
minX <- eval(as.expression(thecall[["Boundary.knots"]]))[1]
offset <- c(offset, paste("offset(", namebetaTx, " * ", minX, ")", sep=""))
}
}
}
}
if(modified > 0){
formula <- reformulate(c(baseline, newtermlabels, offset),
response = response,
intercept = !is.null(baseline))
}
return(formula)
}
################################################################################
make.formulastepNPH.formula <- function(formula, data,
response=".fail",
baseline= "NLL(.t, Spline = \"b-spline\", Knots = NULL, Degree = 2, Log = FALSE, Intercept = TRUE)",
tik="tik",
...){
	# make formula for glm() at step NPH
special <- c("NPH","NLL", "NPHNLL")
Terms <- if (missing(data)){
terms(formula, special, keep.order = TRUE)
} else {
terms(formula, special, data = data, keep.order = TRUE)
}
NamesNPHNLLVars<- all_specials_vars( Terms,
specials="NPHNLL",
unique = TRUE,
order="formula")
modified <- 0
newtermlabels <- labels(Terms)
# add offset(log(tik))
offset <- paste("offset(log(", tik, "))", sep="")
	# change arg x to alpha"x" in NPHNLL() call
	# force Intercept.t = FALSE
if(length(NamesNPHNLLVars) >0){
for (i in attr(Terms, "specials")[["NPHNLL"]]){
for (k in 1:length(i)){
thecall <- match.call(NPHNLL, attr(Terms,"variables")[[i[k]+1]])
namealphax <- paste("alpha", thecall[["x"]], sep="")
modified <- modified + 1
thecall[[1]] <- as.name("NPH")
thecall[["x"]] <- as.name(namealphax)
thecall[["Intercept.t"]] <- FALSE
indxterm <- variable2term(i, Terms)
charcall<-deparse(thecall, 500)
oldtermlabel <- newtermlabels[indxterm[k]]
newtermlabels <- gsub(oldtermlabel, charcall, newtermlabels, fixed=TRUE)
        # add offset(alpha(x)*b1)
offset <- c(offset, paste("offset(", namealphax, "b1", ")", sep=""))
}
}
}
if(modified > 0){
formula <- reformulate(c(baseline, newtermlabels, offset),
response = response,
intercept = !is.null(baseline))
}
return(formula)
}
######################################################################
NPHNLL2NLL.formula <- function(formula, data,
response=".fail",
...){
	# make formula in which NPHNLL is replaced by NLL
special <- c("NPH","NLL", "NPHNLL")
Terms <- if (missing(data)){
terms(formula, special, keep.order = TRUE)
} else {
terms(formula, special, data = data, keep.order = TRUE)
}
NamesNPHNLLVars<- all_specials_vars( Terms,
specials="NPHNLL",
unique = TRUE,
order="formula")
modified <- 0
newtermlabels <- labels(Terms)
	# replace NPHNLL() calls by NLL() calls, dropping the time-related arguments
if(length(NamesNPHNLLVars) >0){
for (i in attr(Terms, "specials")[["NPHNLL"]]){
thecall <- match.call(NPHNLL, attr(Terms,"variables")[[i+1]])
modified <- modified + 1
thecall[[1]] <- as.name("NLL")
thecall[["timevar"]] <- NULL
if(!is.null(thecall[["Degree.t"]])){
thecall[["Degree.t"]] <- NULL
}
if(!is.null(thecall[["Knots.t"]])){
thecall[["Knots.t"]] <- NULL
}
if(!is.null(thecall[["Intercept.t"]])){
thecall[["Intercept.t"]] <- NULL
}
if(!is.null(thecall[["Boundary.knots.t"]])){
thecall[["Boundary.knots.t"]] <- NULL
}
if(!is.null(thecall[["model"]])){
thecall[["model"]] <- NULL
}
indxterm <- variable2term(i, Terms)
charcall<-deparse(thecall, 500)
oldtermlabel <- newtermlabels[indxterm]
newtermlabels <- gsub(oldtermlabel, charcall, newtermlabels, fixed=TRUE)
}
}
if(modified > 0){
formula <- reformulate(newtermlabels,
response = response,
intercept = FALSE)
}
return(formula)
}
################################################################################
NPHNLL2NPHalpha.formula <- function(formula, data,
response=".fail",
...){
	# make formula in which NPHNLL is replaced by NPHalpha
special <- c("NPH","NLL", "NPHNLL")
Terms <- if (missing(data)){
terms(formula, special, keep.order = TRUE)
} else {
terms(formula, special, data = data, keep.order = TRUE)
}
NamesNPHNLLVars<- all_specials_vars( Terms,
specials="NPHNLL",
unique = TRUE,
order="formula")
modified <- 0
newtermlabels <- labels(Terms)
	# replace NPHNLL() calls by NPHalpha() calls, dropping the spline-in-x arguments
if(length(NamesNPHNLLVars) >0){
for (i in attr(Terms, "specials")[["NPHNLL"]]){
thecall <- match.call(NPHNLL, attr(Terms,"variables")[[i+1]])
modified <- modified + 1
thecall[[1]] <- as.name("NPHalpha")
if(!is.null(thecall[["Degree"]])){
thecall[["Degree"]] <- NULL
}
if(!is.null(thecall[["Knots"]])){
thecall[["Knots"]] <- NULL
}
if(!is.null(thecall[["Intercept"]])){
thecall[["Intercept"]] <- NULL
}
if(!is.null(thecall[["Boundary.knots"]])){
thecall[["Boundary.knots"]] <- NULL
}
indxterm <- variable2term(i, Terms)
charcall<-deparse(thecall, 500)
oldtermlabel <- newtermlabels[indxterm]
newtermlabels <- gsub(oldtermlabel, charcall, newtermlabels, fixed=TRUE)
}
}
if(modified > 0){
formula <- reformulate(newtermlabels,
response = response,
intercept = FALSE)
}
return(formula)
}
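################################################################################
# Illustrative sketch (added; not part of flexrsurv): the functions above all
# follow the same pattern -- read the term labels with terms(), rewrite the
# labels of the special calls, then rebuild the formula with reformulate().
# A minimal, self-contained base-R version of that pattern; demo_rewrite_formula
# is a hypothetical helper used only for illustration.
demo_rewrite_formula <- function(formula, from, to, response = "y"){
	# relabel matching terms and rebuild the formula; the formula is only built,
	# never fitted, so the replacement label need not be an existing function
	labs <- labels(terms(formula))
	labs <- gsub(from, to, labs, fixed = TRUE)
	reformulate(labs, response = response)
}
# demo_rewrite_formula(y ~ log(x) + z, from = "log(x)", to = "NLL(x)")
# gives: y ~ NLL(x) + z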
|
/R/make.formulastep.formula.R
|
no_license
|
cran/flexrsurv
|
R
| false | false | 6,807 |
r
|
#' Geocode
#'
#' Geocodes (finds latitude and longitude of) a location using the Google
#' Geocoding API. Note: To use Google's Geocoding API, you must first enable the
#' API in the Google Cloud Platform Console. See [register_google()].
#'
#' @param location a character vector of street addresses or place names (e.g.
#' "1600 pennsylvania avenue, washington dc" or "Baylor University")
#' @param output amount of output, "latlon", "latlona", "more", or "all"
#' @param source "google" for Google (note: "dsk" is defunct)
#' @param force force online query even if cached.
#' @param urlonly return only the url?
#' @param override_limit override the current query rate
#' @param nameType in some cases, Google returns both a long name and a short
#' name. this parameter allows the user to specify which to grab.
#' @param ext top level domain (e.g. "com", "co.nz"); helpful for non-US users
#' @param inject character string to add to the url or named character vector of
#' key-value pairs to be injected (e.g. c("a" = "b") get converted to "a=b"
#' and appended to the query)
#' @param data a data frame or equivalent
#' @param path path to file
#' @param overwrite in [load_geocode_cache()], should the current cache be
#' wholly replaced with the one on file?
#' @param ... In [mutate_geocode()], arguments to pass to [geocode()]. In
#' [write_geocode_cache()], arguments to pass to [saveRDS()].
#' @return If \code{output} is "latlon", "latlona", or "more", a tibble (classed
#' data frame). If "all", a list.
#' @author David Kahle \email{david@@kahle.io}
#' @seealso \url{http://code.google.com/apis/maps/documentation/geocoding/},
#' \url{https://developers.google.com/maps/documentation/javascript/geocoding},
#'
#'
#' \url{https://developers.google.com/maps/documentation/geocoding/usage-limits}
#'
#'
#' @name geocode
#' @examples
#'
#' \dontrun{ requires Google API key, see ?register_google
#'
#' ## basic usage
#' ########################################
#'
#' # geocoding is most commonly used for addresses
#' geocode("1600 Amphitheatre Parkway, Mountain View, CA")
#' geocode("1600 Amphitheatre Parkway, Mountain View, CA", urlonly = TRUE)
#'
#' # google can also geocode colloquial names of places
#' geocode("the white house")
#'
#' # geocode can also accept character vectors of places
#' geocode(c("the white house", "washington dc"))
#'
#'
#'
#' ## types of output
#' ########################################
#'
#' geocode("waco texas")
#' geocode("waco texas", output = "latlona")
#' geocode("waco texas", output = "more")
#' str(geocode("waco texas", output = "all"))
#'
#' geocode(c("waco, texas", "houston, texas"))
#' geocode(c("waco, texas", "houston, texas"), output = "latlona")
#' geocode(c("waco, texas", "houston, texas"), output = "all") %>% str(4)
#'
#'
#'
#' ## mutate_geocode
#' ########################################
#'
#' # mutate_geocode is used to add location columns to an existing dataset
#' # that has location information
#'
#' df <- data.frame(
#' address = c("1600 Pennsylvania Avenue, Washington DC", "", "houston texas"),
#' stringsAsFactors = FALSE
#' )
#'
#' mutate_geocode(df, address)
#' df %>% mutate_geocode(address)
#'
#'
#' ## known issues
#' ########################################
#'
#' # in some cases geocode finds several locations
#' geocode("waco city hall")
#'
#'
#' }
#'
#'
#' @rdname geocode
#' @export
geocode <- function (
location,
output = c("latlon", "latlona", "more", "all"),
source = c("google", "dsk"),
force = ifelse(source == "dsk", FALSE, TRUE),
urlonly = FALSE,
override_limit = FALSE,
nameType = c("long", "short"),
ext = "com",
inject = "",
...
) {
# basic parameter check
stopifnot(is.character(location))
output <- match.arg(output)
nameType <- match.arg(nameType)
source <- match.arg(source)
# source checking
if (source == "google" && !has_google_key() && !urlonly) stop("Google now requires an API key.", "\n See ?register_google for details.", call. = FALSE)
# if (source == "dsk") stop("datasciencetoolkit.org terminated its map service, sorry!")
# vectorize for many locations
if (length(location) > 1) {
out <- location %>%
map(~ geocode(.x,
"output" = output,
"source" = source,
"messaging" = messaging,
"inject" = inject,
"force" = force,
"urlonly" = urlonly
)
)
if (output == "all") return(out)
out <- out %>% map(~ as_tibble(as.list(.x))) %>% bind_rows()
return(out)
}
# return NA for location == ""
if (location == "") return(return_failed_geocode(output))
# set url base (protocol + fqdn + path + "?")
url_base <- switch(source,
"google" = glue("https://maps.googleapis.{ext}/maps/api/geocode/json?"),
"dsk" = "http://www.datasciencetoolkit.org/maps/api/geocode/json?"
)
# initialize the url query
url_query <- location %>% str_trim() %>% str_replace_all(" +", "+") %>% URLencode(reserved = FALSE) %>% c("address" = .)
# address
# "1600+Amphitheatre+Parkway,+Mountain+View,+CA"
# add google account stuff to query, if applicable
if (source == "google") {
url_query <- c(url_query, "client" = google_client(), "signature" = google_signature(), "key" = google_key())
url_query <- url_query[!is.na(url_query)]
}
# form url
url_query_inline <- str_c(names(url_query), url_query, sep = "=", collapse = "&")
url <- str_c(url_base, url_query_inline)
# inject any remaining stuff
if (inject != "") {
if (is.null(names(inject))) {
url <- str_c(url, inject, sep = "&")
} else {
url <- str_c(url, str_c(names(inject), inject, sep = "=", collapse = "&"), sep = "&")
}
}
# encode
url <- URLencode( enc2utf8(url) )
url <- str_replace_all(url, "#", "%23") # selectively url-encode
# return early if user only wants url
if(urlonly) if(showing_key()) return(url) else return(scrub_key(url))
# hash for caching
url_hash <- digest::digest(scrub_key(url))
# lookup info if on file
if (location_is_cached(url_hash) && force == FALSE) {
gc <- geocode_cache()[[url_hash]]
} else {
# if using google, throttle/update google query limit
if (source == "google") throttle_google_geocode_query_rate(url_hash, queries_sought = 1L, override = override_limit)
# message url
if (showing_key()) message("Source : ", url) else message("Source : ", scrub_key(url))
# query server
response <- httr::GET(url)
# deal with bad responses
if (response$status_code != 200L) {
warning(
tryCatch(stop_for_status(response),
"http_400" = function(c) "HTTP 400 Bad Request",
"http_402" = function(c) "HTTP 402 Payment Required - May indicate over Google query limit",
"http_403" = function(c) "HTTP 403 Forbidden - Server refuses, is the API enabled?",
"http_404" = function(c) "HTTP 404 Not Found - Server reports page not found",
"http_414" = function(c) "HTTP 414 URI Too Long - URL query too long",
"http_500" = function(c) "HTTP 500 Internal Server Error - If dsk, try Google",
"http_503" = function(c) "HTTP 503 Service Unavailable - Server bogged down, try later"
)
)
return(return_failed_geocode(output))
}
# grab content
gc <- httr::content(response)
# cache it
cache_geocoded_info(url_hash, gc)
}
# did geocode fail?
if (gc$status != "OK") {
warning(
glue("Geocoding \"{str_trunc(location, 20)}\" failed with error:"),
"\n", gc$error_message, "\n",
call. = FALSE, immediate. = TRUE, noBreaks. = FALSE
)
return(tibble("lon" = NA_real_, "lat" = NA_real_))
}
# return if you want full output
if (output == "all") return(gc)
# more than one location found?
if (length(gc$results) > 1L) {
message( glue("\"{stringr::str_trunc(location, 20)}\" not uniquely geocoded, using \"{tolower(gc$results[[1]]$formatted_address)}\"") )
}
# format geocoded data
NULLtoNA <- function (x) {
if (is.null(x)) return(NA) else x
}
gcdf <- with(gc$results[[1]], {
tibble(
"lon" = NULLtoNA(geometry$location$lng),
"lat" = NULLtoNA(geometry$location$lat),
"type" = tolower(NULLtoNA(types[1])),
"loctype" = tolower(NULLtoNA(geometry$location_type)),
"address" = location, # dsk doesn't give the address
"north" = NULLtoNA(geometry$viewport$northeast$lat),
"south" = NULLtoNA(geometry$viewport$southwest$lat),
"east" = NULLtoNA(geometry$viewport$northeast$lng),
"west" = NULLtoNA(geometry$viewport$southwest$lng)
)
})
# add address
if (source == "google") gcdf$address <- tolower(NULLtoNA(gc$results[[1]]$formatted_address))
if (output == "latlon") return(gcdf[,c("lon","lat")])
if (output == "latlona") return(gcdf[,c("lon","lat","address")])
if (output == "more") return(gcdf)
# parse json when output == "more"
name_to_grab <- if(nameType == "long") "long_name" else "short_name"
output_values <- vapply(gc$results[[1]]$address_components, function (x) x[[name_to_grab]], character(1))
output_names <- vapply(gc$results[[1]]$address_components, function (x) {
if (length(x$types) == 0) return("query")
unlist(x$types)[1]
},
character(1)
)
gcdf_more <- as_tibble(as.list(output_values))
names(gcdf_more) <- output_names
tibble(gcdf, gcdf_more)
}
#' @rdname geocode
#' @export
mutate_geocode <- function (data, location, ...){
locs <- data[[deparse(substitute(location))]]
gcdf <- geocode(locs, ...)
dplyr::bind_cols(data, gcdf)
}
throttle_google_geocode_query_rate <- function (url_hash, queries_sought, override) {
if (exists(".google_geocode_query_times", ggmap_environment)) {
.google_geocode_query_times <- get(".google_geocode_query_times", envir = ggmap_environment)
queries_used_in_last_second <- with(.google_geocode_query_times, sum(queries[time >= Sys.time() - 1L]))
if (!override && (queries_used_in_last_second + queries_sought > google_second_limit())) Sys.sleep(.2) # can do better
assign(
".google_geocode_query_times",
bind_rows(.google_geocode_query_times, tibble("time" = Sys.time(), "url" = url_hash, "queries" = queries_sought)),
envir = ggmap_environment
)
} else {
assign(".google_geocode_query_times", tibble("time" = Sys.time(), "url" = url_hash, "queries" = queries_sought), envir = ggmap_environment)
}
invisible()
}
#' @export
#' @rdname geocode
geocodeQueryCheck <- function () {
.Deprecated(msg = "As of mid-2018, Google no longer has daily query limits.")
queries <- NA; rm(queries)
if (exists(".google_geocode_query_times", ggmap_environment)) {
.google_geocode_query_times <- get(".google_geocode_query_times", ggmap_environment)
google_geocode_queries_in_last_24hrs <-
.google_geocode_query_times %>%
dplyr::filter(time >= Sys.time() - 24L*60L*60L) %>%
dplyr::select(queries) %>%
sum()
remaining <- google_day_limit() - google_geocode_queries_in_last_24hrs
message(remaining, " Google geocoding queries remaining.")
} else {
remaining <- google_day_limit()
message(remaining, " Google geocoding queries remaining.")
}
invisible(remaining)
}
#' @export
#' @rdname geocode
geocode_cache <- function () {
if (!exists(".geocode_cache", envir = ggmap_environment)) {
assign(".geocode_cache", list(), ggmap_environment)
}
get(".geocode_cache", envir = ggmap_environment)
}
cache_geocoded_info <- function (url_hash, data) {
if (!exists(".geocode_cache", envir = ggmap_environment)) assign(".geocode_cache", list(), ggmap_environment)
assign(
".geocode_cache",
c(geocode_cache(), structure(list(data), names = url_hash)),
envir = ggmap_environment
)
invisible()
}
location_is_cached <- function (url_hash) {
if (!exists(".geocode_cache", envir = ggmap_environment)) return(FALSE)
if (url_hash %notin% names(geocode_cache())) return(FALSE)
TRUE
}
return_failed_geocode <- function (output) {
if (output == "latlon") {
return(tibble("lon" = NA_real_, "lat" = NA_real_))
} else if (output == "latlona") {
return(tibble("lon" = NA_real_, "lat" = NA_real_, "address" = NA_character_))
} else if (output == "more") {
return(tibble(
"lon" = NA_real_, "lat" = NA_real_, "type" = NA_character_, "address" = NA_character_,
"north" = NA_real_, "south" = NA_real_, "east" = NA_real_, "west" = NA_real_
))
} else {
return(NA)
}
}
#' @export
#' @rdname geocode
write_geocode_cache <- function (path, ...) {
saveRDS(
object = geocode_cache(),
file = path,
...
)
}
#' @export
#' @rdname geocode
load_geocode_cache <- function(path, overwrite = FALSE) {
if (!exists(".geocode_cache", envir = ggmap_environment)) {
assign(".geocode_cache", list(), ggmap_environment)
}
if (overwrite) {
assign(".geocode_cache", readRDS(path), ggmap_environment)
} else {
assign(
".geocode_cache",
c(geocode_cache(), readRDS(path)),
ggmap_environment
)
}
}
#' @export
#' @rdname geocode
clear_geocode_cache <- function(path) {
assign(".geocode_cache", list(), ggmap_environment)
}
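# ------------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of ggmap). A typical cache workflow
# with the helpers defined above; the file name is an assumption for illustration
# only, and the geocode() calls require a registered API key (see
# ?register_google), so everything is left commented out in the spirit of the
# \dontrun{} examples above.
# geocode("waco texas")                        # first call queries the API and is cached
# geocode("waco texas", force = FALSE)         # with force = FALSE a repeat query is served from the cache
# write_geocode_cache("geocode_cache.rds")     # persist the in-memory cache to disk
# clear_geocode_cache()                        # wipe the in-memory cache
# load_geocode_cache("geocode_cache.rds")      # restore the cache from disk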
|
/R/geocode.R
|
permissive
|
erhard1/ggmap
|
R
| false | false | 13,401 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/farsFuncs.R
\name{fars_read_years}
\alias{fars_read_years}
\title{fars_read_years}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{one or more years as an atomic value or a list}
}
\value{
Creates one or more datasets based on year number. Returns NULL if there is an error
}
\description{
This function accepts one or more years as a list, calls the function make_filename()
with each of the years, and then populates those files with data associated with that
specific year from the main data set
}
\details{
An error will be thrown and the function will be halted if the year is invalid.
Uses \code{make_filename(year)} and \code{fars_read(file)}.
}
\examples{
\dontrun{
fars_read_years(1999)
fars_read_years(as.list(1999, 2000, 2001))
fars_read_years(1999:2016)
}
}
|
/man/fars_read_years.Rd
|
no_license
|
drglcc/packFars
|
R
| false | true | 866 |
rd
|