content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text
---|---|---|---|---|---|---|---|---|---
large_string (lengths 0-6.46M) | large_string (lengths 3-331) | large_string (2 classes) | large_string (lengths 5-125) | large_string (1 class) | bool (2 classes) | bool (2 classes) | int64 (4-6.46M) | large_string (75 classes) | string (lengths 0-6.46M)
| /Code/SimulatePoissonProcess.R | no_license | STIMALiU/IntroStatsForCSCourse | R | false | false | 567 | r |
#!/usr/bin/Rscript
#SBATCH -c 4
#SBATCH --mem 32G
#SBATCH --array 1-5
taskID = as.integer(Sys.getenv("SLURM_ARRAY_TASK_ID"))
arg = commandArgs(trailingOnly=TRUE)
cell_line = as.character(arg[1]) # cell line name, passed as the first trailing argument
library(pacman)
p_load(STAN, GenomicRanges, ggplot2, reshape, dplyr, cowplot)
binned_K562 = readRDS("../data/hmm_training/binned_hm_data_K562.rds")
binned_HepG2 = readRDS("../data/hmm_training/binned_hm_data_HepG2.rds")
binned_A549 = readRDS("../data/hmm_training/binned_hm_data_A549.rds")
binned_GM12878 = readRDS("../data/hmm_training/binned_hm_data_GM12878.rds")
binned_data = list(binned_K562, binned_HepG2, binned_A549, binned_GM12878)
names(binned_data) = c("K562", "HepG2", "A549", "GM12878")
sample = binned_data[[cell_line]]
nStates = c(10, 15, 20, 25, 30)
maxIters = 500
method = "Bernoulli"
print(cell_line)
print(nStates[taskID])
sample_binary = binarizeData(obs=sample, thresh = 1e-04)
hmm = initHMM(sample_binary, nStates[taskID], method)
# find optimal number of iterations
hmm_fitted = fitHMM(sample_binary, hmm, maxIters = maxIters)
path = paste0("../data/HMMs/tmpdir/", cell_line, "_", method, "_", nStates[taskID],"_HMM.rds")
saveRDS(hmm_fitted, path)
log_L = hmm_fitted@LogLik
iterations = c(1:length(log_L))
data = data.frame(iterations, log_L)
line_1 = ggplot(data = data, aes(x=iterations, y=log_L)) +
geom_line()+
geom_point(alpha=0.2, color = "red")+
labs(x = "Number of iterations", y = "log Likelihood", title=paste0(cell_line, "_", method, "::", "all iterations"))
line_2 = ggplot(data = data, aes(x=iterations, y=log_L)) +
geom_line()+
geom_point(alpha=0.2, color = "red")+
labs(x = "Number of iterations", y = "log Likelihood", title=paste0(cell_line, "_", method,"::","Iterations 1-10 removed"))+
coord_cartesian(ylim = c(data$log_L[10], data$log_L[length(data$log_L)]+1000))
plot = plot_grid(line_1, line_2, ncol=2)
path = paste0("../data/HMMs/tmpdir/", cell_line, "_", method,"_", nStates[taskID], ".png")
ggsave(path)
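# A minimal follow-up sketch: once all five array tasks for one cell line have
# finished, the saved fits can be compared by their final log-likelihood. The
# paths below assume the K562 run of the script above; adjust for other cell lines.
states <- c(10, 15, 20, 25, 30)
fit_paths <- paste0("../data/HMMs/tmpdir/K562_Bernoulli_", states, "_HMM.rds")
final_logL <- sapply(fit_paths, function(p) tail(readRDS(p)@LogLik, 1))
states[which.max(final_logL)] # state count with the highest final log-likelihood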
| /cluster/scripts/hmm_benchmark_bernoulli.R | no_license | a-solovyev12/promoter_classification | R | false | false | 1,981 | r |
|
#' Skewness
#'
#' Computes the skewness statistic.
#'
#' @param x any numeric vector.
#' @param na.rm logical; if TRUE, then remove missing values before computation.
#' @param method the method to use for computing the skew, must be "fisher" or
#' "moments."
#' @return a single value representing the skewness of the data in \code{x}.
#' @references Helsel, D.R. and Hirsch, R.M., 2002, Statistical methods in
#' water resources: U.S. Geological Survey Techniques of Water-Resources
#' Investigations, book 4, chap. A3, 522 p.\cr
#' @keywords univariate
#' @examples
#'
#' skew(c(1.0, 1.2, 1.5, 1.9, 2.5))
#'
#' @export skew
skew <- function(x, na.rm=TRUE, method="fisher") {
# Coding history:
# Unknown DLLorenz Original Coding
# 2011Aug24 DLLorenz Conversion to R
# 2013Feb28 DLLorenz Tweak to handling method
# 2014Dec29 DLLorenz Conversion to roxygen header
##
method <- match.arg(method, c("fisher", "moments"))
if(na.rm)
x <- x[!is.na(x)]
n <- length(x)
mn <- mean(x)
dif.x <- x - mn
m2 <- sum(dif.x^2)/n
m3 <- sum(dif.x^3)/n
b1 <- (m3/(m2^(3/2)))
if(method == "moments")
return(b1)
if(n < 3)
g1 <- NA
else
g1 <- (sqrt(n * (n - 1)) * b1)/(n - 2)
return(g1)
}
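# A quick numeric check of the two formulas implemented above, using the example
# vector from the roxygen block: b1 is the method-of-moments skew and g1 is the
# Fisher-adjusted value returned by the default method.
x <- c(1.0, 1.2, 1.5, 1.9, 2.5)
m2 <- mean((x - mean(x))^2) # second central moment
m3 <- mean((x - mean(x))^3) # third central moment
b1 <- m3 / m2^(3/2)
n <- length(x)
g1 <- sqrt(n * (n - 1)) * b1 / (n - 2)
all.equal(b1, skew(x, method = "moments"))
all.equal(g1, skew(x)) # method = "fisher" is the default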
| /R/skew.R | permissive | oceanspace/smwrStats | R | false | false | 1,232 | r |
|
get.sim.data <- function(final.state, population.data,
amount.under.reporting.history,
perc.reporting.history,
perc.hideable.reporting.history,
compliant.history,
audit.history,
penalty.history) {
sim.data <- with(final.state, data.frame(tax.ids=population.data$tax.ids,
                                         self.employed=population.data$self.employed,
                                         c1=round(100*population.data$c1,0),
                                         c1.tilde=round(100*c1.tilde,0),
                                         per.audit.rate = per.audit.rate*100,
                                         per.penalty.rate = per.penalty.rate*100,
                                         perc.hideable.income = population.data$prop.hideable.income*100,
                                         Delta.Morale = round(100*Delta.Morale, 0),
                                         w = round(100*w,0),
                                         freq.audits=freq.audits,
                                         freq.penalty=freq.penalty,
                                         years.since.last.compliant=years.since.last.compliant,
                                         years.since.last.audit=years.since.last.audit,
                                         years.since.last.penalty=years.since.last.penalty,
                                         amount.under.reporting.history,
                                         perc.reporting.history,
                                         perc.hideable.reporting.history,
                                         compliant.history,
                                         audit.history,
                                         penalty.history))
invisible(sim.data)
}
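# Because of the invisible() return above, calling get.sim.data() at the console
# prints nothing unless the result is assigned or wrapped in print(). A tiny
# illustration of that pattern:
f <- function() invisible(data.frame(a = 1))
f()          # prints nothing
print(f())   # shows the data frame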
| /Library/get.sim.data.R | no_license | Breakend/RIBSS_tax_evasion_ABM | R | false | false | 1,956 | r |
|
#setwd("X:/LAB/ML")
data<-read.csv(file.choose(),header=TRUE)
for(i in seq(1,ncol(data)))
{
data[,i]=as.numeric(data[,i])
}
c=unique(data[,ncol(data)])
tot_ent=0
for(i in c)
{
v=length(which(data[ncol(data)]==i))/nrow(data)
tot_ent=tot_ent+(v*log2(v))
}
tot_ent=-tot_ent
ig=matrix(0,1,ncol(data)-1)
for(i in seq(ncol(data)-1))
{
f=unique(data[,i])
for(j in f)
{
tmp=0
for(k in c)
{
v=(length(which(data[i]==j&data[ncol(data)]==k)))/length(which(data[i]==j))
if(v!=0)
{
tmp=tmp+(v*log2(v))
}
}
tmp=-tmp
ig[i]=ig[i]+(tmp*(length(which(data[i]==j))/nrow(data)))
}
}
ig=tot_ent-ig
best=which.max(ig)
cat("\nThe best factor is",names(data)[best])
| /ML/Information_Gain.R | no_license | jerome4598/nalco | R | false | false | 768 | r |
#setwd("X:/LAB/ML")
data<-read.csv(file.choose(),header=TRUE)
for(i in seq(1,ncol(data)))
{
data[,i]=as.numeric(data[,i])
}
c=unique(data[,ncol(data)])
tot_ent=0
for(i in c)
{
v=length(which(data[ncol(data)]==i))/nrow(data)
tot_ent=tot_ent+(v*log2(v))
}
tot_ent=-tot_ent
ig=matrix(0,1,ncol(data)-1)
for(i in seq(ncol(data)-1))
{
f=unique(data[,i])
for(j in f)
{
tmp=0
for(k in c)
{
v=(length(which(data[i]==j&data[ncol(data)]==k)))/length(which(data[i]==j))
if(v!=0)
{
tmp=tmp+(v*log2(v))
}
}
tmp=-tmp
ig[i]=ig[i]+(tmp*(length(which(data[i]==j))/nrow(data)))
}
}
ig=tot_ent-ig
best=which.max(ig)
cat("\nThe best factor is",names(data)[best])
|
setwd("D:\\BI\\Coursera\\Courses\\3Getting&CleaningData\\getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset")
## 1. Merges the training and the test sets to create one data set.
Xtrain <- read.table("./train/X_train.txt") ##training set
dim(Xtrain)
Xtest <- read.table("./test/X_test.txt") ##test set
dim(Xtest)
mergedData <- rbind(Xtrain,Xtest) ##combined set: stack test rows below training rows so the later cbind() calls line up row-for-row
dim(mergedData)
# 2. Extracts only the measurements on the mean and standard deviation
# for each measurement.
features <- read.table("features.txt")
dim(features)
str(features)
names(features)
x <- features[grep("mean()",features$V2,fixed = TRUE),]
x
y <- features[grep("std()",features$V2),]
y
z<-merge(x,y,all=TRUE)
z
# 3. Uses descriptive activity names to name the activities in the data set
Ytrainactivity <- read.table("./train/Y_train.txt")
Ytestactivity <- read.table("./test/Y_test.txt")
dim(Ytrainactivity)
dim(Ytestactivity)
activity <- rbind(Ytrainactivity,Ytestactivity)
mergedData <- cbind(mergedData,activity)
colnames(mergedData)[562]<-"activity"
list1 <- c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS","SITTING","STANDING","LAYING")
for(i in 1:6)
{
mergedData$activity <- gsub(i,list1[i],mergedData$activity)
}
# 4. Appropriately labels the data set with descriptive variable names.
features <- read.table("features.txt")
for(i in 1:561)
{
x<-features[i,2]
colnames(mergedData)[i]<-as.character(x)
}
colnames(mergedData)
dim(mergedData)
# 5. From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject
trainsubj <- read.table("./train/subject_train.txt")
testsubj <- read.table("./test/subject_test.txt")
subject <- rbind(trainsubj,testsubj)
dim(subject)
library(Hmisc)
library(plyr)
mergedData <- cbind(mergedData,subject)
colnames(mergedData)[563]<-"subject"
colnames(mergedData)
write.csv(mergedData,file="mergedData.csv")
avg <- aggregate(mergedData[,1:561],by=list(mergedData$activity,mergedData$subject),FUN=mean)
write.table(avg,file="mergedData.txt",row.names = FALSE)
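# A hedged sanity check after the script has run: the tidy output should hold one
# row per (activity, subject) pair, i.e. 6 activities x 30 subjects = 180 rows
# for the full UCI HAR data.
avg_check <- read.table("mergedData.txt", header = TRUE)
dim(avg_check) # expect 180 rows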
| /run_analysis.R | no_license | KomalKalbhor1/dataCleaningCoursera | R | false | false | 2,169 | r |
setwd("D:\\BI\\Coursera\\Courses\\3Getting&CleaningData\\getdata-projectfiles-UCI HAR Dataset\\UCI HAR Dataset")
## 1. Merges the training and the test sets to create one data set.
Xtrain <- read.table("./train/X_train.txt") ##training set
dim(Xtrain)
Xtest <- read.table("./test/X_test.txt") ##test set
dim(Xtest)
mergedData <- merge(Xtrain,Xtest,all=TRUE) ##merged set
dim(mergedData)
# 2. Extracts only the measurements on the mean and standard deviation
# for each measurement.
features <- read.table("features.txt")
dim(features)
str(features)
names(features)
x <- features[grep("mean()",features$V2,fixed = TRUE),]
x
y <- features[grep("std()",features$V2),]
y
z<-merge(x,y,all=TRUE)
z
# 3. Uses descriptive activity names to name the activities in the data set
Ytrainactivity <- read.table("./train/Y_train.txt")
Ytestactivity <- read.table("./test/Y_test.txt")
dim(Ytrainactivity)
dim(Ytestactivity)
activity <- rbind(Ytrainactivity,Ytestactivity)
mergedData <- cbind(mergedData,activity)
colnames(mergedData)[562]<-"activity"
list1 <- c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS","SITTING","STANDING","LAYING")
for(i in 1:6)
{
mergedData$activity <- gsub(i,list1[i],mergedData$activity)
}
# 4. Appropriately labels the data set with descriptive variable names.
features <- read.table("features.txt")
for(i in 1:561)
{
x<-features[i,2]
colnames(mergedData)[i]<-as.character(x)
}
colnames(mergedData)
dim(mergedData)
# 5. From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject
trainsubj <- read.table("./train/subject_train.txt")
testsubj <- read.table("./test/subject_test.txt")
subject <- rbind(trainsubj,testsubj)
dim(subject)
library(Hmisc)
library(plyr)
mergedData <- cbind(mergedData,subject)
colnames(mergedData)[563]<-"subject"
colnames(mergedData)
write.csv(mergedData,file="mergedData.csv")
avg <- aggregate(mergedData[,1:561],by=list(mergedData$activity,mergedData$subject),FUN=mean)
write.table(avg,file="mergedData.txt",row.names = FALSE)
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/autonomic_ganglia.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.25,family="gaussian",standardize=FALSE)
sink('./autonomic_ganglia_040.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
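# A small follow-up sketch: pull the non-zero coefficients at the CV-selected
# lambda from the cv.glmnet fit above (here stored in the object named glm);
# coef() on a cv.glmnet object accepts s = "lambda.min".
coefs <- as.matrix(coef(glm, s = "lambda.min"))
rownames(coefs)[coefs != 0]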
| /Model/EN/Lasso/autonomic_ganglia/autonomic_ganglia_040.R | no_license | esbgkannan/QSMART | R | false | false | 367 | r |
|
# Author: Susan Paykin
# Date: April 22, 2020
# Geocomputation in R
# for GIS III: University of Chicago
######################################
### Chapter 5: Geometry Operations ###
######################################
# Load libraries
library(sf)
library(raster)
library(dplyr)
library(spData)
library(spDataLarge)
###############################################
### 5.2 Geometric Operations on Vector Data ###
###############################################
##### 5.2.1 Simplification ######
# Definition: process for generalization of vector objects, usually for smaller-scale maps
# Also: reduce amount of memory, simplify geometries before using as interactive maps
# st_simplify()
seine_simp = st_simplify(seine, dTolerance = 2000) #2000 meters
plot(seine_simp)
object.size(seine)
object.size(seine_simp)
us_states2163 = st_transform(us_states, 2163)
us_states_simp1 = st_simplify(us_states2163, dTolerance = 100000) #100 km
# ms_simplify() overcomes the issue of overlapping and "holey" areal units
us_states2163$AREA = as.numeric(us_states2163$AREA)
us_states_simp2 = rmapshaper::ms_simplify(us_states2163, keep = 0.01, keep_shapes = TRUE)
plot(us_states)
plot(us_states_simp1)
plot(us_states_simp2)
##### 5.2.2 Centroids #####
# st_centroid()
nz_centroid = st_centroid(nz)
plot(nz_centroid["geom"])
seine_centroid = st_centroid(seine)
plot(seine_centroid["geometry"])
# points on surface - guarantee the point will be in the parent object (good for labeling irregular multipolygon objects)
nz_pos = st_point_on_surface(nz)
plot(nz_pos["geom"])
seine_pos = st_point_on_surface(seine)
plot(seine_pos["geometry"])
##### 5.2.3 Buffers #####
# st_buffer()
seine_buff_5km = st_buffer(seine, dist = 5000) #5km buffer
plot(seine_buff_5km)
sein_buff_50km = st_buffer(seine, dist = 50000) #50km buffer
plot(sein_buff_50km)
##### 5.2.4 Affine Transformations #####
# transformation that preserves lines and parallelism
nz_sfc = st_geometry(nz)
# Shift y coordinates by 100,000 meters, but do not shift x coordinates
nz_shift = nz_sfc + c(1, 100000)
# Scaling
nz_centroid_sfc = st_centroid(nz_sfc)
nz_scale = (nz_sfc - nz_centroid_sfc) * 0.5 + nz_centroid_sfc # sizes are reduced by half (0.5)
# Rotation matrix - rotates points in clockwise direction
rotation = function(a){
r = a * pi / 180 #degrees to radians
matrix(c(cos(r), sin(r), -sin(r), cos(r)), nrow = 2, ncol = 2)
}
# rotation() function accepts one argument - rotation angle in degrees
nz_rotate = (nz_sfc - nz_centroid_sfc) * rotation(30) + nz_centroid_sfc
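# quick check of the convention used here: the matrix post-multiplies each
# coordinate row, so the point (1, 0) rotated by 90 degrees lands at (0, -1),
# i.e. a clockwise rotation as noted above
round(c(1, 0) %*% rotation(90), 6)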
##### 5.2.5 Clipping #####
# spatial subsetting that involves changes to geometry column
# two overlapping circles with center point one unit away from one another; radius = 1
b = st_sfc(st_point(c(0, 1)), st_point(c(1, 1))) # create 2 points
b = st_buffer(b, dist = 1) # convert points to circles
plot(b)
text(x = c(-0.5, 1.5), y = 1, labels = c("x", "y")) # add text
# st_intersection() - select where the circles intersect, both x and y
x = b[1]
y = b[2]
x_and_y = st_intersection(x, y)
plot(b)
plot(x_and_y, col = "lightgrey", add = TRUE) # color intersecting area
# subset points that cover the bounding box of circles x and y
bb = st_bbox(st_union(x, y))
box = st_as_sfc(bb)
set.seed(2017)
p = st_sample(x = box, size = 10)
plot(box)
plot(x, add = TRUE)
plot(y, add = TRUE)
plot(p, add = TRUE)
text(x = c(-0.5, 1.5), y = 1, labels = c("x", "y"))
# using clipped polygon
sel_p_xy = st_intersects(p, x, sparse = FALSE)[, 1] &
st_intersects(p, y, sparse = FALSE)[, 1]
p_xy1 = p[sel_p_xy]
p_xy2 = p[x_and_y]
identical(p_xy1, p_xy2)
##### 5.2.6 Geometry Unions #####
regions = aggregate(x = us_states[, "total_pop_15"], by = list(us_states$REGION),
FUN = sum, na.rm = TRUE)
plot(regions)
regions2 = us_states %>% group_by(REGION) %>%
summarize(pop = sum(total_pop_15, na.rm = TRUE))
plot(regions2)
# st_union()
us_west = us_states[us_states$REGION == "West", ]
us_west_union = st_union(us_west)
texas = us_states[us_states$NAME == "Texas", ]
texas_union = st_union(us_west_union, texas)
plot(texas_union)
##### 5.2.7 Type Transformations #####
# geometry casting - st_cast()
multipoint = st_multipoint(matrix(c(1, 3, 5, 1, 3, 1), ncol = 2))
plot(multipoint)
linestring = st_cast(multipoint, "LINESTRING")
polyg = st_cast(multipoint, "POLYGON")
multilinestring_list = list(matrix(c(1, 4, 5, 3), ncol = 2),
matrix(c(4, 4, 4, 1), ncol = 2),
matrix(c(2, 4, 2, 2), ncol = 2))
multilinestring = st_multilinestring((multilinestring_list))
multilinestring_sf = st_sf(geom = st_sfc(multilinestring))
multilinestring_sf
linestring_sf2 = st_cast(multilinestring_sf, "LINESTRING")
linestring_sf2
linestring_sf2$name = c("Riddle Rd", "Marshall Ave", "Foulke St")
linestring_sf2$length = st_length(linestring_sf2)
linestring_sf2
###############################################
### 5.3 Geometric Operations on Raster Data ###
###############################################
# Operations include the shift, flipping, mirroring, scaling, rotation or warping of images.
# Necessary for a variety of applications including georeferencing,
# used to allow images to be overlaid on an accurate map with a known CRS.
##### 5.3.1 Geometric Intersections #####
# keep matrix structure by setting drop parameter to FALSE;
# returns raster object containing the cells whose midpoints overlap with clip
data("elev", package = "spData")
clip = raster(xmn = 0.9, xmx = 1.8, ymn = -0.45, ymx = 0.45,
res = 0.3, vals = rep(1, 9))
elev[clip, drop = FALSE]
##### 5.3.2 Extent and Origin #####
# how to merge satellite imagery from different sensors with different projections and resolutions
# adds one row and two columns to each side of the raster while setting all new values to an elevation of 1000 meters
data(elev, package = "spData")
elev_2 = extend(elev, c(1, 2), value = 1000)
plot(elev_2)
# test different extents in algebraic operation
elev_3 = elev + elev_2 #returns error
# extend() function - extend elev to elev_2
elev_4 = extend(elev, elev_2)
# origin() returns coordinate of origin of raster (cell closest to coordinate 0,0)
origin(elev_4)
# change the origin
origin(elev_4) = c(0.25, 0.25)
plot(elev_4)
# and add the original raster
plot(elev, add = TRUE)
##### 5.3.3 Aggregation and Disaggregation #####
# decrease (aggregate()) or increase (disaggregate()) resolution of a raster
# change spatial resolution of dem raster (from the RQGIS package)
data("dem", package = "RQGIS")
plot(dem)
dem_agg = aggregate(dem, fact = 5, fun = mean)
plot(dem_agg)
# disaggregate() increases resolution; need to specify method
# method = "" gives all output cells value of input cell, hence duplicates values
# method = bilinear, is interpolation technique, uses 4 nearest pixel centers to compute average weighted by distnace
dem_disagg = disaggregate(dem_agg, fact = 5, method = "bilinear")
plot(dem_disagg)
plot(dem)
identical(dem, dem_disagg)
# resample() function allows you to align several raster properties inc. origin, extent, resolution
# add 2 rows and columns, i.e. change the extent
dem_agg = extend(dem_agg, 2)
dem_disagg_2 = resample(dem_agg, dem)
plot(dem_disagg_2)
#############################################
###### 5.4 Raster-Vector Interactions #######
#############################################
##### 5.4.1 Raster Cropping #####
# often the extent of raster input datasets is larger than area of interest
# raster cropping and masking can help unify the spatial extent of input data
# load raster and vector data; reproject zion vector data. srtm = raster representing elevation (meters above sea level) in SW Utah
srtm = raster(system.file("raster/srtm.tif", package = "spDataLarge"))
zion = st_read(system.file("vector/zion.gpkg", package = "spDataLarge"))
zion = st_transform(zion, projection(srtm))
# crop() reduces rectangular extent of first argument object based on extent of second argument object
srtm_cropped = crop(srtm, zion)
plot(srtm_cropped)
# mask() is different
# Setting updatevalue = 0 will set all pixels outside the national park to 0.
# Setting inverse = TRUE will mask everything inside the bounds of the park
srtm_inv_masked = mask(srtm, zion, inverse = TRUE)
plot(srtm_inv_masked)
##### 5.4.2 Raster Extraction #####
# extract() function to extract value of raster cell at specific points
data("zion_points", package = "spDataLarge")
zion_points$elevation = raster::extract(srtm, zion_points)
raster::extract(srtm, zion_points, buffer = 1000)
zion_transect = cbind(c(-113.2, -112.9), c(37.45, 37.2)) %>%
st_linestring() %>%
st_sfc(crs = projection(srtm)) %>%
st_sf()
transect = raster::extract(srtm, zion_transect,
along = TRUE, cellnumbers = TRUE)
transect_df = purrr::map_dfr(transect, as_data_frame, .id = "ID")
transect_df
transect_coords = xyFromCell(srtm, transect_df$cell)
pair_dist = geosphere::distGeo(transect_coords)[-nrow(transect_coords)]
transect_df$dist = c(0, cumsum(pair_dist))
# extract() extract value of raster polygons
zion_srtm_values = raster::extract(x = srtm, y = zion, df = TRUE)
group_by(zion_srtm_values, ID) %>%
summarize_at(vars(srtm), list(~min(.), ~mean(.), ~max(.)))
# land cover dataset nlcd, counting occurrences of categorical raster values within polygons
# extract zion land cover data
zion_nlcd = raster::extract(nlcd, zion, df = TRUE, factors = TRUE)
# group by land cover categories
dplyr::select(zion_nlcd, ID, levels) %>%
tidyr::gather(key, value, -ID) %>%
group_by(ID, key, value) %>%
tally() %>%
tidyr::spread(value, n, fill = 0)
##### 5.4.3 Rasterization #####
# rasterize() - convert vector objects into raster
cycle_hire_osm_projected = st_transform(cycle_hire_osm, 27700)
raster_template = raster(extent(cycle_hire_osm_projected), resolution = 1000,
crs = st_crs(cycle_hire_osm_projected)$proj4string)
# 3 Approaches to Rasterization
# Approach 1 - absence or presence of something (binary)
# requires only one argument in addition to x and y (vector and raster objects):
# a value to be transferred to all non-empty cells specified by field
ch_raster1 = rasterize(cycle_hire_osm_projected, raster_template, field = 1)
plot(ch_raster1)
# Approach 2 - fun argument specifies summary stats, such as count, to count number of points in each cell
ch_raster2 = rasterize(cycle_hire_osm_projected, raster_template,
field = 1, fun = "count")
plot(ch_raster2)
# Approach 3 - capacity of each cell - sum the field ("capacity")
ch_raster3 = rasterize(cycle_hire_osm_projected, raster_template,
field = "capacity", fun = sum)
plot(ch_raster3)
# California polygons and borders, create template raster with resolution of 0.5 degree
california = dplyr::filter(us_states, NAME == "California")
california_borders = st_cast(california, "MULTILINESTRING")
raster_template2 = raster(extent(california), resolution = 0.5,
crs = st_crs(california)$proj4string)
# Line rasterization
# All cells touched by a line get a value
california_raster1 = rasterize(california_borders, raster_template2)
plot(california_raster1)
# Polygon rasterization
# All cells whose centroids are inside selector polygon get a value
california_raster2 = rasterize(california, raster_template2)
plot(california_raster2)
##### Spatial Vectorization #####
# convert spatially continuous raster data into spatially discrete vector data
# such as points, lines or polygons
# rasterToPoints() converts centroids of raster cells into points
elev_point = rasterToPoints(elev, spatial = TRUE) %>% #spatial=TRUE gets spatial object instead of matrix
st_as_sf()
plot(elev_point)
# rasterToContour() creates contour lines representing continuous height or temperatures
data(dem, package = "RQGIS")
cl = rasterToContour(dem)
plot(dem, axes = FALSE)
plot(cl, add = TRUE)
# Isolines can be labeled
# create hillshade
hs = hillShade(slope = terrain(dem, "slope"), aspect = terrain(dem, "aspect"))
plot(hs, col = gray(0:100 / 100), legend = FALSE)
# overlay with DEM
plot(dem, col = terrain.colors(25), alpha = 0.5, legend = FALSE, add = TRUE)
# add contour lines
contour(dem, col = "white", add = TRUE)
# rasterToPolygons() converts rasters to polygons
grain_poly = rasterToPolygons(grain) %>%
st_as_sf() #convert foreign object to sf object
plot(grain_poly)
grain_poly2 = grain_poly %>%
group_by(layer) %>%
summarize()
plot(grain_poly2)
############################
###### 5.5 Exercises #######
############################
# 4 - Most world maps have a north-up orientation. A world map with a south-up orientation could be created by a reflection (one of the affine transformations not mentioned in Section 5.2.4) of the world object’s geometry.
# Write code to do so. Hint: you need to use a two-element vector for this transformation.
# Bonus: create an upside-down map of your country.
# create world map with south-up orientation
world_south <- st_geometry(world) * c(-1, -1)
plot(world_south)
# create south-up map of Russia
russia_south <-
world %>%
filter(name_long == "Russian Federation") %>%
st_geometry() * c(-1, -1)
plot(russia_south)
# 5 - Subset the point in p that is contained within x and y (see Section 5.2.5 and Figure 5.8).
# Using base subsetting operators.
# Using an intermediary object created with st_intersection().
x = b[1]
y = b[2]
x_and_y = st_intersection(x, y)
plot(b)
plot(x_and_y, col = "lightblue", add = TRUE) # color intersecting area
set.seed(1948)
p = st_sample(x = st_union(x, y), size = 5)
plot(x_and_y, col = "lightblue", add = TRUE)
plot(p, add = TRUE)
text(x = c(-0.5, 1.5), y = 1, labels = c("x", "y"))
# base subsetting
p[x_and_y]
#POINT (0.6334506 1.118806)
#intermediary object
st_intersection(p, x_and_y)
#POINT (0.6334506 1.118806)
# 7 - Crop the ndvi raster using (1) the random_points dataset and (2) the ch dataset.
# Are there any differences in the output maps?
# Next, mask ndvi using these two datasets. Can you see any difference now? How can you explain that?
library(RQGIS)
data(random_points)
data(ndvi)
ch = st_combine(random_points) %>%
st_convex_hull()
# make convex hull polygon (ch) an sf object
ch1 <- st_as_sf(ch)
# crop with ch
ndvi_crop <- crop(ndvi, ch1)
plot(ndvi_crop)
# crop with random_points
ndvi_crop2 <- crop(ndvi, random_points)
plot(ndvi_crop2)
# mask with ch
ndvi_mask <- mask(ndvi, ch1)
plot(ndvi_mask)
# mask with random_points
ndvi_mask2 <- mask(ndvi, random_points)
plot(ndvi_mask2)
| /exercises/ch05_GeometryOperations.R | no_license | spaykin/geocomputation | R | false | false | 14,584 | r |
|
library(SIS)
### Name: predict.SIS
### Title: Model prediction based on a fitted SIS object.
### Aliases: predict.SIS
### Keywords: models
### ** Examples
set.seed(0)
n = 400; p = 50; rho = 0.5
corrmat = diag(rep(1-rho, p)) + matrix(rho, p, p)
corrmat[,4] = sqrt(rho)
corrmat[4, ] = sqrt(rho)
corrmat[4,4] = 1
corrmat[,5] = 0
corrmat[5, ] = 0
corrmat[5,5] = 1
cholmat = chol(corrmat)
x = matrix(rnorm(n*p, mean=0, sd=1), n, p)
x = x%*%cholmat
testX = matrix(rnorm(10*p, mean=0, sd=1), nrow=10, ncol=p)
# gaussian response
set.seed(1)
b = c(4,4,4,-6*sqrt(2),4/3)
y=x[, 1:5]%*%b + rnorm(n)
model1=SIS(x, y, family='gaussian', tune='bic', varISIS='aggr', seed=11)
predict(model1, testX, type='response')
predict(model1, testX, which=1:10, type='response')
## Not run:
##D # binary response
##D set.seed(2)
##D feta = x[, 1:5]%*%b; fprob = exp(feta)/(1+exp(feta))
##D y = rbinom(n, 1, fprob)
##D model2=SIS(x, y, family='binomial', tune='bic', varISIS='aggr', seed=21)
##D
##D predict(model2, testX, type='response')
##D predict(model2, testX, type='link')
##D predict(model2, testX, type='class')
##D
##D predict(model2, testX, which=1:10, type='response')
##D predict(model2, testX, which=1:10, type='link')
##D predict(model2, testX, which=1:10, type='class')
##D
##D # poisson response
##D set.seed(3)
##D b = c(0.6,0.6,0.6,-0.9*sqrt(2))
##D myrates = exp(x[, 1:4]%*%b)
##D y = rpois(n, myrates)
##D model3=SIS(x, y, family='poisson', penalty = 'lasso',tune='bic', varISIS='aggr', seed=31)
##D
##D predict(model3, testX, type='response')
##D predict(model3, testX, type='link')
## End(Not run)
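# A small follow-up sketch for inspecting the fit above; recent versions of the
# package store the indices of the selected variables in the returned list
# (e.g. model1$ix), but str() shows whichever components the installed version has.
str(model1, max.level = 1)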
| /data/genthat_extracted_code/SIS/examples/predict.SIS.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,616 | r |
|
##check if already have dataset, if not, download and unzip
if(!file.exists("household_power_consumption.txt")){
##download and unzip files to local
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, "dataset.zip")
unzip("dataset.zip")}
#read data and format Date and Time
data<-read.table("household_power_consumption.txt",header=T,sep=";")
library(lubridate)
data$new_time<-dmy_hms(paste(data$Date,data$Time))
data$Date<-dmy(data$Date)
data$Time<-hms(data$Time)
#subsetting only data from 2007-02-01 and 2007-02-02
data1<-subset(data, Date=="2007-02-01"|Date=="2007-02-02")
#converting factor variables into numeric variables while preserving decimals
data1[]<-lapply(data1,function(x){if(is.factor(x))as.numeric(as.character(x))else x})
#plot3
png(file="plot3.png")
plot(data1$new_time, data1$Sub_metering_1, ylim=range(data1$Sub_metering_1),type="l",xlab= "", ylab = "Energy sub metering", col="black")
lines(data1$new_time, data1$Sub_metering_2,col="red")
lines(data1$new_time, data1$Sub_metering_3,col="blue")
legend("topright",lty=c(1,1,1), col=c("black","red","blue"),legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
| /plot3.r | no_license | Shuai-Dong/ExploratoryDataSub1 | R | false | false | 1,261 | r |
|
library(numDeriv) # provides grad(), used below for numerical differentiation
SurvivalFunction <- function(time){ # Survival function of the system
a=1;
b=0.5;
lambda=0.5;
R = (1-(1-exp(-(a*time)^b))^3)*exp(-lambda*time);
return(R)
}
DeathRateFunction <- function(time){ # log-survival; the death (hazard) rate is minus its derivative
r <- log(SurvivalFunction(time))
}
expectedLifeLength <- integrate(SurvivalFunction,0,Inf)
curve(SurvivalFunction,0,10) # Used instead of plot as it takes a function as argument
time = seq(0.1, 10, length.out =1000)
r <- -grad(DeathRateFunction,time) # Negated derivative of the log-survival, i.e. the death rate
plot(time, r,type="l")
| /lab3-1-1.R | no_license | sebastianpantin/Stochastic-data-processing-and-simulation-R | R | false | false | 572 | r |
|
# Course Project 1 for Exploratory Data Class
# create graphs for Electric Power Consumption Dataset
# Plot 2
library(dplyr)
# Read in data set
txtfile <-"exdata-data-household_power_consumption/household_power_consumption.txt"
tempfile <- read.table(txtfile, header = TRUE, sep = ";", na.strings = "?")
# re-format dates
tempfile$Date <- strptime(tempfile$Date, "%d/%m/%Y")
tempfile$Date <- as.Date(tempfile$Date, "%Y-%m-%d")
# subset the dates for the project
powerset <- filter(tempfile, (Date == "2007-02-01" | Date == "2007-02-02"))
class(powerset$Date)
# reformat time
powerset2 <- mutate(powerset, datetime = as.POSIXct(paste(Date, Time)))
# create plot 2 and output to a png file
png(file = "plot2.png", width = 480, height = 480)
with(powerset2, plot(datetime, Global_active_power, type = "l", col = "black", xlab = "", ylab = "Global Active Power (kilowatts)"))
with(powerset2, lines(datetime, Global_active_power))
dev.off()
| /plot2.R | no_license | jabutler66/ExData_Plotting1 | R | false | false | 942 | r |
|
library(dplyr, warn.conflicts = FALSE)
library(stringr)
library(lubridate, warn.conflicts = FALSE)
source("_parse.R")
source("_frame.R")
generate_video <- function(srt_file, duration = "00:00", overwrite = "auto") {
# The output path is the input path with .srt => .mp4
output_file <- paste0(tools::file_path_sans_ext(srt_file), ".mp4")
# If the output file exists, we bail if ovewrite=FALSE
if (file.exists(output_file)) {
if (isFALSE(overwrite)) {
return(FALSE)
} else if (overwrite == "auto") {
# Only bail if output is up-to-date
srt_mtime <- file.info(srt_file)$mtime
out_mtime <- file.info(output_file)$mtime
if (out_mtime >= srt_mtime) {
return(FALSE)
}
}
}
# Data frame with `n` (int), `start`/`end` (in secs), and `text`
df <- read_srt(srt_file)
if (identical(duration, "00:00")) {
# Auto-detect duration; set it to last caption + 1 second
duration <- max(df$end) + 1
} else {
# Convert "mm:ss" to seconds
duration <- period_to_seconds(ms(duration))
}
# The overall strategy is to create one .png for each caption (dozens), plus
# one blank .png, inside captions_dir.
#
# Then populate frames_dir with one softlink per output frame (thousands), and
# use ffmpeg to convert frames_dir to a video.
# Create these dirs using tempfile so we can simultaneously run multiple jobs
# if we want to
captions_dir <- tempfile("caption_images")
frames_dir <- tempfile("frames")
dir.create(captions_dir)
dir.create(frames_dir)
# Don't on.exit(unlink) in case the files are needed for debugging
message(" Generating frames")
text_to_frame("", file.path(captions_dir, "blank.png"))
for (i in seq_len(nrow(df))) {
text_to_frame(df$text[i], file.path(captions_dir, paste0(df$n[i], ".png")))
}
fps <- 12
message(" Linking")
for (sec in 0:(duration - 1)) {
for (frame in 0:(fps - 1)) {
abs_frame <- sec * fps + frame
secs <- abs_frame / fps
dest <- file.path(frames_dir, sprintf("frame%08d.png", abs_frame))
# Determine which caption png should be used for this frame
row <- which(df$start <= secs & df$end >= secs)
if (length(row) == 0) {
# No caption matched, use blank
src <- file.path(captions_dir, "blank.png")
} else {
# One or more captions matched, use the first result
src <- file.path(captions_dir, paste0(df$n[[row[1]]], ".png"))
}
file.link(src, dest)
}
}
message(" Rendering")
tryCatch({
av::av_encode_video(
input = dir(frames_dir, full.names = TRUE),
output = output_file,
framerate = fps,
verbose = FALSE
)
message("Wrote ", output_file)
}, interrupt = function(e) {
message("Interrupted, deleting output")
unlink(output_file)
stop(e)
}, error = function(e) {
message("Errored, deleting output")
unlink(output_file)
stop(e)
})
unlink(captions_dir, recursive = TRUE)
unlink(frames_dir, recursive = TRUE)
invisible(TRUE)
}
# args <- commandArgs(TRUE)
# if (length(args) < 1) {
# message("Usage: Rscript generate.R [input-file.srt] [mm:ss]")
# }
# input_file <- args[[1]]
# duration <- if (length(args) > 1) args[[2]] else "00:00"
# generate_video(input_file, duration)
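# Worked check of the frame-selection arithmetic above: with fps = 12, a caption
# spanning 2.5-4.0 seconds covers absolute frames 30 through 48, because the loop
# tests start <= abs_frame / fps & end >= abs_frame / fps.
fps_demo <- 12
covered <- which(sapply(0:120, function(f) 2.5 <= f / fps_demo && f / fps_demo <= 4.0)) - 1
range(covered) # 30 48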
| /generate.R | no_license | jcheng5/captions | R | false | false | 3,305 | r |
library(dplyr, warn.conflicts = FALSE)
library(stringr)
library(lubridate, warn.conflicts = FALSE)
source("_parse.R")
source("_frame.R")
generate_video <- function(srt_file, duration = "00:00", overwrite = "auto") {
# The output path is the input path with .srt => .mp4
output_file <- paste0(tools::file_path_sans_ext(srt_file), ".mp4")
# If the output file exists, we bail if ovewrite=FALSE
if (file.exists(output_file)) {
if (isFALSE(overwrite)) {
return(FALSE)
} else if (overwrite == "auto") {
# Only bail if output is up-to-date
srt_mtime <- file.info(srt_file)$mtime
out_mtime <- file.info(output_file)$mtime
if (out_mtime >= srt_mtime) {
return(FALSE)
}
}
}
# Data frame with `n` (int), `start`/`end` (in secs), and `text`
df <- read_srt(srt_file)
if (identical(duration, "00:00")) {
# Auto-detect duration; set it to last caption + 1 second
duration <- max(df$end) + 1
} else {
# Convert "mm:ss" to seconds
duration <- period_to_seconds(ms(duration))
}
# The overall strategy is to create one .png for each caption (dozens), plus
# one blank .png, inside captions_dir.
#
  # Then populate frames_dir with one hard link per output frame (thousands), and
# use ffmpeg to convert frames_dir to a video.
# Create these dirs using tempfile so we can simultaneously run multiple jobs
# if we want to
captions_dir <- tempfile("caption_images")
frames_dir <- tempfile("frames")
dir.create(captions_dir)
dir.create(frames_dir)
# Don't on.exit(unlink) in case the files are needed for debugging
message(" Generating frames")
text_to_frame("", file.path(captions_dir, "blank.png"))
for (i in seq_len(nrow(df))) {
text_to_frame(df$text[i], file.path(captions_dir, paste0(df$n[i], ".png")))
}
fps <- 12
message(" Linking")
for (sec in 0:(duration - 1)) {
for (frame in 0:(fps - 1)) {
abs_frame <- sec * fps + frame
secs <- abs_frame / fps
dest <- file.path(frames_dir, sprintf("frame%08d.png", abs_frame))
# Determine which caption png should be used for this frame
row <- which(df$start <= secs & df$end >= secs)
if (length(row) == 0) {
# No caption matched, use blank
src <- file.path(captions_dir, "blank.png")
} else {
# One or more captions matched, use the first result
src <- file.path(captions_dir, paste0(df$n[[row[1]]], ".png"))
}
file.link(src, dest)
}
}
message(" Rendering")
tryCatch({
av::av_encode_video(
input = dir(frames_dir, full.names = TRUE),
output = output_file,
framerate = fps,
verbose = FALSE
)
message("Wrote ", output_file)
}, interrupt = function(e) {
message("Interrupted, deleting output")
unlink(output_file)
stop(e)
}, error = function(e) {
message("Errored, deleting output")
unlink(output_file)
stop(e)
})
unlink(captions_dir, recursive = TRUE)
unlink(frames_dir, recursive = TRUE)
invisible(TRUE)
}
# args <- commandArgs(TRUE)
# if (length(args) < 1) {
# message("Usage: Rscript generate.R [input-file.srt] [mm:ss]")
# }
# input_file <- args[[1]]
# duration <- if (length(args) > 1) args[[2]] else "00:00"
# generate_video(input_file, duration)
|
library(hypergate)
### Name: boolmat
### Title: boolmat
### Aliases: boolmat
### ** Examples
data(Samusik_01_subset)
xp=Samusik_01_subset$xp_src
gate_vector=Samusik_01_subset$labels
hg=hypergate(xp=xp,gate_vector=gate_vector,level=23,delta_add=0.01)
head(boolmat(hg,xp))
|
/data/genthat_extracted_code/hypergate/examples/boolmat.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 278 |
r
|
library(hypergate)
### Name: boolmat
### Title: boolmat
### Aliases: boolmat
### ** Examples
data(Samusik_01_subset)
xp=Samusik_01_subset$xp_src
gate_vector=Samusik_01_subset$labels
hg=hypergate(xp=xp,gate_vector=gate_vector,level=23,delta_add=0.01)
head(boolmat(hg,xp))
|
# TODO: Write the narrative for what's going on here, what does it represent / exemplify?
# Tell what's manual and will be automated later.
# The code to do the actual transformation
# The user of makeParallel must write something like the following:
library(makeParallel)
fnames = list.files(pattern = "x[1-4]\\.rds")
# Description of the data
d = ChunkLoadFunc(read_func_name = "readRDS"
, read_args = fnames
, varname = "x"
, combine_func_name = "rbind"
, split_column_name = "y"
, column_names = c(y = 1L, z = 2L)
, sizes = c(10, 5, 5, 10)
)
# TODO: Grow the example by scheduling based on size of files
# x.csv (is chunked as) x1.csv = 200 rows, x2.csv = 300 rows, etc.
out = makeParallel('
f = function(grp){
median_z = median(grp[, "z"])
data.frame(y = grp[1L, "y"], median_z = median_z)
}
result = by(x, x[, "y"], f)
saveRDS(result, "result.rds")
', scheduler = scheduleDataParallel, data = d, nWorkers = 3L)
writeCode(out, "vector_actual_generated.R", overWrite = TRUE)
|
/tests/testthat/by_example/vector_transform.R
|
no_license
|
jfontestad/makeParallel
|
R
| false | false | 1,145 |
r
|
# TODO: Write the narrative for what's going on here, what does it represent / exemplify?
# Tell what's manual and will be automated later.
# The code to do the actual transformation
# The user of makeParallel must write something like the following:
library(makeParallel)
fnames = list.files(pattern = "x[1-4]\\.rds")
# Description of the data
d = ChunkLoadFunc(read_func_name = "readRDS"
, read_args = fnames
, varname = "x"
, combine_func_name = "rbind"
, split_column_name = "y"
, column_names = c(y = 1L, z = 2L)
, sizes = c(10, 5, 5, 10)
)
# TODO: Grow the example by scheduling based on size of files
# x.csv (is chunked as) x1.csv = 200 rows, x2.csv = 300 rows, etc.
out = makeParallel('
f = function(grp){
median_z = median(grp[, "z"])
data.frame(y = grp[1L, "y"], median_z = median_z)
}
result = by(x, x[, "y"], f)
saveRDS(result, "result.rds")
', scheduler = scheduleDataParallel, data = d, nWorkers = 3L)
writeCode(out, "vector_actual_generated.R", overWrite = TRUE)
|
#### Import R-data
#############################################################################
#' Import R \code{data.frame}
#'
#' Function to import a \code{data.frame} object for use in \code{eatGADS} while extracting value labels from factors.
#'
#' Factors are integers with labeled variable levels. \code{import_DF} extracts these labels and stores them in a separate meta data data.frame.
#' See \code{\link{import_spss}} for detailed information.
#'
#'@param df A \code{data.frame}.
#'@param checkVarNames Should variable names be checked for violations of \code{SQLite} and \code{R} naming rules?
#'
#'@return Returns a list with the actual data \code{dat} and a data frame with all meta information in long format \code{labels}.
#'
#'@examples
#'dat <- import_DF(iris, checkVarNames = FALSE)
#'
#'# Inspect Meta data
#'extractMeta(dat)
#'
#'# Extract Data
#'dat <- extractData(dat, convertLabels = "character")
#'
#'@export
import_DF <- function(df, checkVarNames = TRUE) {
if(!is.data.frame(df)) stop("df needs to be a data frame.")
zeroLevels <- sapply(df, function(dfVar) is.factor(dfVar) && identical(levels(dfVar), character(0)))
if(any(zeroLevels)) warning("The following variables in the data are factors with zero valid levels: ",
paste(names(zeroLevels)[zeroLevels], collapse = ", "))
out <- prepare_labels(rawDat = df, checkVarNames = checkVarNames, labeledStrings = FALSE)
out
}
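## A small sketch of the zero-level factor warning checked above (toy data, not part
## of the package's own examples; assumes the eatGADS internals are available):
if (FALSE) {
  toy <- data.frame(x = 1:3, f = factor(rep(NA_character_, 3)))
  import_DF(toy)  # warns that 'f' is a factor with zero valid levels
}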
|
/R/import_DF.R
|
no_license
|
beckerbenj/eatGADS
|
R
| false | false | 1,472 |
r
|
#### Import R-data
#############################################################################
#' Import R \code{data.frame}
#'
#' Function to import a \code{data.frame} object for use in \code{eatGADS} while extracting value labels from factors.
#'
#' Factors are integers with labeled variable levels. \code{import_DF} extracts these labels and stores them in a separate meta data data.frame.
#' See \code{\link{import_spss}} for detailed information.
#'
#'@param df A \code{data.frame}.
#'@param checkVarNames Should variable names be checked for violations of \code{SQLite} and \code{R} naming rules?
#'
#'@return Returns a list with the actual data \code{dat} and a data frame with all meta information in long format \code{labels}.
#'
#'@examples
#'dat <- import_DF(iris, checkVarNames = FALSE)
#'
#'# Inspect Meta data
#'extractMeta(dat)
#'
#'# Extract Data
#'dat <- extractData(dat, convertLabels = "character")
#'
#'@export
import_DF <- function(df, checkVarNames = TRUE) {
if(!is.data.frame(df)) stop("df needs to be a data frame.")
zeroLevels <- sapply(df, function(dfVar) is.factor(dfVar) && identical(levels(dfVar), character(0)))
if(any(zeroLevels)) warning("The following variables in the data are factors with zero valid levels: ",
paste(names(zeroLevels)[zeroLevels], collapse = ", "))
out <- prepare_labels(rawDat = df, checkVarNames = checkVarNames, labeledStrings = FALSE)
out
}
|
### Defines function to analyze simplex (handles 2- through 5-players, discrete & continuous)
# W: vector of payoff values, by row
# gen_time: replicator dynamics time steps; fixed internally to "continuous"
# zero=1e-14: uses a value of 1e-14 instead of 0 in validation steps
# sig_dig=4: output is printed with 4 significant figures (set internally)
# names=c("R", "P", "S", "L", "K"): user can change variable names displayed
# deconstruct=T: output includes analysis of face games as independent games
solveSimplex <- function(W, names=c("R", "P", "S", "L", "K"), deconstruct=T, debug=F){
gen_time="continuous"; zero=1e-14; sig_dig=4
# tests for numerical W, puts W in matrix format, check variable names vector length
if(sqrt(length(W)) %in% c(2,3,4,5)){
if(class(W)[1] == "numeric" && sapply(W, FUN = function(x) is.numeric(x)==T)){Wmatrix = matrix(W, nrow=sqrt(length(W)), byrow=T)}
else if(class(W)[1] == "matrix" && mapply(W, FUN = function(x) is.numeric(x)==T)){Wmatrix = W}
else{stop("non-numeric payoff values")}
} else{stop("simplex dimension ")}
if(length(names) < sqrt(length(W))){names=c(names, c("R","P","S","L","K")[1:(sqrt(length(W))-length(names))])}
if(length(names) > sqrt(length(W))){names=names[1:sqrt(length(W))]}
# analyzes game by number of competitors
if(sqrt(length(W)) == 2){
table_out <- table2(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){ if(deconstruct==T){return(table_out[-c(2,4)])}else{return(table_out[-c(2,4)])} }
else{ if(deconstruct==T){return(table_out[-2])}else{return(table_out[-2])} }
}
if(sqrt(length(W)) == 3){
table_out <- table3(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2]))}
else{return(c(table_out[[1]][1], table_out[[1]][3]))} }
else{
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2], table_out[[3]]))}
else{return(c(table_out[[1]][1], table_out[[1]][3], table_out[[3]][1]))} }
}
if(sqrt(length(W)) == 4){
table_out <- table4(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2]))}
else{return(c(table_out[[1]][1], table_out[[1]][3]))} }
else{
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2], table_out[[3]]))}
else{return(c(table_out[[1]][1], table_out[[1]][3], table_out[[3]][1]))} }
}
if(sqrt(length(W)) == 5){
table_out <- table5(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2]))}
else{return(c(table_out[[1]][1], table_out[[1]][3]))} }
else{
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2], table_out[[3]]))}
else{return(c(table_out[[1]][1], table_out[[1]][3], table_out[[3]][1]))} }
}
}
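## Example sketch (rock-paper-scissors payoffs, by row; assumes the table2()/table3()/
## table4()/table5() helpers from this package are loaded):
if (FALSE) {
  W_rps <- c( 0, -1,  1,
              1,  0, -1,
             -1,  1,  0)
  solveSimplex(W_rps, names = c("R", "P", "S"))
}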
|
/R/solveSimplex.R
|
no_license
|
cmsette/solveSimplex
|
R
| false | false | 2,966 |
r
|
### Defines function to analyze simplex (handles 2- through 5-players, discrete & continuous)
# W: vector of payoff values, by row
# gen_time: replicator dynamics time steps; fixed internally to "continuous"
# zero=1e-14: uses a value of 1e-14 instead of 0 in validation steps
# sig_dig=4: output is printed with 4 significant figures (set internally)
# names=c("R", "P", "S", "L", "K"): user can change variable names displayed
# deconstruct=T: output includes analysis of face games as independent games
solveSimplex <- function(W, names=c("R", "P", "S", "L", "K"), deconstruct=T, debug=F){
gen_time="continuous"; zero=1e-14; sig_dig=4
# tests for numerical W, puts W in matrix format, check variable names vector length
if(sqrt(length(W)) %in% c(2,3,4,5)){
if(class(W)[1] == "numeric" && sapply(W, FUN = function(x) is.numeric(x)==T)){Wmatrix = matrix(W, nrow=sqrt(length(W)), byrow=T)}
else if(class(W)[1] == "matrix" && mapply(W, FUN = function(x) is.numeric(x)==T)){Wmatrix = W}
else{stop("non-numeric payoff values")}
} else{stop("simplex dimension ")}
if(length(names) < sqrt(length(W))){names=c(names, c("R","P","S","L","K")[1:(sqrt(length(W))-length(names))])}
if(length(names) > sqrt(length(W))){names=names[1:sqrt(length(W))]}
# analyzes game by number of competitors
if(sqrt(length(W)) == 2){
table_out <- table2(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){ if(deconstruct==T){return(table_out[-c(2,4)])}else{return(table_out[-c(2,4)])} }
else{ if(deconstruct==T){return(table_out[-2])}else{return(table_out[-2])} }
}
if(sqrt(length(W)) == 3){
table_out <- table3(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2]))}
else{return(c(table_out[[1]][1], table_out[[1]][3]))} }
else{
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2], table_out[[3]]))}
else{return(c(table_out[[1]][1], table_out[[1]][3], table_out[[3]][1]))} }
}
if(sqrt(length(W)) == 4){
table_out <- table4(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2]))}
else{return(c(table_out[[1]][1], table_out[[1]][3]))} }
else{
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2], table_out[[3]]))}
else{return(c(table_out[[1]][1], table_out[[1]][3], table_out[[3]][1]))} }
}
if(sqrt(length(W)) == 5){
table_out <- table5(Wmatrix, gen_time, zero, sig_dig, names)
if(debug==F){
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2]))}
else{return(c(table_out[[1]][1], table_out[[1]][3]))} }
else{
if(deconstruct==T){return(c(table_out[[1]][1], table_out[[1]][3], table_out[2], table_out[[3]]))}
else{return(c(table_out[[1]][1], table_out[[1]][3], table_out[[3]][1]))} }
}
}
|
## Created by tsmereka on Jan 25th 2016
## makeCacheMatrix creates a special "matrix" object
## that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
cachedInverse <- NULL
setData <- function(y) {
x <<- y
cachedInverse <<- NULL
}
getData <- function() {
x
}
setInverse <- function(inv) {
cachedInverse <<- inv
}
getInverse <- function() {
cachedInverse
}
list(setData = setData, getData = getData, setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve computes the inverse of the special
## "matrix" returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` should retrieve the inverse from the cache.
## `cacheSolve` assumes that the matrix supplied is always invertible.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## Computing the inverse of a square matrix can be done with the `solve`
## function in R. For example, if `X` is a square invertible matrix, then
## `solve(X)` returns its inverse.
inv <- x$getInverse()
if (!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$getData()
inv <- solve(data, ...)
x$setInverse(inv)
inv
}
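## Usage sketch (small invertible matrix; the second cacheSolve() call should print
## "getting cached data" and skip the recomputation):
if (FALSE) {
  m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
  cacheSolve(m)  # computes, caches and returns the inverse
  cacheSolve(m)  # returns the cached inverse
}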
|
/cachematrix.R
|
no_license
|
tsmereka/ProgrammingAssignment2
|
R
| false | false | 1,506 |
r
|
## Created by tsmereka on Jan 25th 2016
## makeCacheMatrix creates a special "matrix" object
## that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
cachedInverse <- NULL
setData <- function(y) {
x <<- y
cachedInverse <<- NULL
}
getData <- function() {
x
}
setInverse <- function(inv) {
cachedInverse <<- inv
}
getInverse <- function() {
cachedInverse
}
list(setData = setData, getData = getData, setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve computes the inverse of the special
## "matrix" returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` should retrieve the inverse from the cache.
## `cacheSolve` assumes that the matrix supplied is always invertible.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
## Computing the inverse of a square matrix can be done with the `solve`
## function in R. For example, if `X` is a square invertible matrix, then
## `solve(X)` returns its inverse.
inv <- x$getInverse()
if (!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$getData()
inv <- solve(data, ...)
x$setInverse(inv)
inv
}
|
# Interactions All students
# -------------------------
all.full.edgelist <- function(my.data.filtered, building, action, day, year) {
# create vector of richs' actions
all1 <- my.data.filtered[,c("carnet", "date_time", "porteria_detalle")]
all1 <-all1[order(all1$date_time),] #By sorting the data it is faster to check the interactions
# create vector of poors' actions
all2 <- all1
# Create list of interactions w/time_date
  auxPobresStartIndex <- 1 # Auxiliary variable to keep comparisons to a minimum # this is what gains the most speed
minSeconds <- 2 #The number of seconds to consider an interaction
edg <- matrix(nrow = 1, ncol = 5, rep(NA))
# crear aqui una lista de 3 celdas y una linea, a esa lista le pego nuevos valores en a funcion, ya no necesitaria el hashmap
# Recorre los ricos y para los pobres recorre solo las interacciones posibles
for (i in 1:nrow(all1)) { #Ricos are in the rows#
updatedIndex <- FALSE
for (j in auxPobresStartIndex:nrow(all2)) { #Pobres are in the columns #
if(all1$carnet[i]!=all2$carnet[j]) { # Look at different students only #
timediff<- as.numeric(difftime(all1$date_time[i], all2$date_time[j], units = "secs"))
if(abs(timediff) < minSeconds){
row <- c(all1$carnet[i], all2$carnet[j], as.character(all1$date_time[i]), as.character(all1$porteria_detalle[i]), as.character(all2$porteria_detalle[j]) )
edg <- rbind(edg, row)
if(!updatedIndex){
auxPobresStartIndex <- j
updatedIndex <- TRUE
}
}
if(timediff < -minSeconds){
break;
}
if(!updatedIndex)
auxPobresStartIndex <-j
}
}
}
colnames(edg) <- c("carnet1", "carnet2", "date_time_carnet1", "porteria_det_carnet1", "porteria_det_carnet2")
edg <- cbind(edg, building, action, day, year)
edg <- edg[-1,]
return(edg)
}
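# Usage sketch (the data frame and the building/action/day/year values are
# hypothetical; my.data.filtered must contain carnet, date_time and porteria_detalle):
if (FALSE) {
  edges <- all.full.edgelist(my.data.filtered, building = "ML",
                             action = "entry", day = "Monday", year = 2016)
}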
|
/Code/all.full.edgelist.R
|
no_license
|
JuanMoreno11/Turnstile_networks
|
R
| false | false | 1,838 |
r
|
# Interactions All students
# -------------------------
all.full.edgelist <- function(my.data.filtered, building, action, day, year) {
# create vector of richs' actions
all1 <- my.data.filtered[,c("carnet", "date_time", "porteria_detalle")]
all1 <-all1[order(all1$date_time),] #By sorting the data it is faster to check the interactions
# create vector of poors' actions
all2 <- all1
# Create list of interactions w/time_date
  auxPobresStartIndex <- 1 # Auxiliary variable to keep comparisons to a minimum # this is what gains the most speed
minSeconds <- 2 #The number of seconds to consider an interaction
edg <- matrix(nrow = 1, ncol = 5, rep(NA))
# crear aqui una lista de 3 celdas y una linea, a esa lista le pego nuevos valores en a funcion, ya no necesitaria el hashmap
# Recorre los ricos y para los pobres recorre solo las interacciones posibles
for (i in 1:nrow(all1)) { #Ricos are in the rows#
updatedIndex <- FALSE
for (j in auxPobresStartIndex:nrow(all2)) { #Pobres are in the columns #
if(all1$carnet[i]!=all2$carnet[j]) { # Look at different students only #
timediff<- as.numeric(difftime(all1$date_time[i], all2$date_time[j], units = "secs"))
if(abs(timediff) < minSeconds){
row <- c(all1$carnet[i], all2$carnet[j], as.character(all1$date_time[i]), as.character(all1$porteria_detalle[i]), as.character(all2$porteria_detalle[j]) )
edg <- rbind(edg, row)
if(!updatedIndex){
auxPobresStartIndex <- j
updatedIndex <- TRUE
}
}
if(timediff < -minSeconds){
break;
}
if(!updatedIndex)
auxPobresStartIndex <-j
}
}
}
colnames(edg) <- c("carnet1", "carnet2", "date_time_carnet1", "porteria_det_carnet1", "porteria_det_carnet2")
edg <- cbind(edg, building, action, day, year)
edg <- edg[-1,]
return(edg)
}
|
#' Query Phylomatic for a phylogenetic tree.
#'
#' @export
#' @param taxa Phylomatic format input of taxa names.
#' @param taxnames If \code{TRUE} (default), we get the family names for you to attach
#' to your species names to send to Phylomatic API. If \code{FALSE}, you have to
#' provide the strings in the right format.
#' @param get 'GET' (default) or 'POST' format for submission to the website.
#' @param informat One of newick (default), nexml, or cdaordf. If using a stored tree,
#' informat should always be newick.
#' @param method One of phylomatic (default) or convert
#' @param storedtree One of R20120829 (Phylomatic tree R20120829 for plants),
#' smith2011 (Smith 2011, plants), binindaemonds2007 (Bininda-Emonds 2007,
#' mammals), or zanne2014 (Zanne et al. 2014, plants). Default: R20120829
#' @param treeuri URL for a phylogenetic tree in newick format.
#' @param taxaformat Only option is slashpath for now. Leave as is.
#' @param outformat One of newick, nexml, or fyt.
#' @param clean Return a clean tree or not. Default: \code{TRUE}
#' @param db One of "ncbi", "itis", or "apg". Default: apg
#' @param mssgs Print messages. Default: \code{TRUE}
#' @param ... curl options passed on to \code{\link[crul]{HttpClient}}
#'
#' @details Use the web interface at \url{http://phylodiversity.net/phylomatic/}
#'
#' @return Newick formatted tree as \code{phylo} object or
#' nexml character string
#'
#' @examples \dontrun{
#' # Input taxonomic names
#' taxa <- c("Poa annua", "Phlox diffusa", "Helianthus annuus")
#' tree <- phylomatic(taxa=taxa, get = 'POST')
#' plot(tree, no.margin=TRUE)
#'
#' # Genus names
#' taxa <- c("Poa", "Phlox", "Helianthus")
#' tree <- phylomatic(taxa=taxa, storedtree='R20120829', get='POST')
#' plot(tree, no.margin=TRUE)
#'
#' # Lots of names
#' taxa <- c("Poa annua", "Collomia grandiflora", "Lilium lankongense", "Phlox diffusa",
#' "Iteadaphne caudata", "Gagea sarmentosa", "Helianthus annuus")
#' tree <- phylomatic(taxa=taxa, get = 'POST')
#' plot(tree, no.margin=TRUE)
#'
#' # Don't clean - clean=TRUE is default
#' (tree <- phylomatic(taxa=taxa, clean = FALSE))
#' ## with clean=FALSE, you can get non-splitting nodes, which you
#' ## need to collapse before plotting
#' library('ape')
#' plot(collapse.singles(tree), no.margin=TRUE)
#'
#' # Output NeXML format
#' taxa <- c("Gonocarpus leptothecus", "Gonocarpus leptothecus", "Lilium lankongense")
#' out <- phylomatic(taxa=taxa, get = 'POST', outformat = "nexml")
#' cat(out)
#'
#' # Lots of names, note that when you have enough names (number depends on length of individual
#' # names, so there's no per se rule), you will get an error when using \code{get='GET'},
#' # when that happens use \code{get='POST'}
#' library("taxize")
#' spp <- names_list("species", 5000)
#' # phylomatic(taxa = spp, get = "GET")
#' (out <- phylomatic(taxa = spp, get = "POST"))
#' plot(out)
#'
#' # Pass in a tree from a URL on the web
#' spp <- c("Abies_nordmanniana", "Abies_bornmuelleriana", "Abies_cilicica", "Abies_cephalonica",
#' "Abies_numidica", "Abies_pinsapo", "Abies_alba")
#' url <- "http://datadryad.org/bitstream/handle/10255/dryad.8791/final_tree.tre?sequence=1"
#' phylomatic(taxa=spp, treeuri=url)
#' }
phylomatic <- function(taxa, taxnames = TRUE, get = 'GET',
informat = "newick", method = "phylomatic", storedtree = "R20120829", treeuri = NULL,
taxaformat = "slashpath", outformat = "newick", clean = TRUE, db="apg",
mssgs=TRUE, ...) {
if (taxnames) {
dat_ <- phylomatic_names(taxa, format = 'isubmit', db = db)
checknas <- sapply(dat_, function(x) strsplit(x, "/")[[1]][1])
checknas2 <- checknas[match("na", checknas)]
if (is.numeric(checknas2)) {
stop(sprintf("A family was not found for the following taxa:\n %s \n\n try setting taxnames=FALSE, and passing in a vector of strings, like \n%s",
paste(sapply(dat_, function(x) strsplit(x, "/")[[1]][3])[match("na", checknas)], collapse = ", "),
'phylomatic(taxa = c("asteraceae/taraxacum/taraxacum_officinale", "ericaceae/gaylussacia/gaylussacia_baccata", "ericaceae/vaccinium/vaccinium_pallidum"), taxnames=FALSE, parallel=FALSE)'
))
}
} else {
dat_ <- taxa
}
if (length(dat_) > 1) {
dat_ <- paste(dat_, collapse = "\n")
}
# Only one of storedtree or treeuri
if (!is.null(treeuri)) storedtree <- NULL
# clean up the clean param
clean <- if (clean) "true" else "false"
args <- cpt(list(taxa = dat_, informat = informat, method = method,
storedtree = storedtree, treeuri = treeuri, taxaformat = taxaformat,
outformat = outformat, clean = clean))
cli <- crul::HttpClient$new(url = phylo_base, opts = list(...))
if (get == 'POST') {
tt <- cli$post(body = args, encode = 'form')
out <- tt$parse("UTF-8")
} else if (get == 'GET') {
tt <- cli$get(query = args)
if (tt$status_code == 414) {
stop("(414) Request-URI Too Long - Use get='POST' in your function call", call. = FALSE)
} else {
tt$raise_for_status()
}
out <- tt$parse("UTF-8")
} else {
stop("get must be one of 'POST' or 'GET'", call. = FALSE)
}
if (grepl("No taxa in common", out)) {
stop(out)
} else {
# parse out missing taxa note
if (grepl("\\[NOTE: ", out)) {
taxa_na <- strmatch(out, "NOTE:.+")
taxa_na2 <- strmatch(taxa_na, ":\\s[A-Za-z].+")
taxa_na2 <- strsplit(taxa_na2, ",")[[1]][-length(strsplit(taxa_na2, ",")[[1]])]
taxa_na2 <- gsub(":|\\s", "", taxa_na2)
taxa_na2 <- sapply(taxa_na2, function(x) strsplit(x, "/")[[1]][[3]], USE.NAMES = FALSE)
taxa_na2 <- traits_capwords(gsub("_", " ", taxa_na2), onlyfirst = TRUE)
mssg(mssgs, taxa_na)
out <- gsub("\\[NOTE:.+", ";\n", out)
} else {
taxa_na2 <- NULL
}
outformat <- match.arg(outformat, choices = c("nexml",'newick'))
switch(outformat,
nexml = structure(out, class = "phylomatic", missing = taxa_na2),
newick = structure(phytools::read.newick(text = out),
class = c("phylo", "phylomatic"),
missing = taxa_na2))
}
}
|
/R/phylomatic.R
|
permissive
|
fozy81/brranching
|
R
| false | false | 6,188 |
r
|
#' Query Phylomatic for a phylogenetic tree.
#'
#' @export
#' @param taxa Phylomatic format input of taxa names.
#' @param taxnames If \code{TRUE} (default), we get the family names for you to attach
#' to your species names to send to Phylomatic API. If \code{FALSE}, you have to
#' provide the strings in the right format.
#' @param get 'GET' (default) or 'POST' format for submission to the website.
#' @param informat One of newick (default), nexml, or cdaordf. If using a stored tree,
#' informat should always be newick.
#' @param method One of phylomatic (default) or convert
#' @param storedtree One of R20120829 (Phylomatic tree R20120829 for plants),
#' smith2011 (Smith 2011, plants), binindaemonds2007 (Bininda-Emonds 2007,
#' mammals), or zanne2014 (Zanne et al. 2014, plants). Default: R20120829
#' @param treeuri URL for a phylogenetic tree in newick format.
#' @param taxaformat Only option is slashpath for now. Leave as is.
#' @param outformat One of newick, nexml, or fyt.
#' @param clean Return a clean tree or not. Default: \code{TRUE}
#' @param db One of "ncbi", "itis", or "apg". Default: apg
#' @param mssgs Print messages. Default: \code{TRUE}
#' @param ... curl options passed on to \code{\link[crul]{HttpClient}}
#'
#' @details Use the web interface at \url{http://phylodiversity.net/phylomatic/}
#'
#' @return Newick formatted tree as \code{phylo} object or
#' nexml character string
#'
#' @examples \dontrun{
#' # Input taxonomic names
#' taxa <- c("Poa annua", "Phlox diffusa", "Helianthus annuus")
#' tree <- phylomatic(taxa=taxa, get = 'POST')
#' plot(tree, no.margin=TRUE)
#'
#' # Genus names
#' taxa <- c("Poa", "Phlox", "Helianthus")
#' tree <- phylomatic(taxa=taxa, storedtree='R20120829', get='POST')
#' plot(tree, no.margin=TRUE)
#'
#' # Lots of names
#' taxa <- c("Poa annua", "Collomia grandiflora", "Lilium lankongense", "Phlox diffusa",
#' "Iteadaphne caudata", "Gagea sarmentosa", "Helianthus annuus")
#' tree <- phylomatic(taxa=taxa, get = 'POST')
#' plot(tree, no.margin=TRUE)
#'
#' # Don't clean - clean=TRUE is default
#' (tree <- phylomatic(taxa=taxa, clean = FALSE))
#' ## with clean=FALSE, you can get non-splitting nodes, which you
#' ## need to collapse before plotting
#' library('ape')
#' plot(collapse.singles(tree), no.margin=TRUE)
#'
#' # Output NeXML format
#' taxa <- c("Gonocarpus leptothecus", "Gonocarpus leptothecus", "Lilium lankongense")
#' out <- phylomatic(taxa=taxa, get = 'POST', outformat = "nexml")
#' cat(out)
#'
#' # Lots of names, note that when you have enough names (number depends on length of individual
#' # names, so there's no per se rule), you will get an error when using \code{get='GET'},
#' # when that happens use \code{get='POST'}
#' library("taxize")
#' spp <- names_list("species", 5000)
#' # phylomatic(taxa = spp, get = "GET")
#' (out <- phylomatic(taxa = spp, get = "POST"))
#' plot(out)
#'
#' # Pass in a tree from a URL on the web
#' spp <- c("Abies_nordmanniana", "Abies_bornmuelleriana", "Abies_cilicica", "Abies_cephalonica",
#' "Abies_numidica", "Abies_pinsapo", "Abies_alba")
#' url <- "http://datadryad.org/bitstream/handle/10255/dryad.8791/final_tree.tre?sequence=1"
#' phylomatic(taxa=spp, treeuri=url)
#' }
phylomatic <- function(taxa, taxnames = TRUE, get = 'GET',
informat = "newick", method = "phylomatic", storedtree = "R20120829", treeuri = NULL,
taxaformat = "slashpath", outformat = "newick", clean = TRUE, db="apg",
mssgs=TRUE, ...) {
if (taxnames) {
dat_ <- phylomatic_names(taxa, format = 'isubmit', db = db)
checknas <- sapply(dat_, function(x) strsplit(x, "/")[[1]][1])
checknas2 <- checknas[match("na", checknas)]
if (is.numeric(checknas2)) {
stop(sprintf("A family was not found for the following taxa:\n %s \n\n try setting taxnames=FALSE, and passing in a vector of strings, like \n%s",
paste(sapply(dat_, function(x) strsplit(x, "/")[[1]][3])[match("na", checknas)], collapse = ", "),
'phylomatic(taxa = c("asteraceae/taraxacum/taraxacum_officinale", "ericaceae/gaylussacia/gaylussacia_baccata", "ericaceae/vaccinium/vaccinium_pallidum"), taxnames=FALSE, parallel=FALSE)'
))
}
} else {
dat_ <- taxa
}
if (length(dat_) > 1) {
dat_ <- paste(dat_, collapse = "\n")
}
# Only one of storedtree or treeuri
if (!is.null(treeuri)) storedtree <- NULL
# clean up the clean param
clean <- if (clean) "true" else "false"
args <- cpt(list(taxa = dat_, informat = informat, method = method,
storedtree = storedtree, treeuri = treeuri, taxaformat = taxaformat,
outformat = outformat, clean = clean))
cli <- crul::HttpClient$new(url = phylo_base, opts = list(...))
if (get == 'POST') {
tt <- cli$post(body = args, encode = 'form')
out <- tt$parse("UTF-8")
} else if (get == 'GET') {
tt <- cli$get(query = args)
if (tt$status_code == 414) {
stop("(414) Request-URI Too Long - Use get='POST' in your function call", call. = FALSE)
} else {
tt$raise_for_status()
}
out <- tt$parse("UTF-8")
} else {
stop("get must be one of 'POST' or 'GET'", call. = FALSE)
}
if (grepl("No taxa in common", out)) {
stop(out)
} else {
# parse out missing taxa note
if (grepl("\\[NOTE: ", out)) {
taxa_na <- strmatch(out, "NOTE:.+")
taxa_na2 <- strmatch(taxa_na, ":\\s[A-Za-z].+")
taxa_na2 <- strsplit(taxa_na2, ",")[[1]][-length(strsplit(taxa_na2, ",")[[1]])]
taxa_na2 <- gsub(":|\\s", "", taxa_na2)
taxa_na2 <- sapply(taxa_na2, function(x) strsplit(x, "/")[[1]][[3]], USE.NAMES = FALSE)
taxa_na2 <- traits_capwords(gsub("_", " ", taxa_na2), onlyfirst = TRUE)
mssg(mssgs, taxa_na)
out <- gsub("\\[NOTE:.+", ";\n", out)
} else {
taxa_na2 <- NULL
}
outformat <- match.arg(outformat, choices = c("nexml",'newick'))
switch(outformat,
nexml = structure(out, class = "phylomatic", missing = taxa_na2),
newick = structure(phytools::read.newick(text = out),
class = c("phylo", "phylomatic"),
missing = taxa_na2))
}
}
|
#### Include dive data per time step as a proportion of water column ####
## 2016 ##
filenames <- list.files(path="Data/Depth Data/Files/2016", pattern="*.tdr", full.names=TRUE)
for(i in 1:length(filenames)) {
X <- read.csv(filenames[i], header=FALSE, sep="") # header=FALSE creates 14 separate columns for each variable rather than one column for everything
skip = 3 #skips first 3 lines of data so you're just left with two rows as headers which are full of NAs and therefore easily subsetable
X <- na.omit(X) # removes the first 2 rows with NA values so you're just left with the data without headers
colnames(X) <- c("Dive Number","Year", "Month","Day", "Hour", "Min","Sec", "Depth","Dummy", "Temperature") # adds names of columns
ds<-paste(filenames[i],sep="") #adds the filename to ds
  ds<-substr(ds, 45, nchar(ds)-4) # remove the last 4 characters (the .tdr extension)
dsn <- paste(ds, i, sep=".")
X$tag<- substr(ds, 4, 8) # removes "Tag" from the tag column entry (begin at the cut at the 4th character and end at the 8th character)
assign(dsn, X) # Give the name "ds" a value of X (data.frame)
print(i)
}
filenames <- list.files(path="Data/Depth Data/Files/2017", pattern="*.tdr", full.names=TRUE)
for(i in 1:length(filenames)) {
X <- read.csv(filenames[i], header=FALSE, sep="") # header=FALSE creates 14 separate columns for each variable rather than one column for everything
# skip = 3 skips first 3 lines of data so you're just left with two rows as headers which are full of NAs and therefore easily subsetable
X <- na.omit(X) # removes the first 2 rows with NA values so you're just left with the data without headers
colnames(X) <- c("Year", "Month","Day", "Hour", "Min","Sec", "Depth","Dummy", "Temperature", "Voltage") # adds names of columns
ds<-paste(filenames[i],sep="") #adds the filename to ds
  ds<-substr(ds, 45, nchar(ds)-4) # remove the last 4 characters (the .tdr extension)
dsn <- paste(ds, i, sep=".")
X$tag<- substr(ds, 4, 8)
assign(dsn, X) # Give the name "ds" a value of X (data.frame)
print(i)
}
filenames <- list.files(path="Data/Depth Data/Files/2018", pattern="*.tdr", full.names=TRUE)
for(i in 1:length(filenames)) {
X <- read.csv(filenames[i], header=FALSE, sep="") # header=FALSE creates 14 separate columns for each variable rather than one column for everything
# skip = 3 skips first 3 lines of data so you're just left with two rows as headers which are full of NAs and therefore easily subsetable
X <- na.omit(X) # removes the first 2 rows with NA values so you're just left with the data without headers
colnames(X) <- c("Year", "Month","Day", "Hour", "Min","Sec", "Depth","Dummy", "Temperature", "Voltage") # adds names of columns
ds<-paste(filenames[i],sep="") #adds the filename to ds
  ds<-substr(ds, 45, nchar(ds)-4) # remove the last 4 characters (the .tdr extension)
dsn <- paste(ds, i, sep=".")
X$tag<- substr(ds, 4, 8) # removes "Tag" from the tag column entry (begin at the cut at the 4th character and end at the 8th character)
assign(dsn, X) # Give the name "ds" a value of X (data.frame)
print(filenames[i])
}
## Multiple files for a single tag so need to combine to form single data frame for each tag:
tdr51009 <- rbindlist(mget(ls(pattern = "51009")))
tdr51011 <- rbindlist(mget(ls(pattern = "51011")))
tdr51019 <- rbindlist(mget(ls(pattern = "51019")))
tdr51020 <- rbindlist(mget(ls(pattern = "51020")))
tdr51022 <- rbindlist(mget(ls(pattern = "51022")))
tdr51025 <- rbindlist(mget(ls(pattern = "51025")))
tdr51026 <- rbindlist(mget(ls(pattern = "51026")))
tdr51029 <- rbindlist(mget(ls(pattern = "51029")))
tdr51030 <- rbindlist(mget(ls(pattern = "51030")))
tdr51031 <- rbindlist(mget(ls(pattern = "51031")))
tdr51104 <- rbindlist(mget(ls(pattern = "51104")))
tdr51120 <- rbindlist(mget(ls(pattern = "51120")))
tdr51105 <- rbindlist(mget(ls(pattern = "51105")))
tdr51109 <- rbindlist(mget(ls(pattern = "51109")))
tdr51111 <- rbindlist(mget(ls(pattern = "51111")))
tdr51100 <- rbindlist(mget(ls(pattern = "51100")))
tdr51101 <- rbindlist(mget(ls(pattern = "51101")))
tdr51112 <- rbindlist(mget(ls(pattern = "51112")))
tdr51119 <- rbindlist(mget(ls(pattern = "51119")))
tdr51114 <- rbindlist(mget(ls(pattern = "51114")))
tdr51116 <- rbindlist(mget(ls(pattern = "51116")))
tdr51115 <- rbindlist(mget(ls(pattern = "51115")))
tdr51108 <- rbindlist(mget(ls(pattern = "51108")))
tdr51117 <- rbindlist(mget(ls(pattern = "51117")))
tdr51102 <- rbindlist(mget(ls(pattern = "51102")))
tdr51110 <- rbindlist(mget(ls(pattern = "51110")))
tdr51118 <- rbindlist(mget(ls(pattern = "51118")))
tdr51121 <- rbindlist(mget(ls(pattern = "51121")))
tdr51122 <- rbindlist(mget(ls(pattern = "51122")))
tdr51124 <- rbindlist(mget(ls(pattern = "51124")))
tdr51125 <- rbindlist(mget(ls(pattern = "51125")))
tdr51126 <- rbindlist(mget(ls(pattern = "51126")))
tdr51127 <- rbindlist(mget(ls(pattern = "51127")))
tdr51128 <- rbindlist(mget(ls(pattern = "51128")))
tdr51129 <- rbindlist(mget(ls(pattern = "51129")))
tdr51130 <- rbindlist(mget(ls(pattern = "51130")))
tdr51131 <- rbindlist(mget(ls(pattern = "51131")))
tdr51132 <- rbindlist(mget(ls(pattern = "51132")))
tdr51134 <- rbindlist(mget(ls(pattern = "51134")))
tdr51136 <- rbindlist(mget(ls(pattern = "51136")))
TDR2016 <- rbindlist(mget(ls(pattern = "tdr510")))
TDR2017 <- rbind(tdr51104,tdr51120,tdr51105,tdr51109,tdr51111,tdr51100,tdr51101,tdr51112,tdr51119,
tdr51114,tdr51116,tdr51115,tdr51108,tdr51117)
TDR2018 <- rbind(tdr51102,tdr51110,tdr51118,tdr51121,tdr51122,tdr51124,tdr51125,
tdr51126,tdr51127,tdr51128,tdr51129,tdr51130,tdr51131,tdr51132,tdr51134,tdr51136)
TDR2016$datetime<- paste(TDR2016$Year,"/",TDR2016$Month,"/",TDR2016$Day," ",TDR2016$Hour,":",TDR2016$Min,":",TDR2016$Sec, sep="")
temp <- as.POSIXct(as.character(TDR2016$datetime), "%Y/%m/%d %H:%M:%S", tz="UTC")
TDR2016$datetime <- temp
temp2 <- as.numeric(difftime(temp, "2016/09/28 12:00:00", tz="UTC", units="secs"))
TDR2016$secs<- temp2
rm(temp, temp2)
TDR2016 <- subset(TDR2016, Depth>-0.01 & Depth<200 & Dummy < 200)
TDR2016$Month <- as.Date(TDR2016$datetime)
TDR2017$datetime<- paste(TDR2017$Year,"/",TDR2017$Month,"/",TDR2017$Day," ",TDR2017$Hour,":",TDR2017$Min,":",TDR2017$Sec, sep="")
temp <- as.POSIXct(as.character(TDR2017$datetime), "%Y/%m/%d %H:%M:%S", tz="UTC")
TDR2017$datetime <- temp
temp2 <- as.numeric(difftime(temp, "2016/09/28 12:00:00", tz="UTC", units="secs"))
TDR2017$secs<- temp2
rm(temp, temp2)
# Clean data to include only positive depth and logical depth values: Dummy <200 is same reliance metric as residual error
TDR2017 <- subset(TDR2017, Depth>-0.01 & Depth<200 & Dummy < 200)
TDR2017$Month <- as.Date(TDR2017$datetime)
TDR2018$datetime<- paste(TDR2018$Year,"/",TDR2018$Month,"/",TDR2018$Day," ",TDR2018$Hour,":",TDR2018$Min,":",
TDR2018$Sec, sep="")
temp <- as.POSIXct(as.character(TDR2018$datetime), "%Y/%m/%d %H:%M:%S", tz="UTC")
TDR2018$datetime <- temp
temp2 <- as.numeric(difftime(temp, "2016/09/28 12:00:00", tz="UTC", units="secs"))
TDR2018$secs<- temp2
rm(temp, temp2)
# Clean data to include only positive depth and logical depth values: Dummy <200 is same reliance metric as residual error
TDR2018 <- subset(TDR2018, Depth>-0.01 & Depth<200 & Dummy < 200)
TDR2018$Month <- as.Date(TDR2018$datetime)
save(TDR2016, file="Data/Depth Data/TDR2016.Rd")
save(TDR2017, file=paste("Data/Depth Data/TDR2017.Rd"))
save(TDR2018, file=paste("Data/Depth Data/TDR2018.Rd"))
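## Sketch of the "proportion of water column" idea from the header comment above
## (assumes a hypothetical seabed-depth column, e.g. joined from bathymetry data):
if (FALSE) {
  TDR2016$prop_water_column <- TDR2016$Depth / TDR2016$depth_seabed
}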
|
/3.1_read-and-collate-uhftdr.R
|
no_license
|
JojoOno/crm_params
|
R
| false | false | 7,594 |
r
|
#### Include dive data per time step as a proportion of water column ####
## 2016 ##
filenames <- list.files(path="Data/Depth Data/Files/2016", pattern="*.tdr", full.names=TRUE)
for(i in 1:length(filenames)) {
X <- read.csv(filenames[i], header=FALSE, sep="") # header=FALSE creates 14 separate columns for each variable rather than one column for everything
skip = 3 #skips first 3 lines of data so you're just left with two rows as headers which are full of NAs and therefore easily subsetable
X <- na.omit(X) # removes the first 2 rows with NA values so you're just left with the data without headers
colnames(X) <- c("Dive Number","Year", "Month","Day", "Hour", "Min","Sec", "Depth","Dummy", "Temperature") # adds names of columns
ds<-paste(filenames[i],sep="") #adds the filename to ds
  ds<-substr(ds, 45, nchar(ds)-4) # remove the last 4 characters (the .tdr extension)
dsn <- paste(ds, i, sep=".")
X$tag<- substr(ds, 4, 8) # removes "Tag" from the tag column entry (begin at the cut at the 4th character and end at the 8th character)
assign(dsn, X) # Give the name "ds" a value of X (data.frame)
print(i)
}
filenames <- list.files(path="Data/Depth Data/Files/2017", pattern="*.tdr", full.names=TRUE)
for(i in 1:length(filenames)) {
X <- read.csv(filenames[i], header=FALSE, sep="") # header=FALSE creates 14 separate columns for each variable rather than one column for everything
# skip = 3 skips first 3 lines of data so you're just left with two rows as headers which are full of NAs and therefore easily subsetable
X <- na.omit(X) # removes the first 2 rows with NA values so you're just left with the data without headers
colnames(X) <- c("Year", "Month","Day", "Hour", "Min","Sec", "Depth","Dummy", "Temperature", "Voltage") # adds names of columns
ds<-paste(filenames[i],sep="") #adds the filename to ds
  ds<-substr(ds, 45, nchar(ds)-4) # remove the last 4 characters (the .tdr extension)
dsn <- paste(ds, i, sep=".")
X$tag<- substr(ds, 4, 8)
assign(dsn, X) # Give the name "ds" a value of X (data.frame)
print(i)
}
filenames <- list.files(path="Data/Depth Data/Files/2018", pattern="*.tdr", full.names=TRUE)
for(i in 1:length(filenames)) {
X <- read.csv(filenames[i], header=FALSE, sep="") # header=FALSE creates 14 separate columns for each variable rather than one column for everything
# skip = 3 skips first 3 lines of data so you're just left with two rows as headers which are full of NAs and therefore easily subsetable
X <- na.omit(X) # removes the first 2 rows with NA values so you're just left with the data without headers
colnames(X) <- c("Year", "Month","Day", "Hour", "Min","Sec", "Depth","Dummy", "Temperature", "Voltage") # adds names of columns
ds<-paste(filenames[i],sep="") #adds the filename to ds
ds<-substr(ds, 45, nchar(ds)-4) # remove the last 13 charaters (-COMPLETE.pos)
dsn <- paste(ds, i, sep=".")
X$tag<- substr(ds, 4, 8) # removes "Tag" from the tag column entry (begin at the cut at the 4th character and end at the 8th character)
assign(dsn, X) # Give the name "ds" a value of X (data.frame)
print(filenames[i])
}
## Multiple files for a single tag so need to combine to form single data frame for each tag:
tdr51009 <- rbindlist(mget(ls(pattern = "51009")))
tdr51011 <- rbindlist(mget(ls(pattern = "51011")))
tdr51019 <- rbindlist(mget(ls(pattern = "51019")))
tdr51020 <- rbindlist(mget(ls(pattern = "51020")))
tdr51022 <- rbindlist(mget(ls(pattern = "51022")))
tdr51025 <- rbindlist(mget(ls(pattern = "51025")))
tdr51026 <- rbindlist(mget(ls(pattern = "51026")))
tdr51029 <- rbindlist(mget(ls(pattern = "51029")))
tdr51030 <- rbindlist(mget(ls(pattern = "51030")))
tdr51031 <- rbindlist(mget(ls(pattern = "51031")))
tdr51104 <- rbindlist(mget(ls(pattern = "51104")))
tdr51120 <- rbindlist(mget(ls(pattern = "51120")))
tdr51105 <- rbindlist(mget(ls(pattern = "51105")))
tdr51109 <- rbindlist(mget(ls(pattern = "51109")))
tdr51111 <- rbindlist(mget(ls(pattern = "51111")))
tdr51100 <- rbindlist(mget(ls(pattern = "51100")))
tdr51101 <- rbindlist(mget(ls(pattern = "51101")))
tdr51112 <- rbindlist(mget(ls(pattern = "51112")))
tdr51119 <- rbindlist(mget(ls(pattern = "51119")))
tdr51114 <- rbindlist(mget(ls(pattern = "51114")))
tdr51116 <- rbindlist(mget(ls(pattern = "51116")))
tdr51115 <- rbindlist(mget(ls(pattern = "51115")))
tdr51108 <- rbindlist(mget(ls(pattern = "51108")))
tdr51117 <- rbindlist(mget(ls(pattern = "51117")))
tdr51102 <- rbindlist(mget(ls(pattern = "51102")))
tdr51110 <- rbindlist(mget(ls(pattern = "51110")))
tdr51118 <- rbindlist(mget(ls(pattern = "51118")))
tdr51121 <- rbindlist(mget(ls(pattern = "51121")))
tdr51122 <- rbindlist(mget(ls(pattern = "51122")))
tdr51124 <- rbindlist(mget(ls(pattern = "51124")))
tdr51125 <- rbindlist(mget(ls(pattern = "51125")))
tdr51126 <- rbindlist(mget(ls(pattern = "51126")))
tdr51127 <- rbindlist(mget(ls(pattern = "51127")))
tdr51128 <- rbindlist(mget(ls(pattern = "51128")))
tdr51129 <- rbindlist(mget(ls(pattern = "51129")))
tdr51130 <- rbindlist(mget(ls(pattern = "51130")))
tdr51131 <- rbindlist(mget(ls(pattern = "51131")))
tdr51132 <- rbindlist(mget(ls(pattern = "51132")))
tdr51134 <- rbindlist(mget(ls(pattern = "51134")))
tdr51136 <- rbindlist(mget(ls(pattern = "51136")))
TDR2016 <- rbindlist(mget(ls(pattern = "tdr510")))
TDR2017 <- rbind(tdr51104,tdr51120,tdr51105,tdr51109,tdr51111,tdr51100,tdr51101,tdr51112,tdr51119,
tdr51114,tdr51116,tdr51115,tdr51108,tdr51117)
TDR2018 <- rbind(tdr51102,tdr51110,tdr51118,tdr51121,tdr51122,tdr51124,tdr51125,
tdr51126,tdr51127,tdr51128,tdr51129,tdr51130,tdr51131,tdr51132,tdr51134,tdr51136)
TDR2016$datetime<- paste(TDR2016$Year,"/",TDR2016$Month,"/",TDR2016$Day," ",TDR2016$Hour,":",TDR2016$Min,":",TDR2016$Sec, sep="")
temp <- as.POSIXct(as.character(TDR2016$datetime), "%Y/%m/%d %H:%M:%S", tz="UTC")
TDR2016$datetime <- temp
temp2 <- as.numeric(difftime(temp, "2016/09/28 12:00:00", tz="UTC", units="secs"))
TDR2016$secs<- temp2
rm(temp, temp2)
TDR2016 <- subset(TDR2016, Depth>-0.01 & Depth<200 & Dummy < 200)
TDR2016$Month <- as.Date(TDR2016$datetime)
TDR2017$datetime<- paste(TDR2017$Year,"/",TDR2017$Month,"/",TDR2017$Day," ",TDR2017$Hour,":",TDR2017$Min,":",TDR2017$Sec, sep="")
temp <- as.POSIXct(as.character(TDR2017$datetime), "%Y/%m/%d %H:%M:%S", tz="UTC")
TDR2017$datetime <- temp
temp2 <- as.numeric(difftime(temp, "2016/09/28 12:00:00", tz="UTC", units="secs"))
TDR2017$secs<- temp2
rm(temp, temp2)
# Clean data to include only positive depth and logical depth values: Dummy <200 is same reliance metric as residual error
TDR2017 <- subset(TDR2017, Depth>-0.01 & Depth<200 & Dummy < 200)
TDR2017$Month <- as.Date(TDR2017$datetime)
TDR2018$datetime<- paste(TDR2018$Year,"/",TDR2018$Month,"/",TDR2018$Day," ",TDR2018$Hour,":",TDR2018$Min,":",
TDR2018$Sec, sep="")
temp <- as.POSIXct(as.character(TDR2018$datetime), "%Y/%m/%d %H:%M:%S", tz="UTC")
TDR2018$datetime <- temp
temp2 <- as.numeric(difftime(temp, "2016/09/28 12:00:00", tz="UTC", units="secs"))
TDR2018$secs<- temp2
rm(temp, temp2)
# Clean data to include only positive depth and logical depth values: Dummy <200 is same reliance metric as residual error
TDR2018 <- subset(TDR2018, Depth>-0.01 & Depth<200 & Dummy < 200)
TDR2018$Month <- as.Date(TDR2018$datetime)
save(TDR2016, file="Data/Depth Data/TDR2016.Rd")
save(TDR2017, file=paste("Data/Depth Data/TDR2017.Rd"))
save(TDR2018, file=paste("Data/Depth Data/TDR2018.Rd"))
|
# 1) RFmap
# load the data
load("../Data/MTneuron.RData")
# see what has been loaded
print(ls())
# There are four objects:
# directions is a vector containing directions of motion, from -180 to 165 in steps of 15 degrees
# dirtune is a matrix with 824 rows (trials) and 109 columns (spike times) containing spike time for that trial
# theta is a vector of 824 elements indexing which element of the directions variable is associated with each trial
# Finally, RFMap is a 4-dimensional array 10 (y position) x 15 (x position) x 16 (repetitions) x 24 (max number of spikes) containing spike times
# initialize a matrix with dimensions equal to the first two in RFmap (x and y coordinates)
numspks <- matrix(0, dim(RFmap)[1], dim(RFmap)[2])
# find number of trials
nTrials <- dim(RFmap)[3]
# find max number of spikes
max_num_spks <- dim(RFmap)[4]
for (yind in 1:dim(numspks)[1]){
for (xind in 1:dim(numspks)[2]){
# We care about how many spikes were fired at each grid position, not which
# stimulus repeat they were fired on. So let's count how many spikes are
# in the RFmap at each grid location.
# Note that RFmap[yind, xind, , ] is a matrix. We just count how
# many values in the matrix are not zero, and store the value in numspks[yind,xind]
inds <- which(RFmap[yind,xind,,] > 45)
numspks[yind, xind] <- length(inds)
}
}
# To map the locations, we create a vector of x positions from -22 to 6 degrees in the visual field, in steps of 2 degrees
x <- seq(-22, 6, by = 2)
# We do the same for the y axis, which here spans -2 to +16 degrees
y <- seq(-2, 16, by = 2)
# For plotting, we want to modify the data slightly:
# first, now numspks has y coordinates in the rows,
# and x coordinates in the columns, while R likes it
# to be x -> rows and y -> columns.
# Transposing the matrix will please R
numspks <- t(numspks)
# Also, the plotting routine treats the cell [1,1] as the bottom-left corner,
# while we want it to be the upper-left corner
# This fixes the problem
numspks <- numspks[,(dim(numspks)[2]):1]
# A simple plot uses the function image, which plots a matrix
# to see how to use it, type ?image
print(
image(x, y, numspks / nTrials,
main = "RF map of an MT neuron",
xlab = "degrees",
ylab = "degrees",
col = rev(rainbow(27,start=0,end=0.7))
)
)
# a fancier plot can be done using
# ?filled.contour
# which automatically smooths the data
print(
filled.contour(x, y, numspks/nTrials, nlevels = 25,
plot.title = title(main = "RF map of an MT neuron",
xlab = "degrees", ylab = "degrees"),
# choose colors
col = rev(rainbow(28,start=0,end=0.7)),
# add a point
plot.axes={
axis(1); # plot the x-axis
axis(2); # plot the y axis
# The center of the visual field of the monkey was at (7.5, -7.5)
# Let's put a + sign to mark the spot
points(0, 0, pch = "+", cex = 2, col = "white", font = 2)}
)
)
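# A direction-tuning sketch using the dirtune/theta/directions objects described at
# the top of this script (assumes, as for RFmap, that zero entries are padding rather
# than real spike times):
if (FALSE) {
  mean_counts <- sapply(seq_along(directions), function(d)
    mean(rowSums(dirtune[theta == d, , drop = FALSE] > 0)))
  plot(directions, mean_counts, type = "b",
       xlab = "direction (degrees)", ylab = "mean spike count")
}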
|
/Workshops/Osborne/Code/plot_RFmap.R
|
no_license
|
StefanoAllesina/BSD-QBio
|
R
| false | false | 3,095 |
r
|
# 1) RFmap
# load the data
load("../Data/MTneuron.RData")
# see what has been loaded
print(ls())
# There are four objects:
# directions is a vector containing directions of motion, from -180 to 165 in steps of 15 degrees
# dirtune is a matrix with 824 rows (trials) and 109 columns (spike times) containing spike time for that trial
# theta is a vector of 824 elements indexing which element of the directions variable is associated with each trial
# Finally, RFMap is a 4-dimensional array 10 (y position) x 15 (x position) x 16 (repetitions) x 24 (max number of spikes) containing spike times
# initialize a matrix with dimensions equal to the first two in RFmap (x and y coordinates)
numspks <- matrix(0, dim(RFmap)[1], dim(RFmap)[2])
# find number of trials
nTrials <- dim(RFmap)[3]
# find max number of spikes
max_num_spks <- dim(RFmap)[4]
for (yind in 1:dim(numspks)[1]){
for (xind in 1:dim(numspks)[2]){
# We care about how many spikes were fired at each grid position, not which
# stimulus repeat they were fired on. So let's count how many spikes are
# in the RFmap at each grid location.
# Note that RFmap[yind, xind, , ] is a matrix. We just count how
# many values in the matrix are not zero, and store the value in numspks[yind,xind]
inds <- which(RFmap[yind,xind,,] > 45)
numspks[yind, xind] <- length(inds)
}
}
# To map the locations, we create a vector of x positions from -22 to 6 degrees in the visual field, in steps of 2 degrees
x <- seq(-22, 6, by = 2)
# We do the same for the y axis, which here spans -2 to +16 degrees
y <- seq(-2, 16, by = 2)
# For plotting, we want to modify the data slightly:
# first, now numspks has y coordinates in the rows,
# and x coordinates in the columns, while R likes it
# to be x -> rows and y -> columns.
# Transposing the matrix will please R
numspks <- t(numspks)
# Also, the plotting routine treats the cell [1,1] as the bottom-left corner,
# while we want it to be the upper-left corner
# This fixes the problem
numspks <- numspks[,(dim(numspks)[2]):1]
# A simple plot uses the function image, which plots a matrix
# to see how to use it, type ?image
print(
image(x, y, numspks / nTrials,
main = "RF map of an MT neuron",
xlab = "degrees",
ylab = "degrees",
col = rev(rainbow(27,start=0,end=0.7))
)
)
# a fancier plot can be done using
# ?filled.contour
# which automatically smooths the data
print(
filled.contour(x, y, numspks/nTrials, nlevels = 25,
plot.title = title(main = "RF map of an MT neuron",
xlab = "degrees", ylab = "degrees"),
# choose colors
col = rev(rainbow(28,start=0,end=0.7)),
# add a point
plot.axes={
axis(1); # plot the x-axis
axis(2); # plot the y axis
# The center of the visual field of the monkey was at (7.5, -7.5)
# Let's put a + sign to mark the spot
points(0, 0, pch = "+", cex = 2, col = "white", font = 2)}
)
)
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% String.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{replace.String}
\alias{replace.String}
\alias{String.replace}
\alias{replace.String}
\alias{replace,String-method}
\title{Replaces all occurrences of oldChar in this string with newChar}
\usage{\method{replace}{String}(this, oldChar, newChar, ...)}
\arguments{
\item{oldChar}{The old \code{\link[base]{character}}.}
\item{newChar}{The new \code{\link[base]{character}}.}
}
\description{
Replaces all occurrences of \code{oldChar} in this string with
\code{newChar}.
}
\value{
Returns the new string.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\examples{
s <- String("Hello world!")
replace(s, "!", ".") # "Hello world."
replace(String("Hello world!"), "!", ".") # "Hello world."
}
\keyword{internal}
\keyword{methods}
|
/man/replace.String.Rd
|
no_license
|
HenrikBengtsson/R.lang
|
R
| false | false | 1,115 |
rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% String.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{replace.String}
\alias{replace.String}
\alias{String.replace}
\alias{replace.String}
\alias{replace,String-method}
\title{Replaces all occurrences of oldChar in this string with newChar}
\usage{\method{replace}{String}(this, oldChar, newChar, ...)}
\arguments{
\item{oldChar}{The old \code{\link[base]{character}}.}
\item{newChar}{The new \code{\link[base]{character}}.}
}
\description{
Replaces all occurrences of \code{oldChar} in this string with
\code{newChar}.
}
\value{
Returns the new string.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\examples{
s <- String("Hello world!")
replace(s, "!", ".") # "Hello world."
replace(String("Hello world!"), "!", ".") # "Hello world."
}
\keyword{internal}
\keyword{methods}
|
##
## Download the Baseball Register from Ted Turocy's site
## http://chadwick-bureau.com/the-register/
library(dplyr)
url = "http://www.chadwick-bureau.com/data/register/register-20150405.zip"
download.file(url, destfile = "inst/extdata/register.zip")
# Unzip it
system("unzip \"inst/extdata/register.zip\" -d \"inst/extdata/\"")
# move the file back to the data directory
system("mv \"inst/extdata/register-20150405/register.csv\" inst/extdata/")
# remove the empty directory
system("rm -R inst/extdata/register-20150405")
# remove the ZIP file
system("rm inst/extdata/register.zip")
register = read.csv(system.file("extdata", "register.csv", package = "openWAR"))
idTT <- register %>%
filter(!is.na(key_mlbam)) %>%
select(key_person, key_mlbam, key_retro, key_bbref, key_bbpro
, key_fangraphs, name_last, name_first, name_given)
# Examine Mike Trout
# filter(idTT, name_last == "Trout")
# save it to the data folder
save(idTT, file="data/idTT.rda", compress = "xz")
|
/data-raw/idTT.R
|
no_license
|
davidbmitchell/openWARData
|
R
| false | false | 991 |
r
|
##
## Download the Baseball Register from Ted Turocy's site
## http://chadwick-bureau.com/the-register/
library(dplyr)
url = "http://www.chadwick-bureau.com/data/register/register-20150405.zip"
download.file(url, destfile = "inst/extdata/register.zip")
# Unzip it
system("unzip \"inst/extdata/register.zip\" -d \"inst/extdata/\"")
# move the file back to the data directory
system("mv \"inst/extdata/register-20150405/register.csv\" inst/extdata/")
# remove the empty directory
system("rm -R inst/extdata/register-20150405")
# remove the ZIP file
system("rm inst/extdata/register.zip")
register = read.csv(system.file("extdata", "register.csv", package = "openWAR"))
idTT <- register %>%
filter(!is.na(key_mlbam)) %>%
select(key_person, key_mlbam, key_retro, key_bbref, key_bbpro
, key_fangraphs, name_last, name_first, name_given)
# Examine Mike Trout
# filter(idTT, name_last == "Trout")
# save it to the data folder
save(idTT, file="data/idTT.rda", compress = "xz")
|
#' Get QJM data for a station between two times t1 and t2
#' @description This is used to get QJM data between two years t1 and t2.
#' @export
#' @param station station code
#' @param t1 Beginning year
#' @param t2 Ending year
#' @param verbose whether to progressively print what data is being collected. Defaults to TRUE.
#' @param sleep time to wait between two successive GET requests. Defaults to 10 seconds.
#' @return tibble with QJM data
#' @examples
#' df_qjm<-bh_get_qjm(station="V2942010",
#' t1=2008,
#' t2=2009)
bh_get_qjm <- function (station,t1,t2,verbose=TRUE, sleep=10) {
seqyears=as.numeric(t1):as.numeric(t2)
df=NULL
for (i in 1:length(seqyears)){
if(verbose==TRUE){print(paste0("Collecting QJM data for year ",
seqyears[i],"."))}
res_tmp=get_to_station(station)
Sys.sleep(sleep)
res_tmp=res_tmp %>%
get_to_procedure("QJM",station)
Sys.sleep(sleep)
res_tmp=res_tmp %>%
get_to_qjm(year=seqyears[i])
Sys.sleep(sleep)
df_tmp=res_tmp %>%
collect_qjm(year=seqyears[i]) %>%
dplyr::mutate(station=rep(station,dplyr::n())) %>%
dplyr::select(station,.data$Date, dplyr::everything())
df=df %>%
dplyr::bind_rows(df_tmp)
}
df=unique(df)
return(df)
}
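# Hypothetical follow-up sketch (not run): columns other than `station` and `Date` depend on
# the QJM payload, so this only counts the daily records returned per station and year.
# df_qjm %>%
#   dplyr::mutate(year = format(Date, "%Y")) %>%
#   dplyr::count(station, year)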
|
/R/bh_get_qjm.R
|
no_license
|
lvaudor/banqueHydro
|
R
| false | false | 1,317 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_proj_dat.R
\name{get_proj_dat}
\alias{get_proj_dat}
\title{Get project data}
\usage{
get_proj_dat(dir)
}
\arguments{
\item{dir}{A path}
}
\value{
Path and git remote url if available
}
\description{
Get project data
}
\examples{
get_proj_dat(".")
}
|
/man/get_proj_dat.Rd
|
no_license
|
alexwhan/projr
|
R
| false | true | 331 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/keras_helpers.R
\name{predict_keras}
\alias{predict_keras}
\title{Predict with a fitted keras model}
\usage{
predict_keras(
modelNN,
predInput,
maskNA = NULL,
scaleInput = FALSE,
col_means_train,
col_stddevs_train,
batch_size = NULL,
filename = "",
tempdirRaster = NULL,
nCoresRaster = 2
)
}
\arguments{
\item{predInput}{data.frame or raster with colnames or layer names matching the expected input for modelNN}
  \item{tempdirRaster}{directory for temporary raster files written during prediction (optional)}
}
\description{
Predict from a fitted keras model over a data.frame or raster, optionally rescaling the inputs with the training column means and standard deviations.
}
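\examples{
## Hypothetical sketch, not run: `modelNN`, `newdata`, `col_means` and `col_sds` are
## assumed objects (a fitted keras model, new data with matching columns, and the
## scaling vectors saved from training); argument names follow \usage above.
\dontrun{
pred <- predict_keras(modelNN, predInput = newdata, scaleInput = TRUE,
                      col_means_train = col_means, col_stddevs_train = col_sds)
}
}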
|
/man/predict_keras.Rd
|
no_license
|
jmaspons/MLTools
|
R
| false | true | 530 |
rd
|
|
######################################################################
# Date: 05-04-2020
# Author: Nick Lesniak
# Title: Setup data to be run in machine learning pipeline
######################################################################
######################################################################
# Description:
# This script will read in data
# - Feature data (shared file w/OTUs)
# - Metadata
# It will run the following:
# - code/R/compute_correlation_matrix.R
######################################################################
######################################################################
# Dependencies and Outputs:
# Be in the project directory.
# The outputs are:
# (1) data/process/LEVEL_input_data.csv - CSV with data for machine learning -
# first column is outcome of interest,
# remaining columns are features, one per column.
# (2) data/process/sig_flat_corr_matrix_LEVEL.csv - CSV with correlated features
######################################################################
meta_file <- ##### insert metadata file name #####
feature_file <- ##### insert features file name #####
################### IMPORT LIBRARIES and FUNCTIONS ###################
# The dependencies for this script are installed and loaded in the first part
deps = c("tidyverse", "caret", "Hmisc");
for (dep in deps){
if (dep %in% installed.packages()[,"Package"] == FALSE){
install.packages(as.character(dep), quiet=TRUE, repos = "http://cran.us.r-project.org", dependencies=TRUE);
}
library(dep, verbose=FALSE, character.only=TRUE)
}
# Load in needed functions and libraries
source('code/R/compute_correlation_matrix.R')
######################################################################
######################## DATA PREPARATION #############################
# ----------------------- Read in data --------------------------------
# Read in metadata
meta <- read_tsv(meta_file)
# Read in OTU table and remove label and numOtus columns
features <- read_tsv(feature_file)
# ---------------------------------------------------------------------
# ----------------- Select samples and features -----------------------
# Filter metadata and select only sample names and outcome columns
# Merge metadata and feature data.
# Then remove the sample name column
data <- meta %>%
filter(###### insert edits here ########
) %>%
select(###### select sample names and outcome column #####
) %>%
inner_join(features, by=c("sample_names")) %>%
select(-sample_names) %>%
drop_na()
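# Hypothetical filled-in version of the block above (the column names `day`, `sample_names`
# and `clearance` are placeholders; replace them with the actual metadata columns):
# data <- meta %>%
#   filter(day == 0) %>%
#   select(sample_names, clearance) %>%
#   inner_join(features, by = c("sample_names")) %>%
#   select(-sample_names) %>%
#   drop_na()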
# ---------------------------------------------------------------------
save_data <- function(data, level){
# ---------------------- Process model data ---------------------------
# Remove features with near zero variance and scale remaining from 0 to 1
preProcValues <- preProcess(data, method = c("nzv", "range"))
dataTransformed <- predict(preProcValues, data)
# Save data to be used in machine learning pipeline
write_csv(dataTransformed, paste0('data/process/', level, '_input_data.csv'))
# ---------------------------------------------------------------------
# ------------------- Create correlation matrix -----------------------
# Create correlation matrix of machine learning data
# filters correlation >= cor_value and p values < p_value
# default values are cor_value = 1, and p_value = 0.1
compute_correlation_matrix(input_file = paste0('data/process/', level, '_input_data.csv'),
outcome = 'clearance', level = level,
cor_value = 0.8, p_value = 0.05)
# ---------------------------------------------------------------------
}
save_data(data, 'otu')
|
/scripts/ML_pipeline_microbiome/code/R/setup_model_data.R
|
permissive
|
wangdi2014/ml-crkp-infection-manuscript
|
R
| false | false | 3,629 |
r
|
|
####
library(raster)
library(dplyr)
library(plyr)
library(ggplot2)
library(rgdal)
library(maptools)
setwd("/Users/Xin/Research/NRI_urban/")
### county boundary ###
county_boundry <- readOGR("data/cb_2016_us_county_5m/",layer = "cb_2016_us_county_5m")
ia_boundry <- county_boundry[county_boundry$STATEFP==19,]
ia_dat <- ia_boundry@data
ia_dat <- cbind(ID = 1:nrow(ia_dat), ia_dat)
ia_fort <- fortify(ia_boundry, region="NAME")
ia_boundry <- as(ia_boundry,"SpatialPolygons")
ggplot(data=ia_fort, aes(long, lat, group = group)) +
geom_polygon(color = "black", fill = "white") + theme_bw()
### map all data to county level for all years
#datall <- read.csv("Research/survey/data/IA_TS_800m/IA_UrbanArea_TS.csv")
year <- seq(1985,2015,by = 1)
nyear <- length(year)
crsboundry <- crs(county_boundry)
### get index based on 1985
dat85r <- raster("data/IA_TS_800m/IA_UrbanArea_1985.tif")
dat85 <- rasterToPoints(dat85r,spatial = TRUE)
dat85 <- spTransform(dat85,CRSobj = crs(county_boundry))
crsts <- crs(dat85r)
index85 <- over(dat85, ia_boundry) ### find county
## the numbers are the orders in the data not the fips
indexnn <- !is.na(index85) ## not NA index
index85n <- index85[indexnn] # county index
df85 <- as.data.frame(dat85)[indexnn,] ## data without NA
ia_urban_area <- as.data.frame(matrix(0, length(index85n), 3 + nyear))
colnames(ia_urban_area) <- c("index","x","y",paste("A",year,sep=""))
ia_urban_area$index <- index85n
ia_urban_area[,c("x","y","A1985")] <- df85[,c(2,3,1)]
for(j in 2:nyear)
{
namej <- paste("data/IA_TS_800m/IA_UrbanArea_",year[j],".tif",sep="")
datjr <- raster(namej)
datj <- rasterToPoints(datjr,spatial = TRUE)
datj <- spTransform(datj,CRSobj = crsboundry)
dfj <- as.data.frame(datj)
ia_urban_area[,j+3] <- dfj[indexnn,1]
}
### replace county numbers with fips
ia_urban_area <- merge(ia_urban_area, ia_dat[,c("ID","COUNTYFP","GEOID","NAME")],by.x = "index",by.y ="ID")
ia_urban_area$COUNTYFP <- as.numeric(as.character(ia_urban_area$COUNTYFP))
ia_urban_area$GEOID <- as.numeric(as.character(ia_urban_area$GEOID))
ia_urban_area <- arrange(ia_urban_area, COUNTYFP)
write.csv(ia_urban_area, "data/ia_urban_area_800m.csv",row.names = FALSE)
#write.csv(dat, "Research/survey/data/datia_800m.csv",row.names = FALSE)
#write.csv(dat1, "Research/survey/data/datia1_800m.csv",row.names = FALSE)
#datcounty <- ddply(dat, .(county), summarize, area01 = sum(A2001),
# area11 = sum(A2011), numseg = length(county)) ## population
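# A dplyr sketch of the county-level summary hinted at above (not run; column names are
# taken from ia_urban_area built above):
# datcounty <- ia_urban_area %>%
#   group_by(COUNTYFP) %>%
#   summarise(area1985 = sum(A1985), area2015 = sum(A2015), numseg = n())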
temp <- raster("Research/survey/IA_TS_800m/IA_UrbanArea_2001.tif")
temp <- rasterToPoints(temp)
temp1 <- as.data.frame(temp, xy = TRUE)
#### map crs based on ts #####
dat85r <- raster("data/IA_TS_800m/IA_UrbanArea_1985.tif")
dat85 <- rasterToPoints(dat85r,spatial = TRUE)
crsts <- crs(dat85r)
ia_boundry1 <- spTransform(ia_boundry, crsts)
index85 <- over(dat85, ia_boundry1) ### find county
## the numbers are the orders in the data not the fips
indexnn <- !is.na(index85) ## not NA index
index85n <- index85[indexnn] # county index
df85 <- as.data.frame(dat85)[indexnn,] ## data without NA
ia_urban_area <- as.data.frame(matrix(0, length(index85n), 3 + nyear))
colnames(ia_urban_area) <- c("index","x","y",paste("A",year,sep=""))
ia_urban_area$index <- index85n
ia_urban_area[,c("x","y","A1985")] <- df85[,c(2,3,1)]
for(j in 2:nyear)
{
namej <- paste("data/IA_TS_800m/IA_UrbanArea_",year[j],".tif",sep="")
datjr <- raster(namej)
datj <- rasterToPoints(datjr,spatial = TRUE)
dfj <- as.data.frame(datj)
ia_urban_area[,j+3] <- dfj[indexnn,1]
}
### replace county numbers with fips
ia_urban_area <- merge(ia_urban_area, ia_dat[,c("ID","COUNTYFP","GEOID","NAME")],by.x = "index",by.y ="ID")
ia_urban_area$COUNTYFP <- as.numeric(as.character(ia_urban_area$COUNTYFP))
ia_urban_area$GEOID <- as.numeric(as.character(ia_urban_area$GEOID))
ia_urban_area <- arrange(ia_urban_area, COUNTYFP)
write.csv(ia_urban_area, "data/ia_urban_area_800m_ts.csv",row.names = FALSE)
temp2 <- read.csv("data/ia_urban_area_800m_ts.csv",stringsAsFactors = FALSE)
temp3 <- read.csv("data/ia_urban_area_800m.csv",stringsAsFactors = FALSE)
|
/mapdat.R
|
no_license
|
wangx23/Spgr_bhf_test
|
R
| false | false | 4,150 |
r
|
|
# Multiple regression practice (multivariate regression)
#
# A broadcasting company decided to develop a TV program for viewers aged 65 and over. To obtain basic information,
# 25 viewers were surveyed and data on the following four variables were collected.
# (data file: 4. 다중회귀분석.sav)
# Q: Do living arrangement (동거여부), age (연령), and years of education (교육기간) affect TV viewing time (시청시간)?
rm(list=ls())
library(dplyr)
library(haven)
library(psych)
setwd("~/R/Regression/SPSS_다변량회귀분석/01.상반부 자료/실습자료")
df.a <- read_spss("2. 다중회귀분석 실습.sav")
df.a.m <- df.a[,c(3:6)]
df.a.m <- na.omit(df.a.m)
describe(df.a.m)
library(Hmisc)
df.cor <- cor(df.a.m)
round(df.cor, 3)
df.rcorr <- rcorr(as.matrix(df.a.m))
df.rcorr
# Checking the correlations among the independent variables, years of education and age show a somewhat high
# correlation of -0.501.
# As a rule of thumb, a correlation above 0.8 should raise suspicion of multicollinearity.
# Compared with SPSS regression output we need: 1. the model summary, 2. the ANOVA table, 3. the regression equation
# from the unstandardized coefficients, plus checks of the normality and scatter plot of the standardized residuals.
df.a.reg <- lm(시청시간 ~ 동거여부 + 연령 + 교육기간,data = df.a)
summary(df.a.reg)
# R : the square root of R², i.e. the (multiple) correlation coefficient.
# R² : called the coefficient of determination. It is the share of the variance of the dependent variable explained
# by the independent variables and lies between 0 and 1. With the three predictors (education, living arrangement,
# age) R² is .6256, so the model explains 62.6% of the dependent variable. Adjusted R², which also reflects the
# number of predictors and the sample size, is .572.
# R² grows whenever predictors are added, whereas adjusted R² can decrease when the added explanatory power is small.
# Independence of errors
# Regression assumes that the observations of the dependent variable are independent.
# If the values of the dependent variable are themselves correlated, the independence assumption is violated and the
# regression results cannot be trusted.
# To check this, regression uses the autocorrelation of the error terms of the dependent variable.
# The standard measure of autocorrelation is the Durbin-Watson statistic (range: 0 to 4).
# Interpreting the Durbin-Watson statistic:
# close to 0: positive autocorrelation
# close to 4: negative autocorrelation
# close to 2: the error terms of the fitted regression are independent, with no autocorrelation.
# In practice, 1.8 < Durbin-Watson < 2.2 can be taken to indicate independent errors.
library(car)
durbinWatsonTest(df.a.reg)
# In the output above, "D-W Statistic" is the Durbin-Watson value (1.310978). Since it is close to 2, the errors can be regarded as independent.
# df.a.aov <- anova(df.a.reg)
anova(df.a.reg)
# The ANOVA judges whether the regression equation as a whole is significant. With F=367.64 and
# p = 0.001, this regression can be judged statistically significant.
# R² = sum of the regression sums of squares (16.084 + 14.764 + 5.310) / (regression SS + residual SS
#      (= 16.084 + 14.764 + 5.310 + 116.445)) = 0.2369416
# brand is excluded from the calculation because its p-value is greater than 0.05
reg.res <- 8.6400 + 6.0468 + 5.2459 # regression sums of squares: variance explained by the regression (SSR)
reg.sumsq <- 11.9273 # residual sum of squares: variance not explained by the regression (SSE)
reg.res / (reg.res+ reg.sumsq) # R²
# F = 6.644233 / 0.5684 = 11.68936
((8.6400 + 6.0468 + 5.2459)/3)/0.5684 # F value
summary(df.a.reg) # regression results with unstandardized coefficients
# Standardized vs. unstandardized coefficients in the regression equation
# The regression equation can be written with either standardized or unstandardized coefficients.
# Standardized coefficients are used in multiple regression: when the measured variables use different units,
# such as cm and kg, the units have to be put on a common scale, which is what standardized coefficients do.
library(lm.beta) # provides lm.beta(); assumed here since no package exporting it is loaded above
df.a.beta <- lm.beta(df.a.reg)
summary(df.a.beta)
# From the unstandardized coefficients (B) the following regression equation is obtained:
# Y(시청시간) = 1.49526 - 1.17573*X(동거여부) + 0.03876*X(연령) - 0.15228*X(교육기간)
# The t values for living arrangement, age, and education are -3.726, 1.214, and -3.039; except for age the p-values
# are below 0.05, so the null hypothesis is rejected and the research hypothesis is supported. Living arrangement
# and education therefore have a negative effect on viewing time.
# The relative influence of the predictors is judged from the standardized coefficients (standardization is needed
# because the predictors are measured in different units).
# The larger the absolute standardized coefficient, the larger the influence (living arrangement > education > age).
# Living arrangement is significant when the other two variables (age, education) are in the model (t=-3.726, p=.001).
# Living arrangement has a negative effect on viewing time.
# Age is not significant when the other two variables are in the model (t=1.214, p=.238).
# Age has no effect on viewing time.
# Education is significant when the other two variables are in the model (t=-3.039, p=.006).
# Education has a negative effect on viewing time.
# Multicollinearity diagnostics
# eigenvalue / VIF check : use vif()
# condition number (condition index) : use kappa()
vif(df.a.reg) # variance inflation factors
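# Sketch of the condition-number check mentioned above; applying kappa() directly to the
# fitted lm object is one way to obtain it (exact = TRUE uses the full decomposition):
kappa(df.a.reg, exact = TRUE)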
library(olsrr) # while vif() only reports the variance inflation factors, the olsrr package's
# ols_coll_diag() function reports all of the multicollinearity diagnostics that SPSS produces.
ols_coll_diag(df.a.reg)
# "Tolerance" is the tolerance (1/VIF)
# "Condition Index" is the condition index, i.e. the condition number mentioned above
# "intercept" is the constant term
# the remaining figures reported for feature, comfort, and usefulness are the variance proportions
# 03. Checking the regression assumptions
# homoscedasticity: Scale-Location plot, ncvTest
# normality: Normal Q-Q plot, shapiro.test
# linearity: Residuals vs Fitted plot
# independence: durbinWatsonTest
# outlier/influence check: Residuals vs Leverage (Cook's distance), cutoff 4/(n-k-1)
# checking the assumptions graphically
opar <- par(no.readonly = TRUE)
par(mfrow=c(2,2))
plot(df.a.reg)
par(opar) # restore the previous graphics settings
# Residuals vs Fitted : good if the points are spread evenly from left to right (equal variance); if the residuals
# show a clear pattern, the model needs to be revised. (checks the homoscedasticity assumption of the residuals)
# Normal Q-Q : good if the residuals converge to the straight diagonal line (checks the normality assumption)
# Scale-Location : ideally the trend has slope 0
# Residuals vs Leverage : ideal when the points cluster around the middle left.
# For simple regression it is usually enough to check only the normality and homoscedasticity of the residuals.
# Drawing a P-P (probability) plot
# https://stat.ethz.ch/pipermail/r-help/2007-September/141873.html
# These days the Q-Q plot is used more than the P-P plot, because it shows more informative detail for each point.
tmp1 <- resid(df.a.reg)
tmp2 <- pnorm( tmp1, 0, summary(df.a.reg)$sigma )
par(mfrow=c(2,1))
qqnorm(tmp1)
qqline(tmp1)
plot( ppoints(length(tmp1)), sort(tmp2), xlab='Theoretical Percentiles(관측누적확률)',
ylab='Sample Percentiles(기대 누적 확률)', main = "PP Plot(회귀 표준화 잔차의 정규 PP 도표)")
abline(0,1)
# Under normality the points fall along the diagonal, and the closer the points are to the diagonal, the better the
# normality assumption is met; so the errors can be regarded as satisfying the normality assumption here.
# The standardized residuals are checked by comparing the standardized predicted values with the standardized residuals.
# In the Residuals vs Fitted plot above the residuals are scattered roughly at random around 0, with no particular
# pattern, so the independence and equal-variance assumptions of the errors are satisfied.
# The functions below can be used to obtain the residuals and the studentized residuals.
# residuals
resid(df.a.reg)
# studentized (standardized) residuals
rstudent(df.a.reg)
# plot of the studentized residuals
plot(rstudent(df.a.reg), main = "산점도")
# Independence of errors
# Regression assumes that the observations of the dependent variable are independent.
# If the values of the dependent variable are themselves correlated, the independence assumption is violated and the
# regression results cannot be trusted.
# To check this, regression uses the autocorrelation of the error terms of the dependent variable.
# The standard measure of autocorrelation is the Durbin-Watson statistic (range: 0 to 4).
# Interpreting the Durbin-Watson statistic:
# close to 0: positive autocorrelation
# close to 4: negative autocorrelation
# close to 2: the error terms of the fitted regression are independent, with no autocorrelation.
# In practice, 1.8 < Durbin-Watson < 2.2 can be taken to indicate independent errors.
durbinWatsonTest(df.a.reg)
# In the output above, "D-W Statistic" is the Durbin-Watson value (1.310978).
# normality test of the residuals
shapiro.test(df.a.reg$residuals)
# The p-value of the Shapiro-Wilk test is greater than 0.05 (0.1074), so the null hypothesis of normality is not
# rejected and the residuals can be assumed to be normally distributed.
# checking the assumptions numerically
# test of homoscedasticity (constant variance) of the residuals
ncvTest(df.a.reg)
# outlier/influence check: combined view of studentized residuals, hat values, and Cook's distance
influencePlot(df.a.reg, id.method="identify")
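# Sketch of the 4/(n-k-1) Cook's distance cutoff mentioned above; with the intercept
# counted in coef(), n - k - 1 equals nrow(df.a.m) - length(coef(df.a.reg)):
cd.cutoff <- 4/(nrow(df.a.m) - length(coef(df.a.reg)))
plot(cooks.distance(df.a.reg), type = "h", main = "Cook's distance")
abline(h = cd.cutoff, lty = 2)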
|
/02_다변량회귀실습.R
|
no_license
|
Joshuariver/regression
|
R
| false | false | 10,196 |
r
|
|
#' Scrape data by entry_id
#'
#' @param entry_id Integer. The entry ID that will be scraped.
#' @param user_agent Character. Used to set the user header string in the cURL request. As default, a generic one provided as per https://stackoverflow.com/a/31597823; however, users can provide a custom user agent.
#' @param export_csv Logical. If **TRUE** exports the tibble to a csv file.
#' @param sleep_time Numeric. A value to set Sys.sleep. Useful to not overwhelm the servers when scraping at scale. Defaults to 0.05.
#'
#' @return Always returns a tibble with same columns. If cURL request returns an error (such as if entry is deleted or servers are temporarily unavailable), returns a tibble with missing values filled with NA.
#'
#' @export
#'
#' @examples eksi_scrape_entry(1)
#' @examples eksi_scrape_entry(1, export_csv=TRUE)
eksi_scrape_entry <- function(entry_id, user_agent=curl_user_agent, export_csv=FALSE, sleep_time=0.05) {
Sys.sleep(sleep_time) # sleep briefly to not overwhelm servers of the website when scraping at scale
main_url <- paste0("https://eksisozluk.com/entry/",entry_id)
  query <- try(xml2::read_html(curl::curl(main_url, handle = curl::new_handle("useragent" = user_agent)), encoding = 'UTF-8'), silent = F)
if ("try-error" %in% class(query)) {
print(glue::glue("No data returned for Entry ID: ", entry_id))
entry <- tibble::tibble(
id= as.character(entry_id),
text=NA,
date_time= NA,
author_name = NA,
author_id = NA,
favourite_count =NA,
title_text=NA,
title_id=NA,
title_slug=NA,
error_message=query[1])
} else {
get_column <- function(x) {query %>%
rvest::html_node(xpath = x) %>%
rvest::html_text(trim = T)}
print(glue::glue("Data returned for Entry ID: ", entry_id))
entry <- tibble::tibble(
id = get_column('//*[@id="entry-item-list"]/li//@data-id'),
text= get_column('//*[@id="entry-item-list"]/li/div[1]'),
date_time= get_column('//a[@class="entry-date permalink"]//text()'),
author_name = get_column('//*[@id="entry-item-list"]/li//@data-author'),
author_id = get_column('//*[@id="entry-item-list"]/li//@data-author-id'),
favourite_count = get_column('//*[@id="entry-item-list"]/li//@data-favorite-count'),
title_text=get_column('//h1[@id="title"]'),
title_id= get_column('//h1[@id="title"]//@data-id'),
title_slug= get_column('//h1[@id="title"]//@data-slug'),
error_message=NA)
}
if (export_csv==TRUE) {
readr::write_csv(entry, path = glue::glue(getwd(),"/eksi_entry_no_",entry_id , ".csv"))
} else {
return(entry)
}
}
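# Hypothetical batch-usage sketch (not part of the package API, not run): scrape a range of
# entry IDs and row-bind the per-entry tibbles; assumes purrr is available.
# entries <- purrr::map_dfr(1:100, eksi_scrape_entry, sleep_time = 0.5)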
|
/R/scrape_entry.R
|
no_license
|
sefabey/eksiR
|
R
| false | false | 2,637 |
r
|
|
% Created : 2002-10-05
% Modified: $Date: 2006/12/02 05:10:17 $
% Revision: $Revision: 1.4 $
% RCS-ID: $Id: fitted.cpolr.Rd,v 1.4 2006/12/02 05:10:17 olau Exp $
\name{fitted.anchors.cpolr}
\alias{fitted.anchors.cpolr}
\title{Conditional and unconditional prediction for censored ordered probit}
\description{
Conditional and unconditional prediction for censored ordered
probit. Unconditional prediction returns the fitted values (predicted
probabilities) from the \code{\link{cpolr}} object. Conditional prediction
takes the observed range of the diff-corrected self-response output from
\code{\link{anchors}} and renormalizes the predicted
probabilities for each observation.}
\usage{
\method{fitted}{anchors.cpolr}(object, average = FALSE, unconditional = FALSE, ...)
}
\arguments{
\item{object}{anchors.cpolr object}
\item{average}{a logical value. See \code{values} below for more details.}
\item{unconditional}{Set to TRUE if you submit an
anchors.object AND want the unconditional probabilities returned.
One case that you would submit a anchors.rank object is
if you did subsetting for the anchors object but not for the cpolr
object, and want the intersection of the two objects used for the
unconditional probabilities.}
\item{\dots}{required for S3, but any other options will be ignored.}
}
\value{If \code{average = FALSE}, a matrix of predicted probabilities
with rows corresponding to observations, and columns corresponding to
categories.
If \code{average = TRUE}, the matrix of predicted probabilities
(conditional or unconditional) is summarized to a vector (summed by categories,
then renormalized to sum to 1).
If \code{anchors} object has been specified, then each observation is
renormalized to fall into the range of the diff-corrected
self-response for that observation. If there are no ties for a given
observation, then that observation is a
vector consisting of (k-1) zeros and 1 one. If there are ties, then
the predicted probabilities for that observation are renormalized to
fall within the diff-corrected range.
If \code{anchors} object is omitted, identical to the matrix of predicted
probabilities from the \code{\link{cpolr}} output.
}
\note{
Related materials and worked examples are available at
http://wand.stanford.edu/anchors/
}
\author{Jonathan Wand \url{http://wand.stanford.edu}}
\references{
Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied
Statistics with S.} 4th edition. Springer.
Wand, Jonathan; Gary King; and Olivia Lau. (2007) ``Anchors: Software for
Anchoring Vignettes''. \emph{Journal of Statistical Software}. Forthcoming.
copy at http://wand.stanford.edu/research/anchors-jss.pdf
Wand, Jonathan and Gary King. (2007)
  Anchoring Vignettes in R: A (different kind of) Vignette
copy at http://wand.stanford.edu/anchors/doc/anchors.pdf
Gary King and Jonathan Wand. "Comparing Incomparable Survey
Responses: New Tools for Anchoring Vignettes," Political Analysis, 15,
1 (Winter, 2007): Pp. 46-66,
copy at http://gking.harvard.edu/files/abs/c-abs.shtml.
}
\seealso{\code{\link{anchors}}, \code{\link{cpolr}}}
\examples{
## see examples in anchors
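## Hypothetical sketch (objects not defined here): given an anchors.cpolr fit `fo`,
## the averaged conditional predicted probabilities could be obtained with
## fitted(fo, average = TRUE)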
}
\keyword{models}
|
/man/anchors.fitted.cpolr.Rd
|
no_license
|
cran/anchors
|
R
| false | false | 3,262 |
rd
|
|
require(truncdist)
require(ggplot2)
require(ggpubr)
require(triangle)
#clear environment
rm(list = ls())
iter<-5000
timestep<-0.001
duration<-20
set.seed(30) # set after checking variability from one run to another
#with 5,000 iterations and 0.001 timestep
source('simulation_code_dilution_v2.R')
#----------------MODEL A--------------------------------------------
sim.function(type="primary",model="A",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
hands<-rep(NA,iter)
hands.mean<-rep(NA,lengthsim)
hands.sd<-rep(NA,lengthsim)
fomites<-rep(NA,iter)
fomites.mean<-rep(NA,lengthsim)
fomites.sd<-rep(NA,lengthsim)
doseA<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][4,i]
hands[j]<-matrix.list[[j]][2,i]
fomites[j]<-matrix.list[[j]][1,i]
if(i==lengthsim){
doseA[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
hands.mean[i]<-mean(hands)
hands.sd[i]<-sd(hands)
fomites.mean[i]<-mean(fomites)
fomites.sd[i]<-sd(fomites)
}
means<-c(mucous.mean,hands.mean,fomites.mean)
sd<-c(mucous.sd,hands.sd,fomites.sd)
state<-c(rep("mucous membranes",lengthsim),rep("hands",lengthsim),
rep("fomites",lengthsim))
time<-rep(0:(lengthsim-1),3)
frame.model.A<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model A")
#--------------- MODEL B---------------------------------------------
sim.function(type="primary",model="B",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
fingertip<-rep(NA,iter)
fingertip.mean<-rep(NA,lengthsim)
fingertip.sd<-rep(NA,lengthsim)
nonfingertip<-rep(NA,iter)
nonfingertip.mean<-rep(NA,lengthsim)
nonfingertip.sd<-rep(NA,lengthsim)
fomites<-rep(NA,iter)
fomites.mean<-rep(NA,lengthsim)
fomites.sd<-rep(NA,lengthsim)
doseB<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][5,i]
fingertip[j]<-matrix.list[[j]][3,i]
nonfingertip[j]<-matrix.list[[j]][2,i]
fomites[j]<-matrix.list[[j]][1,i]
if(i==lengthsim){
doseB[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
fingertip.mean[i]<-mean(fingertip)
fingertip.sd[i]<-sd(fingertip)
nonfingertip.mean[i]<-mean(nonfingertip)
nonfingertip.sd[i]<-sd(nonfingertip)
fomites.mean[i]<-mean(fomites)
fomites.sd[i]<-sd(fomites)
}
means<-c(mucous.mean,fingertip.mean,nonfingertip.mean,fomites.mean)
sd<-c(mucous.sd,fingertip.sd,nonfingertip.sd,fomites.sd)
state<-c(rep("mucous membranes",lengthsim),rep("fingertip hand area",lengthsim),
rep("non-fingertip hand area",lengthsim),rep("fomites",lengthsim))
time<-rep(0:(lengthsim-1),4)
frame.model.B<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model B")
#----------------MODEL C---------------------------------------
sim.function(type="primary",model="C",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
hands<-rep(NA,iter)
hands.mean<-rep(NA,lengthsim)
hands.sd<-rep(NA,lengthsim)
smallfomite<-rep(NA,iter)
smallfomite.mean<-rep(NA,lengthsim)
smallfomite.sd<-rep(NA,lengthsim)
largefomite<-rep(NA,iter)
largefomite.mean<-rep(NA,lengthsim)
largefomite.sd<-rep(NA,lengthsim)
doseC<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][5,i]
hands[j]<-matrix.list[[j]][3,i]
smallfomite[j]<-matrix.list[[j]][1,i]
    largefomite[j]<-matrix.list[[j]][2,i]
if(i==lengthsim){
doseC[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
hands.mean[i]<-mean(hands)
hands.sd[i]<-sd(hands)
smallfomite.mean[i]<-mean(smallfomite)
smallfomite.sd[i]<-sd(smallfomite)
largefomite.mean[i]<-mean(largefomite)
largefomite.sd[i]<-sd(largefomite)
}
largefomite.conc<-rep(NA,iter)
smallfomite.conc<-rep(NA,iter)
mucous.max<-rep(NA,iter)
for(j in 1:iter){
mucous.max[j]<-max(matrix.list[[j]][5,])
smallfomite.conc[j]<-matrix.list[[j]][1,1]
largefomite.conc[j]<-matrix.list[[j]][2,1]
}
means<-c(mucous.mean,hands.mean,
smallfomite.mean,largefomite.mean)
sd<-c(mucous.sd,hands.sd,
smallfomite.sd,largefomite.sd)
state<-c(rep("mucous membranes",lengthsim),rep("hands",lengthsim),
rep("small fomite",lengthsim),rep("large fomite",lengthsim))
time<-rep(0:(lengthsim-1),4)
frame.model.C<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model C")
frame.ratio<-data.frame(mucousmax=mucous.max,smallfomite.conc=smallfomite.conc,
largefomite.conc=largefomite.conc)
ggplot(frame.ratio)+geom_point(aes(x=smallfomite.conc/200,y=mucousmax))+
scale_y_continuous(trans="log10")
#----------------MODEL D---------------------------------------
sim.function(type="primary",model="D",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
fingertip<-rep(NA,iter)
fingertip.mean<-rep(NA,lengthsim)
fingertip.sd<-rep(NA,lengthsim)
nonfingertip<-rep(NA,iter)
nonfingertip.mean<-rep(NA,lengthsim)
nonfingertip.sd<-rep(NA,lengthsim)
smallfomite<-rep(NA,iter)
smallfomite.mean<-rep(NA,lengthsim)
smallfomite.sd<-rep(NA,lengthsim)
largefomite<-rep(NA,iter)
largefomite.mean<-rep(NA,lengthsim)
largefomite.sd<-rep(NA,lengthsim)
doseD<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][6,i]
fingertip[j]<-matrix.list[[j]][4,i]
nonfingertip[j]<-matrix.list[[j]][3,i]
smallfomite[j]<-matrix.list[[j]][1,i]
    largefomite[j]<-matrix.list[[j]][2,i]
if(i==lengthsim){
doseD[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
fingertip.mean[i]<-mean(fingertip)
fingertip.sd[i]<-sd(fingertip)
nonfingertip.mean[i]<-mean(nonfingertip)
nonfingertip.sd[i]<-sd(nonfingertip)
smallfomite.mean[i]<-mean(smallfomite)
smallfomite.sd[i]<-sd(smallfomite)
largefomite.mean[i]<-mean(largefomite)
largefomite.sd[i]<-sd(largefomite)
}
largefomite.conc<-rep(NA,iter)
smallfomite.conc<-rep(NA,iter)
mucous.max<-rep(NA,iter)
for(j in 1:iter){
mucous.max[j]<-max(matrix.list[[j]][6,])
smallfomite.conc[j]<-matrix.list[[j]][1,1]
largefomite.conc[j]<-matrix.list[[j]][2,1]
}
means<-c(mucous.mean,fingertip.mean,nonfingertip.mean,
smallfomite.mean,largefomite.mean)
sd<-c(mucous.sd,fingertip.sd,nonfingertip.sd,
smallfomite.sd,largefomite.sd)
state<-c(rep("mucous membranes",lengthsim),rep("fingertip hand area",lengthsim),
rep("non-fingertip hand area",lengthsim),rep("small fomite",lengthsim),
rep("large fomite",lengthsim))
time<-rep(0:(lengthsim-1),5)
frame.model.D<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model D")
frame.ratio<-data.frame(mucousmax=mucous.max,smallfomite.conc=smallfomite.conc,
largefomite.conc=largefomite.conc)
ggplot(frame.ratio)+geom_point(aes(x=smallfomite.conc/200,y=mucousmax))+
scale_y_continuous(trans="log10")
#-------------------------frame all---------------------------------------------------------------
frame.all<-rbind(frame.model.A,frame.model.B,frame.model.C,frame.model.D)
#windows()
#ggplot(frame.all)+geom_line(aes(x=time,y=means,group=state,color=state))+
# geom_ribbon(aes(x=time,ymin=means-sd,ymax=means+sd,group=state,fill=state),alpha=0.3)+
# scale_y_continuous(trans="log10")+
# #scale_x_continuous(trans="log10")+
# scale_y_continuous(trans="log10")+
# scale_fill_discrete(name="")+
# scale_color_discrete(name="")+
# facet_wrap(~model)+
# theme_pubr()+
# theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16))
#windows()
A<-ggplot(frame.all[frame.all$state=="mucous membranes",])+geom_line(aes(x=time*timestep,y=means,group=model,color=model))+
geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=model,fill=model),alpha=0.3)+
scale_y_continuous(trans="log10",name="Dose (# Viral Particles)")+
scale_x_continuous(name="Time (min)")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
scale_color_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
theme_pubr()+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16),strip.text=element_text(size=16))
#windows()
B<-ggplot(frame.all[frame.all$state=="hands"|frame.all$state=="non-fingertip hand area" | frame.all$state=="fingertip hand area",])+
geom_line(aes(x=time*timestep,y=means,group=interaction(model,state),color=state))+
geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=interaction(model,state),fill=state),alpha=0.3)+
#geom_ribbon(aes(x=time*timestep,ymin=means-sd,ymax=means+sd,group=interaction(model,state),fill=state),alpha=0.3)+
scale_y_continuous(trans="log10",name=expression("# Viral Particles/cm"^2))+
scale_x_continuous(name="Time (min)")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
scale_color_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
facet_wrap(~model,nrow=1)+
theme_pubr()+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16),strip.text=element_text(size=16))
#windows()
#ggplot(frame.model.B[frame.model.B$state=="hands"|frame.model.B$state=="non-fingertip hand area" | frame.model.B$state=="fingertip hand area",])+
# geom_line(aes(x=time*timestep,y=means,group=state,color=state))+
# geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=state,fill=state),alpha=0.3)+
# scale_y_continuous(trans="log10")
#windows()
#ggplot(frame.model.B[frame.model.C$state=="hands"|frame.model.C$state=="non-fingertip hand area" | frame.model.C$state=="fingertip hand area",])+
# geom_line(aes(x=time*timestep,y=means,group=state,color=state))+
# geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=state,fill=state),alpha=0.3)+
# scale_y_continuous(trans="log10")
#windows()
frame.all$state[frame.all$state=="small fomite"]<-"small env surf"
frame.all$state[frame.all$state=="large fomite"]<-"large env surf"
frame.all$state[frame.all$state=="fomites"]<-"combined env surf"
C<-ggplot(frame.all[frame.all$state=="small env surf"|frame.all$state=="large env surf" | frame.all$state=="combined env surf",])+
geom_line(aes(x=time*timestep,y=means,group=interaction(model,state),color=state))+
geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=interaction(model,state),fill=state),alpha=0.3)+
#geom_ribbon(aes(x=time*timestep,ymin=means-sd,ymax=means+sd,group=interaction(model,state),fill=state),alpha=0.3)+
scale_y_continuous(trans="log10",name=expression("# Viral Particles/cm"^2))+
scale_x_continuous(name="Time (min)")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC"))+
scale_color_manual(name="",values=c("#3333FF","#FF3311","#00CCCC"))+
theme_pubr()+
facet_wrap(~model)+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16),strip.text=element_text(size=16))
windows()
C
st=format(Sys.time(), "%Y-%m-%d")
filename<-paste("frameall_",st, ".csv", sep = "")
write.csv(frame.all,filename)
frame.all2<-data.frame(dose=c(doseA,doseB,doseC,doseD),
model=c(rep("Model A",iter),rep("Model B",iter),rep("Model C",iter),rep("Model D",iter)))
my_comparisons<-list(c("Model A","Model B"),
c("Model B","Model C"),
c("Model A","Model C"),
c("Model A","Model D"),
c("Model B","Model D"),
c("Model C"),"Model D")
frame.all3<-data.frame(model=c("Model A","Model B","Model C","Model D"),
meandose=c(mean(doseA),mean(doseB),mean(doseC),mean(doseD)))
windows()
ggplot(frame.all2)+geom_violin(aes(x=model,y=dose,fill=model),draw_quantiles=c(0.25,0.5,0.75),alpha=0.2)+
geom_point(data=frame.all3,aes(x=model,y=meandose,fill=model),size=3)+
scale_y_continuous(trans="log10",name="Dose (# of viral particles)")+
scale_x_discrete(name="")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
theme_pubr()+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text = element_text(size=16),legend.position='none')
signif(summary(frame.all2$dose[frame.all2$model=="Model A"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model A"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model A"]),2)
signif(summary(frame.all2$dose[frame.all2$model=="Model B"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model B"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model B"]),2)
signif(summary(frame.all2$dose[frame.all2$model=="Model C"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model C"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model C"]),2)
signif(summary(frame.all2$dose[frame.all2$model=="Model D"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model D"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model D"]),2)
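# Equivalent one-pass summary sketch of the statistics above (not run; assumes dplyr is
# available in the session):
# frame.all2 %>% dplyr::group_by(model) %>%
#   dplyr::summarise(mean = mean(dose), median = median(dose), sd = sd(dose), IQR = IQR(dose))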
|
/main_code.R
|
permissive
|
awilson12/dilution_model_comparison
|
R
| false | false | 13,600 |
r
|
require(truncdist)
require(ggplot2)
require(ggpubr)
require(triangle)
#clear environment
rm(list = ls())
iter<-5000
timestep<-0.001
duration<-20
set.seed(30) #set after checking variabilty from one to another
#with 5,000 iterations and 0.001 timestep
source('simulation_code_dilution_v2.R')
#----------------MODEL A--------------------------------------------
sim.function(type="primary",model="A",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
hands<-rep(NA,iter)
hands.mean<-rep(NA,lengthsim)
hands.sd<-rep(NA,lengthsim)
fomites<-rep(NA,iter)
fomites.mean<-rep(NA,lengthsim)
fomites.sd<-rep(NA,lengthsim)
doseA<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][4,i]
hands[j]<-matrix.list[[j]][2,i]
fomites[j]<-matrix.list[[j]][1,i]
if(i==lengthsim){
doseA[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
hands.mean[i]<-mean(hands)
hands.sd[i]<-sd(hands)
fomites.mean[i]<-mean(fomites)
fomites.sd[i]<-sd(fomites)
}
means<-c(mucous.mean,hands.mean,fomites.mean)
sd<-c(mucous.sd,hands.sd,fomites.sd)
state<-c(rep("mucous membranes",lengthsim),rep("hands",lengthsim),
rep("fomites",lengthsim))
time<-rep(0:(lengthsim-1),3)
frame.model.A<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model A")
#--------------- MODEL B---------------------------------------------
sim.function(type="primary",model="B",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
fingertip<-rep(NA,iter)
fingertip.mean<-rep(NA,lengthsim)
fingertip.sd<-rep(NA,lengthsim)
nonfingertip<-rep(NA,iter)
nonfingertip.mean<-rep(NA,lengthsim)
nonfingertip.sd<-rep(NA,lengthsim)
fomites<-rep(NA,iter)
fomites.mean<-rep(NA,lengthsim)
fomites.sd<-rep(NA,lengthsim)
doseB<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][5,i]
fingertip[j]<-matrix.list[[j]][3,i]
nonfingertip[j]<-matrix.list[[j]][2,i]
fomites[j]<-matrix.list[[j]][1,i]
if(i==lengthsim){
doseB[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
fingertip.mean[i]<-mean(fingertip)
fingertip.sd[i]<-sd(fingertip)
nonfingertip.mean[i]<-mean(nonfingertip)
nonfingertip.sd[i]<-sd(nonfingertip)
fomites.mean[i]<-mean(fomites)
fomites.sd[i]<-sd(fomites)
}
means<-c(mucous.mean,fingertip.mean,nonfingertip.mean,fomites.mean)
sd<-c(mucous.sd,fingertip.sd,nonfingertip.sd,fomites.sd)
state<-c(rep("mucous membranes",lengthsim),rep("fingertip hand area",lengthsim),
rep("non-fingertip hand area",lengthsim),rep("fomites",lengthsim))
time<-rep(0:(lengthsim-1),4)
frame.model.B<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model B")
#----------------MODEL C---------------------------------------
sim.function(type="primary",model="C",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
hands<-rep(NA,iter)
hands.mean<-rep(NA,lengthsim)
hands.sd<-rep(NA,lengthsim)
smallfomite<-rep(NA,iter)
smallfomite.mean<-rep(NA,lengthsim)
smallfomite.sd<-rep(NA,lengthsim)
largefomite<-rep(NA,iter)
largefomite.mean<-rep(NA,lengthsim)
largefomite.sd<-rep(NA,lengthsim)
doseC<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][5,i]
hands[j]<-matrix.list[[j]][3,i]
smallfomite[j]<-matrix.list[[j]][1,i]
    largefomite[j]<-matrix.list[[j]][2,i]
if(i==lengthsim){
doseC[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
hands.mean[i]<-mean(hands)
hands.sd[i]<-sd(hands)
smallfomite.mean[i]<-mean(smallfomite)
smallfomite.sd[i]<-sd(smallfomite)
largefomite.mean[i]<-mean(largefomite)
largefomite.sd[i]<-sd(largefomite)
}
largefomite.conc<-rep(NA,iter)
smallfomite.conc<-rep(NA,iter)
mucous.max<-rep(NA,iter)
for(j in 1:iter){
mucous.max[j]<-max(matrix.list[[j]][5,])
smallfomite.conc[j]<-matrix.list[[j]][1,1]
largefomite.conc[j]<-matrix.list[[j]][2,1]
}
means<-c(mucous.mean,hands.mean,
smallfomite.mean,largefomite.mean)
sd<-c(mucous.sd,hands.sd,
smallfomite.sd,largefomite.sd)
state<-c(rep("mucous membranes",lengthsim),rep("hands",lengthsim),
rep("small fomite",lengthsim),rep("large fomite",lengthsim))
time<-rep(0:(lengthsim-1),4)
frame.model.C<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model C")
frame.ratio<-data.frame(mucousmax=mucous.max,smallfomite.conc=smallfomite.conc,
largefomite.conc=largefomite.conc)
ggplot(frame.ratio)+geom_point(aes(x=smallfomite.conc/200,y=mucousmax))+
scale_y_continuous(trans="log10")
#----------------MODEL D---------------------------------------
sim.function(type="primary",model="D",timestep=timestep,iter=iter)
mucous<-rep(NA,iter)
mucous.mean<-rep(NA,lengthsim)
mucous.sd<-rep(NA,lengthsim)
fingertip<-rep(NA,iter)
fingertip.mean<-rep(NA,lengthsim)
fingertip.sd<-rep(NA,lengthsim)
nonfingertip<-rep(NA,iter)
nonfingertip.mean<-rep(NA,lengthsim)
nonfingertip.sd<-rep(NA,lengthsim)
smallfomite<-rep(NA,iter)
smallfomite.mean<-rep(NA,lengthsim)
smallfomite.sd<-rep(NA,lengthsim)
largefomite<-rep(NA,iter)
largefomite.mean<-rep(NA,lengthsim)
largefomite.sd<-rep(NA,lengthsim)
doseD<-rep(NA,iter)
for (i in 1:lengthsim){
for(j in 1:iter){
temp<-matrix.list[[j]]
mucous[j]<-matrix.list[[j]][6,i]
fingertip[j]<-matrix.list[[j]][4,i]
nonfingertip[j]<-matrix.list[[j]][3,i]
smallfomite[j]<-matrix.list[[j]][1,i]
    largefomite[j]<-matrix.list[[j]][2,i]
if(i==lengthsim){
doseD[j]<-mucous[j]
}
}
mucous.mean[i]<-mean(mucous)
mucous.sd[i]<-sd(mucous)
fingertip.mean[i]<-mean(fingertip)
fingertip.sd[i]<-sd(fingertip)
nonfingertip.mean[i]<-mean(nonfingertip)
nonfingertip.sd[i]<-sd(nonfingertip)
smallfomite.mean[i]<-mean(smallfomite)
smallfomite.sd[i]<-sd(smallfomite)
largefomite.mean[i]<-mean(largefomite)
largefomite.sd[i]<-sd(largefomite)
}
largefomite.conc<-rep(NA,iter)
smallfomite.conc<-rep(NA,iter)
mucous.max<-rep(NA,iter)
for(j in 1:iter){
mucous.max[j]<-max(matrix.list[[j]][6,])
smallfomite.conc[j]<-matrix.list[[j]][1,1]
largefomite.conc[j]<-matrix.list[[j]][2,1]
}
means<-c(mucous.mean,fingertip.mean,nonfingertip.mean,
smallfomite.mean,largefomite.mean)
sd<-c(mucous.sd,fingertip.sd,nonfingertip.sd,
smallfomite.sd,largefomite.sd)
state<-c(rep("mucous membranes",lengthsim),rep("fingertip hand area",lengthsim),
rep("non-fingertip hand area",lengthsim),rep("small fomite",lengthsim),
rep("large fomite",lengthsim))
time<-rep(0:(lengthsim-1),5)
frame.model.D<-data.frame(means=means,sd=sd,state=state,time=time,
model="Model D")
frame.ratio<-data.frame(mucousmax=mucous.max,smallfomite.conc=smallfomite.conc,
largefomite.conc=largefomite.conc)
ggplot(frame.ratio)+geom_point(aes(x=smallfomite.conc/200,y=mucousmax))+
scale_y_continuous(trans="log10")
#-------------------------frame all---------------------------------------------------------------
frame.all<-rbind(frame.model.A,frame.model.B,frame.model.C,frame.model.D)
#windows()
#ggplot(frame.all)+geom_line(aes(x=time,y=means,group=state,color=state))+
# geom_ribbon(aes(x=time,ymin=means-sd,ymax=means+sd,group=state,fill=state),alpha=0.3)+
# scale_y_continuous(trans="log10")+
# #scale_x_continuous(trans="log10")+
# scale_y_continuous(trans="log10")+
# scale_fill_discrete(name="")+
# scale_color_discrete(name="")+
# facet_wrap(~model)+
# theme_pubr()+
# theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16))
#windows()
A<-ggplot(frame.all[frame.all$state=="mucous membranes",])+geom_line(aes(x=time*timestep,y=means,group=model,color=model))+
geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=model,fill=model),alpha=0.3)+
scale_y_continuous(trans="log10",name="Dose (# Viral Particles)")+
scale_x_continuous(name="Time (min)")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
scale_color_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
theme_pubr()+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16),strip.text=element_text(size=16))
#windows()
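# Plot B: hand-surface concentrations over time (combined hands vs. fingertip /
# non-fingertip areas) with approximate 95% confidence ribbons, faceted by model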
B<-ggplot(frame.all[frame.all$state=="hands"|frame.all$state=="non-fingertip hand area" | frame.all$state=="fingertip hand area",])+
geom_line(aes(x=time*timestep,y=means,group=interaction(model,state),color=state))+
geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=interaction(model,state),fill=state),alpha=0.3)+
#geom_ribbon(aes(x=time*timestep,ymin=means-sd,ymax=means+sd,group=interaction(model,state),fill=state),alpha=0.3)+
scale_y_continuous(trans="log10",name=expression("# Viral Particles/cm"^2))+
scale_x_continuous(name="Time (min)")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
scale_color_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
facet_wrap(~model,nrow=1)+
theme_pubr()+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16),strip.text=element_text(size=16))
#windows()
#ggplot(frame.model.B[frame.model.B$state=="hands"|frame.model.B$state=="non-fingertip hand area" | frame.model.B$state=="fingertip hand area",])+
# geom_line(aes(x=time*timestep,y=means,group=state,color=state))+
# geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=state,fill=state),alpha=0.3)+
# scale_y_continuous(trans="log10")
#windows()
#ggplot(frame.model.B[frame.model.C$state=="hands"|frame.model.C$state=="non-fingertip hand area" | frame.model.C$state=="fingertip hand area",])+
# geom_line(aes(x=time*timestep,y=means,group=state,color=state))+
# geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=state,fill=state),alpha=0.3)+
# scale_y_continuous(trans="log10")
#windows()
frame.all$state[frame.all$state=="small fomite"]<-"small env surf"
frame.all$state[frame.all$state=="large fomite"]<-"large env surf"
frame.all$state[frame.all$state=="fomites"]<-"combined env surf"
C<-ggplot(frame.all[frame.all$state=="small env surf"|frame.all$state=="large env surf" | frame.all$state=="combined env surf",])+
geom_line(aes(x=time*timestep,y=means,group=interaction(model,state),color=state))+
geom_ribbon(aes(x=time*timestep,ymin=means-sd*1.96/sqrt(1000),ymax=means+sd*1.96/sqrt(1000),group=interaction(model,state),fill=state),alpha=0.3)+
#geom_ribbon(aes(x=time*timestep,ymin=means-sd,ymax=means+sd,group=interaction(model,state),fill=state),alpha=0.3)+
scale_y_continuous(trans="log10",name=expression("# Viral Particles/cm"^2))+
scale_x_continuous(name="Time (min)")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC"))+
scale_color_manual(name="",values=c("#3333FF","#FF3311","#00CCCC"))+
theme_pubr()+
facet_wrap(~model)+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text=element_text(size=16),strip.text=element_text(size=16))
windows()
C
st=format(Sys.time(), "%Y-%m-%d")
filename<-paste("frameall_",st, ".csv", sep = "")
write.csv(frame.all,filename)
frame.all2<-data.frame(dose=c(doseA,doseB,doseC,doseD),
model=c(rep("Model A",iter),rep("Model B",iter),rep("Model C",iter),rep("Model D",iter)))
my_comparisons<-list(c("Model A","Model B"),
c("Model B","Model C"),
c("Model A","Model C"),
c("Model A","Model D"),
c("Model B","Model D"),
c("Model C"),"Model D")
frame.all3<-data.frame(model=c("Model A","Model B","Model C","Model D"),
meandose=c(mean(doseA),mean(doseB),mean(doseC),mean(doseD)))
windows()
ggplot(frame.all2)+geom_violin(aes(x=model,y=dose,fill=model),draw_quantiles=c(0.25,0.5,0.75),alpha=0.2)+
geom_point(data=frame.all3,aes(x=model,y=meandose,fill=model),size=3)+
scale_y_continuous(trans="log10",name="Dose (# of viral particles)")+
scale_x_discrete(name="")+
scale_fill_manual(name="",values=c("#3333FF","#FF3311","#00CCCC","grey"))+
theme_pubr()+
theme(axis.text=element_text(size=16),axis.title=element_text(size=16),legend.text = element_text(size=16),legend.position='none')
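# Summary statistics (2 significant figures) for the final simulated dose under each model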
signif(summary(frame.all2$dose[frame.all2$model=="Model A"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model A"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model A"]),2)
signif(summary(frame.all2$dose[frame.all2$model=="Model B"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model B"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model B"]),2)
signif(summary(frame.all2$dose[frame.all2$model=="Model C"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model C"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model C"]),2)
signif(summary(frame.all2$dose[frame.all2$model=="Model D"]),2)
signif(IQR((frame.all2$dose[frame.all2$model=="Model D"])),2)
signif(sd(frame.all2$dose[frame.all2$model=="Model D"]),2)
|
library(fBasics)
## if your version of caret is later than this version, it won't work. So downgrade using the next two lines.
# require(devtools)
# install_version("caret", version = "6.0-76", repos = "http://cran.us.r-project.org")
library(caret)
library(MASS)
library(ggplot2)
library(plyr)
library(reshape2)
setwd("~/Dropbox/Definition_of_PCs/RegressionDefinitionMethod/ClassifierwithImputedData_July2018")
data <- read.csv("ImputedData_July2_2018.csv")
### STEP 1 - create composite.
composite.data <- data[, c(1, 4,5,6,7)]
comp <- composite.data[complete.cases(composite.data),]
# remove subject with WA score of 0
comp$wj3.watt.raw[comp$wj3.watt.raw == 0] <- NA
comp <- comp[complete.cases(comp),]
#check distribution of composite parts
#dagoTest(comp$wj3.watt.raw)
#dagoTest(comp$wj3.wid.raw)
#dagoTest(comp$towre.w.ipm)
#dagoTest(comp$towre.nw.ipm)
#They're all significant except TOWRE real words, so let's center/scale/transform
#prepare to center, scale, and (if necessary) transform predictor variables
#vars_tf <- c("wj3.watt.raw", "towre.nw.ipm", "wj3.wid.raw")
#pp_md_tf <- preProcess(comp[,vars_tf], method = c("center", "scale", "YeoJohnson"), na.remove=T)
#check the lambda values associated with the predictors
#pp_md_tf$yj$wj3.watt.raw$lambda
#pp_md_tf$yj$wj3.wid.raw$lambda
# pp_md_tf$yj$towre.w.ipm$lambda
#pp_md_tf$yj$towre.nw.ipm$lambda
#execute centering, scaling, tranforming, etc.
#tf_data <- predict(pp_md_tf, comp[,vars_tf])
#re-check distribution
#dagoTest(tf_data$wj3.watt.raw)
#dagoTest(tf_data$wj3.wid.raw)
# dagoTest(tf_data$towre.w.ipm)
#dagoTest(tf_data$towre.nw.ipm)
# scale and center predictors
comp$wj3.watt.cs <- scale(comp$wj3.watt.raw)
comp$wj3.wid.cs <- scale(comp$wj3.wid.raw)
comp$towre.w.cs <- scale(comp$towre.w.ipm)
comp$towre.nw.cs <- scale(comp$towre.nw.ipm)
#create composite
comp$decoding_comp <- rowMeans(scale(comp[,c("wj3.watt.cs","wj3.wid.cs","towre.w.cs", "towre.nw.cs")]))
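# decoding_comp is the row-wise mean of the four standardized (z-scored) reading measures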
#put composite into data frame
#comp$composite1 <- tf_data$composite1
#scale (but don't center!) composite
comp$decoding_comp <- scale(comp$decoding_comp, center = FALSE)
# export composite values
comp.export <- comp[,-c(2:5)]
write.csv(comp.export, "decoding_composite.csv", row.names = FALSE)
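# Quick sanity check (illustrative, not part of the pipeline):
#   chk <- read.csv("decoding_composite.csv")
#   str(chk)   # expect one row per retained subject and a numeric decoding_comp column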
|
/OldAnalysisFiles/RegressionDefinitionMethod/ClassifierwithImputedData_July2018/create_composite.R
|
no_license
|
kryherd/definitionofPCs
|
R
| false | false | 2,239 |
r
|
library(fBasics)
## if your version of caret is later than this version, it won't work. So downgrade using the next two lines.
# require(devtools)
# install_version("caret", version = "6.0-76", repos = "http://cran.us.r-project.org")
library(caret)
library(MASS)
library(ggplot2)
library(plyr)
library(reshape2)
setwd("~/Dropbox/Definition_of_PCs/RegressionDefinitionMethod/ClassifierwithImputedData_July2018")
data <- read.csv("ImputedData_July2_2018.csv")
### STEP 1 - create composite.
composite.data <- data[, c(1, 4,5,6,7)]
comp <- composite.data[complete.cases(composite.data),]
# remove subject with WA score of 0
comp$wj3.watt.raw[comp$wj3.watt.raw == 0] <- NA
comp <- comp[complete.cases(comp),]
#check distribution of composite parts
#dagoTest(comp$wj3.watt.raw)
#dagoTest(comp$wj3.wid.raw)
#dagoTest(comp$towre.w.ipm)
#dagoTest(comp$towre.nw.ipm)
#They're all significant except TOWRE real words, so let's center/scale/transform
#prepare to center, scale, and (if necessary) transform predictor variables
#vars_tf <- c("wj3.watt.raw", "towre.nw.ipm", "wj3.wid.raw")
#pp_md_tf <- preProcess(comp[,vars_tf], method = c("center", "scale", "YeoJohnson"), na.remove=T)
#check the lambda values associated with the predictors
#pp_md_tf$yj$wj3.watt.raw$lambda
#pp_md_tf$yj$wj3.wid.raw$lambda
# pp_md_tf$yj$towre.w.ipm$lambda
#pp_md_tf$yj$towre.nw.ipm$lambda
#execute centering, scaling, tranforming, etc.
#tf_data <- predict(pp_md_tf, comp[,vars_tf])
#re-check distribution
#dagoTest(tf_data$wj3.watt.raw)
#dagoTest(tf_data$wj3.wid.raw)
# dagoTest(tf_data$towre.w.ipm)
#dagoTest(tf_data$towre.nw.ipm)
# scale and center predictors
comp$wj3.watt.cs <- scale(comp$wj3.watt.raw)
comp$wj3.wid.cs <- scale(comp$wj3.wid.raw)
comp$towre.w.cs <- scale(comp$towre.w.ipm)
comp$towre.nw.cs <- scale(comp$towre.nw.ipm)
#create composite
comp$decoding_comp <- rowMeans(scale(comp[,c("wj3.watt.cs","wj3.wid.cs","towre.w.cs", "towre.nw.cs")]))
#put composite into data frame
#comp$composite1 <- tf_data$composite1
#scale (but don't center!) composite
comp$decoding_comp <- scale(comp$decoding_comp, center = FALSE)
# export composite values
comp.export <- comp[,-c(2:5)]
write.csv(comp.export, "decoding_composite.csv", row.names = FALSE)
|
#### Import libraries
library(shiny)
library(plyr)
library(ggplot2)
library(reshape2)
library(gridExtra)
library(data.table)
####Suppliers and DP family list in case we want to show them permanently (if dynamic, will be managed in server.r)
#vendorlist<-fread("SupplierName.txt",colClasses =c("integer",rep("character",2)))
#DPlist<-levels(fread("tableauCGvendor.csv",dec=",",colClasses =c("integer",rep("factor",5),rep("numeric",120)),stringsAsFactors=T,drop=1)$DP.Family)
#### Define UI for the application
#### fluidPage is a predefined page layout
shinyUI(
fluidPage(
#tags$head(tags$link(rel = "icon", href = paste0("//calendar.google.com/googlecalendar/images/favicon_v2014_",substr(file.info(list.files(pattern="tableauCGvendor"))$mtime,9,10),".ico"))
if (sum(grep("mgollety",getwd()))==1){tags$head(tags$link(rel = "icon", href = "//static8.viadeo-static.com/QIi8iNTEPkLurTWxUPblTgvKV6I=/40x40/smart/member/00224jofzgj9hzxy?ts=1410727408000"))
}else{
tags$head(tags$link(rel = "icon", href = paste0("//calendar.google.com/googlecalendar/images/favicon_v2014_",substr(file.info(list.files(pattern="tableauCGvendor"))$mtime,9,10),".ico"))
)},
HTML(paste0("<TITLE>Shiny DP tool - last data update : ",format(file.info(list.files(pattern="tableauCGvendor"))$mtime,"%a %d %b %Y %X"),"</TITLE>")),
# Application title
#titlePanel("Line selector"),
# Sidebar for selections or user inputs
sidebarLayout(
sidebarPanel(
####Title for the window
####User inputs
uiOutput("TypeSelector"),
#selectInput("type", label=("Type"),choices=c("Value","Quantity"),selected = "Value"),
uiOutput("DivSelector"),
uiOutput("PLSelector"),
uiOutput("DPSelector"),
uiOutput("CustGroup"),
uiOutput("SupplierSelector"),
####Inputs in case we want to show them permanently (if dynamic, will be managed in server.r)
#selectInput("division", label=("Division"),c("All"="All","VSF"="D02","VSAO"="D03","VSE"="D05","VSI"="D06","VSG"="D07","VSUK"="D08","VSBE"="D09","VST"="D10","VSEE"="D13","VSR"="D20")),
#selectInput("ProdLine", label=("PL"),c("All"="All","BRA"="BRA","ELE"="ELE","FLT"="FLT","IGN"="IGN","POP"="POP","VCC"="VCC","VEC"="VEC","VES"="VES","VLS"="VLS","VSDS"="VSD","VSS"="VSS","VTR"="VTR","VWS"="VWS")),
#selectInput("DPfamily", label=("DPfamily"),c("All",DPlist)),
#selectInput("DP_PL", label=("DP/PL"),c("DP"="DP","PL"="PL","AlertDP"="ADP","AlertCG"="ACG")),
#selectInput("Supplier", label=("Supplier"),c("All",vendorlist$Supplier)),
####
      ####this numeric selector selects a line in the sorted aggregated data table (1 is the biggest 12m TO)
numericInput("num", label =("Numeric input"), value = 1),
####Checkboxes to define aggregation level
checkboxGroupInput('grpby', 'Group by:',c("Div"="Div","PL"="Product.Line","DP family"="DP.Family","CustGr"="CustGr","Supplier"="Supplier"), selected = c("Product.Line","Div")),
####Text output to anticipate following lines in the numeric input
uiOutput("divnameinfo"),
uiOutput("htmlinfo"),
####Checkboxes to add data labels on the graph
checkboxGroupInput('labels', 'Labels:',c("hist tot"="hist_tot","last year"="last_year","new DP"="new_DP","DP M-1"="DPM1",
"hist OS"="hist_OS","hist DD"="hist_DD","DP OS"="DP_OS","DP DD"="DP_DD","cumul"="cumul"),selected=c("cumul")),
checkboxGroupInput('lines', 'Lines:',c("hist tot"="hist_tot","last year"="last_year","new DP"="new_DP","DP M-1"="DPM1")
,selected=c("hist_tot","last_year","new_DP","DPM1")),
checkboxGroupInput('infos', 'Infos:',c("Display ?"="display")),
checkboxGroupInput('IVSexcl', 'Excl IVS ?:',c("Excl. IVS"="IVSexcl"),selected=c("IVSexcl")),
####define style for current chosen value (output divname and divname2)
tags$head(tags$style("#divname{color: red;
font-style: italic;
}","#divnameqty2{color: red;
font-style: italic;
}","#divnameSemLeft{color: red;
font-style: italic;
}","#divname2{
color: #48ca3b;
font-size: 110%;
font-weight: bold;
text-align: center;
}","#divnameqty{
color: #48ca3b;
font-size: 110%;
font-weight: bold;
text-align: center;
}","#divnameSemBw{
color: #48ca3b;
font-size: 110%;
font-weight: bold;
text-align: center;
}"
))
,width = 2),
#### Show the 2 plots and generate hover, click and brush inputs
mainPanel(
uiOutput("showplot1")
#,verbatimTextOutput("info")
,uiOutput("infotype1")
,uiOutput("divinfo")
,uiOutput("showplot2")
#,verbatimTextOutput("info2")
,uiOutput("infotype2")
####Uncomment to show the data below graphs
#,dataTableOutput("data0")
,width = 10
)
)))
|
/ui.R
|
no_license
|
SinHom/shiny
|
R
| false | false | 5,591 |
r
|
#### Import libraries
library(shiny)
library(plyr)
library(ggplot2)
library(reshape2)
library(gridExtra)
library(data.table)
####Suppliers and DP family list in case we want to show them permanently (if dynamic, will be managed in server.r)
#vendorlist<-fread("SupplierName.txt",colClasses =c("integer",rep("character",2)))
#DPlist<-levels(fread("tableauCGvendor.csv",dec=",",colClasses =c("integer",rep("factor",5),rep("numeric",120)),stringsAsFactors=T,drop=1)$DP.Family)
#### Define UI for the application
#### fluidPage is a predefined page layout
shinyUI(
fluidPage(
#tags$head(tags$link(rel = "icon", href = paste0("//calendar.google.com/googlecalendar/images/favicon_v2014_",substr(file.info(list.files(pattern="tableauCGvendor"))$mtime,9,10),".ico"))
if (sum(grep("mgollety",getwd()))==1){tags$head(tags$link(rel = "icon", href = "//static8.viadeo-static.com/QIi8iNTEPkLurTWxUPblTgvKV6I=/40x40/smart/member/00224jofzgj9hzxy?ts=1410727408000"))
}else{
tags$head(tags$link(rel = "icon", href = paste0("//calendar.google.com/googlecalendar/images/favicon_v2014_",substr(file.info(list.files(pattern="tableauCGvendor"))$mtime,9,10),".ico"))
)},
HTML(paste0("<TITLE>Shiny DP tool - last data update : ",format(file.info(list.files(pattern="tableauCGvendor"))$mtime,"%a %d %b %Y %X"),"</TITLE>")),
# Application title
#titlePanel("Line selector"),
# Sidebar for selections or user inputs
sidebarLayout(
sidebarPanel(
####Title for the window
####User inputs
uiOutput("TypeSelector"),
#selectInput("type", label=("Type"),choices=c("Value","Quantity"),selected = "Value"),
uiOutput("DivSelector"),
uiOutput("PLSelector"),
uiOutput("DPSelector"),
uiOutput("CustGroup"),
uiOutput("SupplierSelector"),
####Inputs in case we want to show them permanently (if dynamic, will be managed in server.r)
#selectInput("division", label=("Division"),c("All"="All","VSF"="D02","VSAO"="D03","VSE"="D05","VSI"="D06","VSG"="D07","VSUK"="D08","VSBE"="D09","VST"="D10","VSEE"="D13","VSR"="D20")),
#selectInput("ProdLine", label=("PL"),c("All"="All","BRA"="BRA","ELE"="ELE","FLT"="FLT","IGN"="IGN","POP"="POP","VCC"="VCC","VEC"="VEC","VES"="VES","VLS"="VLS","VSDS"="VSD","VSS"="VSS","VTR"="VTR","VWS"="VWS")),
#selectInput("DPfamily", label=("DPfamily"),c("All",DPlist)),
#selectInput("DP_PL", label=("DP/PL"),c("DP"="DP","PL"="PL","AlertDP"="ADP","AlertCG"="ACG")),
#selectInput("Supplier", label=("Supplier"),c("All",vendorlist$Supplier)),
####
      ####this numeric selector selects a line in the sorted aggregated data table (1 is the biggest 12m TO)
numericInput("num", label =("Numeric input"), value = 1),
####Checkboxes to define aggregation level
checkboxGroupInput('grpby', 'Group by:',c("Div"="Div","PL"="Product.Line","DP family"="DP.Family","CustGr"="CustGr","Supplier"="Supplier"), selected = c("Product.Line","Div")),
####Text output to anticipate following lines in the numeric input
uiOutput("divnameinfo"),
uiOutput("htmlinfo"),
####Checkboxes to add data labels on the graph
checkboxGroupInput('labels', 'Labels:',c("hist tot"="hist_tot","last year"="last_year","new DP"="new_DP","DP M-1"="DPM1",
"hist OS"="hist_OS","hist DD"="hist_DD","DP OS"="DP_OS","DP DD"="DP_DD","cumul"="cumul"),selected=c("cumul")),
checkboxGroupInput('lines', 'Lines:',c("hist tot"="hist_tot","last year"="last_year","new DP"="new_DP","DP M-1"="DPM1")
,selected=c("hist_tot","last_year","new_DP","DPM1")),
checkboxGroupInput('infos', 'Infos:',c("Display ?"="display")),
checkboxGroupInput('IVSexcl', 'Excl IVS ?:',c("Excl. IVS"="IVSexcl"),selected=c("IVSexcl")),
####define style for current chosen value (output divname and divname2)
tags$head(tags$style("#divname{color: red;
font-style: italic;
}","#divnameqty2{color: red;
font-style: italic;
}","#divnameSemLeft{color: red;
font-style: italic;
}","#divname2{
color: #48ca3b;
font-size: 110%;
font-weight: bold;
text-align: center;
}","#divnameqty{
color: #48ca3b;
font-size: 110%;
font-weight: bold;
text-align: center;
}","#divnameSemBw{
color: #48ca3b;
font-size: 110%;
font-weight: bold;
text-align: center;
}"
))
,width = 2),
#### Show the 2 plots and generate hover, click and brush inputs
mainPanel(
uiOutput("showplot1")
#,verbatimTextOutput("info")
,uiOutput("infotype1")
,uiOutput("divinfo")
,uiOutput("showplot2")
#,verbatimTextOutput("info2")
,uiOutput("infotype2")
####Uncomment to show the data below graphs
#,dataTableOutput("data0")
,width = 10
)
)))
|
coveaProcessing <- function() {
readSeqsIntoDf()
library(readr)
seqDB <-
read_csv("/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF.txt")
SSDB <-
read_csv("/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF_SS.txt")
editAlignment(seqDB, SSDB)
}
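# Illustrative call (all input/output paths are hard-coded above):
#   coveaProcessing()   # parse the .covea alignment, filter it, and write the
#                       # edited .covea plus a gap-stripped .fasta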
#####################################################################################
# # of columns in the dataframe is: # of sequences + 1 CS line
# name of the columns are "CS","seqname1", "seqname2", ...
readSeqsIntoDf <- function()
{
CS <-
grep(
"#=CS +",
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea"#"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea" #
),
value = TRUE
)
CStemp <-
unlist(strsplit(CS, split = "\\s+"))
CS <- CStemp[seq(2, length(CStemp), 2)]
CS <- paste(CS, collapse = '')
CSarr <- substring(CS, seq(1, nchar(CS), 1), seq(1, nchar(CS), 1))
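  # The #=CS (consensus structure) line can be split across alignment blocks;
  # the pieces are concatenated and then split into one character per alignment column.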
# read the name of the sequences into variable "seqnames"
SQs <- grep(
"#=SQ +",
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea"#"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea" #
),
value = TRUE
)
seqnames <- character(length = length(SQs))
for (i in 1:length(SQs)) {
seqnames[i] <- unlist(strsplit(SQs[i], split = " "))[2]
}
# define the main data frame as "seqDB" and assign names to it
m <- matrix(ncol = (length(seqnames) + 1), nrow = length(CSarr))
seqDB <- as.data.frame(m)
names(seqDB) <- c(seqnames, "CS")
for (i in 1:length(seqnames)) {
pat <- paste("^", seqnames[i], " +", sep = "")
myseq <-
grep(
pattern = pat,
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea" #"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea"#
),
value = TRUE
)
temp <-
unlist(strsplit(myseq, split = "\\s+"))
myseq <- temp[seq(2, length(temp), 2)]
myseq <- paste(myseq, collapse = '')
myseqarr <-
substring(myseq, seq(1, nchar(myseq), 1), seq(1, nchar(myseq), 1))
seqDB[, i] <- myseqarr
}
seqDB$CS <- CSarr
  #_____________________ reading the #=SS lines into another data frame as SSDB ____________
  # define the SSDB data frame (same shape as seqDB) and fill it below
SSDB = seqDB
SSs <- grep(
"#=SS +",
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea"#"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea"#
),
value = TRUE
)
# number of sections is CStemp/2 = 3
numsec <- length(CStemp) / 2
end <- length(SSs) / numsec
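  # #=SS lines are interleaved block by block: entries i, i + end, i + 2*end
  # belong to the same sequence, so they are pasted back together here.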
for (i in 1:end) {
if (numsec == 3)
myseq <-
paste(unlist(strsplit(SSs[i], split = "\\s+"))[2],
unlist(strsplit(SSs[end + i], split = "\\s+"))[2],
unlist(strsplit(SSs[(2 * end) + i], split = "\\s+"))[2],
sep = "")
if (numsec == 2)
myseq <-
paste(unlist(strsplit(SSs[i], split = "\\s+"))[2], unlist(strsplit(SSs[end +
i], split = "\\s+"))[2], sep = "")
myseqarr <-
substring(myseq, seq(1, nchar(myseq), 1), seq(1, nchar(myseq), 1))
SSDB[, i] <- myseqarr
}
library(readr)
write_csv(seqDB, path = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF.txt")
write_csv(SSDB, path = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF_SS.txt")
}
#####################################################################################
writeCovea <- function(SSDB, seqDB) {
coveaseqs <- character(length = ncol(seqDB) - 1)
coveass <- character(length = ncol(seqDB) - 1)
seqDB <- seqDB[, -ncol(seqDB)]
SSDB <- SSDB[, -ncol(SSDB)]
for (i in 1:(length(coveaseqs))) {
# coveaseqs[i] <- paste(seqDB[, i], collapse = '')
# coveass[i] <- paste(SSDB[, i], collapse = '')
print(i)
coveaseqs[i] <-
paste((as.data.frame(seqDB[, i])[, 1]), collapse = '')
coveass[i] <- paste((as.data.frame(SSDB[, i])[, 1]), collapse = '')
}
mynames <- names(seqDB)[!names(seqDB) %in% "CS"]
library(gdata)
write.fwf(
data.frame(mynames,
coveaseqs,
coveass),
file = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_EditedCovea.covea",
sep = "\n",
colnames = FALSE
)
# remove "."s from sequences and write them in a fasta file to run with covea
library(Hmisc)
for (i in 1:length(coveaseqs)) {
coveaseqs[i] <- translate(coveaseqs[i], "[.]", "-")
#coveaseqs[i] <- translate(coveaseqs[i], "[n]", "-")
#coveaseqs[i] <- translate(coveaseqs[i], "[N]", "-")
}
write.fwf(
data.frame(paste(">", mynames, sep = ""),
coveaseqs),
file = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_EditedCovea.fasta",
sep = "\n",
colnames = FALSE
)
}
#####################################################################################
editAlignment <- function(seqDB, SSDB) {
  # remove alignment sites (rows) where more than 97% of sequences have a gap "." or a lowercase base
# seqDB = mydb
delpos = " "
for (i in 1:nrow(seqDB)) {
temp <-
data.frame(table(
seqDB[i, ] == "." |
seqDB[i, ] == "t" |
seqDB[i, ] == "g" | seqDB[i, ] == "c" | seqDB[i, ] == "a"
))
if (length(temp[temp$Var1 == "FALSE", 2]) != 0)
{
if (temp[temp$Var1 == "FALSE", 2] != ncol(seqDB))
{
gapperc <-
(temp[temp$Var1 == "TRUE", 2] / (temp[temp$Var1 == "TRUE", 2] + temp[temp$Var1 ==
"FALSE", 2])) * 100
if (gapperc > 97)
{
delpos = c(delpos, i)
}
}
}
else{
if (temp[temp$Var1 == "TRUE", 2] == ncol(seqDB))
delpos = c(delpos, i)
}
}
delpos <- delpos[2:length(delpos)]
delpos = as.integer(delpos)
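  # drop the high-gap positions from both tables so the sequence (seqDB) and
  # secondary-structure (SSDB) alignments stay in register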
seqDB <- seqDB[-delpos, ]
SSDB <- SSDB[-delpos, ]
cs <- paste(seqDB$CS, collapse = '')
# remove sequences with more than 8 gaps!
delpos = " "
flag=0
for (i in 1:ncol(seqDB)) {
temp <- data.frame(table(seqDB[, i] == "."))
if (length(temp[temp$Var1 == "TRUE", 2]) != 0)
if (temp[temp$Var1 == "TRUE", 2] > 8)
{
if (names(seqDB[, i]) != "CS")
delpos = c(delpos, i)
}
for (x in 1:length(unlist(seqDB[,i]))) {
if(unlist(seqDB[,i])[x]=="n" | unlist(seqDB[,i])[x]=="N" )
flag=1
}
if(flag == 1)
{
print("shit")
delpos = c(delpos, i)
flag = 0
}
}
if (length(delpos) > 1)
{
delpos <- delpos[2:length(delpos)]
delpos = as.integer(delpos)
seqDB <- seqDB[, -delpos]
SSDB <- SSDB[, -delpos]
}
# remove sites that are lowercase or . in 99% of sequences
# remove sequences with gap in their anticodon
writeCovea(SSDB, seqDB)
}
#####################################################################################
# partition the fasta file based on their functional classes
# partition the EditedCovea.fasta files based on the sourceOrg
|
/Phylpclassification/coveaProcessing.R
|
no_license
|
fhadinezhadUC/LeishGitRep
|
R
| false | false | 7,575 |
r
|
coveaProcessing <- function() {
readSeqsIntoDf()
library(readr)
seqDB <-
read_csv("/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF.txt")
SSDB <-
read_csv("/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF_SS.txt")
editAlignment(seqDB, SSDB)
}
#####################################################################################
# # of columns in the dataframe is: # of sequences + 1 CS line
# name of the columns are "CS","seqname1", "seqname2", ...
readSeqsIntoDf <- function()
{
CS <-
grep(
"#=CS +",
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea"#"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea" #
),
value = TRUE
)
CStemp <-
unlist(strsplit(CS, split = "\\s+"))
CS <- CStemp[seq(2, length(CStemp), 2)]
CS <- paste(CS, collapse = '')
CSarr <- substring(CS, seq(1, nchar(CS), 1), seq(1, nchar(CS), 1))
# read the name of the sequences into variable "seqnames"
SQs <- grep(
"#=SQ +",
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea"#"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea" #
),
value = TRUE
)
seqnames <- character(length = length(SQs))
for (i in 1:length(SQs)) {
seqnames[i] <- unlist(strsplit(SQs[i], split = " "))[2]
}
# define the main data frame as "seqDB" and assign names to it
m <- matrix(ncol = (length(seqnames) + 1), nrow = length(CSarr))
seqDB <- as.data.frame(m)
names(seqDB) <- c(seqnames, "CS")
for (i in 1:length(seqnames)) {
pat <- paste("^", seqnames[i], " +", sep = "")
myseq <-
grep(
pattern = pat,
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea" #"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea"#
),
value = TRUE
)
temp <-
unlist(strsplit(myseq, split = "\\s+"))
myseq <- temp[seq(2, length(temp), 2)]
myseq <- paste(myseq, collapse = '')
myseqarr <-
substring(myseq, seq(1, nchar(myseq), 1), seq(1, nchar(myseq), 1))
seqDB[, i] <- myseqarr
}
seqDB$CS <- CSarr
  #_____________________ reading the #=SS lines into another data frame as SSDB ____________
  # define the SSDB data frame (same shape as seqDB) and fill it below
SSDB = seqDB
SSs <- grep(
"#=SS +",
readLines(
"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/LeishmaniaHomoC.covea"#"/home/fatemeh/Leishmania_Aug2018/phyloclassification/Trytryp_genes_NoVarIntron.covea"#
),
value = TRUE
)
# number of sections is CStemp/2 = 3
numsec <- length(CStemp) / 2
end <- length(SSs) / numsec
for (i in 1:end) {
if (numsec == 3)
myseq <-
paste(unlist(strsplit(SSs[i], split = "\\s+"))[2],
unlist(strsplit(SSs[end + i], split = "\\s+"))[2],
unlist(strsplit(SSs[(2 * end) + i], split = "\\s+"))[2],
sep = "")
if (numsec == 2)
myseq <-
paste(unlist(strsplit(SSs[i], split = "\\s+"))[2], unlist(strsplit(SSs[end +
i], split = "\\s+"))[2], sep = "")
myseqarr <-
substring(myseq, seq(1, nchar(myseq), 1), seq(1, nchar(myseq), 1))
SSDB[, i] <- myseqarr
}
library(readr)
write_csv(seqDB, path = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF.txt")
write_csv(SSDB, path = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_coveaDF_SS.txt")
}
#####################################################################################
writeCovea <- function(SSDB, seqDB) {
coveaseqs <- character(length = ncol(seqDB) - 1)
coveass <- character(length = ncol(seqDB) - 1)
seqDB <- seqDB[, -ncol(seqDB)]
SSDB <- SSDB[, -ncol(SSDB)]
for (i in 1:(length(coveaseqs))) {
# coveaseqs[i] <- paste(seqDB[, i], collapse = '')
# coveass[i] <- paste(SSDB[, i], collapse = '')
print(i)
coveaseqs[i] <-
paste((as.data.frame(seqDB[, i])[, 1]), collapse = '')
coveass[i] <- paste((as.data.frame(SSDB[, i])[, 1]), collapse = '')
}
mynames <- names(seqDB)[!names(seqDB) %in% "CS"]
library(gdata)
write.fwf(
data.frame(mynames,
coveaseqs,
coveass),
file = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_EditedCovea.covea",
sep = "\n",
colnames = FALSE
)
# remove "."s from sequences and write them in a fasta file to run with covea
library(Hmisc)
for (i in 1:length(coveaseqs)) {
coveaseqs[i] <- translate(coveaseqs[i], "[.]", "-")
#coveaseqs[i] <- translate(coveaseqs[i], "[n]", "-")
#coveaseqs[i] <- translate(coveaseqs[i], "[N]", "-")
}
write.fwf(
data.frame(paste(">", mynames, sep = ""),
coveaseqs),
file = "/home/fatemeh/Leishmania_Aug2018/phyloclassification/Leishmania_HomoC/HomoTryTryp_EditedCovea.fasta",
sep = "\n",
colnames = FALSE
)
}
#####################################################################################
editAlignment <- function(seqDB, SSDB) {
  # remove alignment sites (rows) where more than 97% of sequences have a gap "." or a lowercase base
# seqDB = mydb
delpos = " "
for (i in 1:nrow(seqDB)) {
temp <-
data.frame(table(
seqDB[i, ] == "." |
seqDB[i, ] == "t" |
seqDB[i, ] == "g" | seqDB[i, ] == "c" | seqDB[i, ] == "a"
))
if (length(temp[temp$Var1 == "FALSE", 2]) != 0)
{
if (temp[temp$Var1 == "FALSE", 2] != ncol(seqDB))
{
gapperc <-
(temp[temp$Var1 == "TRUE", 2] / (temp[temp$Var1 == "TRUE", 2] + temp[temp$Var1 ==
"FALSE", 2])) * 100
if (gapperc > 97)
{
delpos = c(delpos, i)
}
}
}
else{
if (temp[temp$Var1 == "TRUE", 2] == ncol(seqDB))
delpos = c(delpos, i)
}
}
delpos <- delpos[2:length(delpos)]
delpos = as.integer(delpos)
seqDB <- seqDB[-delpos, ]
SSDB <- SSDB[-delpos, ]
cs <- paste(seqDB$CS, collapse = '')
# remove sequences with more than 8 gaps!
delpos = " "
flag=0
for (i in 1:ncol(seqDB)) {
temp <- data.frame(table(seqDB[, i] == "."))
if (length(temp[temp$Var1 == "TRUE", 2]) != 0)
if (temp[temp$Var1 == "TRUE", 2] > 8)
{
if (names(seqDB[, i]) != "CS")
delpos = c(delpos, i)
}
for (x in 1:length(unlist(seqDB[,i]))) {
if(unlist(seqDB[,i])[x]=="n" | unlist(seqDB[,i])[x]=="N" )
flag=1
}
if(flag == 1)
{
print("shit")
delpos = c(delpos, i)
flag = 0
}
}
if (length(delpos) > 1)
{
delpos <- delpos[2:length(delpos)]
delpos = as.integer(delpos)
seqDB <- seqDB[, -delpos]
SSDB <- SSDB[, -delpos]
}
# remove sites that are lowercase or . in 99% of sequences
# remove sequences with gap in their anticodon
writeCovea(SSDB, seqDB)
}
#####################################################################################
# partition the fasta file based on their functional classes
# partition the EditedCovea.fasta files based on the sourceOrg
|
# helper function to shorten list of offending rows
# to be inserted in other function reporting
output_truncate <- function(locs){
# added type conversion because Tyler had decided
# that it would be wise <#sacrasm> to pass in locs as text
if (is.character(locs)){
locs <- suppressWarnings(as.numeric(locs))
}
if (length(locs) > 100) {
paste0(paste(locs[1:100]+1, collapse=", "), "...[truncated]...")
} else {
paste(locs+1, collapse=", ")
}
}
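# Example (illustrative): output_truncate(c(0, 1, 2)) returns "1, 2, 3";
# inputs longer than 100 locations are cut off with "...[truncated]...".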
|
/R/output_truncate.R
|
no_license
|
data-steve/valiData
|
R
| false | false | 477 |
r
|
# helper function to shorten list of offending rows
# to be inserted in other function reporting
output_truncate <- function(locs){
# added type conversion because Tyler had decided
# that it would be wise <#sacrasm> to pass in locs as text
if (is.character(locs)){
locs <- suppressWarnings(as.numeric(locs))
}
if (length(locs) > 100) {
paste0(paste(locs[1:100]+1, collapse=", "), "...[truncated]...")
} else {
paste(locs+1, collapse=", ")
}
}
|
modelo = lm(height ~ weight, data=women)
predict(modelo, data.frame(weight=30))
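# Fits a simple linear regression of height on weight using R's built-in `women`
# data, then predicts height at weight = 30 (well below the observed 115-164 lb
# range, so the prediction is an extrapolation).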
|
/dados/estatistica2/women_predict.r
|
no_license
|
slackfx/learningpython
|
R
| false | false | 80 |
r
|
modelo = lm(height ~ weight, data=women)
predict(modelo, data.frame(weight=30))
|
library(postal)
### Name: fetch_mail
### Title: Fetch postage details
### Aliases: fetch_mail
### ** Examples
## Not run:
##D
##D fetch_mail(origin_zip = "90210",
##D destination_zip = "59001",
##D type = "envelope")
##D
##D
##D fetch_mail(origin_zip = "68003",
##D destination_zip = "23285",
##D pounds = 4,
##D ground_transportation_needed = TRUE,
##D type = "package",
##D shape = "rectangular",
##D show_details = TRUE)
##D
##D # Contains an invalid zip ("foobar"), which will get a "no_success" row
##D origins <- c("90210", "foobar", "59001")
##D destinations <- c("68003", "94707", "23285")
##D
##D purrr::map2_dfr(
##D origins, destinations,
##D fetch_mail,
##D type = "package"
##D )
##D
##D # A syntactically fine request, but no results are returned
##D fetch_mail(origin_zip = "04101",
##D destination_zip = "97211",
##D shipping_date = "3018-07-04", # way in the future!
##D type = "package",
##D show_details = TRUE)
##D
## End(Not run)
|
/data/genthat_extracted_code/postal/examples/fetch_mail.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,062 |
r
|
library(postal)
### Name: fetch_mail
### Title: Fetch postage details
### Aliases: fetch_mail
### ** Examples
## Not run:
##D
##D fetch_mail(origin_zip = "90210",
##D destination_zip = "59001",
##D type = "envelope")
##D
##D
##D fetch_mail(origin_zip = "68003",
##D destination_zip = "23285",
##D pounds = 4,
##D ground_transportation_needed = TRUE,
##D type = "package",
##D shape = "rectangular",
##D show_details = TRUE)
##D
##D # Contains an invalid zip ("foobar"), which will get a "no_success" row
##D origins <- c("90210", "foobar", "59001")
##D destinations <- c("68003", "94707", "23285")
##D
##D purrr::map2_dfr(
##D origins, destinations,
##D fetch_mail,
##D type = "package"
##D )
##D
##D # A syntactically fine request, but no results are returned
##D fetch_mail(origin_zip = "04101",
##D destination_zip = "97211",
##D shipping_date = "3018-07-04", # way in the future!
##D type = "package",
##D show_details = TRUE)
##D
## End(Not run)
|
\name{Cooper03}
\alias{Cooper03}
\docType{data}
\title{Selected effect sizes from Cooper et al. (2003)}
\description{
Fifty-six effect sizes from 11 districts in Cooper et al. (2003), as reported by Konstantopoulos (2011).
}
\usage{data(Cooper03)}
\details{
The variables are:
\describe{
\item{District}{District ID}
\item{Study}{Study ID}
\item{y}{Effect size}
\item{v}{Sampling variance}
\item{Year}{Year of publication}
}
}
\source{
Cooper, H., Valentine, J. C., Charlton, K., & Melson, A. (2003). The Effects of Modified School Calendars on Student Achievement and on School and Community Attitudes. \emph{Review of Educational Research}, \bold{73(1)}, 1-52. doi:10.3102/00346543073001001
}
\references{
Konstantopoulos, S. (2011). Fixed effects and variance components
estimation in three-level meta-analysis. \emph{Research Synthesis
Methods}, \bold{2}, 61-76. doi:10.1002/jrsm.35
}
\examples{
\dontrun{
data(Cooper03)
#### ML estimation method
## No predictor
summary( model1 <- meta3L(y=y, v=v, cluster=District, data=Cooper03) )
## Show all heterogeneity indices and their 95\% confidence intervals
summary( meta3L(y=y, v=v, cluster=District, data=Cooper03,
intervals.type="LB", I2=c("I2q", "I2hm", "I2am", "ICC")) )
## Year as a predictor
summary( meta3L(y=y, v=v, cluster=District, x=scale(Year, scale=FALSE),
data=Cooper03, model.name="Year as a predictor") )
## Equality of level-2 and level-3 heterogeneity
summary( model2 <- meta3L(y=y, v=v, cluster=District, data=Cooper03,
RE2.constraints="0.2*EqTau2",
RE3.constraints="0.2*EqTau2",
model.name="Equal Tau2") )
## Compare model2 vs. model1
anova(model1, model2)
#### REML estimation method
## No predictor
summary( reml3L(y=y, v=v, cluster=District, data=Cooper03) )
## Level-2 and level-3 variances are constrained equally
summary( reml3L(y=y, v=v, cluster=District, data=Cooper03,
RE.equal=TRUE, model.name="Equal Tau2") )
## Year as a predictor
summary( reml3L(y=y, v=v, cluster=District, x=scale(Year, scale=FALSE),
data=Cooper03, intervals.type="LB") )
## Handling missing covariates with FIML
## Create 20/56 MCAR data in Year
set.seed(10000)
Year_MCAR <- Cooper03$Year
Year_MCAR[sample(56, 20)] <- NA
summary( meta3LFIML(y=y, v=v, cluster=District, x2=scale(Year_MCAR, scale=FALSE),
data=Cooper03, model.name="NA in Year_MCAR") )
}
}
\keyword{datasets}
|
/man/Cooper03.Rd
|
no_license
|
mikewlcheung/metasem
|
R
| false | false | 2,521 |
rd
|
\name{Cooper03}
\alias{Cooper03}
\docType{data}
\title{Selected effect sizes from Cooper et al. (2003)}
\description{
Fifty-six effect sizes from 11 districts in Cooper et al. (2003), as reported by Konstantopoulos (2011).
}
\usage{data(Cooper03)}
\details{
The variables are:
\describe{
\item{District}{District ID}
\item{Study}{Study ID}
\item{y}{Effect size}
\item{v}{Sampling variance}
\item{Year}{Year of publication}
}
}
\source{
Cooper, H., Valentine, J. C., Charlton, K., & Melson, A. (2003). The Effects of Modified School Calendars on Student Achievement and on School and Community Attitudes. \emph{Review of Educational Research}, \bold{73(1)}, 1-52. doi:10.3102/00346543073001001
}
\references{
Konstantopoulos, S. (2011). Fixed effects and variance components
estimation in three-level meta-analysis. \emph{Research Synthesis
Methods}, \bold{2}, 61-76. doi:10.1002/jrsm.35
}
\examples{
\dontrun{
data(Cooper03)
#### ML estimation method
## No predictor
summary( model1 <- meta3L(y=y, v=v, cluster=District, data=Cooper03) )
## Show all heterogeneity indices and their 95\% confidence intervals
summary( meta3L(y=y, v=v, cluster=District, data=Cooper03,
intervals.type="LB", I2=c("I2q", "I2hm", "I2am", "ICC")) )
## Year as a predictor
summary( meta3L(y=y, v=v, cluster=District, x=scale(Year, scale=FALSE),
data=Cooper03, model.name="Year as a predictor") )
## Equality of level-2 and level-3 heterogeneity
summary( model2 <- meta3L(y=y, v=v, cluster=District, data=Cooper03,
RE2.constraints="0.2*EqTau2",
RE3.constraints="0.2*EqTau2",
model.name="Equal Tau2") )
## Compare model2 vs. model1
anova(model1, model2)
#### REML estimation method
## No predictor
summary( reml3L(y=y, v=v, cluster=District, data=Cooper03) )
## Level-2 and level-3 variances are constrained equally
summary( reml3L(y=y, v=v, cluster=District, data=Cooper03,
RE.equal=TRUE, model.name="Equal Tau2") )
## Year as a predictor
summary( reml3L(y=y, v=v, cluster=District, x=scale(Year, scale=FALSE),
data=Cooper03, intervals.type="LB") )
## Handling missing covariates with FIML
## Create 20/56 MCAR data in Year
set.seed(10000)
Year_MCAR <- Cooper03$Year
Year_MCAR[sample(56, 20)] <- NA
summary( meta3LFIML(y=y, v=v, cluster=District, x2=scale(Year_MCAR, scale=FALSE),
data=Cooper03, model.name="NA in Year_MCAR") )
}
}
\keyword{datasets}
|
### Pull out 4 specific OTUs of Interest
### Final figure on Potential Difference between Adenoma and Carcinoma
### P. micra, P. stomatis, P. asaccharolytica, and F. nucleatum
## Marc Sze
# Load needed functions
source('code/functions.R')
# Load needed libraries
loadLibs(c("dplyr", "tidyr"))
# Load needed data tables
tax <- read.delim('data/process/final.taxonomy', sep='\t',
header=T, row.names=1)
# Convert taxa table to a data frame with columns for each taxonomic division and shared OTU assignment
tax_df <- data.frame(do.call('rbind',
strsplit(as.character(tax$Taxonomy), ';'))) %>%
select(Domain = X1, Phyla = X2, Order = X3, Class = X4, Family = X5, Genus = X6) %>%
mutate(otu = rownames(tax))
# Remove the (100) from the columns and remove the unused tax data table
tax_df <- as.data.frame(apply(tax_df, 2,
function(x) gsub("\\(\\d{2}\\d?\\)", "", x)))
rm(tax)
# Load in metadata and create the lesion_follow indicator from Disease_Free
good_metaf <- read.csv(
"data/process/mod_metadata/good_metaf_final.csv",
stringsAsFactors = F, header = T) %>%
mutate(lesion_follow = ifelse(Disease_Free == "n", 1, 0))
# Gather by lowest level classification (genus)
genera_data <- as.data.frame(get_tax_level_shared('data/process/final.shared', 'data/process/final.taxonomy', 6))
# Generate relative abundance
total_seqs <- rowSums(genera_data)
genera_data <- cbind(Group = rownames(genera_data),
as.data.frame(apply(genera_data, 2,
function(x) x/total_seqs)))
# Aggregate data and reshape for graphing
temp_data <- genera_data %>% filter(Group %in% as.character(c(good_metaf$initial, good_metaf$followUp))) %>%
slice(match(as.character(c(good_metaf$initial, good_metaf$followUp)), Group)) %>%
mutate(sampleType = c(rep("initial", length(good_metaf$initial)),
rep("followup", length(good_metaf$followUp))),
Dx_Bin = rep(good_metaf$Dx_Bin, 2))
crc_select_data <- cbind(
EDRN = rep(good_metaf$EDRN, 2),
Group = temp_data$Group,
sampleType = temp_data$sampleType,
Dx_Bin = temp_data$Dx_Bin,
Disease_free = rep(good_metaf$Disease_Free, 2),
select(temp_data, Fusobacterium, Parvimonas, Peptostreptococcus, Porphyromonas)) %>%
gather(key = Genus,
value = rel.abund, Fusobacterium, Parvimonas, Peptostreptococcus, Porphyromonas)
# Write out table for future use
write.csv(crc_select_data, "data/process/tables/adn_crc_maybe_diff.csv", row.names = F)
# Run statistics testing and BH correction
test_data <- temp_data %>% select(Fusobacterium, Parvimonas, Peptostreptococcus, Porphyromonas) %>%
mutate(Dx_Bin = rep(good_metaf$Dx_Bin, 2),
sampleType = temp_data$sampleType,
Disease_free = rep(good_metaf$Disease_Free, 2))
good_counts_init <- c("Fusobacterium", "Parvimonas", "Peptostreptococcus", "Porphyromonas")
pvalue_summary <- cbind(
lesion_pvalue = apply(select(test_data, one_of(good_counts_init)), 2,
function(x){
wilcox.test(x[test_data$sampleType == "initial"],
x[test_data$sampleType == "followup"],
paired = TRUE)$p.value}),
crc_pvalue = apply(select(test_data, one_of(good_counts_init)), 2,
function(x){
wilcox.test(x[test_data$sampleType == "initial" & test_data$Dx_Bin == "cancer"],
x[test_data$sampleType == "followup" & test_data$Dx_Bin == "cancer"],
paired = TRUE)$p.value}),
adn_pvalue = apply(select(test_data, one_of(good_counts_init)), 2,
function(x){
wilcox.test(x[test_data$sampleType == "initial" & test_data$Dx_Bin != "cancer"],
x[test_data$sampleType == "followup" & test_data$Dx_Bin != "cancer"],
paired = TRUE)$p.value}))
adjusted_pvalues <- p.adjust(c(pvalue_summary[, "lesion_pvalue"],
pvalue_summary[, "crc_pvalue"],
pvalue_summary[, "adn_pvalue"]), method = "BH")
pvalue_summary <- cbind(
pvalue_summary, lesion_BH = adjusted_pvalues[1:length(good_counts_init)],
crc_BH = adjusted_pvalues[(length(good_counts_init) + 1):(length(good_counts_init)*2)],
adn_BH = adjusted_pvalues[(length(good_counts_init)*2 + 1):(length(good_counts_init)*3)])
# row labels follow the good_counts_init column order
rownames(pvalue_summary) <- c("fn", "parv", "pept", "porp")
# Write out the pvalue table for future use
write.csv(pvalue_summary, "data/process/tables/adn_crc_maybe_pvalue_summary.csv")
|
/code/old/Run_potential_cancer_specific_OTUs.R
|
permissive
|
SchlossLab/Sze_FollowUps_Microbiome_2017
|
R
| false | false | 4,859 |
r
|
### Pull out 4 specific OTUs of Interest
### Final figure on Potential Difference between Adenoma and Carcinoma
### P. micra, P. stomatis, P. asaccharolytica, and F. nucleatum
## Marc Sze
# Load needed functions
source('code/functions.R')
# Load needed libraries
loadLibs(c("dplyr", "tidyr"))
# Load needed data tables
tax <- read.delim('data/process/final.taxonomy', sep='\t',
header=T, row.names=1)
# Convert taxa table to a data frame with columns for each taxonomic division and shared OTU assignment
tax_df <- data.frame(do.call('rbind',
strsplit(as.character(tax$Taxonomy), ';'))) %>%
select(Domain = X1, Phyla = X2, Order = X3, Class = X4, Family = X5, Genus = X6) %>%
mutate(otu = rownames(tax))
# Remove the (100) from the columns and remove the unused tax data table
tax_df <- as.data.frame(apply(tax_df, 2,
function(x) gsub("\\(\\d{2}\\d?\\)", "", x)))
rm(tax)
# Load in metadata and create the lesion_follow indicator from Disease_Free
good_metaf <- read.csv(
"data/process/mod_metadata/good_metaf_final.csv",
stringsAsFactors = F, header = T) %>%
mutate(lesion_follow = ifelse(Disease_Free == "n", 1, 0))
# Gather by lowest level classification (genus)
genera_data <- as.data.frame(get_tax_level_shared('data/process/final.shared', 'data/process/final.taxonomy', 6))
# Generate relative abundance
total_seqs <- rowSums(genera_data)
genera_data <- cbind(Group = rownames(genera_data),
as.data.frame(apply(genera_data, 2,
function(x) x/total_seqs)))
# Aggregate data and reshape for graphing
temp_data <- genera_data %>% filter(Group %in% as.character(c(good_metaf$initial, good_metaf$followUp))) %>%
slice(match(as.character(c(good_metaf$initial, good_metaf$followUp)), Group)) %>%
mutate(sampleType = c(rep("initial", length(good_metaf$initial)),
rep("followup", length(good_metaf$followUp))),
Dx_Bin = rep(good_metaf$Dx_Bin, 2))
crc_select_data <- cbind(
EDRN = rep(good_metaf$EDRN, 2),
Group = temp_data$Group,
sampleType = temp_data$sampleType,
Dx_Bin = temp_data$Dx_Bin,
Disease_free = rep(good_metaf$Disease_Free, 2),
select(temp_data, Fusobacterium, Parvimonas, Peptostreptococcus, Porphyromonas)) %>%
gather(key = Genus,
value = rel.abund, Fusobacterium, Parvimonas, Peptostreptococcus, Porphyromonas)
# Write out table for future use
write.csv(crc_select_data, "data/process/tables/adn_crc_maybe_diff.csv", row.names = F)
# Run statistics testing and BH correction
test_data <- temp_data %>% select(Fusobacterium, Parvimonas, Peptostreptococcus, Porphyromonas) %>%
mutate(Dx_Bin = rep(good_metaf$Dx_Bin, 2),
sampleType = temp_data$sampleType,
Disease_free = rep(good_metaf$Disease_Free, 2))
good_counts_init <- c("Fusobacterium", "Parvimonas", "Peptostreptococcus", "Porphyromonas")
pvalue_summary <- cbind(
lesion_pvalue = apply(select(test_data, one_of(good_counts_init)), 2,
function(x){
wilcox.test(x[test_data$sampleType == "initial"],
x[test_data$sampleType == "followup"],
paired = TRUE)$p.value}),
crc_pvalue = apply(select(test_data, one_of(good_counts_init)), 2,
function(x){
wilcox.test(x[test_data$sampleType == "initial" & test_data$Dx_Bin == "cancer"],
x[test_data$sampleType == "followup" & test_data$Dx_Bin == "cancer"],
paired = TRUE)$p.value}),
adn_pvalue = apply(select(test_data, one_of(good_counts_init)), 2,
function(x){
wilcox.test(x[test_data$sampleType == "initial" & test_data$Dx_Bin != "cancer"],
x[test_data$sampleType == "followup" & test_data$Dx_Bin != "cancer"],
paired = TRUE)$p.value}))
adjusted_pvalues <- p.adjust(c(pvalue_summary[, "lesion_pvalue"],
pvalue_summary[, "crc_pvalue"],
pvalue_summary[, "adn_pvalue"]), method = "BH")
pvalue_summary <- cbind(
pvalue_summary, lesion_BH = adjusted_pvalues[1:length(good_counts_init)],
crc_BH = adjusted_pvalues[(length(good_counts_init) + 1):(length(good_counts_init)*2)],
adn_BH = adjusted_pvalues[(length(good_counts_init)*2 + 1):(length(good_counts_init)*3)])
# row labels follow the good_counts_init column order
rownames(pvalue_summary) <- c("fn", "parv", "pept", "porp")
# Write out the pvalue table for future use
write.csv(pvalue_summary, "data/process/tables/adn_crc_maybe_pvalue_summary.csv")
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
ui <- fluidPage(
fluidPage(
titlePanel(img(src = "logo.png", height = 50, width = 100, hspace = "400" )),
navbarPage(title = "WholeMeals",
tabPanel("Home",
titlePanel("Delicious & nutricious meals delivered to your doorstep"),
fluidRow(column(12,img(src = "WholeMeals.png", width = "100%"))),
hr(),
fluidRow(
column(4,wellPanel(h4("Customise Your Meal Plan"),h5("Be it weight-loss, bulking, low-cholestrol or gluten-free,
you get to choose the plan that suits your health goals."))),
column(4,wellPanel(h4("Local and International Selections"), h5("We have over 500 recipes from wide ranging cuisines,
including popular Asian and Western fares, so your tastebuds will never get bored. "))),
column(4,wellPanel(h4("Your Convenience, Delivered"), h5("Fresh meals prepared daily and delivered to your office or home
in individually packed and microwave safe containers. ")))
)
),
tabPanel("About Us",
navlistPanel(
tabPanel("Our Team",
wellPanel(
h3("Our Team"),
br(),
img(src = "chefs.png", width = "100%"),
br(),
h4("The WholeMeals team is comprised of excellent high level chefs and nutritionists.
We bring our decades of experience in catering, restaurants and fitness to the table
to create your well-balanced and tasty meals."))),
tabPanel("Our Mission",
wellPanel(
h3("Our Mission"),
br(),
img(src = "healthy.png", width = "100%"),
br(),
h4("With the hustle and bustle of modern lifestyle, your health often takes a backseat.
Our mission is to help you maintain a healthy lifestyle without compromising on the taste.
We want to dispel the myth that diets comprise of bland, boring meals."))),
tabPanel("Our Kitchen",
wellPanel(
h3("Our Kitchen"),
br(),
img(src = "kitchen.png", width = "100%"),
br(),
h4("Our meals are prepared and packed in our specially designed kitchen, with procedures to
                                                ensure hygiene and freshness, and prevent cross-contamination. We portion and label our
meals so you don't have to.")))
)
),
navbarMenu(title = "Contact Us",
tabPanel("Address", h4("Address"), "No. 29, Woodlands Industrial Park,", br()," E1 #04-17 Northtech", br(),"Singapore, 726811"),
tabPanel("Phone", h4("Phone"), "+65 9988 9988", br(), "+65 9999 9999"),
tabPanel("Email", h4("Email"), "customerservice@wholemeals.com")
)
)
)
)
server <- function(input, output) {
}
# Run the application
shinyApp(ui = ui, server = server)
|
/2017/Assignment-archive/FE8828-Juilee Save/Assignment1/Assignment 1.R
|
no_license
|
leafyoung/fe8828
|
R
| false | false | 4,081 |
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
ui <- fluidPage(
fluidPage(
titlePanel(img(src = "logo.png", height = 50, width = 100, hspace = "400" )),
navbarPage(title = "WholeMeals",
tabPanel("Home",
titlePanel("Delicious & nutricious meals delivered to your doorstep"),
fluidRow(column(12,img(src = "WholeMeals.png", width = "100%"))),
hr(),
fluidRow(
column(4,wellPanel(h4("Customise Your Meal Plan"),h5("Be it weight-loss, bulking, low-cholestrol or gluten-free,
you get to choose the plan that suits your health goals."))),
column(4,wellPanel(h4("Local and International Selections"), h5("We have over 500 recipes from wide ranging cuisines,
including popular Asian and Western fares, so your tastebuds will never get bored. "))),
column(4,wellPanel(h4("Your Convenience, Delivered"), h5("Fresh meals prepared daily and delivered to your office or home
in individually packed and microwave safe containers. ")))
)
),
tabPanel("About Us",
navlistPanel(
tabPanel("Our Team",
wellPanel(
h3("Our Team"),
br(),
img(src = "chefs.png", width = "100%"),
br(),
h4("The WholeMeals team is comprised of excellent high level chefs and nutritionists.
We bring our decades of experience in catering, restaurants and fitness to the table
to create your well-balanced and tasty meals."))),
tabPanel("Our Mission",
wellPanel(
h3("Our Mission"),
br(),
img(src = "healthy.png", width = "100%"),
br(),
h4("With the hustle and bustle of modern lifestyle, your health often takes a backseat.
Our mission is to help you maintain a healthy lifestyle without compromising on the taste.
                                               We want to dispel the myth that diets consist of bland, boring meals."))),
tabPanel("Our Kitchen",
wellPanel(
h3("Our Kitchen"),
br(),
img(src = "kitchen.png", width = "100%"),
br(),
h4("Our meals are prepared and packed in our specially designed kitchen, with procedures to
ensure hygiene, freshness and prevent cross-contamination. We count portion and label our
meals so you don't have to.")))
)
),
navbarMenu(title = "Contact Us",
tabPanel("Address", h4("Address"), "No. 29, Woodlands Industrial Park,", br()," E1 #04-17 Northtech", br(),"Singapore, 726811"),
tabPanel("Phone", h4("Phone"), "+65 9988 9988", br(), "+65 9999 9999"),
tabPanel("Email", h4("Email"), "customerservice@wholemeals.com")
)
)
)
)
server <- function(input, output) {
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
rcpp_pobs <- function(x) {
.Call(`_RoDiCE_rcpp_pobs`, x)
}
rcpp_cvm <- function(mat1, mat2) {
.Call(`_RoDiCE_rcpp_cvm`, mat1, mat2)
}
rcpp_coptest <- function(mat1, mat2, nperm) {
.Call(`_RoDiCE_rcpp_coptest`, mat1, mat2, nperm)
}
|
/R/RcppExports.R
|
no_license
|
ymatts/RoDiCE
|
R
| false | false | 373 |
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
rcpp_pobs <- function(x) {
.Call(`_RoDiCE_rcpp_pobs`, x)
}
rcpp_cvm <- function(mat1, mat2) {
.Call(`_RoDiCE_rcpp_cvm`, mat1, mat2)
}
rcpp_coptest <- function(mat1, mat2, nperm) {
.Call(`_RoDiCE_rcpp_coptest`, mat1, mat2, nperm)
}
|
# Test files
good_file <- "data/Blending_001L00XS4.txt"
context("Printing and previewing data")
test_that("preview levels", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
output <- capture.output(preview_levels(chunked))
# This file has 5 levels
expect_equal(length(output), 5 + 2)
expect_equal(output[1], "Level Counts: ")
})
test_that("preview frames", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
output <- capture.output(preview_frames(chunked))
# First chunk is the Header
expect_true(output[2] == " Eprime.Level Running Procedure")
expect_true(output[3] == " 1 Header Header")
})
test_that("preview eprime combines level and frame previews", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
output <- capture.output(preview_eprime(chunked))
output_levels <- capture.output(preview_levels(chunked))
output_frames <- capture.output(preview_frames(chunked))
expect_true(all(output == c(output_levels, output_frames)))
})
test_that("print an EprimeFrame", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
# Print the frame
frame <- chunked[[1]]
printed_frame <- capture.output(frame)
# Check specific lines in the print function
proc_line <- ' $ Procedure : chr "Header"'
classes <- ' - attr(*, "class")= chr [1:2] "EprimeFrame" "list"'
expect_equal(printed_frame[5 + 1], proc_line)
expect_equal(printed_frame[22 + 2], classes)
})
test_that("print a FrameList", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
# Print the frame
printed_list <- capture.output(chunked)
nlines <- length(printed_list)
# Check specific lines in the print function
l1 <- "List of 25"
l2 <- " $ :List of 22"
last_1 <- ' ..- attr(*, "class")= chr [1:2] "EprimeFrame" "list"'
last_2 <- ' - attr(*, "class")= chr [1:2] "list" "FrameList"'
expect_equal(printed_list[1], l1)
expect_equal(printed_list[2], l2)
expect_equal(printed_list[nlines - 1], last_1)
expect_equal(printed_list[nlines], last_2)
})
|
/tests/testthat/test-print.R
|
no_license
|
cran/rprime
|
R
| false | false | 2,195 |
r
|
# Test files
good_file <- "data/Blending_001L00XS4.txt"
context("Printing and previewing data")
test_that("preview levels", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
output <- capture.output(preview_levels(chunked))
# This file has 5 levels
expect_equal(length(output), 5 + 2)
expect_equal(output[1], "Level Counts: ")
})
test_that("preview frames", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
output <- capture.output(preview_frames(chunked))
# First chunk is the Header
expect_true(output[2] == " Eprime.Level Running Procedure")
expect_true(output[3] == " 1 Header Header")
})
test_that("preview eprime combines level and frame previews", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
output <- capture.output(preview_eprime(chunked))
output_levels <- capture.output(preview_levels(chunked))
output_frames <- capture.output(preview_frames(chunked))
expect_true(all(output == c(output_levels, output_frames)))
})
test_that("print an EprimeFrame", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
# Print the frame
frame <- chunked[[1]]
printed_frame <- capture.output(frame)
# Check specific lines in the print function
proc_line <- ' $ Procedure : chr "Header"'
classes <- ' - attr(*, "class")= chr [1:2] "EprimeFrame" "list"'
expect_equal(printed_frame[5 + 1], proc_line)
expect_equal(printed_frame[22 + 2], classes)
})
test_that("print a FrameList", {
eprime_log <- read_eprime(good_file)
chunked <- FrameList(eprime_log)
# Print the frame
printed_list <- capture.output(chunked)
nlines <- length(printed_list)
# Check specific lines in the print function
l1 <- "List of 25"
l2 <- " $ :List of 22"
last_1 <- ' ..- attr(*, "class")= chr [1:2] "EprimeFrame" "list"'
last_2 <- ' - attr(*, "class")= chr [1:2] "list" "FrameList"'
expect_equal(printed_list[1], l1)
expect_equal(printed_list[2], l2)
expect_equal(printed_list[nlines - 1], last_1)
expect_equal(printed_list[nlines], last_2)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ergis.R
\name{draw_ergis_comparative_insight}
\alias{draw_ergis_comparative_insight}
\title{Plot ERGIS layout for scenario comparison in the Insight Center}
\usage{
draw_ergis_comparative_insight(t, density = "None", max_interchange = 0,
types = c("Hydro", "Coal", "Gas CC", "Wind", "CT/Gas boiler", "Other",
"Pumped Storage", "PV"), scaling = 0.002, weight = 3, ...)
}
\arguments{
\item{t}{timestep of interest}
}
\description{
Plot a 5760x2400 frame comparing ERGIS generation, net interchange
and dispatch for all four scenarios
}
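\examples{
\dontrun{
# Illustrative sketch only: draw the four-scenario comparison for the first
# timestep, leaving every other argument at its default shown above.
draw_ergis_comparative_insight(t = 1)
}
}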
|
/man/draw_ergis_comparative_insight.Rd
|
no_license
|
fabric-io-rodrigues/kaleidoscope
|
R
| false | true | 630 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ergis.R
\name{draw_ergis_comparative_insight}
\alias{draw_ergis_comparative_insight}
\title{Plot ERGIS layout for scenario comparison in the Insight Center}
\usage{
draw_ergis_comparative_insight(t, density = "None", max_interchange = 0,
types = c("Hydro", "Coal", "Gas CC", "Wind", "CT/Gas boiler", "Other",
"Pumped Storage", "PV"), scaling = 0.002, weight = 3, ...)
}
\arguments{
\item{t}{timestep of interest}
}
\description{
Plot a 5760x2400 frame comparing ERGIS generation, net interchange
and dispatch for all four scenarios
}
|
# This code imports Falha.csv into R
# This code imports Local.csv into R
# This code imports Falha_Talao.csv into R
setwd("../dados")
falha <- read.csv(file="./ManutencaoSemaforica/Falha.csv",sep=";",stringsAsFactors=FALSE,
fileEncoding="windows-1252")
local <- read.csv(file="./ManutencaoSemaforica/Local.csv",sep=";",stringsAsFactors=FALSE,
fileEncoding="windows-1252")
talao <- read.csv(file="./ManutencaoSemaforica/Talao_Falha.csv",sep=";",stringsAsFactors=FALSE,
fileEncoding="windows-1252")
talao$data_abertura = strptime(talao$data_abertura,format='%d/%m/%Y %H:%M')
talao$data_informacao = strptime(talao$data_informacao,format='%d/%m/%Y %H:%M')
talao$data_acionamento = strptime(talao$data_acionamento,format='%d/%m/%Y %H:%M')
talao$data_chegada = strptime(talao$data_chegada,format='%d/%m/%Y %H:%M')
talao$data_confirmacao = strptime(talao$data_confirmacao,format='%d/%m/%Y %H:%M')
talao$data_acionamento_eletro = strptime(talao$data_acionamento_eletro,format='%d/%m/%Y %H:%M')
talao$data_cancelamento_eletro = strptime(talao$data_cancelamento_eletro,format='%d/%m/%Y %H:%M')
talao$data_origem = strptime(talao$data_origem,format='%d/%m/%Y %H:%M')
talao$data_encerramento = strptime(talao$data_encerramento,format='%d/%m/%Y %H:%M')
# time until closure (days)
talao$duracao <- as.numeric(talao$data_encerramento-talao$data_origem)/(60*60)
##### adding info from Falha.csv to the talao data
tmp <- match(talao$id_falha,falha$id_falha)
talao$falha_nome <- falha$nome[tmp]
talao$falha_prioridade <- falha$prioridade[tmp]
talao$falha_familia <- falha$familia[tmp]
table(talao$falha_prioridade,talao$prioridade)
##### adding info from Local.csv to the talao data
tmp <- match(talao$id_local,local$id_local)
talao$local <- local$local[tmp]
talao$local_subprefeitura <- local$subprefeitura[tmp]
talao$local_distrito <- local$distrito[tmp]
talao$lat <- local$latitude[tmp]
talao$lon <- local$lon[tmp]
talao <- talao[!is.na(talao$duracao),]
save(talao,file="../semaforos/talao-local-falha.rda")
write.csv(talao,file="../semaforos/talao-local-falha.csv")
|
/proc/semaforos/codigo01.R
|
permissive
|
sigriston/CETvis
|
R
| false | false | 2,157 |
r
|
# This code imports Falha.csv into R
# This code imports Local.csv into R
# This code imports Falha_Talao.csv into R
setwd("../dados")
falha <- read.csv(file="./ManutencaoSemaforica/Falha.csv",sep=";",stringsAsFactors=FALSE,
fileEncoding="windows-1252")
local <- read.csv(file="./ManutencaoSemaforica/Local.csv",sep=";",stringsAsFactors=FALSE,
fileEncoding="windows-1252")
talao <- read.csv(file="./ManutencaoSemaforica/Talao_Falha.csv",sep=";",stringsAsFactors=FALSE,
fileEncoding="windows-1252")
talao$data_abertura = strptime(talao$data_abertura,format='%d/%m/%Y %H:%M')
talao$data_informacao = strptime(talao$data_informacao,format='%d/%m/%Y %H:%M')
talao$data_acionamento = strptime(talao$data_acionamento,format='%d/%m/%Y %H:%M')
talao$data_chegada = strptime(talao$data_chegada,format='%d/%m/%Y %H:%M')
talao$data_confirmacao = strptime(talao$data_confirmacao,format='%d/%m/%Y %H:%M')
talao$data_acionamento_eletro = strptime(talao$data_acionamento_eletro,format='%d/%m/%Y %H:%M')
talao$data_cancelamento_eletro = strptime(talao$data_cancelamento_eletro,format='%d/%m/%Y %H:%M')
talao$data_origem = strptime(talao$data_origem,format='%d/%m/%Y %H:%M')
talao$data_encerramento = strptime(talao$data_encerramento,format='%d/%m/%Y %H:%M')
# time until closure (days)
talao$duracao <- as.numeric(talao$data_encerramento-talao$data_origem)/(60*60)
##### adding info from Falha.csv to the talao data
tmp <- match(talao$id_falha,falha$id_falha)
talao$falha_nome <- falha$nome[tmp]
talao$falha_prioridade <- falha$prioridade[tmp]
talao$falha_familia <- falha$familia[tmp]
table(talao$falha_prioridade,talao$prioridade)
##### adding info from Local.csv to the talao data
tmp <- match(talao$id_local,local$id_local)
talao$local <- local$local[tmp]
talao$local_subprefeitura <- local$subprefeitura[tmp]
talao$local_distrito <- local$distrito[tmp]
talao$lat <- local$latitude[tmp]
talao$lon <- local$lon[tmp]
talao <- talao[!is.na(talao$duracao),]
save(talao,file="../semaforos/talao-local-falha.rda")
write.csv(talao,file="../semaforos/talao-local-falha.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{dir.ctrl<-}
\alias{dir.ctrl<-}
\title{dir.ctrl}
\usage{
dir.ctrl(para) <- value
}
\arguments{
\item{para}{An object of metaXpara}
\item{value}{value}
}
\value{
An object of metaXpara
}
\description{
dir.ctrl
}
\examples{
para <- new("metaXpara")
dir.ctrl(para) <- "./"
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
|
/man/dir.ctrl.Rd
|
no_license
|
jaspershen/metaX
|
R
| false | true | 455 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{dir.ctrl<-}
\alias{dir.ctrl<-}
\title{dir.ctrl}
\usage{
dir.ctrl(para) <- value
}
\arguments{
\item{para}{An object of metaXpara}
\item{value}{value}
}
\value{
An object of metaXpara
}
\description{
dir.ctrl
}
\examples{
para <- new("metaXpara")
dir.ctrl(para) <- "./"
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kullback_leibler.R
\name{print.kullback_leibler}
\alias{print.kullback_leibler}
\title{Prints a kullback_leibler Object.}
\usage{
\method{print}{kullback_leibler}(x, digits = 3, ...)
}
\arguments{
\item{x}{The kullback_leibler object}
\item{digits}{Number of decimal places to print.}
\item{\ldots}{ignored}
}
\description{
Prints a kullback_leibler object.
}
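\examples{
\dontrun{
# Illustrative sketch only; assumes qdap's kullback_leibler() constructor,
# which builds the object that this print method displays.
x <- kullback_leibler(mtcars[, c("mpg", "hp")])
print(x, digits = 2)
}
}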
|
/man/print.kullback_leibler.Rd
|
no_license
|
hoodaly/qdap
|
R
| false | true | 441 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kullback_leibler.R
\name{print.kullback_leibler}
\alias{print.kullback_leibler}
\title{Prints a kullback_leibler Object.}
\usage{
\method{print}{kullback_leibler}(x, digits = 3, ...)
}
\arguments{
\item{x}{The kullback_leibler object}
\item{digits}{Number of decimal places to print.}
\item{\ldots}{ignored}
}
\description{
Prints a kullback_leibler object.
}
|
\name{publish_rpubs}
\alias{publish_rpubs}
\title{Publish slide deck to rPubs}
\usage{
publish_rpubs(title, html_file = "index.html")
}
\arguments{
\item{title}{title of the presentation}
\item{html_file}{path to html file to publish; defaults
to index.html}
}
\description{
Publish slide deck to rPubs
}
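\examples{
\dontrun{
# Illustrative sketch only: assumes a slidified deck whose index.html already
# exists in the working directory.
publish_rpubs("My slide deck", html_file = "index.html")
}
}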
|
/man/publish_rpubs.Rd
|
no_license
|
Lchiffon/slidify
|
R
| false | false | 317 |
rd
|
\name{publish_rpubs}
\alias{publish_rpubs}
\title{Publish slide deck to rPubs}
\usage{
publish_rpubs(title, html_file = "index.html")
}
\arguments{
\item{title}{title of the presentation}
\item{html_file}{path to html file to publish; defaults
to index.html}
}
\description{
Publish slide deck to rPubs
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iterators.R
\name{unique.igraph.vs}
\alias{unique.igraph.vs}
\title{Remove duplicate vertices from a vertex sequence}
\usage{
\method{unique}{igraph.vs}(x, incomparables = FALSE, ...)
}
\arguments{
\item{x}{A vertex sequence.}
\item{incomparables}{a vector of values that cannot be compared.
Passed to base function \code{duplicated}. See details there.}
\item{...}{Passed to base function \code{duplicated()}.}
}
\value{
A vertex sequence with the duplicate vertices removed.
}
\description{
Remove duplicate vertices from a vertex sequence
}
\examples{
g <- make_(ring(10), with_vertex_(name = LETTERS[1:10]))
V(g)[1, 1:5, 1:10, 5:10]
V(g)[1, 1:5, 1:10, 5:10] \%>\% unique()
}
\seealso{
Other vertex and edge sequence operations: \code{\link{c.igraph.es}},
\code{\link{c.igraph.vs}},
\code{\link{difference.igraph.es}},
\code{\link{difference.igraph.vs}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-indexing2}},
\code{\link{igraph-vs-indexing}},
\code{\link{intersection.igraph.es}},
\code{\link{intersection.igraph.vs}},
\code{\link{rev.igraph.es}}, \code{\link{rev.igraph.vs}},
\code{\link{union.igraph.es}},
\code{\link{union.igraph.vs}},
\code{\link{unique.igraph.es}}
}
|
/man/unique.igraph.vs.Rd
|
no_license
|
Ruchika8/Dgraph
|
R
| false | true | 1,332 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iterators.R
\name{unique.igraph.vs}
\alias{unique.igraph.vs}
\title{Remove duplicate vertices from a vertex sequence}
\usage{
\method{unique}{igraph.vs}(x, incomparables = FALSE, ...)
}
\arguments{
\item{x}{A vertex sequence.}
\item{incomparables}{a vector of values that cannot be compared.
Passed to base function \code{duplicated}. See details there.}
\item{...}{Passed to base function \code{duplicated()}.}
}
\value{
A vertex sequence with the duplicate vertices removed.
}
\description{
Remove duplicate vertices from a vertex sequence
}
\examples{
g <- make_(ring(10), with_vertex_(name = LETTERS[1:10]))
V(g)[1, 1:5, 1:10, 5:10]
V(g)[1, 1:5, 1:10, 5:10] \%>\% unique()
}
\seealso{
Other vertex and edge sequence operations: \code{\link{c.igraph.es}},
\code{\link{c.igraph.vs}},
\code{\link{difference.igraph.es}},
\code{\link{difference.igraph.vs}},
\code{\link{igraph-es-indexing2}},
\code{\link{igraph-es-indexing}},
\code{\link{igraph-vs-indexing2}},
\code{\link{igraph-vs-indexing}},
\code{\link{intersection.igraph.es}},
\code{\link{intersection.igraph.vs}},
\code{\link{rev.igraph.es}}, \code{\link{rev.igraph.vs}},
\code{\link{union.igraph.es}},
\code{\link{union.igraph.vs}},
\code{\link{unique.igraph.es}}
}
|
# 04 narrow band indices
# takes a long time, so I did this on a lab pc
library(signal, lib.loc = "F:/be_hyperspectral/library")
library(hsdar, lib.loc = "F:/be_hyperspectral/library")
# get hyperspectral plot data
hy_fls <- list.files("F:/be_hyperspectral/hy_10m/", pattern = ".tif$", full.names = TRUE)
hy_names <- substr(hy_fls, 35, 39)
band_info <- readRDS("F:/be_hyperspectral/hy_10m/hyperspectral_band_info.RDS")
# define function for mean and sd of narrow band indices for one plot
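# (nri_stats builds one row per ordered wavelength pair, holding the mean and
#  sd of that narrow-band index across the plot's pixels, with NA rows dropped)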
nri_stats <- function(Nri){
wl1 <- lapply(seq(length(Nri@wavelength)), function(i){
print(i)
wl2 <- lapply(seq(length(Nri@wavelength)), function(j){
data.frame(wl1 = Nri@wavelength[i],
wl2 = Nri@wavelength[j],
nri_mean = mean(Nri@nri[i,j,]),
nri_sd = sd(Nri@nri[i,j,]))
})
do.call(rbind, wl2)
})
wl1 <- do.call(rbind,wl1)
wl1 <- na.omit(wl1)
return(wl1)
}
# calculate for all plots
for(k in seq(length(hy_fls))){
aeg <- speclib(brick(hy_fls[k]), band_info$wavelength)
aeg_nri <- nri(aeg, recursive = TRUE)
print(hy_names[k])
nri_df <- nri_stats(aeg_nri)
saveRDS(nri_df, paste0("F:/be_hyperspectral/nri/", hy_names[k], "_nri.RDS"))
}
|
/src/deprecated/05_narrow_band_indices.R
|
permissive
|
yangxhcaf/BE-HyperSpecPrediction
|
R
| false | false | 1,223 |
r
|
# 04 narrow band indices
# takes a long time, so I did this on a lab pc
library(signal, lib.loc = "F:/be_hyperspectral/library")
library(hsdar, lib.loc = "F:/be_hyperspectral/library")
# get hyperspectral plot data
hy_fls <- list.files("F:/be_hyperspectral/hy_10m/", pattern = ".tif$", full.names = TRUE)
hy_names <- substr(hy_fls, 35, 39)
band_info <- readRDS("F:/be_hyperspectral/hy_10m/hyperspectral_band_info.RDS")
# define function for mean and sd of narrow band indices for one plot
nri_stats <- function(Nri){
wl1 <- lapply(seq(length(Nri@wavelength)), function(i){
print(i)
wl2 <- lapply(seq(length(Nri@wavelength)), function(j){
data.frame(wl1 = Nri@wavelength[i],
wl2 = Nri@wavelength[j],
nri_mean = mean(Nri@nri[i,j,]),
nri_sd = sd(Nri@nri[i,j,]))
})
do.call(rbind, wl2)
})
wl1 <- do.call(rbind,wl1)
wl1 <- na.omit(wl1)
return(wl1)
}
# calculate for all plots
for(k in seq(length(hy_fls))){
aeg <- speclib(brick(hy_fls[k]), band_info$wavelength)
aeg_nri <- nri(aeg, recursive = TRUE)
print(hy_names[k])
nri_df <- nri_stats(aeg_nri)
saveRDS(nri_df, paste0("F:/be_hyperspectral/nri/", hy_names[k], "_nri.RDS"))
}
|
library(testthat)
library(bigutilsr)
test_check("bigutilsr")
|
/fuzzedpackages/bigutilsr/tests/testthat.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 66 |
r
|
library(testthat)
library(bigutilsr)
test_check("bigutilsr")
|
library(rpostgisLT)
### Name: roe_gps_data
### Title: Example data from a GPS tracking project
### Aliases: roe_gps_data roe_sensors_animals_tables roe_vector_geom
### roe_raster
### Keywords: datasets
### ** Examples
data("roe_gps_data")
head(roe_gps_data$GSM01438)
data("roe_sensors_animals_tables")
roe_sensors_animals_tables$animals
data("roe_vector_geom")
if (require(sp, quietly = TRUE)) {
plot(roe_vector_geom$adm_boundaries)
plot(roe_vector_geom$roads, col = 'red', add = TRUE)
}
if (require(raster, quietly = TRUE)) {
data("roe_raster")
plot(roe_raster$srtm_dem)
}
|
/data/genthat_extracted_code/rpostgisLT/examples/roe_gps_data.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 594 |
r
|
library(rpostgisLT)
### Name: roe_gps_data
### Title: Example data from a GPS tracking project
### Aliases: roe_gps_data roe_sensors_animals_tables roe_vector_geom
### roe_raster
### Keywords: datasets
### ** Examples
data("roe_gps_data")
head(roe_gps_data$GSM01438)
data("roe_sensors_animals_tables")
roe_sensors_animals_tables$animals
data("roe_vector_geom")
if (require(sp, quietly = TRUE)) {
plot(roe_vector_geom$adm_boundaries)
plot(roe_vector_geom$roads, col = 'red', add = TRUE)
}
if (require(raster, quietly = TRUE)) {
data("roe_raster")
plot(roe_raster$srtm_dem)
}
|
psplineDensity<- function(obj, x, probTail=FALSE) {
# copy linear coefficients for log density tail
# left tail (x < xr[1]):  aTail[1]*( x- xr[1]) + bTail[1]
# right tail (x > xr[2]): aTail[2]*( x- xr[2]) + bTail[2]
a <- obj$aTail
b <- obj$bTail
xr<- obj$xr
# probabilities of right tail interval, middle from histospline and left.
I <- obj$I
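  # Tail masses come from integrating the exponential of the linear log
  # density: int exp(a*(t - xr) + b) dt = exp(a*(x - xr) + b)/a, taken from
  # -Inf up to x for the left tail (slope a > 0) and from x to +Inf, with a
  # sign flip, for the right tail (slope a < 0).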
ind1 <- x < xr[1]
ind3 <- x > xr[2]
ind2 <- !(ind3 | ind1)
y <- rep(NA, length(x))
if (!obj$threshold[1]) {
prob<- exp( a[1]*(x[ind1]- xr[1]) + b[1] ) / a[1]
if( probTail){
y[ind1]<-1-prob
}else{
y[ind1]<-prob
}
}
else{
y[ind1] <- 0
}
if (!obj$threshold[2]) {
prob<- -exp( a[2]*(x[ind3]- xr[2]) + b[2] ) / a[2]
if( probTail){
y[ind3]<-prob
}else{
y[ind3]<-1 - prob
}
}
else{
y[ind3] <- 1.0
}
#prob <- splint(obj$cdfRoot$x, obj$cdfRoot$y, x[ind2])
# piecewise linear interpolation
prob <- approx(obj$cdfRoot$x, obj$cdfRoot$y, x[ind2])$y
if( probTail){
y[ind2]<-1-prob
}else{
y[ind2]<-prob
}
return(y)
}
|
/LHSpline/R/psplineDensity.R
|
no_license
|
dnychka/LogHistoSpline
|
R
| false | false | 1,134 |
r
|
psplineDensity<- function(obj, x, probTail=FALSE) {
# copy linear coefficients for log density tail
# left tail (x < xr[1]):  aTail[1]*( x- xr[1]) + bTail[1]
# right tail (x > xr[2]): aTail[2]*( x- xr[2]) + bTail[2]
a <- obj$aTail
b <- obj$bTail
xr<- obj$xr
# probabilities of right tail interval, middle from histospline and left.
I <- obj$I
ind1 <- x < xr[1]
ind3 <- x > xr[2]
ind2 <- !(ind3 | ind1)
y <- rep(NA, length(x))
if (!obj$threshold[1]) {
prob<- exp( a[1]*(x[ind1]- xr[1]) + b[1] ) / a[1]
if( probTail){
y[ind1]<-1-prob
}else{
y[ind1]<-prob
}
}
else{
y[ind1] <- 0
}
if (!obj$threshold[2]) {
prob<- -exp( a[2]*(x[ind3]- xr[2]) + b[2] ) / a[2]
if( probTail){
y[ind3]<-prob
}else{
y[ind3]<-1 - prob
}
}
else{
y[ind3] <- 1.0
}
#prob <- splint(obj$cdfRoot$x, obj$cdfRoot$y, x[ind2])
# piecewise linear interpolation
prob <- approx(obj$cdfRoot$x, obj$cdfRoot$y, x[ind2])$y
if( probTail){
y[ind2]<-1-prob
}else{
y[ind2]<-prob
}
return(y)
}
|
########Input R parameters generated by experGen suite of tools for use in driver script -------
rm(list=ls())
#--------------predictor and target variable names--------#
predictor.vars <- <PREDICTOR>
target.var <- <TARGET>
#--------------grid region, mask settings----------#
grid <- <OP.GRID>
spat.mask.dir_1 <- <SPAT.MASK.DIR>
spat.mask.var <- <SPAT.MASK.VAR>
ds.region <- <DS.REGION>
#--------------- I,J settings ----------------#
file.j.range <- <JRANGE>
i.file <- <ISUFFIX>
j.start <- <JSTART>
j.end <- <JEND>
loop.start <- j.start - (j.start-1)
loop.end <- j.end - (j.start-1)
#------------ historical predictor(s)----------#
hist.file.start.year_1 <- <H.FILE.START.YEAR>
hist.file.end.year_1 <- <H.FILE.END.YEAR>
hist.train.start.year_1 <- <H.TRAIN.START.YEAR>
hist.train.end.year_1 <- <H.TRAIN.END.YEAR>
hist.scenario_1 <- <H.SCENARIO>
hist.nyrtot_1 <- (hist.train.end.year_1 - hist.train.start.year_1) + 1
hist.model_1 <- <H.MODEL>
hist.freq_1 <- <H.FREQ>
hist.indir_1 <- <H.INDIR>
hist.time.window <- <H.TIME.WINDOW>
#------------ future predictor(s) -------------#
fut.file.start.year_1 <- <F.FILE.START.YEAR>
fut.file.end.year_1 <- <F.FILE.END.YEAR>
fut.train.start.year_1 <- <F.TRAIN.START.YEAR>
fut.train.end.year_1 <- <F.TRAIN.END.YEAR>
fut.scenario_1 <- <F.SCENARIO>
fut.nyrtot_1 <- (fut.train.end.year_1 - fut.train.start.year_1) + 1
fut.model_1 <- <F.MODEL>
fut.freq_1 <- <F.FREQ>
fut.indir_1 <- <F.INDIR>
fut.time.window <- <F.TIME.WINDOW>
fut.time.trim.mask <- <FUT.TIME.TRIM.MASK>
#------------- target -------------------------#
target.file.start.year_1 <- <T.FILE.START.YEAR>
target.file.end.year_1 <- <T.FILE.END.YEAR>
target.train.start.year_1 <- <T.TRAIN.START.YEAR>
target.train.end.year_1 <- <T.TRAIN.END.YEAR>
target.scenario_1 <- <T.SCENARIO>
target.nyrtot_1 <- (target.train.end.year_1 - target.train.start.year_1) + 1
target.model_1 <- <T.MODEL>
target.freq_1 <- <T.FREQ>
target.indir_1 <- <T.INDIR>
target.time.window <- <T.TIME.WINDOW>
#------------- method name k-fold specs-----------------------#
ds.method <- <METHOD>
ds.experiment <- <DEXPER>
k.fold <- <KFOLD>
<PR_OPTS>
#-------------- output -----------------------#
output.dir <- <OUTPUT.DIR>
mask.output.dir <- <MASK.OUTPUT.DIR>
#------------- custom -----------------------#
<PARAMS>
#Number of "cuts" for which quantiles will be empirically estimated (Default is 100 in CDFt package).
#-------------- pp ---------------------------#
mask.list <- <MASK.LIST>
################### others ###################################
#---------------- reference to go in globals -----------------------------------
configURL <-' Ref:http://gfdl.noaa.gov/esd_experiment_configs'
# ------ Set FUDGE environment ---------------
# FUDGEROOT = Sys.getenv(c("FUDGEROOT"))
FUDGEROOT <- <FUDGEROOT>
print(paste("FUDGEROOT is now activated:",FUDGEROOT,sep=''))
BRANCH <- <BRANCH>
################ call main driver ###################################
print(paste("START TIME:",Sys.time(),sep=''))
#----------Use /vftmp as necessary---------------#
TMPDIR = Sys.getenv(c("TMPDIR"))
if (TMPDIR == ""){
stop("ERROR: TMPDIR is not set. Please set it and try it")
}
#########################################################################
gcp_to_TMPDIR <- function(TMPDIR, directory, file.str=""){
#Function for acting as a gcp backstop if the runscript is not present
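  #Paths under /archive or /work are copied beneath TMPDIR with gcp (when not
  #already there) and the local copy is returned; any other path, or a failed
  #copy, is returned unchanged.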
if((grepl('^/archive',directory)) | (grepl('^/work',directory))){
directory.tmp <- paste(TMPDIR,directory,sep='')
if(file.str!="") file.str=paste("/*",file.str, sep="")
file.tmp <- paste(directory.tmp, file.str, sep="")
message(file.tmp)
if(!file.exists(directory.tmp)){
#Gcp should already be loaded by setenv_fudge or the module if you are running this
if(system('gcp --version')!=0){
loadstr = 'source /usr/share/Modules/init/sh;module load gcp;'
}else{
loadstr = ""
}
commandstr <- paste(loadstr, "gcp -cd", directory, directory.tmp)
sys.status <- system(commandstr)
if (sys.status!=0){
warning(paste("Error in gcp_to_TMPDIR; exited with status", sys.status, "; returning orig file"))
return(directory)
}
}
return(directory.tmp)
}else{
return(directory)
}
}
ijstr <- paste("I", i.file, "_", file.j.range, ".nc", sep="")
if(spat.mask.dir_1 != 'na'){
spat.mask.dir_1 <- gcp_to_TMPDIR(TMPDIR, spat.mask.dir_1, ijstr)}
if(hist.indir_1 != 'na'){
hist.indir_1 <- gcp_to_TMPDIR(TMPDIR, hist.indir_1, ijstr)}
if(fut.indir_1 != 'na'){
fut.indir_1 <- gcp_to_TMPDIR(TMPDIR, fut.indir_1, ijstr)}
if(target.indir_1 != 'na'){
target.indir_1 <- gcp_to_TMPDIR(TMPDIR, target.indir_1, ijstr)}
if(target.time.window != 'na'){
target.time.window <- gcp_to_TMPDIR(TMPDIR, target.time.window)}
if(hist.time.window != 'na'){
hist.time.window <- gcp_to_TMPDIR(TMPDIR, hist.time.window)}
if(fut.time.window != 'na'){
fut.time.window <- gcp_to_TMPDIR(TMPDIR, fut.time.window)}
if(fut.time.trim.mask != 'na'){
fut.time.trim.mask <- gcp_to_TMPDIR(TMPDIR, fut.time.trim.mask)}
##Check for output and need to gcp on here as well
#If file exists in TMPDIR, was presumably created by the runscript and no GCP needed in R
#output.dir <- paste(TMPDIR,output.dir,sep='')
#mask.output.dir <- paste(TMPDIR,mask.output.dir,sep='')
output.dir <- gcp_to_TMPDIR(TMPDIR, output.dir)
mask.output.dir <- gcp_to_TMPDIR(TMPDIR,mask.output.dir)
#########################################################################
#-------------------------------------------------#
#source(paste(FUDGEROOT,'Rsuite/Drivers/',ds.method,'/Driver_',ds.method,'.R',sep=''))
source(paste(FUDGEROOT,'Rsuite/Drivers/','Master_Driver.R',sep=''))
|
/utils/templates/runcode/template_runcode_gcp.R
|
no_license
|
cwhitlock-NOAA/FUDGE
|
R
| false | false | 5,873 |
r
|
########Input R parameters generated by experGen suite of tools for use in driver script -------
rm(list=ls())
#--------------predictor and target variable names--------#
predictor.vars <- <PREDICTOR>
target.var <- <TARGET>
#--------------grid region, mask settings----------#
grid <- <OP.GRID>
spat.mask.dir_1 <- <SPAT.MASK.DIR>
spat.mask.var <- <SPAT.MASK.VAR>
ds.region <- <DS.REGION>
#--------------- I,J settings ----------------#
file.j.range <- <JRANGE>
i.file <- <ISUFFIX>
j.start <- <JSTART>
j.end <- <JEND>
loop.start <- j.start - (j.start-1)
loop.end <- j.end - (j.start-1)
#------------ historical predictor(s)----------#
hist.file.start.year_1 <- <H.FILE.START.YEAR>
hist.file.end.year_1 <- <H.FILE.END.YEAR>
hist.train.start.year_1 <- <H.TRAIN.START.YEAR>
hist.train.end.year_1 <- <H.TRAIN.END.YEAR>
hist.scenario_1 <- <H.SCENARIO>
hist.nyrtot_1 <- (hist.train.end.year_1 - hist.train.start.year_1) + 1
hist.model_1 <- <H.MODEL>
hist.freq_1 <- <H.FREQ>
hist.indir_1 <- <H.INDIR>
hist.time.window <- <H.TIME.WINDOW>
#------------ future predictor(s) -------------#
fut.file.start.year_1 <- <F.FILE.START.YEAR>
fut.file.end.year_1 <- <F.FILE.END.YEAR>
fut.train.start.year_1 <- <F.TRAIN.START.YEAR>
fut.train.end.year_1 <- <F.TRAIN.END.YEAR>
fut.scenario_1 <- <F.SCENARIO>
fut.nyrtot_1 <- (fut.train.end.year_1 - fut.train.start.year_1) + 1
fut.model_1 <- <F.MODEL>
fut.freq_1 <- <F.FREQ>
fut.indir_1 <- <F.INDIR>
fut.time.window <- <F.TIME.WINDOW>
fut.time.trim.mask <- <FUT.TIME.TRIM.MASK>
#------------- target -------------------------#
target.file.start.year_1 <- <T.FILE.START.YEAR>
target.file.end.year_1 <- <T.FILE.END.YEAR>
target.train.start.year_1 <- <T.TRAIN.START.YEAR>
target.train.end.year_1 <- <T.TRAIN.END.YEAR>
target.scenario_1 <- <T.SCENARIO>
target.nyrtot_1 <- (target.train.end.year_1 - target.train.start.year_1) + 1
target.model_1 <- <T.MODEL>
target.freq_1 <- <T.FREQ>
target.indir_1 <- <T.INDIR>
target.time.window <- <T.TIME.WINDOW>
#------------- method name k-fold specs-----------------------#
ds.method <- <METHOD>
ds.experiment <- <DEXPER>
k.fold <- <KFOLD>
<PR_OPTS>
#-------------- output -----------------------#
output.dir <- <OUTPUT.DIR>
mask.output.dir <- <MASK.OUTPUT.DIR>
#------------- custom -----------------------#
<PARAMS>
#Number of "cuts" for which quantiles will be empirically estimated (Default is 100 in CDFt package).
#-------------- pp ---------------------------#
mask.list <- <MASK.LIST>
################### others ###################################
#---------------- reference to go in globals -----------------------------------
configURL <-' Ref:http://gfdl.noaa.gov/esd_experiment_configs'
# ------ Set FUDGE environment ---------------
# FUDGEROOT = Sys.getenv(c("FUDGEROOT"))
FUDGEROOT <- <FUDGEROOT>
print(paste("FUDGEROOT is now activated:",FUDGEROOT,sep=''))
BRANCH <- <BRANCH>
################ call main driver ###################################
print(paste("START TIME:",Sys.time(),sep=''))
#----------Use /vftmp as necessary---------------#
TMPDIR = Sys.getenv(c("TMPDIR"))
if (TMPDIR == ""){
stop("ERROR: TMPDIR is not set. Please set it and try it")
}
#########################################################################
gcp_to_TMPDIR <- function(TMPDIR, directory, file.str=""){
#Function for acting as a gcp backstop if the runscript is not present
if((grepl('^/archive',directory)) | (grepl('^/work',directory))){
directory.tmp <- paste(TMPDIR,directory,sep='')
if(file.str!="") file.str=paste("/*",file.str, sep="")
file.tmp <- paste(directory.tmp, file.str, sep="")
message(file.tmp)
if(!file.exists(directory.tmp)){
#Gcp should already be loaded by setenv_fudge or the module if you are running this
if(system('gcp --version')!=0){
loadstr = 'source /usr/share/Modules/init/sh;module load gcp;'
}else{
loadstr = ""
}
commandstr <- paste(loadstr, "gcp -cd", directory, directory.tmp)
sys.status <- system(commandstr)
if (sys.status!=0){
warning(paste("Error in gcp_to_TMPDIR; exited with status", sys.status, "; returning orig file"))
return(directory)
}
}
return(directory.tmp)
}else{
return(directory)
}
}
ijstr <- paste("I", i.file, "_", file.j.range, ".nc", sep="")
if(spat.mask.dir_1 != 'na'){
spat.mask.dir_1 <- gcp_to_TMPDIR(TMPDIR, spat.mask.dir_1, ijstr)}
if(hist.indir_1 != 'na'){
hist.indir_1 <- gcp_to_TMPDIR(TMPDIR, hist.indir_1, ijstr)}
if(fut.indir_1 != 'na'){
fut.indir_1 <- gcp_to_TMPDIR(TMPDIR, fut.indir_1, ijstr)}
if(target.indir_1 != 'na'){
target.indir_1 <- gcp_to_TMPDIR(TMPDIR, target.indir_1, ijstr)}
if(target.time.window != 'na'){
target.time.window <- gcp_to_TMPDIR(TMPDIR, target.time.window)}
if(hist.time.window != 'na'){
hist.time.window <- gcp_to_TMPDIR(TMPDIR, hist.time.window)}
if(fut.time.window != 'na'){
fut.time.window <- gcp_to_TMPDIR(TMPDIR, fut.time.window)}
if(fut.time.trim.mask != 'na'){
fut.time.trim.mask <- gcp_to_TMPDIR(TMPDIR, fut.time.trim.mask)}
##Check for output and need to gcp on here as well
#If file exists in TMPDIR, was presumably created by the runscript and no GCP needed in R
#output.dir <- paste(TMPDIR,output.dir,sep='')
#mask.output.dir <- paste(TMPDIR,mask.output.dir,sep='')
output.dir <- gcp_to_TMPDIR(TMPDIR, output.dir)
mask.output.dir <- gcp_to_TMPDIR(TMPDIR,mask.output.dir)
#########################################################################
#-------------------------------------------------#
#source(paste(FUDGEROOT,'Rsuite/Drivers/',ds.method,'/Driver_',ds.method,'.R',sep=''))
source(paste(FUDGEROOT,'Rsuite/Drivers/','Master_Driver.R',sep=''))
|
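# CI shim for local, interactive runs: branch, tag, slug and commit are read
# straight from the git working copy, and push/interactivity checks are
# always permissive.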
LocalCI <- R6Class(
"LocalCI",
inherit = CI,
public = list(
get_branch = function() {
# Suppress warnings that occur if not in a Git repo
suppressWarnings(system2("git", "rev-parse --abbrev-ref HEAD", stdout = TRUE))
},
get_tag = function() {
# Suppress warnings that occur if not in a Git repo
suppressWarnings(system2("git", "describe", stdout = TRUE))
},
is_tag = function() {
# Suppress warnings that occur if not in a Git repo
suppressWarnings(length(system2("git", c("tag", "--points-at", "HEAD"), stdout = TRUE)) > 0)
},
get_slug = function() {
# Suppress error that occurs if not in a Git repo
tryCatch(
{
remote <- gh::gh_tree_remote()
paste0(remote$username, "/", remote$repo)
},
error = ""
)
},
get_build_number = function() {
"local build"
},
get_build_url = function() {
NULL
},
get_commit = function() {
# Suppress error that occurs if not in a Git repo
      tryCatch(git2r::revparse_single(revision = "HEAD")$sha, error = function(e) "")
},
can_push = function(name = "TIC_DEPLOY_KEY") {
TRUE
},
get_env = function(env) {
Sys.getenv(env)
},
is_env = function(env, value) {
self$get_env(env) == value
},
has_env = function(env) {
self$get_env(env) != ""
},
is_interactive = function() {
TRUE
}
)
)
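# Minimal usage sketch (illustrative only; assumes a git checkout in the
# working directory):
#   ci <- LocalCI$new()
#   ci$get_branch()              # current branch name
#   ci$is_tag()                  # TRUE when HEAD sits exactly on a tag
#   ci$has_env("TIC_DEPLOY_KEY") # TRUE when the deploy key variable is set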
|
/R/local.R
|
no_license
|
MarkEdmondson1234/tic
|
R
| false | false | 1,452 |
r
|
LocalCI <- R6Class(
"LocalCI",
inherit = CI,
public = list(
get_branch = function() {
# Suppress warnings that occur if not in a Git repo
suppressWarnings(system2("git", "rev-parse --abbrev-ref HEAD", stdout = TRUE))
},
get_tag = function() {
# Suppress warnings that occur if not in a Git repo
suppressWarnings(system2("git", "describe", stdout = TRUE))
},
is_tag = function() {
# Suppress warnings that occur if not in a Git repo
suppressWarnings(length(system2("git", c("tag", "--points-at", "HEAD"), stdout = TRUE)) > 0)
},
get_slug = function() {
# Suppress error that occurs if not in a Git repo
tryCatch(
{
remote <- gh::gh_tree_remote()
paste0(remote$username, "/", remote$repo)
},
error = ""
)
},
get_build_number = function() {
"local build"
},
get_build_url = function() {
NULL
},
get_commit = function() {
# Suppress error that occurs if not in a Git repo
      tryCatch(git2r::revparse_single(revision = "HEAD")$sha, error = function(e) "")
},
can_push = function(name = "TIC_DEPLOY_KEY") {
TRUE
},
get_env = function(env) {
Sys.getenv(env)
},
is_env = function(env, value) {
self$get_env(env) == value
},
has_env = function(env) {
self$get_env(env) != ""
},
is_interactive = function() {
TRUE
}
)
)
|
/nodestraindatasetcreation.R
|
no_license
|
hying99/Rfiles
|
R
| false | false | 731 |
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/margins.R, R/margins_betareg.R,
% R/margins_clm.R, R/margins_default.R, R/margins_glm.R, R/margins_lm.R,
% R/margins_loess.R, R/margins_merMod.R, R/margins_multinom.R,
% R/margins_nnet.R, R/margins_polr.R, R/margins_summary.R, R/margins_svyglm.R
\docType{package}
\name{margins}
\alias{margins}
\alias{margins-package}
\alias{margins.betareg}
\alias{margins.clm}
\alias{margins.default}
\alias{margins.glm}
\alias{margins.lm}
\alias{margins.loess}
\alias{margins.merMod}
\alias{margins.lmerMod}
\alias{margins.multinom}
\alias{margins.nnet}
\alias{margins.polr}
\alias{margins_summary}
\alias{margins.svyglm}
\title{Marginal Effects Estimation}
\usage{
margins(model, ...)
\method{margins}{betareg}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model, phi = FALSE),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{clm}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vce = "none",
eps = 1e-07,
...
)
\method{margins}{default}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{glm}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{lm}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{loess}(
model,
data,
variables = NULL,
at = NULL,
vce = "none",
eps = 1e-07,
...
)
\method{margins}{merMod}(
model,
data = find_data(model),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{lmerMod}(
model,
data = find_data(model),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{multinom}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = NULL,
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{nnet}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
vce = "none",
eps = 1e-07,
...
)
\method{margins}{polr}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = NULL,
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
margins_summary(model, ..., level = 0.95, by_factor = TRUE)
\method{margins}{svyglm}(
model,
data = find_data(model),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
}
\arguments{
\item{model}{A model object. See Details for supported model classes.}
\item{\dots}{Arguments passed to methods, and onward to \code{\link{dydx}} methods and possibly further to \code{\link[prediction]{prediction}} methods. This can be useful, for example, for setting \code{type} (predicted value type), \code{eps} (precision), or \code{category} (category for multi-category outcome models), etc.}
\item{data}{A data frame containing the data at which to evaluate the marginal effects, as in \code{\link[stats]{predict}}. This is optional, but may be required when the underlying modelling function sets \code{model = FALSE}.}
\item{variables}{A character vector with the names of variables for which to compute the marginal effects. The default (\code{NULL}) returns marginal effects for all variables.}
\item{at}{A list of one or more named vectors, specifically values at which to calculate the marginal effects. This is an analogue of Stata's \code{, at()} option. The specified values are fully combined (i.e., a Cartesian product) to find AMEs for all combinations of specified variable values. Rather than a list, this can also be a data frame of combination levels if only a subset of combinations are desired. These are used to modify the value of \code{data} when calculating AMEs across specified values (see \code{\link[prediction]{build_datalist}} for details on use). Note: This does not calculate AMEs for \emph{subgroups} but rather for counterfactual datasets where all observations take the specified values; to obtain subgroup effects, subset \code{data} directly.}
\item{type}{A character string indicating the type of marginal effects to estimate. Mostly relevant for non-linear models, where the reasonable options are \dQuote{response} (the default) or \dQuote{link} (i.e., on the scale of the linear predictor in a GLM).}
\item{vcov}{A matrix containing the variance-covariance matrix for estimated model coefficients, or a function to perform the estimation with \code{model} as its only argument.}
\item{vce}{A character string indicating the type of estimation procedure to use for estimating variances. The default (\dQuote{delta}) uses the delta method. Alternatives are \dQuote{bootstrap}, which uses bootstrap estimation, or \dQuote{simulation}, which averages across simulations drawn from the joint sampling distribution of model coefficients. The latter two are extremely time intensive.}
\item{iterations}{If \code{vce = "bootstrap"}, the number of bootstrap iterations. If \code{vce = "simulation"}, the number of simulated effects to draw. Ignored otherwise.}
\item{unit_ses}{If \code{vce = "delta"}, a logical specifying whether to calculate and return unit-specific marginal effect variances. This calculation is time consuming and the information is often not needed, so this is set to \code{FALSE} by default.}
\item{eps}{A numeric value specifying the \dQuote{step} to use when calculating numerical derivatives.}
\item{level}{A numeric value specifying the confidence level for calculating p-values and confidence intervals.}
\item{by_factor}{A logical specifying whether to order the output by factor (the default, \code{TRUE}).}
}
\value{
A data frame of class \dQuote{margins} containing the contents of \code{data}, predicted values from \code{model} for \code{data}, the standard errors of the predictions, and any estimated marginal effects. If \code{at = NULL} (the default), then the data frame will have a number of rows equal to \code{nrow(data)}. Otherwise, the number of rows will be a multiple thereof based upon the number of combinations of values specified in \code{at}. Columns containing marginal effects are distinguished by their name (prefixed by \code{dydx_}). These columns can be extracted from a \dQuote{margins} object using, for example, \code{marginal_effects(margins(model))}. Columns prefixed by \code{Var_} specify the variances of the \emph{average} marginal effects, whereas (optional) columns prefixed by \code{SE_} contain observation-specific standard errors. A special column, \code{_at_number}, specifies which \code{at} combination a given row corresponds to; the data frame carries an attribute \dQuote{at} that specifies which combination of values this index represents. The \code{summary.margins()} method provides for pretty printing of the results, particularly in cases where \code{at} is specified. A variance-covariance matrix for the average marginal effects is returned as an attribute (though behavior when \code{at} is non-NULL is unspecified).
}
\description{
This package is an R port of Stata's \samp{margins} command, implemented as an S3 generic \code{margins()} for model objects, like those of class \dQuote{lm} and \dQuote{glm}. \code{margins()} is an S3 generic function for building a \dQuote{margins} object from a model object. Methods are currently implemented for several model classes (see Details, below).
margins provides \dQuote{marginal effects} summaries of models. Marginal effects are partial derivatives of the regression equation with respect to each variable in the model for each unit in the data; average marginal effects are simply the mean of these unit-specific partial derivatives over some sample. In ordinary least squares regression with no interactions or higher-order terms, the estimated slope coefficients are marginal effects. In other cases and for generalized linear models, the coefficients are not marginal effects, at least not on the scale of the response variable. margins therefore provides ways of calculating the marginal effects of variables to make these models more interpretable.
The package also provides a low-level function, \code{\link{marginal_effects}}, to estimate those quantities and return a data frame of unit-specific effects and another even lower-level function, \code{\link{dydx}}, to provide variable-specific derivatives from models. Some of the underlying architecture for the package is provided by the low-level function \code{\link[prediction]{prediction}}, which provides a consistent data frame interface to \code{\link[stats]{predict}} for a large number of model types. If a \code{prediction} method exists for a model class, \code{margin} should work for the model class but only those classes listed here have been tested and specifically supported.
}
\details{
Methods for this generic return a \dQuote{margins} object, which is a data frame consisting of the original data, predicted values and standard errors thereof, estimated marginal effects from the model \code{model} (for all variables used in the model, or the subset specified by \code{variables}), along with attributes describing various features of the marginal effects estimates.
The default print method is concise; a more useful \code{summary} method provides additional details.
\code{margins_summary} is sugar that provides a more convenient way of obtaining the nested call: \code{summary(margins(...))}.
Methods are currently implemented for the following object classes:
\itemize{
\item \dQuote{betareg}, see \code{\link[betareg]{betareg}}
\item \dQuote{glm}, see \code{\link[stats]{glm}}, \code{\link[MASS]{glm.nb}}
\item \dQuote{ivreg}, see \code{\link[AER]{ivreg}}
\item \dQuote{lm}, see \code{\link[stats]{lm}}
\item \dQuote{loess}, see \code{\link[stats]{loess}}
\item \dQuote{merMod}, see \code{\link[lme4]{lmer}}, \code{\link[lme4]{glmer}}
\item \dQuote{nnet}, see \code{\link[nnet]{nnet}}
\item \dQuote{polr}, see \code{\link[MASS]{polr}}
\item \dQuote{svyglm}, see \code{\link[survey]{svyglm}}
}
The \code{margins} methods simply construct a list of data frames based upon the values of \code{at} (using \code{\link[prediction]{build_datalist}}), calculate marginal effects for each data frame (via \code{\link{marginal_effects}} and, in turn, \code{\link{dydx}} and \code{\link[prediction]{prediction}}), stack the results together, and provide variance estimates. Alternatively, you can use \code{\link{marginal_effects}} directly to only retrieve a data frame of marginal effects without constructing a \dQuote{margins} object or variance estimates. That can be efficient for plotting, etc., given the time-consuming nature of variance estimation.
See \code{\link{dydx}} for details on estimation of marginal effects.
The choice of \code{vce} may be important. The default variance-covariance estimation procedure (\code{vce = "delta"}) uses the delta method to estimate marginal effect variances. This is the fastest method. When \code{vce = "simulation"}, coefficient estimates are repeatedly drawn from the asymptotic (multivariate normal) distribution of the model coefficients and each draw is used to estimate marginal effects, with the variance based upon the dispersion of those simulated effects. The number of iterations used is given by \code{iterations}. For \code{vce = "bootstrap"}, the bootstrap is used to repeatedly subsample \code{data} and the variance of marginal effects is estimated from the variance of the bootstrap distribution. This method is markedly slower than the other two procedures. Again, \code{iterations} regulates the number of bootstrap subsamples to draw. Some model classes (notably \dQuote{loess}) fix \code{vce ="none"}.
}
\examples{
# basic example using linear model
require("datasets")
x <- lm(mpg ~ cyl * hp + wt, data = head(mtcars))
margins(x)
# obtain unit-specific standard errors
\dontrun{
margins(x, unit_ses = TRUE)
}
# use of 'variables' argument to estimate only some MEs
summary(margins(x, variables = "hp"))
# use of 'at' argument
## modifying original data values
margins(x, at = list(hp = 150))
## AMEs at various data values
margins(x, at = list(hp = c(95, 150), cyl = c(4,6)))
# use of 'data' argument to obtain AMEs for a subset of data
margins(x, data = mtcars[mtcars[["cyl"]] == 4,])
margins(x, data = mtcars[mtcars[["cyl"]] == 6,])
# return discrete differences for continuous terms
## passes 'change' through '...' to dydx()
margins(x, change = "sd")
# summary() method
summary(margins(x, at = list(hp = c(95, 150))))
margins_summary(x, at = list(hp = c(95, 150)))
## control row order of summary() output
summary(margins(x, at = list(hp = c(95, 150))), by_factor = FALSE)
# alternative 'vce' estimation
\dontrun{
# bootstrap
margins(x, vce = "bootstrap", iterations = 100L)
# simulation (ala Clarify/Zelig)
margins(x, vce = "simulation", iterations = 100L)
}
# specifying a custom `vcov` argument
if (require("sandwich")) {
x2 <- lm(Sepal.Length ~ Sepal.Width, data = head(iris))
summary(margins(x2))
## heteroskedasticity-consistent covariance matrix
summary(margins(x2, vcov = vcovHC(x2)))
}
# generalized linear model
x <- glm(am ~ hp, data = head(mtcars), family = binomial)
margins(x, type = "response")
margins(x, type = "link")
# multi-category outcome
if (requireNamespace("nnet")) {
data("iris3", package = "datasets")
ird <- data.frame(rbind(iris3[,,1], iris3[,,2], iris3[,,3]),
species = factor(c(rep("s",50), rep("c", 50), rep("v", 50))))
m <- nnet::nnet(species ~ ., data = ird, size = 2, rang = 0.1,
decay = 5e-4, maxit = 200, trace = FALSE)
margins(m) # default
margins(m, category = "v") # explicit category
}
# using margins_summary() for concise grouped operations
list_data <- split(mtcars, mtcars$gear)
list_mod <- lapply(list_data, function(x) lm(mpg ~ cyl + wt, data = x))
mapply(margins_summary, model = list_mod, data = list_data, SIMPLIFY = FALSE)
}
\references{
Greene, W.H. 2012. Econometric Analysis, 7th Ed. Boston: Pearson.
Stata manual: \code{margins}. Retrieved 2014-12-15 from \url{http://www.stata.com/manuals13/rmargins.pdf}.
}
\seealso{
\code{\link{marginal_effects}}, \code{\link{dydx}}, \code{\link[prediction]{prediction}}
}
\author{
Thomas J. Leeper
}
\keyword{models}
\keyword{package}
|
/man/margins.Rd
|
permissive
|
tzoltak/margins
|
R
| false | true | 15,696 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/margins.R, R/margins_betareg.R,
% R/margins_clm.R, R/margins_default.R, R/margins_glm.R, R/margins_lm.R,
% R/margins_loess.R, R/margins_merMod.R, R/margins_multinom.R,
% R/margins_nnet.R, R/margins_polr.R, R/margins_summary.R, R/margins_svyglm.R
\docType{package}
\name{margins}
\alias{margins}
\alias{margins-package}
\alias{margins.betareg}
\alias{margins.clm}
\alias{margins.default}
\alias{margins.glm}
\alias{margins.lm}
\alias{margins.loess}
\alias{margins.merMod}
\alias{margins.lmerMod}
\alias{margins.multinom}
\alias{margins.nnet}
\alias{margins.polr}
\alias{margins_summary}
\alias{margins.svyglm}
\title{Marginal Effects Estimation}
\usage{
margins(model, ...)
\method{margins}{betareg}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model, phi = FALSE),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{clm}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vce = "none",
eps = 1e-07,
...
)
\method{margins}{default}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{glm}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{lm}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{loess}(
model,
data,
variables = NULL,
at = NULL,
vce = "none",
eps = 1e-07,
...
)
\method{margins}{merMod}(
model,
data = find_data(model),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{lmerMod}(
model,
data = find_data(model),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{multinom}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = NULL,
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
\method{margins}{nnet}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
vce = "none",
eps = 1e-07,
...
)
\method{margins}{polr}(
model,
data = find_data(model, parent.frame()),
variables = NULL,
at = NULL,
type = NULL,
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
margins_summary(model, ..., level = 0.95, by_factor = TRUE)
\method{margins}{svyglm}(
model,
data = find_data(model),
variables = NULL,
at = NULL,
type = c("response", "link"),
vcov = stats::vcov(model),
vce = c("delta", "simulation", "bootstrap", "none"),
iterations = 50L,
unit_ses = FALSE,
eps = 1e-07,
...
)
}
\arguments{
\item{model}{A model object. See Details for supported model classes.}
\item{\dots}{Arguments passed to methods, and onward to \code{\link{dydx}} methods and possibly further to \code{\link[prediction]{prediction}} methods. This can be useful, for example, for setting \code{type} (predicted value type), \code{eps} (precision), or \code{category} (category for multi-category outcome models), etc.}
\item{data}{A data frame containing the data at which to evaluate the marginal effects, as in \code{\link[stats]{predict}}. This is optional, but may be required when the underlying modelling function sets \code{model = FALSE}.}
\item{variables}{A character vector with the names of variables for which to compute the marginal effects. The default (\code{NULL}) returns marginal effects for all variables.}
\item{at}{A list of one or more named vectors, specifically values at which to calculate the marginal effects. This is an analogue of Stata's \code{, at()} option. The specified values are fully combined (i.e., a Cartesian product) to find AMEs for all combinations of specified variable values. Rather than a list, this can also be a data frame of combination levels if only a subset of combinations are desired. These are used to modify the value of \code{data} when calculating AMEs across specified values (see \code{\link[prediction]{build_datalist}} for details on use). Note: This does not calculate AMEs for \emph{subgroups} but rather for counterfactual datasets where all observations take the specified values; to obtain subgroup effects, subset \code{data} directly.}
\item{type}{A character string indicating the type of marginal effects to estimate. Mostly relevant for non-linear models, where the reasonable options are \dQuote{response} (the default) or \dQuote{link} (i.e., on the scale of the linear predictor in a GLM).}
\item{vcov}{A matrix containing the variance-covariance matrix for estimated model coefficients, or a function to perform the estimation with \code{model} as its only argument.}
\item{vce}{A character string indicating the type of estimation procedure to use for estimating variances. The default (\dQuote{delta}) uses the delta method. Alternatives are \dQuote{bootstrap}, which uses bootstrap estimation, or \dQuote{simulation}, which averages across simulations drawn from the joint sampling distribution of model coefficients. The latter two are extremely time intensive.}
\item{iterations}{If \code{vce = "bootstrap"}, the number of bootstrap iterations. If \code{vce = "simulation"}, the number of simulated effects to draw. Ignored otherwise.}
\item{unit_ses}{If \code{vce = "delta"}, a logical specifying whether to calculate and return unit-specific marginal effect variances. This calculation is time consuming and the information is often not needed, so this is set to \code{FALSE} by default.}
\item{eps}{A numeric value specifying the \dQuote{step} to use when calculating numerical derivatives.}
\item{level}{A numeric value specifying the confidence level for calculating p-values and confidence intervals.}
\item{by_factor}{A logical specifying whether to order the output by factor (the default, \code{TRUE}).}
}
\value{
A data frame of class \dQuote{margins} containing the contents of \code{data}, predicted values from \code{model} for \code{data}, the standard errors of the predictions, and any estimated marginal effects. If \code{at = NULL} (the default), then the data frame will have a number of rows equal to \code{nrow(data)}. Otherwise, the number of rows will be a multiple thereof based upon the number of combinations of values specified in \code{at}. Columns containing marginal effects are distinguished by their name (prefixed by \code{dydx_}). These columns can be extracted from a \dQuote{margins} object using, for example, \code{marginal_effects(margins(model))}. Columns prefixed by \code{Var_} specify the variances of the \emph{average} marginal effects, whereas (optional) columns prefixed by \code{SE_} contain observation-specific standard errors. A special column, \code{_at_number}, specifies which \code{at} combination a given row corresponds to; the data frame carries an attribute \dQuote{at} that specifies which combination of values this index represents. The \code{summary.margins()} method provides for pretty printing of the results, particularly in cases where \code{at} is specified. A variance-covariance matrix for the average marginal effects is returned as an attribute (though behavior when \code{at} is non-NULL is unspecified).
}
\description{
This package is an R port of Stata's \samp{margins} command, implemented as an S3 generic \code{margins()} for model objects, like those of class \dQuote{lm} and \dQuote{glm}. \code{margins()} is an S3 generic function for building a \dQuote{margins} object from a model object. Methods are currently implemented for several model classes (see Details, below).
margins provides \dQuote{marginal effects} summaries of models. Marginal effects are partial derivatives of the regression equation with respect to each variable in the model for each unit in the data; average marginal effects are simply the mean of these unit-specific partial derivatives over some sample. In ordinary least squares regression with no interactions or higher-order terms, the estimated slope coefficients are marginal effects. In other cases and for generalized linear models, the coefficients are not marginal effects, at least not on the scale of the response variable. margins therefore provides ways of calculating the marginal effects of variables to make these models more interpretable.
The package also provides a low-level function, \code{\link{marginal_effects}}, to estimate those quantities and return a data frame of unit-specific effects, and another, even lower-level function, \code{\link{dydx}}, to provide variable-specific derivatives from models. Some of the underlying architecture for the package is provided by the low-level function \code{\link[prediction]{prediction}}, which provides a consistent data frame interface to \code{\link[stats]{predict}} for a large number of model types. If a \code{prediction} method exists for a model class, \code{margins} should work for the model class, but only the classes listed here have been tested and are specifically supported.
}
\details{
Methods for this generic return a \dQuote{margins} object, which is a data frame consisting of the original data, predicted values and standard errors thereof, estimated marginal effects from the model \code{model} (for all variables used in the model, or the subset specified by \code{variables}), along with attributes describing various features of the marginal effects estimates.
The default print method is concise; a more useful \code{summary} method provides additional details.
\code{margins_summary} is sugar that provides a more convenient way of obtaining the nested call: \code{summary(margins(...))}.
Methods are currently implemented for the following object classes:
\itemize{
\item \dQuote{betareg}, see \code{\link[betareg]{betareg}}
\item \dQuote{glm}, see \code{\link[stats]{glm}}, \code{\link[MASS]{glm.nb}}
\item \dQuote{ivreg}, see \code{\link[AER]{ivreg}}
\item \dQuote{lm}, see \code{\link[stats]{lm}}
\item \dQuote{loess}, see \code{\link[stats]{loess}}
\item \dQuote{merMod}, see \code{\link[lme4]{lmer}}, \code{\link[lme4]{glmer}}
\item \dQuote{nnet}, see \code{\link[nnet]{nnet}}
\item \dQuote{polr}, see \code{\link[MASS]{polr}}
\item \dQuote{svyglm}, see \code{\link[survey]{svyglm}}
}
The \code{margins} methods simply construct a list of data frames based upon the values of \code{at} (using \code{\link[prediction]{build_datalist}}), calculate marginal effects for each data frame (via \code{\link{marginal_effects}} and, in turn, \code{\link{dydx}} and \code{\link[prediction]{prediction}}), stack the results together, and provide variance estimates. Alternatively, you can use \code{\link{marginal_effects}} directly to retrieve only a data frame of marginal effects without constructing a \dQuote{margins} object or variance estimates. That can be efficient for plotting, etc., given the time-consuming nature of variance estimation.
See \code{\link{dydx}} for details on estimation of marginal effects.
The choice of \code{vce} may be important. The default variance-covariance estimation procedure (\code{vce = "delta"}) uses the delta method to estimate marginal effect variances. This is the fastest method. When \code{vce = "simulation"}, coefficient estimates are repeatedly drawn from the asymptotic (multivariate normal) distribution of the model coefficients and each draw is used to estimate marginal effects, with the variance based upon the dispersion of those simulated effects. The number of iterations used is given by \code{iterations}. For \code{vce = "bootstrap"}, the bootstrap is used to repeatedly subsample \code{data} and the variance of marginal effects is estimated from the variance of the bootstrap distribution. This method is markedly slower than the other two procedures. Again, \code{iterations} regulates the number of bootstrap subsamples to draw. Some model classes (notably \dQuote{loess}) fix \code{vce = "none"}.
}
\examples{
# basic example using linear model
require("datasets")
x <- lm(mpg ~ cyl * hp + wt, data = head(mtcars))
margins(x)
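## extract the unit-level effect columns (as described under Value)
marginal_effects(margins(x))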
# obtain unit-specific standard errors
\dontrun{
margins(x, unit_ses = TRUE)
}
# use of 'variables' argument to estimate only some MEs
summary(margins(x, variables = "hp"))
# use of 'at' argument
## modifying original data values
margins(x, at = list(hp = 150))
## AMEs at various data values
margins(x, at = list(hp = c(95, 150), cyl = c(4,6)))
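## 'at' can also be a data frame giving only the desired combinations
## (a minimal sketch of the behaviour documented for the 'at' argument)
margins(x, at = data.frame(hp = c(95, 150), cyl = c(4, 6)))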
# use of 'data' argument to obtain AMEs for a subset of data
margins(x, data = mtcars[mtcars[["cyl"]] == 4,])
margins(x, data = mtcars[mtcars[["cyl"]] == 6,])
# return discrete differences for continuous terms
## passes 'change' through '...' to dydx()
margins(x, change = "sd")
# summary() method
summary(margins(x, at = list(hp = c(95, 150))))
margins_summary(x, at = list(hp = c(95, 150)))
## control row order of summary() output
summary(margins(x, at = list(hp = c(95, 150))), by_factor = FALSE)
# alternative 'vce' estimation
\dontrun{
# bootstrap
margins(x, vce = "bootstrap", iterations = 100L)
# simulation (ala Clarify/Zelig)
margins(x, vce = "simulation", iterations = 100L)
}
# specifying a custom `vcov` argument
if (require("sandwich")) {
x2 <- lm(Sepal.Length ~ Sepal.Width, data = head(iris))
summary(margins(x2))
## heteroskedasticity-consistent covariance matrix
summary(margins(x2, vcov = vcovHC(x2)))
}
# generalized linear model
x <- glm(am ~ hp, data = head(mtcars), family = binomial)
margins(x, type = "response")
margins(x, type = "link")
# multi-category outcome
if (requireNamespace("nnet")) {
data("iris3", package = "datasets")
ird <- data.frame(rbind(iris3[,,1], iris3[,,2], iris3[,,3]),
species = factor(c(rep("s",50), rep("c", 50), rep("v", 50))))
m <- nnet::nnet(species ~ ., data = ird, size = 2, rang = 0.1,
decay = 5e-4, maxit = 200, trace = FALSE)
margins(m) # default
margins(m, category = "v") # explicit category
}
# using margins_summary() for concise grouped operations
list_data <- split(mtcars, mtcars$gear)
list_mod <- lapply(list_data, function(x) lm(mpg ~ cyl + wt, data = x))
mapply(margins_summary, model = list_mod, data = list_data, SIMPLIFY = FALSE)
}
\references{
Greene, W.H. 2012. Econometric Analysis, 7th Ed. Boston: Pearson.
Stata manual: \code{margins}. Retrieved 2014-12-15 from \url{http://www.stata.com/manuals13/rmargins.pdf}.
}
\seealso{
\code{\link{marginal_effects}}, \code{\link{dydx}}, \code{\link[prediction]{prediction}}
}
\author{
Thomas J. Leeper
}
\keyword{models}
\keyword{package}
|
#-------------------------- Assignment 11.2------------------------
# dataset : https://www.kaggle.com/hugodarwood/epirecipes/data
# Perform the below given activities:
# a. apply K-means clustering to identify similar recipes
# b. apply K-means clustering to identify similar attributes
# c. how many unique recipes do people order often
# d. what are their typical profiles
# -----------------------------------------------------------------
# Import Zip File
getwd()
setwd("E:\\Data Analytics with RET\\Assignment\\Assignment 22")
data <- read.csv(unzip("epi_r.zip"))
View(data)
dim(data)
str(data)
# Preprocessing the data set
colnames(data)
library(maps)
data("world.cities")
world.cities$country.etc <- toupper(world.cities$country.etc)
raw <- colnames(data) # column names stored in a vector
raw <- gsub("[[:punct:]\n]","",raw) # Removing punctuation
raw <- strsplit(raw, " ") # Split data at word boundaries
raw <- toupper(raw) # convert to upper case
length(raw)
# Match on country / cities in world.countries
CountryList_raw <- (lapply(raw, function(x)x[which(x %in% world.cities$country.etc)]))
colnames(data) <- raw
# check for NA
sum(is.na(data))
sort(sapply(data, function(x) sum(is.na(x))))
# impute missing values
library(mice)
imputed = mice(data[,c("CALORIES", "SODIUM", "PROTEIN", "FAT")], method='cart', m=5)
imputed <- mice::complete(imputed)
# replacing NAs with imputed values
data$CALORIES <- imputed$CALORIES
data$PROTEIN <- imputed$PROTEIN
data$SODIUM <- imputed$SODIUM
data$FAT <- imputed$FAT
sum(is.na(data))
# checking for outliers
library(ggplot2)
ggplot(reshape2::melt(data[,c("CALORIES", "SODIUM", "PROTEIN", "FAT")]),
aes(x= variable, value, fill = variable))+
geom_boxplot()+facet_wrap(~variable, scales = 'free_y')
# yes there are outliers
# removing these outliers
df <- outliers::rm.outlier(data[,c("CALORIES", "SODIUM", "PROTEIN", "FAT")], fill = TRUE)
data$CALORIES <- df$CALORIES
data$PROTEIN <- df$PROTEIN
data$SODIUM <- df$SODIUM
data$FAT <- df$FAT
dim(data)
TITLE <- data$TITLE
# Load required libraries
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
#----------------------------------------------------------------------
# a. apply K-means clustering to identify similar recipes
# preparing data set for recipe clustering
set.seed(123)
data_recipe <- data[,-c(1,unlist(which(raw %in% world.cities$country.etc)))]
data_recipe <- scale(data_recipe)
# Compute k-means clustering with k = 5
final_recipe <- kmeans(data_recipe, 5, nstart = 25)
summary(final_recipe)
table(final_recipe$cluster) # cluster for similar recipes
fviz_cluster(final_recipe, data = data_recipe)
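# Note: k = 5 is an assumed choice here; one way to check it (sketch) is the
# within-sum-of-squares ("elbow") diagnostic from factoextra, e.g.:
# fviz_nbclust(data_recipe, kmeans, method = "wss", k.max = 10)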
# ----------------------------------------------------------------------------
# b. apply K-means clustering to identify similar attributes
# preparing data set of attribute (country/region) columns
set.seed(123)
data_att <- data[,c(unlist(which(raw %in% world.cities$country.etc)))]
# Compute k-means clustering with k = 2
final_att <- kmeans(data_att, 2, nstart = 25)
summary(final_att)
table(final_att$cluster) # cluster for similar attributes
fviz_cluster(final_att, data = data_att)
# -----------------------------------------------------------------------------
# c. how many unique recipes do people order often
df$Clusters <- final_recipe$cluster
df$TITLE <- TITLE
by_cluster <- df %>% group_by(Clusters) %>% summarise_all("length") %>% select(Clusters, TITLE)
by_cluster
max(by_cluster$TITLE)
# -----------------------------------------------------------------------------
# d. what are their typical profiles
profile <- (df[,-6] %>% group_by(Clusters) %>% summarise_all("mean") %>%
select("CALORIES", "SODIUM", "PROTEIN", "FAT"))[1,]
profile
# ------------------------------------------------------------------------------
|
/11.2.R
|
no_license
|
shilpa29nair/Data-Analytics-Assignment-11.2
|
R
| false | false | 4,038 |
r
|
|
#' International Exchange Rate Dataset
#'
#' This dataset was considered by West and Harrison (1997) and
#' Lopes and West (2004). The dataset consists of n = 143 monthly
#' first-differences of the exchange rates of 6 international
#' currencies against the British pound, from Jan 1975 to Dec 1986,
#' these currencies are: US dollar (USD), Canadian dollar (CAD),
#' Japanese yen (JPY), French franc (FRF), German (deutsche) mark (DEM),
#' and the Italian lira (ITL).
#'
#' @format A 143 by 7 \code{matrix} of exchange rate time-series. The variables include:
#' \describe{
#' \item{\code{Month_Year}}{Month and year of exchange rate data.}
#' \item{\code{USD}}{US dollar}
#' \item{\code{CAD}}{Canadian dollar}
#' \item{\code{JPY}}{Japanese yen}
#' \item{\code{FRF}}{French franc}
#' \item{\code{DEM}}{German (deutsche) mark}
#' \item{\code{ITL}}{Italian lira}
#' }
#' @source
#'
#' Lopes, H. F., and West, M. (2004). Bayesian model assessment in factor analysis, Statistica Sinica, 14, 41–67.
#'
#' Man, A. & Culpepper, S. A. (2020). A mode-jumping algorithm for Bayesian factor analysis. Journal of the American Statistical Association, doi:10.1080/01621459.2020.1773833.
#'
#' West, M., and Harrison, J. (1997), Bayesian forecasting and dynamic models (2nd ed.), Berlin, Heidelberg: Springer-Verlag.
#'
#' @author Steven Culpepper
#'
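#' @examples
#' # minimal illustration (assumes the package is loaded)
#' data("exchangerate")
#' head(exchangerate)
#'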
"exchangerate"
|
/R/exchangerate-data.R
|
no_license
|
cran/bayesefa
|
R
| false | false | 1,406 |
r
|
|
library(shiny)
library(maps)
library(mapproj)
# server.R
library(leaflet)
library(dplyr)
library(sp)
library(rgdal)
library(R.utils)
library(base)
library(data.table)
library(googleVis)
library(ggplot2)
library(ggmap)
library(RCurl)
library(plotrix)
suppressPackageStartupMessages(library(googleVis));
load("crime15.RData")
load("nynta.RData")
load("pop_nta.RData")
load("nyc_bars_clean.RData")
load("nynta_bar.RData")
nyc_bars<-as.data.frame(nyc_bars)
nyc_bars$Doing.Busi<-as.character(nyc_bars$Doing.Busi)
nyc_bars$Actual.Add<-as.character(nyc_bars$Actual.Add)
shinyServer(function(input, output) {
barsnta<-reactive({filter(nyc_bars,nyc_bars$NTAName==toupper(input$nta))})
a_bar<-reactive({ filter(nyc_bars,nyc_bars$Doing.Busi==toupper(input$bar))})
# subsets the crime data depending on user input in the Shiny app
crimeInput <- reactive({
if (input$min_hour <= input$max_hour) {
crime15 <- filter(crime15, Occurren_3 >= input$min_hour & Occurren_3 <= input$max_hour)
}
else {
crime15 <- filter(crime15, Occurren_3 >= input$min_hour | Occurren_3 <= input$max_hour)
}
crime_density <- filter(crime15, Offense %in% input$offense, Day.of.Wee %in% input$day_of_week) %>%
group_by(NTACode) %>%
summarize(num_crimes = n()) %>%
inner_join(pop_nta, by = c("NTACode" = "NTA.Code")) %>%
mutate(crime_density_per_1K = num_crimes / (Population / 1000)) %>%
arrange(NTACode)
nynta_crime <- subset(nynta, NTACode %in% crime_density$NTACode)
nynta_crime_not <- subset(nynta, !(NTACode %in% crime_density$NTACode))
nynta_crime_not$crime_density_per_1K <- 0 # set NTAs with no crime density to 0
nynta_crime <- nynta_crime[order(nynta_crime$NTACode), ]
nynta_crime <- cbind(nynta_crime, crime_density$crime_density_per_1K)
names(nynta_crime)[8] <- "crime_density_per_1K"
nynta_crime <- rbind(nynta_crime, nynta_crime_not)
nynta_crime <- nynta_crime[-grep("park-cemetery", nynta_crime@data$NTAName), ]
nynta_crime# remove parks, cemeteries, etc.
})
# defines color ramp for crime density
colorInput <- reactive({
colorQuantile(palette = "Reds", domain = unique(crimeInput()@data$crime_density_per_1K), n = 7)
})
# draws the basic map
leafletInput <- function(){
return( leaflet() %>% addProviderTiles("CartoDB.DarkMatter") %>%
setView(lat=40.69196, lng = -73.96483, zoom = 10) )
}
# # adds colors (except if no offenses or days of the week are checked, then just display the basemap)
# choroplethInput <- reactive({
# if (is.null(input$offense) | is.null(input$day_of_week)) return(leafletInput())
# leafletInput() %>% addPolygons(data = crimeInput(), weight = 2, fillOpacity = 0.7,
# color = ~colorInput()(crime_density_per_1K),
# popup = paste(crimeInput()$NTAName,
# round(crimeInput()$crime_density_per_1K, digits = 2), sep = " "))%>%
# addMarkers(lng=barsnta()$Longitude,lat=barsnta()$Latitude,popup=barsnta()$Doing.Busi,clusterOptions = markerClusterOptions())%>%
# addCircleMarkers(lng=bar()$Longitude,lat=bar()$Latitude,popup=bar()$Doing.Busi,radius=2)
#
# })
# renders the map
output$general <- renderLeaflet({
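    # Branch logic: show the bare base map when no offence/day filters are set;
    # otherwise draw the crime-density choropleth and, when available, add
    # cluster markers for bars in the selected NTA and/or a circle marker for
    # the single searched bar.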
if (is.null(input$offense) | is.null(input$day_of_week)) {
leafletInput()} else if ( (nrow(a_bar())==0) && (nrow(barsnta())==0) ) {
leafletInput()%>% addPolygons(data = crimeInput(), weight = 2, fillOpacity = 0.7,
color = ~colorInput()(crime_density_per_1K),
popup = paste(crimeInput()$NTAName,
round(crimeInput()$crime_density_per_1K, digits = 2), sep = " "))
} else if ( nrow(a_bar())==0 ) {
leafletInput()%>% addPolygons(data = crimeInput(), weight = 2, fillOpacity = 0.7,
color = ~colorInput()(crime_density_per_1K),
popup = paste(crimeInput()$NTAName,
round(crimeInput()$crime_density_per_1K, digits = 2), sep = " "))%>%
addMarkers(lng=barsnta()$Longitude,lat=barsnta()$Latitude,popup=barsnta()$Doing.Busi,clusterOptions = markerClusterOptions())
} else if ( nrow(barsnta())==0 ) {
leafletInput()%>% addPolygons(data = crimeInput(), weight = 2, fillOpacity = 0.7,
color = ~colorInput()(crime_density_per_1K),
popup = paste(crimeInput()$NTAName,
round(crimeInput()$crime_density_per_1K, digits = 2), sep = " "))%>%
addCircleMarkers(lng=a_bar()$Longitude,lat=a_bar()$Latitude,popup=a_bar()$Doing.Busi,radius=2)
} else {
leafletInput() %>% addPolygons(data = crimeInput(), weight = 2, fillOpacity = 0.7,
color = ~colorInput()(crime_density_per_1K),
popup = paste(crimeInput()$NTAName,
round(crimeInput()$crime_density_per_1K, digits = 2), sep = " "))%>%
addCircleMarkers(lng=a_bar()$Longitude,lat=a_bar()$Latitude,popup=a_bar()$Doing.Busi,radius=2)%>%
addMarkers(lng=barsnta()$Longitude,lat=barsnta()$Latitude,popup=barsnta()$Doing.Busi,clusterOptions = markerClusterOptions())
}
})
# choropleth map of bar density per 1K, equal quantiles (7 divisions), superimposed onto street basemap
output$density <- renderLeaflet({
pal_bar <- colorQuantile(palette = "Purples", domain = nynta_bar$bar_density_per_1K, n = 7)
leaflet() %>%setView(lat=40.69196, lng = -73.96483, zoom = 10)%>%
addProviderTiles("CartoDB.Positron") %>%
addPolygons(data = nynta_bar, weight = 2, fillOpacity = 0.7, color = ~pal_bar(bar_density_per_1K))
})
#Ranking
output$table1 = renderDataTable({
crimesum
}, options = list(orderClasses = TRUE))
output$map<- renderGvis({
    # print Google map
thebar<-a_bar()
if (nrow(thebar)==0) { return()} else{
thebar$Actual.Add<-paste(thebar$Actual.Add,"New York, NY")
gvisMap(thebar,"Actual.Add", c("Doing.Busi"),
options=list(showTip=TRUE,
showLine=TRUE,
enableScrollWheel=TRUE,
mapType='normal',
useMapTypeControl=TRUE))
}
})
#pieplot and quantile information
output$pieplot <- renderPlot({
thebar<-a_bar()
if (nrow(thebar)==0) { return()} else{
per <- as.matrix(crimesum[crimesum$Bar==thebar$Doing.Busi,][2:8])
lbls <- c("BURGLARY","GRAND_LAR","RAPE","ASSAULT","ROBBERY","GRAND_LAR_MOTOR_VEHICLE","MURDER")
pie3D(per[1,],labels=lbls,main="Pie Chart of This Bar ")
}},height = 500, width = 900 )
output$table2 = renderDataTable({
thebar<-a_bar()
if (nrow(thebar)==0) { return()} else{
quantile<-quan[quan$Bar==thebar$Doing.Busi,]
}})
})
|
/FINAL_project/www/server_rev3.R
|
no_license
|
arnoldlcl/barglary
|
R
| false | false | 7,803 |
r
|
|
library(dplyr)
options(scipen=999)
Math.cbrt <- function(x) {
sign(x) * abs(x)^(1/3)
}
setwd("C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Stock Scraper")
#setwd("/Users/nishaldave/OneDrive - University of Bristol/Dissertation/Volume Forecasting/Data")
df <- read.csv("stocks_updated.csv")
#Set week 53 to week 0 - needed for lags
df$week[df$week == 53] <- 0
#Drop the index values that were generated by Python (not needed anymore)
df$X <- NULL
#Lag the close prices
df$lclose <- dplyr::lag(df$close) # previous row's close (NA in the first row; lags across the pooled panel)
#Generate variable for returns
ave_close <- df %>%
group_by(asset, week) %>%
summarize(ave_close = ave(close)) %>%
ungroup()
ave_close <- unique(ave_close)
ave_close$ave_lclose <- dplyr::lag(ave_close$ave_close) # previous week's average close (NA in the first row)
ave_close <- ave_close[!ave_close$week == 0,]
ave_close$ret <- log(ave_close$ave_close/ave_close$ave_lclose)
#Daily returns to calculate interweek variance
df$ret <- log(df$close/df$lclose)
#Drop the unneeded week
df <- df[!df$week == 0,]
#Generate variable for autocovariance of returns
autocov <- df %>%
group_by(asset, week) %>%
summarize(autocov = cov(close,lclose)) %>%
ungroup()
autocov$autocov <- Math.cbrt((autocov$autocov))
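# the signed cube root (Math.cbrt above) compresses the scale of the
# autocovariance while preserving its sign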
#Generate variable for variance/stdev
var_ret <- df %>%
group_by(asset, week) %>%
summarize(var_ret = var(ret)) %>%
ungroup()
var_ret$stdev <- sqrt(var_ret$var_ret)
#Generate variable for volume
ave_vol <- df %>%
group_by(asset, week) %>%
summarize(ave_vol = ave(close*volume)) %>%
ungroup()
ave_vol <- unique(ave_vol)
#Create a weekly dataset from the daily data
df_weekly <- as.data.frame(cbind(ave_close$asset, ave_close$week, ave_close$ret, var_ret$stdev, ave_vol$ave_vol,autocov$autocov))
df_weekly <- df_weekly %>%
rename(
Asset = V1,
Week = V2,
Avret = V3,
Stdev = V4,
Avvol = V5,
Autocov = V6
)
#Ensure all data is numeric
df_weekly$Week <- as.numeric(df_weekly$Week)
df_weekly$Avret <- as.numeric(df_weekly$Avret)
df_weekly$Stdev <- as.numeric(df_weekly$Stdev)
df_weekly$Avvol <- as.numeric(df_weekly$Avvol)
df_weekly$Autocov <- as.numeric(df_weekly$Autocov)
#Drop unneeded objects
rm(autocov, ave_close, ave_vol, df, var_ret)
#Create the log of trading volume
df_weekly$LogVol <- log(df_weekly$Avvol)
df_weekly$LogVol <- as.numeric(df_weekly$LogVol)
write.csv(df_weekly,"C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Stock Scraper\\stocks_weekly.csv", row.names = FALSE)
###############################################################################
###############################################################################
###############################################################################
#Sentiment Data Analysis
sub <- read.csv('C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\WSB Scraper\\Submissions\\assets3_v2.csv')
#sub2 <- read.csv('C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\WSB Scraper\\Submissions\\assets2_v2.csv')
#sub <- rbind(sub,sub2)
#rm(sub2)
#dates <- read.csv('C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Stock Scraper\\date.csv')
#Submissions
sub$count <- 1
num_sub <- sub %>%
group_by(asset, week) %>%
summarize(Submissions = sum(count)) %>%
ungroup()
num_sub <- num_sub %>%
rename(
Asset = asset,
Week = week,
)
#Sentiments
sentiment_weekly <- sub %>%
group_by(asset, week) %>%
summarize(sentiment_weekly = ave(compound1)) %>%
ungroup()
sentiment_weekly <- unique(sentiment_weekly)
sentiment_weekly <- sentiment_weekly %>%
rename(
Week = week,
Asset = asset,
Ave_Sentiment = sentiment_weekly
)
#Combining weekly stocks with weekly sentiments
Combined_df <- merge(df_weekly, sentiment_weekly, by=c('Week','Asset'), all = T)
Combined_df <- merge(Combined_df,num_sub, by = c('Week','Asset'), all = T)
Combined_df[is.na(Combined_df)] <- 0
Combined_df <- Combined_df[
with(Combined_df, order(Asset,Week)),
]
Combined_df$Submissions <- ifelse(Combined_df$Submissions == 0,1,
Combined_df$Submissions)
Combined_df$Submissions <- log(Combined_df$Submissions)
Ave_Sentiment <- scale(Combined_df$Ave_Sentiment)
#Remove FORD from sample - not the actual ford
Combined_df<-Combined_df[!(Combined_df$Asset=="FORD"),]
#Removing firms which had an IPO during the year and therefore lack full-year trading data
Combined_df<-Combined_df[!(Combined_df$LogVol==0),]
#Drop unneeded objects
rm(num_sub,sentiment_weekly, sub, Ave_Sentiment, df_weekly)
################################################################################
#Combining with market cap data for stratification
mktcap <- read.csv('C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Stock Scraper\\mktcap.csv')
Combined_df <- merge(Combined_df, mktcap, by=c('Asset'), all = T)
#Splitting Data into individual Quantiles
Combined_df$quantile <- ecdf(Combined_df$Market.Cap)(Combined_df$Market.Cap)
topdf <- Combined_df[(Combined_df$quantile > 0.75),]
middf <- Combined_df[(Combined_df$quantile > 0.25) & (Combined_df$quantile <0.75),]
bottomdf <- Combined_df[(Combined_df$quantile < 0.25),]
write.csv(topdf,"C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Regression\\STATAdataTop30.csv", row.names = FALSE)
write.csv(middf,"C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Regression\\STATAdataMid30.csv", row.names = FALSE)
write.csv(bottomdf,"C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Regression\\STATAdataBottom30.csv", row.names = FALSE)
write.csv(Combined_df,"C:\\Users\\Nish\\OneDrive - University of Bristol\\Dissertation\\Volume Forecasting\\Data\\Regression\\STATAdata.csv", row.names = FALSE)
|
/5_DataTransforming.R
|
no_license
|
Nishal97/Masters_Dissertation
|
R
| false | false | 5,983 |
r
|
|
library(ape)
library(picante)
#samp: dataframe with sites as rownames and species as columns; or community matrix with sites as rows and species as columns
#dis: cophenetic matrix of phylogenetic tree
#Function to calculate species focal measures of Nearest Neighbor/Taxon Distance (NTD) and Mean Pairwise Distance (MPD) for each plot.
NTD<-function (samp, dis){
if(is.matrix(samp)){
samp<-as.data.frame(samp)
}else if(!is.data.frame(samp)){
stop("object 'samp' not a matrix or data frame")
}
output<-do.call(rbind.data.frame,lapply(1:dim(samp)[1], function(i){ #For each plot...
sppInSample <- names(samp[i, samp[i, ] > 0]) #select species which share plot
if (length(sppInSample) > 1) { #Can only calculate MPD and NTD focal for plots with 2 or more species
      sample.dis <- dis[sppInSample, sppInSample] #Phylogenetic distances among the co-occurring species
diag(sample.dis) <- NA
do.call(rbind.data.frame,lapply(1:length(sppInSample), function(n){
cbind(
Plot = rownames(samp)[i],
Scientific.Name = sppInSample[n],
          NearestNeighbor = names(which.min(sample.dis[n,])), #first nearest neighbour (ties broken by column order)
NTD = min(sample.dis[n,],na.rm=TRUE),
MPD = mean(sample.dis[n,],na.rm=TRUE)
)
}))
} else{
data.frame(cbind(Plot=rownames(samp)[i],Scientific.Name=NA,NearestNeighbor=NA,NTD=NA,MPD=NA))
}
}))
output[,4:5]<-sapply(4:5, function(x) as.numeric(as.character(output[,x]))) #Force NTD and MPD to numeric
output<-unique(output) #Remove duplicates
names(output$Scientific.Name)<-NULL; names(output$Plot)<-NULL; names(output$NearestNeighbor)<-NULL
return(output)
}
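# Example usage (illustrative sketch, assuming the picante example data):
# data(phylocom)
# ntd_res <- NTD(samp = phylocom$sample, dis = cophenetic(phylocom$phylo))
# head(ntd_res)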
|
/Species Focal MPD and NTD.r
|
no_license
|
lannajin/Rscripts
|
R
| false | false | 1,779 |
r
|
|
# Install required packages (if not already installed)
#install.packages("ISOweek")
### Clear environment, i.e. delete all data and variables
rm(list = ls())
# Clear console
cat("\014")
### Country
country <- "Denmark"
country.code <- "DK"
### Work directory - directory where the R programs are placed
wdir <- "."
### Period: Start and end (both included)
start_year <- 2013
start_week <- 40
end_year <- 2018
end_week <- 50
### Deaths data source
### Must be placed in the subdirectory /data in the work directory
# 1 = A-MOMO complete file, renamed to A-MOMO data.txt
# 0 = you provide a ;-separated file: deaths.txt, containing at least the variable: agegrp, year, week, deaths
A_MOMO <- 1
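# Illustrative layout of a user-supplied deaths.txt (dummy values, columns as listed above):
# agegrp;year;week;deaths
# 1;2013;40;57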
### Weather data source
# 1 = automatic download from EuroMOMO website
# 0 = you provide a ;-separated file: wdata_'country.code'.txt, containing at least the variables: date, pop3, NUTS3, temp and placed in the subdirectory /data in the work directory
WeatherData <- 1
### Population data (TRUE/FALSE)
### A ;-separated file: population.txt, must be placed in the subdirectory /data in the work directory
population <- TRUE
# Restrict IA to only positive (TRUE/FALSE)
IArest <- TRUE
# Number of IA lags
# 0 = no lag, 1 = one week lag, ...(max=9)
IAlags <- 2
# Number of ET lags
# 0 = no lag, 1 = one week lag, ...(max=9)
ETlags <- 2
source("R/Estimation.R")
estimation(
wdir="H:/SFSD/INFEPI/Projekter/AKTIVE/MOMO/AttMOMO/Denmark",
country = "Denmark",
StartWeek = '2015-W27',
EndWeek = '2020-W15',
groups = c('00to04', '05to14', '15to64', '65P', '65to74', '75to84', '85P', 'Total'),
  indicators = c('InflPosPct', 'COVID19PosPct'),
restrict = TRUE,
population = FALSE,
lags <- 2,
ptrend = 0.05,
p26 = 0.05,
p52 = 0.10
)
# Estimation
source(paste0(wdir,"/Estimation_v42.R"), echo = FALSE)
# Output: txt-files
source(paste0(wdir,"/Output_txt_v42.R"))
# Output: graphs IA and ET
source(paste0(wdir,"/Output_IA_ET_v42.R"))
# Output: graphs over calendar time
source(paste0(wdir,"/Output_calendar_v42.R"))
# Output: graphs cumulated IA
source(paste0(wdir,"/Output_cumulated_v42.R"))
|
/inst/scr/master.R
|
no_license
|
MartinMSPedersen/AttMOMO
|
R
| false | false | 2,109 |
r
|
|
# caret v 'dev'
nominalTrainWorkflow <- function(x, y, wts, info, method, ppOpts, ctrl, lev, testing = FALSE, ...)
{
loadNamespace("caret")
ppp <- list(options = ppOpts)
ppp <- c(ppp, ctrl$preProcOptions)
printed <- format(info$loop, digits = 4)
colnames(printed) <- gsub("^\\.", "", colnames(printed))
## For 632 estimator, add an element to the index of zeros to trick it into
## fitting and predicting the full data set.
resampleIndex <- ctrl$index
if (ctrl$method %in% c("boot632"))
{
resampleIndex <- c(list("AllData" = rep(0, nrow(x))), resampleIndex)
ctrl$indexOut <- c(list("AllData" = rep(0, nrow(x))), ctrl$indexOut)
}
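  ## use the parallel foreach operator only when allowed and more than one worker is registered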
`%op%` <- getOper(ctrl$allowParallel && getDoParWorkers() > 1)
pkgs <- c("methods", "caret")
if (!is.null(method$library)) pkgs <- c(pkgs, method$library)
result <- foreach(iter = seq(along = resampleIndex), .combine = "c",
.verbose = FALSE, .packages = pkgs, .errorhandling = "stop") %:%
foreach(parm = 1:nrow(info$loop), .combine = "c", .verbose = FALSE,
.packages = pkgs, .errorhandling = "stop") %op%
{
testing <- FALSE
if (!(length(ctrl$seeds) == 1 && is.na(ctrl$seeds)))
set.seed(ctrl$seeds[[iter]][parm])
loadNamespace("caret")
if (ctrl$verboseIter)
progress(printed[parm, ,drop = FALSE], names(resampleIndex), iter)
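      ## "AllData" is the pseudo-resample added for boot632: fit and predict on the full data set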
if (names(resampleIndex)[iter] != "AllData")
{
modelIndex <- resampleIndex[[iter]]
holdoutIndex <- ctrl$indexOut[[iter]]
} else {
modelIndex <- 1:nrow(x)
holdoutIndex <- modelIndex
}
if (testing) cat("pre-model\n")
if (is.null(info$submodels[[parm]]) || nrow(info$submodels[[parm]]) > 0) {
submod <- info$submodels[[parm]]
} else submod <- NULL
mod <- try(
createModel(x = x[modelIndex,,drop = FALSE],
y = y[modelIndex],
wts = wts[modelIndex],
method = method,
tuneValue = info$loop[parm,,drop = FALSE],
obsLevels = lev,
pp = ppp,
classProbs = ctrl$classProbs,
sampling = ctrl$sampling,
...),
silent = TRUE)
if (class(mod)[1] != "try-error")
{
predicted <- try(
predictionFunction(method = method,
modelFit = mod$fit,
newdata = x[holdoutIndex,, drop = FALSE],
preProc = mod$preProc,
param = submod),
silent = TRUE)
if (class(predicted)[1] == "try-error")
{
wrn <- paste(colnames(printed[parm,,drop = FALSE]),
printed[parm,,drop = FALSE],
sep = "=",
collapse = ", ")
wrn <- paste("predictions failed for ", names(resampleIndex)[iter],
": ", wrn, " ", as.character(predicted), sep = "")
if (ctrl$verboseIter) cat(wrn, "\n")
warning(wrn)
rm(wrn)
          ## set up dummy results with NA values for all predictions
nPred <- length(holdoutIndex)
if (!is.null(lev))
{
predicted <- rep("", nPred)
predicted[seq(along = predicted)] <- NA
} else {
predicted <- rep(NA, nPred)
}
if (!is.null(submod))
{
tmp <- predicted
predicted <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1)
for (i in seq(along = predicted)) predicted[[i]] <- tmp
rm(tmp)
}
}
} else {
wrn <- paste(colnames(printed[parm,,drop = FALSE]),
printed[parm,,drop = FALSE],
sep = "=",
collapse = ", ")
wrn <- paste("model fit failed for ", names(resampleIndex)[iter],
": ", wrn, " ", as.character(mod), sep = "")
if (ctrl$verboseIter) cat(wrn, "\n")
warning(wrn)
rm(wrn)
        ## set up dummy results with NA values for all predictions
nPred <- length(holdoutIndex)
if (!is.null(lev))
{
predicted <- rep("", nPred)
predicted[seq(along = predicted)] <- NA
} else {
predicted <- rep(NA, nPred)
}
if (!is.null(submod))
{
tmp <- predicted
predicted <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1)
for (i in seq(along = predicted)) predicted[[i]] <- tmp
rm(tmp)
}
}
if (testing) print(head(predicted))
if (ctrl$classProbs)
{
if (class(mod)[1] != "try-error")
{
probValues <- probFunction(method = method,
modelFit = mod$fit,
newdata = x[holdoutIndex,, drop = FALSE],
preProc = mod$preProc,
param = submod)
} else {
probValues <- as.data.frame(matrix(NA, nrow = nPred, ncol = length(lev)))
colnames(probValues) <- lev
if (!is.null(submod))
{
tmp <- probValues
probValues <- vector(mode = "list", length = nrow(info$submodels[[parm]]) + 1)
for (i in seq(along = probValues)) probValues[[i]] <- tmp
rm(tmp)
}
}
if (testing) print(head(probValues))
}
##################################
if (is.numeric(y)) {
if (is.logical(ctrl$predictionBounds) && any(ctrl$predictionBounds)) {
if (is.list(predicted)) {
predicted <- lapply(predicted, trimPredictions,
mod_type = "Regression",
bounds = ctrl$predictionBounds,
limits = ctrl$yLimits)
} else {
predicted <- trimPredictions(mod_type = "Regression",
bounds = ctrl$predictionBounds,
limits = ctrl$yLimit,
pred = predicted)
}
} else {
if (is.numeric(ctrl$predictionBounds) && any(!is.na(ctrl$predictionBounds))) {
if (is.list(predicted)) {
predicted <- lapply(predicted, trimPredictions,
mod_type = "Regression",
bounds = ctrl$predictionBounds,
limits = ctrl$yLimits)
} else {
predicted <- trimPredictions(mod_type = "Regression",
bounds = ctrl$predictionBounds,
                                                             limits = ctrl$yLimits,
pred = predicted)
}
}
}
}
if (!is.null(submod))
{
## merge the fixed and seq parameter values together
allParam <- expandParameters(info$loop[parm,,drop = FALSE], info$submodels[[parm]])
allParam <- allParam[complete.cases(allParam),, drop = FALSE]
                       ## collate the predictions across all the sub-models
predicted <- lapply(predicted,
function(x, y, wts, lv, rows) {
if (!is.factor(x) & is.character(x))
x <- factor(as.character(x), levels = lv)
out <- data.frame(pred = x, obs = y, stringsAsFactors = FALSE)
if (!is.null(wts)) out$weights <- wts
out$rowIndex <- rows
out
},
y = y[holdoutIndex],
wts = wts[holdoutIndex],
lv = lev,
rows = holdoutIndex)
if (testing) print(head(predicted))
## same for the class probabilities
if (ctrl$classProbs)
{
for (k in seq(along = predicted))
predicted[[k]] <- cbind(predicted[[k]], probValues[[k]])
}
if (ctrl$savePredictions)
{
tmpPred <- predicted
for (modIndex in seq(along = tmpPred))
{
tmpPred[[modIndex]]$rowIndex <- holdoutIndex
tmpPred[[modIndex]] <- merge(tmpPred[[modIndex]],
allParam[modIndex,,drop = FALSE],
all = TRUE)
}
tmpPred <- rbind.fill(tmpPred)
tmpPred$Resample <- names(resampleIndex)[iter]
} else tmpPred <- NULL
## get the performance for this resample for each sub-model
thisResample <- lapply(predicted,
ctrl$summaryFunction,
lev = lev,
model = method)
if (testing) print(head(thisResample))
## for classification, add the cell counts
if (length(lev) > 1)
{
cells <- lapply(predicted,
function(x) flatTable(x$pred, x$obs))
for (ind in seq(along = cells))
thisResample[[ind]] <- c(thisResample[[ind]], cells[[ind]])
}
thisResample <- do.call("rbind", thisResample)
thisResample <- cbind(allParam, thisResample)
} else {
if (is.factor(y))
predicted <- factor(as.character(predicted), levels = lev)
tmp <- data.frame(pred = predicted,
obs = y[holdoutIndex],
stringsAsFactors = FALSE)
## Sometimes the code above does not coerce the first
                         ## column to be named "pred" so force it
names(tmp)[1] <- "pred"
if (!is.null(wts)) tmp$weights <- wts[holdoutIndex]
if (ctrl$classProbs) tmp <- cbind(tmp, probValues)
tmp$rowIndex <- holdoutIndex
if (ctrl$savePredictions)
{
tmpPred <- tmp
tmpPred$rowIndex <- holdoutIndex
tmpPred <- merge(tmpPred, info$loop[parm,,drop = FALSE], all = TRUE)
tmpPred$Resample <- names(resampleIndex)[iter]
} else tmpPred <- NULL
##################################
thisResample <- ctrl$summaryFunction(tmp,
lev = lev,
model = method)
## if classification, get the confusion matrix
if (length(lev) > 1)
thisResample <- c(thisResample, flatTable(tmp$pred, tmp$obs))
thisResample <- as.data.frame(t(thisResample))
thisResample <- cbind(thisResample, info$loop[parm,,drop = FALSE])
}
thisResample$Resample <- names(resampleIndex)[iter]
if (ctrl$verboseIter)
progress(printed[parm,,drop = FALSE], names(resampleIndex), iter, FALSE)
list(resamples = thisResample, pred = tmpPred)
}
resamples <- rbind.fill(result[names(result) == "resamples"])
pred <- if (ctrl$savePredictions) rbind.fill(result[names(result) == "pred"]) else NULL
if (ctrl$method %in% c("boot632"))
{
perfNames <- names(ctrl$summaryFunction(data.frame(obs = y, pred = sample(y), weights = 1),
lev = lev, model = method))
apparent <- subset(resamples, Resample == "AllData")
apparent <- apparent[, !grepl("^\\.cell|Resample", colnames(apparent)), drop = FALSE]
names(apparent)[which(names(apparent) %in% perfNames)] <-
paste(names(apparent)[which(names(apparent) %in% perfNames)], "Apparent", sep = "")
names(apparent) <- gsub("^\\.", "", names(apparent))
if (any(!complete.cases(apparent[, !grepl("^cell|Resample", colnames(apparent)), drop = FALSE])))
{
warning("There were missing values in the apparent performance measures.")
}
resamples <- subset(resamples, Resample != "AllData")
}
names(resamples) <- gsub("^\\.", "", names(resamples))
if (any(!complete.cases(resamples[, !grepl("^cell|Resample", colnames(resamples)), drop = FALSE])))
{
warning("There were missing values in resampled performance measures.")
}
out <- ddply(resamples[, !grepl("^cell|Resample", colnames(resamples)), drop = FALSE],
## TODO check this for seq models
gsub("^\\.", "", colnames(info$loop)),
MeanSD,
exclude = gsub("^\\.", "", colnames(info$loop)))
if (ctrl$method %in% c("boot632"))
{
out <- merge(out, apparent)
for (p in seq(along = perfNames))
{
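                      ## 0.632 bootstrap estimate: weight the resampled performance by
                      ## const = 1 - exp(-1) ~= 0.632 and the apparent (re-substitution)
                      ## performance by 1 - const ~= 0.368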
const <- 1 - exp(-1)
out[, perfNames[p]] <- (const * out[, perfNames[p]]) +
((1 - const) * out[, paste(perfNames[p], "Apparent", sep = "")])
}
}
list(performance = out, resamples = resamples,
predictions = if (ctrl$savePredictions) pred else NULL)
}
|
/caretdev_nominalTrainWorkflow.R
|
no_license
|
bdanalytics/ebay-ipads2
|
R
| false | false | 15,330 |
r
|
|
#' bc.R
#'
#' Bias correction techniques
#'
#' $Revision: 1.2 $ $Date: 2016/09/15 02:21:15 $
bc <- function(fit, ...) {
UseMethod("bc")
}
bc.ppm <- function(fit, ..., nfine=256) {
stopifnot(is.ppm(fit))
#
theta0 <- coef(fit)
nc <- length(theta0)
#
X <- data.ppm(fit)
Z <- is.data(quad.ppm(fit))
# evaluate sufficient statistic at data points
sufX <- model.matrix(fit)[Z, ]
if(ncol(sufX) != nc)
stop("Internal error: model.matrix does not match coef(model)")
# predict on fine grid
finemask <- as.mask(as.owin(fit), dimyx=nfine)
lamF <- predict(fit, type="cif", locations=finemask)
sufF <- model.images(fit, W=finemask)
if(length(sufF) != nc)
stop("Internal error: model.images does not match coef(model)")
# edge correction
if(fit$correction == "border" && ((rbord <- fit$rbord) > 0)) {
b <- bdist.pixels(finemask)
bX <- bdist.points(X)
excludeU <- eval.im(b < rbord)
retainX <- (bX >= rbord)
sufX <- sufX[retainX, , drop=FALSE]
} else {
excludeU <- FALSE
}
# compute fine approximation to score
scoreX <- colSums(sufX)
scoreW <- numeric(nc)
for(k in seq_len(nc)) {
S <- sufF[[k]]
# infinite values of S may occur and correspond to zero cif
Slam <- eval.im(ifelse(is.infinite(S) | excludeU, 0, S * lamF))
scoreW[k] <- integral.im(Slam)
}
score <- scoreX - scoreW
# Newton-Raphson
Iinv <- vcov(fit, hessian=TRUE)
theta <- theta0 + Iinv %*% score
theta <- theta[ , 1, drop=TRUE]
#
# return(list(theta0=theta0, theta=theta))
return(theta)
}
# Richardson extrapolation (generic)
rex <- function(x, r=2, k=1, recursive=FALSE) {
# x should be a matrix
# whose columns are successive estimates of a parameter vector
# obtained using "grid step sizes" t, t/r, t/r^2, ...
# Estimate from step size t is assumed to converge at rate t^k
if(!is.matrix(x)) x <- matrix(x, nrow=1)
if(ncol(x) <= 1) return(x)
rk <- r^k
y <- (rk * x[, -1, drop=FALSE] - x[, -ncol(x), drop=FALSE])/(rk - 1)
if(recursive)
y <- rex(y, r=r, k=k+1, recursive=TRUE)
return(y)
}
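# Illustrative use (hypothetical numbers): estimates 1.100, 1.050, 1.025 obtained
# with step sizes t, t/2, t/4 and a first-order (k = 1) error term extrapolate
# to the limit 1.000:
#   rex(matrix(c(1.100, 1.050, 1.025), nrow = 1), r = 2, k = 1)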
|
/R/bc.R
|
no_license
|
mirca/spatstat
|
R
| false | false | 2,089 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_manip.R
\name{f_manip_get_variables_from_formula}
\alias{f_manip_get_variables_from_formula}
\title{get variables from formula}
\usage{
f_manip_get_variables_from_formula(formula)
}
\arguments{
\item{formula}{formula}
}
\value{
character vector
}
\examples{
f = foo~bar1 + bar2
vars = f_manip_get_variables_from_formula(f)
response_var = f_manip_get_response_variable_from_formula(f)
}
\seealso{
\code{\link{f_manip_get_response_variable_from_formula}}
}
|
/man/f_manip_get_variables_from_formula.Rd
|
no_license
|
adambouras/oetteR
|
R
| false | true | 537 |
rd
|
|
originaldata <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", check.names=F, stringsAsFactors=F)
originaldata$Date <- as.Date(originaldata$Date, format="%d/%m/%Y")
data <- subset(originaldata, Date == "2007-02-01" | Date == "2007-02-02")
datetime <- paste(data$Date, data$Time)
data$Datetime <- as.POSIXct(datetime)
par(mfrow=c(1,1))
plot(data$Sub_metering_1~data$Datetime, type="l",ylab="Energy sub metering", xlab="")
lines(data$Sub_metering_2~data$Datetime, col='red')
lines(data$Sub_metering_3~data$Datetime, col='blue')
legend('topright',col=c('black','red','blue'),legend = c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),lty = 1,lwd=2)
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/plot3.R
|
no_license
|
xuejieqi/ExData_Plotting1
|
R
| false | false | 752 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulated_data.R
\name{weibull_data}
\alias{weibull_data}
\title{Simulate Current Status Data from Weibull Failure Time and Uniform Censoring Times}
\usage{
weibull_data(n, tau = 1, test = FALSE)
}
\arguments{
\item{n}{A positive integer (sample size)}
\item{tau}{A non-negative value (defining the support of T and C). Default value is 1.}
\item{test}{Logical indicating whether the function should additionally return the true (uncensored) failure time, together with the Bayes risk, for testing. The default is FALSE, in which case only current status data is returned (for training).}
}
\value{
A list containing: (1) the simulated data in the format of a data frame with the columns Z, delta, C, and possibly also T (for testing only), and (2) the Bayes risk which can only be evaluated when test=TRUE (otherwise it is NA).
}
\description{
\code{weibull_data} returns a list that includes a data frame of size [n,3]. The first column corresponds to the univariate covariate Z, the second column is the current status indicator, and the third column contains the censoring times. Note that when test=TRUE we get an extra column with the true uncensored failure time T.
}
\examples{
d <- weibull_data(n=100)
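# with test = TRUE the true failure times and the Bayes risk are also returned
d_test <- weibull_data(n = 100, tau = 1, test = TRUE)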
}
|
/man/weibull_data.Rd
|
no_license
|
Yael-Travis-Lumer/KMforCSD
|
R
| false | true | 1,281 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/startWSServer.R
\name{startWSServer}
\alias{startWSServer}
\title{start a websocket server}
\usage{
startWSServer(port)
}
\arguments{
\item{port}{port to use, if not the default}
}
\value{
a connection
}
\description{
This function establishes a two-way connection between R and the visualizer
using a websocket
}
\examples{
# conn <- startWSServer()
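# conn <- startWSServer(port = 8080)  # hypothetical non-default port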
}
|
/man/startWSServer.Rd
|
no_license
|
jwist/hastaLaVista
|
R
| false | true | 419 |
rd
|
|
testlist <- list(x = c(1814571619L, 1819243365L, 1466527309L, 1634736770L, -702912170L, 19561L, 1853106408L, NA, 1683972975L, 1836085861L, 1935434351L, 1965631587L, 1869509452L, -180L, -1L, -1L, -1L, -1L, -1L, -1L, -1L, 65535L, 439353125L, -1763287L, -11733428L, 506033012L, 704643071L, -42L, 1377447721L, 1277820927L, -15193601L, -687922689L, -721944577L, -1616928865L, -1616928801L, -697997979L, 1566926181L, 1701143909L, 16777215L, -702873601L, -687920820L, 701890560L, 2042367L, -65460L, 825636406L), y = c(-177642121L, NA, 1481637987L, 1869486121L, -11733428L, 506033012L, 692854313L, -42L, 1377447721L, 1277820927L, -15193601L, -687922689L, -721944577L, -1616928865L, -1616928801L, -704523008L, -702873601L, -687920820L, 701890560L, 2042367L, -65460L, 825636406L, -11776769L, -16777216L, 255L, -1L, 50397184L, 0L, 169748991L, -1L, -2745809L, -436207617L, -1L, -2745809L, -14146304L, 10874L, 1937007711L, 1669361008L, 1919251292L, 1651471657L, 677605230L, 1946156800L, -1L, -10497L, -524321L, -1L, -1L, 0L, 63313L, -11512833L, -687920641L, 704598348L ))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962892-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 1,147 |
r
|
|
data_full <- read.csv("/Users/rolandpadilla/topten/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage", xlab="datetime")
plot(Sub_metering_1~Datetime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global_reactive_power",xlab="datetime")
})
## Saving to file
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
/plot1.R
|
no_license
|
rolandpadilla/Exploratory-Data-Analysis
|
R
| false | false | 1,265 |
r
|
|
\name{read}
\alias{read}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Simplifies reading csv files
}
\description{
This function makes reading csv files require less typing; it also preserves spaces in the column names
}
\usage{
read(filename)
}
\arguments{
\item{filename}{
filename is a character string giving the name of a csv file (without the ".csv" extension) in the working directory
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Anthony Elowsky
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## The function is currently defined as
read <- function(filename)
{
read.csv(paste0(filename, ".csv"), check.names = F)
}
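## Hypothetical usage (expects "mydata.csv" in the working directory):
## read("mydata")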
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ reading }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/read.Rd
|
no_license
|
tonyelowsky/Waterreporttable
|
R
| false | false | 1,280 |
rd
|
|
####################################################################################################
## Script to run the AFAM App
## This is a simple script to run the AFAM App on your computer
## Please contact Gavin McDonald with any questions - gmcdonald@bren.ucsb.edu
## Originally written at University of California, Santa Barbara on March 24, 2017
####################################################################################################
#' Run the AFAM App
#'
#' @return
#' @export
#'
#' @examples
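#' # runAFAM()   # launches the Shiny app; not run automatically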
runAFAM <- function() {
appDir <- system.file("shiny-examples", "afamApp", package = "afamAppPackage")
if (appDir == "") {
stop("Could not find app. Try re-installing `afamAppPackage`.", call. = FALSE)
}
shiny::runApp(appDir, display.mode = "normal")
}
|
/R/runAFAM.R
|
no_license
|
SFG-UCSB/afamAppPackage
|
R
| false | false | 776 |
r
|
|
plot2 = function()
{
  library(data.table) # fread() is provided by data.table
  dtfull = fread("household_power_consumption.txt")
dt = dtfull[dtfull$Date=="1/2/2007" | dtfull$Date=="2/2/2007",]
dt$Date = as.Date(dt$Date, "%d/%m/%Y")
datetime = strptime(paste(dt$Date, dt$Time), format="%Y-%m-%d %H:%M:%S")
png("plot2.png", width=480, height=480, bg="transparent")
plot(datetime,
dt$Global_active_power,
type="l",
xlab="",
ylab="Global Active Power (kilowatts)")
dev.off()
}
|
/plot2.R
|
no_license
|
ap-osd/ExData_Plotting1
|
R
| false | false | 499 |
r
|
|
evaluatePR <- function(x,betas) {
  # Evaluates the polynomial regression expression given the betas and their labels.
x=unname(as.matrix(x)) #removes the colnames and rownames of the input variables when using a dataframe.
response=betas[1] # this gets the intercept beta_0
for (i in 2:length(betas)){
#here the label is transformed into a vector of the needed length with the index of each variable
variable_indexes=as.integer(unlist(strsplit(colnames(betas)[i], ",")))
    # Initialize the product as 1 and loop over all the indexes l_j to obtain the product of all the needed variables x
product=1
for(j in 1:length(variable_indexes)){
product=product*x[variable_indexes[j]]
}
#We add to the response the product of those variables with their associated beta
response=response+betas[i]*product
}
return(response)
}
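# Illustrative (hypothetical) usage: one row of betas whose column names encode the
# variable indices, e.g. an intercept, a term in x[1] and a term in x[1]*x[2]:
#   betas <- matrix(c(0.5, 2, -1), nrow = 1)
#   colnames(betas) <- c("(Intercept)", "1", "1,2")
#   evaluatePR(c(3, 4), betas)   # 0.5 + 2*3 + (-1)*3*4 = -5.5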
|
/functions/evaluatePR.R
|
permissive
|
IBiDat/nntopr
|
R
| false | false | 878 |
r
|
}
|
# tocID <- "FND-STA-Information_theory.R"
#
# ==============================================================================
#
# Purpose: A Bioinformatics Course:
# R code accompanying the FND-STA-Information_theory unit.
#
# Version: 0.2.1
#
# Date: 2017 - 2021
# Author: Boris Steipe (boris.steipe@utoronto.ca)
#
# Versions:
# 0.2.1 Maintenance
# 0.2 Under development
# 0.1 First code copied from 2016 material.
#
#
# TODO:
#
#
# == DO NOT SIMPLY source() THIS FILE! =======================================
#
# If there are portions you don't understand, use R's help system, Google for an
# answer, or ask your instructor. Don't continue if you don't understand what's
# going on. That's not how it works ...
#
# ==============================================================================
#TOC> ==========================================================================
#TOC>
#TOC> Section Title Line
#TOC> --------------------------------------
#TOC> 1 ___Section___ 39
#TOC>
#TOC> ==========================================================================
# = 1 ___Section___ =======================================================
# What level of information is "significant"
# Assume the background distribution is the database frequencies of
# amino acids:
AAref <- numeric() # Uniprot frequencies October 2017, slightly adjusted to
# sum to 1.0
AAref["A"] <- 0.0904
AAref["C"] <- 0.0123
AAref["D"] <- 0.0545
AAref["E"] <- 0.0617
AAref["F"] <- 0.0394
AAref["G"] <- 0.0724
AAref["H"] <- 0.0221
AAref["I"] <- 0.0573
AAref["K"] <- 0.0504
AAref["L"] <- 0.0986
AAref["M"] <- 0.0240
AAref["N"] <- 0.0392
AAref["P"] <- 0.0486
AAref["Q"] <- 0.0381
AAref["R"] <- 0.0570
AAref["S"] <- 0.0673
AAref["T"] <- 0.0558
AAref["V"] <- 0.0686
AAref["W"] <- 0.0129
AAref["Y"] <- 0.0294
sum(AAref)
# Function to calculate Shannon entropy
H <- function(pmf) {
# Calculate Shannon entropy
# Parameters:
# pmf (numeric) probability mass function: a vector of states and
# associated probabilities. Each element of
# pmf must be in (0, 1] and sum(pmf) must be 1.
# Value:
# Shannon entropy in bits.
# Examples:
# H(c(A=0.25, C=0.25, G=0.25, T=0.25)) # 2 bits entropy in a random
# # nucleotide sequence
# H(1) # If all elements are the same, entropy is zero
#
if (any(pmf <= 0 | pmf > 1) || isFALSE(all.equal(1.0, sum(pmf)))) {
stop("Input is not a discrete probability distribution.")
}
H <- -sum(pmf * (log(pmf) / log(2)))
return(H)
}
# Why use all.equal()? Exact comparisons with floating point numbers are
# brittle. Consider for example:
1/6 + 1/6 + 1/6 + 1/6 + 1/6 + 1/6 == 1
print(1/6 + 1/6 + 1/6 + 1/6 + 1/6 + 1/6, digits = 22) # 0.9999999999999998889777
# all.equal() tests for _near_ equality with tolerance of ~ 1.5e-8
# Entropy of the database frequencies (in bits):
(Href <- H(AAref))
# for comparison: entropy if all amino acids are equiprobable
H(rep(0.05, 20))
# Set up a simulation to estimate the distribution of Information values
# from random sequences drawn from AAref. This is the distribution for the
# statistical null hypothesis:
nObs <- 15                      # number of observations (e.g. aligned sequences)
# nObs <- 80
nTrials <- 10000 # number of trials
IObs <- numeric(nTrials) # vector to store Information in each trial
simCounts <- numeric(20) # vector to tabulate our information ...
names(simCounts) <- names(AAref)# ... with the names of AAref
for (i in 1:nTrials) { # simulate ...
# sample AAref letters, nObs times, with the probabilities of AAref:
AAobs <- sample(names(AAref), size = nObs, prob = AAref, replace = TRUE)
x <- table(AAobs) # table simulated observations
simCounts[1:20] <- rep(0, length(simCounts)) # initialize simCounts to 0
simCounts[names(x)] <- x # overwrite with observed counts
simCounts <- simCounts + 0.5 # add Jeffreys' pseudocounts
Hobs <- H(simCounts/sum(simCounts)) # counts to frequency, calc. H
IObs[i] <- Href - Hobs # store information
}
# evaluate
hist(IObs, col = "#C9F4E3", xlim = c(-0.2, 1.0), breaks = 25)
abline(v = quantile(IObs, c(0.05, 0.95)), col = "#AA00CC")
# The purple lines are drawn at the 5% quantiles of the Iobs distributions -
# i.e. an actual observation that lies outside the purple lines is deemed
# "significant"(1)(2). Of course, this is only true to the degree that the
# database frequencies are a valid model for the null-hypothesis on the
# sequence position we are considering here.
# (1) If we use 5% quantiles, this means a value is significantly larger
# than expected, and we ignore cases when the value is < 0; if we
# consider both smaller and larger values, we need to use 2.5% quantiles,
# since 5% of all observations lie outside the 0.025 and 0.975
# quantiles.
#
# (2) For an actual observation of counts, we calculate its observed
# _empirical_p_Value_ as (nCounts + 1)/(nTotal + 1).
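# For example, the empirical p-value of a hypothetical observed information
# value of 0.4 bits, relative to the simulated null distribution IObs, is:
I0 <- 0.4                                    # hypothetical observed information
(sum(IObs >= I0) + 1) / (length(IObs) + 1)   # empirical p-value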
# You can probably now appreciate that information is a bit of a shortcut for
# biological sequences, and does not really take the different inherent
# frequencies based on the character of the amino acids into account. For
# example, L is the most frequent and C is the least frequent, but if we have an
# alignment of 1000 sequences and we see that the frequencies for L and C are
# swapped, that would be _very_ surprising - nevertheless, the information would
# be 0. In order to take that into account, we should actually compute
# Kullback-Leibler divergences.
# Swap C and L frequencies
p <- AAref
q <- AAref
q["L"] <- AAref["C"]
q["C"] <- AAref["L"]
H(p)
H(q)
KLdiv <- function(p, q) {
# p and q are two pmfs of discrete probability distributions
# with the same outcomes, which are nowhere 0.
# Value: Kullback-Leibler divergence sum(p * log( p / q))).
if (length(p) != length(q)) {
stop("PANIC: input vector lengths differ!")
}
if (any(c((p == 0), (q == 0)))) {
stop("PANIC: 0's found in input vectors!")
}
return(sum(p * log( p / q )))
}
KLdiv(p, p)
KLdiv(p, q)
nObs <- 15                      # number of observations (e.g. aligned sequences)
# nObs <- 80
nTrials <- 10000 # number of trials
KLdivObs <- numeric(nTrials) # vector to store Information in each trial
simCounts <- numeric(20) # vector to tabulate our information ...
names(simCounts) <- names(AAref)# ... with the names of AAref
for (i in 1:nTrials) { # simulate ...
# sample AAref letters, nObs times, with the probabilities of AAref:
AAobs <- sample(names(AAref), size = nObs, prob = AAref, replace = TRUE)
x <- table(AAobs) # table simulated observations
simCounts[1:20] <- rep(0, length(simCounts)) # initialize simCounts to 0
simCounts[names(x)] <- x # overwrite with observed counts
simCounts <- simCounts + 0.5 # add Jeffreys' pseudocounts
simCounts <- simCounts/sum(simCounts) # counts to frequency
KLdivObs[i] <- sum(simCounts * log( simCounts / AAref )) # store KLdiv
}
# evaluate
hist(KLdivObs, col = "#C9F4E3", breaks = 25)
abline(v = quantile(KLdivObs, c(0.05, 0.95)), col = "#AA00CC")
quantile(KLdivObs, 0.992)
# Running the simulation with KL does not give a fundamentally
# different behaviour - since we are just randomly sampling. But KL would be
# more sensitive in case there is biological selection, where the sampling is no
# longer random. If I run the same simulation, with nObs <- 80 but calculating
# KLdiv instead of information, I get a 5% quantile at 0.15 - but the C/L
# frequency swap gives me a KL divergence of 0.18 - this is significant at p =
# 0.008 - (remember, Information is 0 in this case). So that's actually quite a
# nice addition to the toolbox.
# [END]
|
/FND-STA-Information_theory.R
|
no_license
|
hyginn/ABC-units
|
R
| false | false | 8,039 |
r
|
|
# Read in CSV-Database from Lab.js
# The current R-Script is work in Progress.
# It may contain errors and is far from finished
# Questions and Comments should be directed to julian.keil@gmail.com
# 0. Load packages
library("data.table")
library("ggplot2")
# 1. Set Working Directory
setwd('~/Documents/Arbeit/lab_js/Experiments/MultiRace/Data/Offline')
# 2. Get the list of files in the Directory
indat <- list.files(pattern='.csv')
Meta <- NULL
alldat <- NULL
# 3. Loop through files to read in data
for (v in 1:length(indat)) {
data <- read.csv(indat[v])
# 3.1. Get the meta data
Meta$ts[v] = levels(data$timestamp)[1] # Timestamp
tmp<-c(levels(data$f_vname)[2],levels(data$f_zname)[2])
Meta$name[v] <- paste(tmp,collapse="") # Initials
Meta$gend[v] <- levels(data$Geschlecht)[2] # Gender
Meta$birth[v] <- levels(data$f_gebdat)[2] # Birthday
Meta$sick[v] <- levels(data$erkrankt)[2] # Neurological diseases
Meta$drug[v] <- levels(data$drugs)[2] # Taking drugs
Meta$sight[v] <- levels(data$Sehschwäche)[2] # Visual impairments
Meta$hear[v] <- levels(data$Hörstörung)[2] # Auditory impairments
Meta$fb_v[v] <- levels(data$visual)[2] # Were all visual stimuli seen
Meta$fb_a[v] <- levels(data$auditory)[2] # Were all auditory stimuli heard
Meta$fb_del[v] <- levels(data$Verzögerung)[2] # Was there a delay between stimuli
if (length(levels(data$feedback))==0) {
Meta$fb_open[v] <- "None"
}
if (length(levels(data$feedback))!=0) {
Meta$fb_open[v] <- levels(data$feedback)[2] # Open Text for Feedback
}
resp = NULL
labl = NULL
RT = NULL
for (i in 1:length(data)) {
# 3.2 Get the sections with the reactions
index <- data$response=="react"
# 3.2.1 Cut the whole dataset according to this
# label is 2=A,3=V,4=AV
resp <-data$respons[index]
labl <- data$label[index]
RT <- data$duration[index]
}
tmp <- data.frame(labl,resp,RT)
alldat <- c(alldat,list(tmp))
}
# Now clean up the dataset
# 4.2.1. Remove empty elements
iszero = NULL
# Find non-zero datasets
for (v in 1:length(alldat)){
iszero[v]<-nrow(alldat[[v]])>1
}
# Keep non-zero datasets
alldat<-alldat[iszero]
for (i in 1:length(Meta)){
Meta[[i]]<-Meta[[i]][iszero]
}
# 4.2.2. Remove incomplete sets
isincomp = NULL
# Find datasets with more than 30 trials
for (v in 1:length(alldat)){
isincomp[v]<-nrow(alldat[[v]])>30
}
# Keep only datasets with more than 30 trials
alldat<-alldat[isincomp]
for (i in 1:length(Meta)){
Meta[[i]]<-Meta[[i]][isincomp]
}
# 4.2.3. Remove sets with negative values
isneg = NULL
# Find datasets with negative values
for (v in 1:length(alldat)){
isneg[v]<-alldat[[v]][1,3]>0
}
# Keep only datasets with positive values
alldat<-alldat[isneg]
for (i in 1:length(Meta)){
Meta[[i]]<-Meta[[i]][isneg]
}
# 4.2.4. Remove duplicated rows
dups<-!duplicated(alldat)
alldat<-alldat[dups]
for (i in 1:length(Meta)){
Meta[[i]]<-Meta[[i]][dups]
}
# 5. Compute response time
A_RT <- vector("list", length(alldat))
V_RT <- vector("list", length(alldat))
AV_RT <- vector("list", length(alldat))
A_Mean <- NA
V_Mean <- NA
AV_Mean <- NA
for (v in 1:length(alldat)){
tmp<-alldat[[v]]
A_RT[[v]]<-(tmp[tmp$labl=="A",]$RT)
V_RT[[v]]<-(tmp[tmp$labl=="V",]$RT)
AV_RT[[v]]<-(tmp[tmp$labl=="VA",]$RT)
A_Mean[v]<-mean(tmp[tmp$labl=="A",]$RT)
V_Mean[v]<-mean(tmp[tmp$labl=="V",]$RT)
AV_Mean[v]<-mean(tmp[tmp$labl=="VA",]$RT)
}
# 6. Plot
# Very bad and priliminary bar plots
error.bar<-function(x,y,upper,lower=upper,length=0.1) {
arrows(x,y+upper,x,y-lower,angle=90,code=3,length=length)
}
barx<-barplot(c(mean(A_Mean),mean(V_Mean),mean(AV_Mean)),ylim=c(0,550),names.arg=c("Audio","Visual","Audiovisual"),ylab="Mean RT")
error.bar(barx,c(mean(A_Mean),mean(V_Mean),mean(AV_Mean)), c(sd(A_Mean)/sqrt(length(A_Mean)),sd(V_Mean)/sqrt(length(V_Mean)),sd(AV_Mean)/sqrt(length(AV_Mean))))
# 7. Anova
sub_v <- 1:length(A_Mean)
cond <- rep(1,length(A_Mean))
indat <- rbind(cbind(A_Mean,cond*1,sub_v),cbind(V_Mean,cond*2,sub_v),cbind(AV_Mean,cond*3,sub_v))
colnames(indat)[2] <- "cond"
indat <- as.data.frame(indat)
indat$cond <- as.factor(indat$cond)
indat$sub_v <- as.factor(indat$sub_v)
aov1 <- aov(A_Mean~cond+Error(sub_v/cond),data = indat)
summary(aov1)
# 7.1 Post-hoc t-tests
A_V <- t.test(A_Mean,V_Mean,paired=TRUE,var.equal=TRUE)
A_AV <- t.test(A_Mean,AV_Mean,paired=TRUE,var.equal=TRUE)
AV_V <- t.test(AV_Mean,V_Mean,paired=TRUE,var.equal=TRUE)
# 8. Race Model
source('GetPercentile.R')
source('ties.R')
source('cdf.Ulrich.R')
source('probSpace.R')
A_p <- vector("list", length(A_Mean))
V_p <- vector("list", length(V_Mean))
AV_p <- vector("list", length(AV_Mean))
B_p <- vector("list", length(A_Mean))
psq <- probSpace(10); psq
for (v in 1:length(A_Mean)) {
dfx <- ties(A_RT[[v]])
dfy <- ties(V_RT[[v]])
dfz <- ties(AV_RT[[v]])
tmax <- max(A_RT[[v]],V_RT[[v]],AV_RT[[v]])
gx <- cdf.ulrich(data=dfx, maximum=tmax)
gy <- cdf.ulrich(data=dfy, maximum=tmax)
gz <- cdf.ulrich(data=dfz, maximum=tmax)
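  # Race-model (Miller) bound: sum of the two unisensory CDFs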
b <- gx + gy
A_p[[v]] <- GetPercentile(psq, gx, tmax);
V_p[[v]] <- GetPercentile(psq, gy, tmax);
AV_p[[v]] <- GetPercentile(psq, gz, tmax);
B_p[[v]] <- GetPercentile(psq, b, tmax);
}
tmp <- as.data.frame(A_p)
meanA_p <- rowMeans(as.matrix(tmp))
tmp <- as.data.frame(V_p)
meanV_p <- rowMeans(as.matrix(tmp))
tmp <- as.data.frame(AV_p)
meanAV_p <- rowMeans(as.matrix(tmp))
tmp <- as.data.frame(B_p)
meanB_p <- rowMeans(as.matrix(tmp))
# Plot
gdf <- data.frame(RT =c(meanA_p,meanV_p,meanAV_p,meanB_p), Probability =rep(psq, 4),
Condition =rep(c("gx(t)", "gy(t)","gz(t)","gx(t)+gy(t)"), each=length(meanA_p)))
panelf <- ggplot(gdf, aes(x = RT, y = Probability, group=Condition,
colour=Condition, shape=Condition)) +
geom_point() + geom_line()
panelf + coord_cartesian(xlim = c(200, 500), ylim=c(-.01,1.01)) +
theme(legend.position= c(.85, .20),
legend.title = element_text(size=12),
legend.text = element_text(size=12))
|
/MultiRace_Read_CVV.R
|
no_license
|
juliankeil/lab_js
|
R
| false | false | 5,956 |
r
|
#' Print the results.
#'
#' Print the results.
#'
#' @param object A psychobject class object.
#' @param round Round the output.
#' @param ... Further arguments passed to or from other methods.
#'
#' @author \href{https://dominiquemakowski.github.io/}{Dominique Makowski}
#'
#' @method summary psychobject
#' @export
summary.psychobject <- function(object, round = NULL, ...) {
summary <- object$summary
if (!is.null(round)) {
nums <- dplyr::select_if(summary, is.numeric)
nums <- round(nums, round)
fact <- dplyr::select_if(summary, is.character)
fact <- cbind(fact, dplyr::select_if(summary, is.factor))
summary <- cbind(fact, nums)
}
return(summary)
}
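# Usage sketch (hypothetical 'fit' object returned by one of the package's
# analysis functions):
# summary(fit)            # full-precision summary data frame
# summary(fit, round = 2) # numeric columns rounded to 2 decimal places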
|
/R/summary.psychobject.R
|
permissive
|
anhnguyendepocen/psycho.R
|
R
| false | false | 684 |
r
|
|
ols <- function(form, data, robust=FALSE, cluster=NULL,digits=3){
r1 <- lm(form, data)
if(length(cluster)!=0){
data <- na.omit(data[,c(colnames(r1$model),cluster)])
r1 <- lm(form, data)
}
X <- model.matrix(r1)
n <- dim(X)[1]
k <- dim(X)[2]
if(robust==FALSE & length(cluster)==0){
se <- sqrt(diag(solve(crossprod(X)) * as.numeric(crossprod(resid(r1))/(n-k))))
res <- cbind(coef(r1),se)
}
if(robust==TRUE){
u <- matrix(resid(r1))
meat1 <- t(X) %*% diag(diag(crossprod(t(u)))) %*% X
dfc <- n/(n-k)
se <- sqrt(dfc*diag(solve(crossprod(X)) %*% meat1 %*% solve(crossprod(X))))
res <- cbind(coef(r1),se)
}
if(length(cluster)!=0){
clus <- cbind(X,data[,cluster],resid(r1))
colnames(clus)[(dim(clus)[2]-1):dim(clus)[2]] <- c(cluster,"resid")
m <- dim(table(clus[,cluster]))
dfc <- (m/(m-1))*((n-1)/(n-k))
uclust <- apply(resid(r1)*X,2, function(x) tapply(x, clus[,cluster], sum))
se <- sqrt(diag(solve(crossprod(X)) %*% (t(uclust) %*% uclust) %*% solve(crossprod(X)))*dfc)
res <- cbind(coef(r1),se)
}
res <- cbind(res,res[,1]/res[,2],(1-pnorm(abs(res[,1]/res[,2])))*2)
res1 <- matrix(as.numeric(sprintf(paste("%.",paste(digits,"f",sep=""),sep=""),res)),nrow=dim(res)[1])
rownames(res1) <- rownames(res)
colnames(res1) <- c("Estimate","Std. Error","t value","Pr(>|t|)")
return(res1)
}
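# Usage sketch (illustrative; mtcars ships with base R). The function returns a
# coefficient table with the chosen type of standard errors:
# ols(mpg ~ wt + hp, data = mtcars)                   # classical OLS SEs
# ols(mpg ~ wt + hp, data = mtcars, robust = TRUE)    # heteroskedasticity-robust SEs
# ols(mpg ~ wt + hp, data = mtcars, cluster = "cyl")  # SEs clustered on 'cyl'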
|
/teaching/docs/ols.R
|
permissive
|
rdinter/rdinter.github.io
|
R
| false | false | 1,414 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CellScoreReport.R
\name{CellScoreReport}
\alias{CellScoreReport}
\title{Generate a CellScore report}
\usage{
CellScoreReport(
cellscore,
cell.change,
marker.genes,
inputObj,
cs,
group.by = c("transition", "experiment_id", "sub_cell_type1")
)
}
\arguments{
\item{cellscore}{a data.frame of CellScore values, as calculated by
CellScore().}
\item{cell.change}{a data frame containing three columns, one for the
start (donor) test and target cell type. Each row of the data
frame describes one transition from the start to a target cell type.}
\item{marker.genes}{a data.frame of marker genes as generated by function OnOff()}
\item{inputObj}{an ExpressionSet containing data matrices of normalized
expression data, present/absent calls, a gene annotation data.frame and
a phenotype data.frame.}
}
\value{
This function outputs the plots on the active graphical device
and returns invisibly NULL.
}
\description{
This function generates a CellScore report for each study and
transition that can be saved as a pdf. The report includes:
1) scatterplot of the donor-like and target-like scores of relevant
test samples and the standards;
2) a density plot of the test and standard cellscores;
3) a rugplot of the cellscores, focussing on the test samples;
4) a heatmap of the OnOff Marker genes for all standards and test samples.
}
\examples{
## Load the expression set for the standard cell types
library(Biobase)
library(hgu133plus2CellScore) # inputObj.std
## Locate the external data files in the CellScore package
rdata.path <- system.file("extdata", "inputObj48.RData", package = "CellScore")
tsvdata.path <- system.file("extdata", "cell_change_test.tsv",
package = "CellScore")
if (file.exists(rdata.path) && file.exists(tsvdata.path)) {
## Load the expression set with normalized expressions of 48 test samples
load(rdata.path)
## Import the cell change info for the loaded test samples
cell.change <- read.delim(file= tsvdata.path, sep="\t",
header=TRUE, stringsAsFactors=FALSE)
## Combine the standards and the test data
inputObj <- combine(inputObj.std, inputObj48)
## Generate cosine similarity for the combined data
## NOTE: May take 1-2 minutes on the full inputObj object
## so we subset it for 4 cell types
pdata <- pData(inputObj)
sel.samples <- pdata$general_cell_type \%in\% c("ESC", "EC", "FIB", "KER",
"ASC", "NPC", "MSC", "iPS", "piPS")
inputObj.sub <- inputObj[, sel.samples]
cs <- CosineSimScore(inputObj.sub, cell.change, iqr.cutoff=0.1)
## Generate the on/off scores for the combined data
individ.OnOff <- OnOff(inputObj.sub, cell.change, out.put="individual")
## Generate the CellScore values for all samples
cellscore <- CellScore(inputObj.sub, cell.change, individ.OnOff$scores,
cs$cosine.samples)
## Generate the group on/off scores for the combined data
group.OnOff <- OnOff(inputObj.sub, cell.change, out.put="marker.list")
   ## Make a report and save it in the current working directory
pdf("TestReport.pdf", width=8, height=12)
CellScoreReport(cellscore, cell.change, group.OnOff$markers, inputObj.sub)
dev.off()
}
}
\seealso{
\code{\link[CellScore]{CellScore}} for details on CellScore, and
\code{\link[hgu133plus2CellScore]{hgu133plus2CellScore}} for details on the
specific ExpressionSet object that should be provided as an input.
}
\keyword{cellscore}
\keyword{report}
|
/man/CellScoreReport.Rd
|
no_license
|
uilnauyis/CellScore
|
R
| false | true | 3,580 |
rd
|
|
#' @title
#' Generator for the Insertion mutation operator.
#'
#' @description
#' The Insertion mutation operator selects an element at a random position and
#' inserts it at another random position, shifting the elements in between.
#'
#' @return [\code{ecr_mutator}]
#' @family mutators
#' @export
makeInsertionMutator = function() {
mutator = function(ind, args = list(), control, task) {
n.params = length(ind)
# select a random position and insert it at another random location
idx = sample(seq(n.params), size = 2L)
# idx[1] is the selected position
# idx[2] is the destination
# equality is impossible, since replace = FALSE in sample above
tmp = ind[idx[1]]
# determine shift direction
offset = if (idx[1] < idx[2]) (1) else (-1)
ind[idx[1]:(idx[2] - offset)] = ind[(idx[1] + offset):idx[2]]
ind[idx[2]] = tmp
return(ind)
}
makeMutator(
mutator = mutator,
name = "Insertion mutator",
description = "Selects two positions at random, places the first position to
the second and shift the other elements accordingly.",
supported = "permutation"
)
}
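# Worked example (illustrative): for ind = c(10, 20, 30, 40, 50) and sampled
# positions idx = c(2, 5), the value 20 is taken from position 2, elements 3-5
# shift left by one place, and 20 is re-inserted at position 5, giving
# c(10, 30, 40, 50, 20). For idx = c(4, 2) the shift runs rightwards instead,
# giving c(10, 40, 20, 30, 50).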
|
/R/mutator.insertion.R
|
no_license
|
kerschke/ecr
|
R
| false | false | 1,083 |
r
|
|
CoexDiff <- function(krc, kcp, D, k0, e1, e2, e3, f, fc, phi){
pd <- ifelse(D == 2, pd2D, pd3D)
a0 <- ifelse(D == 2, a02D, a03D)
fm1 <- ifelse(fc == 1, "Gr", "Ac")
fm2 <- ifelse(fc == 1, "Gr", ifelse(fc == 2, "Sw", "Ac"))
fm3 <- ifelse(fc == 1, "Ac", ifelse(fc == 2, "Sw", "Ac"))
h <- pv + 2*(D-1)*pd
w <- h + 1 - 2*b
c <- e2/(e1 * e3)
###############
c0 <- e2 * q0 / e1
c1 <- e3 * r0
krp <- krc * kcp
f1 <- f(krc, D, pv, pd, fm1, phi)
f2 <- f(krp, D, pv, pd, fm2, phi)
f3 <- f(kcp, D, pv, pd, fm3, phi)
chi1 <- e1 * k0 * a0 * f1 * krc**(1-b)
chi3 <- (c0 * f2 * kcp**(b - h)) / f1
chi4 <- (c1 * f3 * kcp**(b - h)) / ( f1 * krc**(1-b) )
chi2 <- e2 * k0 * a0 * f2 * krp**(1-b)
gamma2 <- q0 / ( e2 * k0 * a0 * f2 * krp**(1-b))
gamma1 <- kcp**(-w) *q0 / chi1
condPCR <- chi3 + chi4 - q0
condCPR <- chi3 + c*chi4 - q0
gamma3 <- ifelse(condPCR > 0 , log10(chi4 / condPCR * gamma1), NA)
gamma4 <- ifelse(condCPR > 0 , log10(c * chi4 / condCPR * gamma2), NA )
diff <- gamma4 - gamma3
return (ifelse( diff > 0 , diff/w , NA))
}
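# Note (sketch): CoexDiff depends on several objects that are not arguments and must
# already exist in the calling environment, e.g. pd2D, pd3D, a02D, a03D, pv, b, q0
# and r0, as well as a scaling function passed in via 'f'. A hypothetical call,
# assuming those globals and a function myScalingFun() are defined, would be:
# CoexDiff(krc = 10, kcp = 10, D = 3, k0 = 1, e1 = 0.1, e2 = 0.1, e3 = 0.1,
#          f = myScalingFun, fc = 1, phi = 0.5)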
|
/code/Theory/Analisis/FCL.r
|
no_license
|
Carlos16/Tesis-IGP
|
R
| false | false | 1,130 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/message.R
\name{v6_addMessage}
\alias{v6_addMessage}
\title{v6_addMessage}
\usage{
v6_addMessage()
}
\description{
v6_addMessage
}
\seealso{
\url{http://developers.vision6.com.au/3.0/method/addMessage}
}
|
/man/v6_addMessage.Rd
|
no_license
|
gorcha/vision6
|
R
| false | true | 282 |
rd
|
|
\name{subsemble}
\alias{subsemble}
\title{
An Ensemble Method for Combining Subset-Specific Algorithm Fits
}
\description{
The Subsemble algorithm partitions the full dataset into subsets of observations, fits a specified underlying algorithm on each subset, and uses a unique form of k-fold cross-validation to output a prediction function that combines the subset-specific fits.
}
\usage{
subsemble(x, y, newx = NULL, family = gaussian(),
learner, metalearner = "SL.glm", subsets = 3, subControl = list(),
cvControl = list(), learnControl = list(), genControl = list(),
id = NULL, obsWeights = NULL, seed = 1, parallel = "seq")
}
\arguments{
\item{x}{
The data.frame or matrix of predictor variables.
}
\item{y}{
The outcome in the training data set. Must be a numeric vector.
}
\item{newx}{
The predictor variables in the test data set. The structure should match \code{x}. If missing, uses \code{x} for \code{newx}.
}
\item{family}{
A description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family function or the result of a call to a family function. (See '?family' for the details of family functions.) Currently allows \code{gaussian()} or \code{binomial()}.
}
\item{learner}{
A string or character vector naming the prediction algorithm(s) used to train a model on each of the subsets of \code{x}. This uses the learning algorithm API provided by the SuperLearner package, so for example, we could use \code{learner = "SL.randomForest"} or \code{learner = c("SL.glm","SL.randomForest")}. See the \code{\link[SuperLearner:listWrappers]{listWrappers}} function for a full list of available algorithms. If a single learner is provided, the same algorithm will be used on each of the subsets. If a vector of learners is provided, then each algorithm will be applied to each of the subsets (default behavior specified by the \code{learnControl$multiType="crossprod"}); or alternatively, the multiple algorithms can be applied to different subsets with \code{learnControl$multiType="divisor"}.
}
\item{metalearner}{
A string specifying the prediction algorithm used to learn the optimal weighted combination of the sublearners (i.e., models learned on subsets of the data). This uses the API provided by the SuperLearner package, so for example, we could use \code{metalearner = "SL.glmnet"}. See the \code{\link[SuperLearner:listWrappers]{listWrappers}} function for a full list of available algorithms.
}
\item{subsets}{
An integer specifying the number of subsets the data should be partitioned into, a vector of subset labels equal to the number of rows of \code{x}, or a user-specified list of index vectors equal to the number of subsets. If subsets is an integer, you can control how the subsets are partitioned (random shuffle, etc) using the \code{subControl} argument.
}
\item{subControl}{
A list of parameters to control the data partitioning (subsetting) process. The logical \code{stratifyCV} list parameter will stratify the data splits by binary outcome (\code{family=binomial()} only), and defaults to \code{TRUE}. The logical \code{shuffle} parameter defaults to \code{TRUE} to ensure that subsets will be created randomly. If the user explicitly specifies the subsets via the \code{subsets} argument, that will override any parameters in this list. The last parameter, \code{supervised}, currently defaults to \code{NULL} and is a placeholder for the option to learn the optimal subsets in a supervised manner. This will be implemented in a future release.
}
\item{cvControl}{
A list of parameters to control the cross-validation process. The \code{V} parameter is an integer representing the number of cross-validation folds and defaults to 10. Each of the subsets will be divided into \code{V} cross-validation folds. The other parameters are \code{stratifyCV} and \code{shuffle}, which are both logical and default to \code{TRUE}. See above for descriptions of these parameters.
}
\item{learnControl}{
A list of parameters to control the learning process. Currently, the only parameter is \code{multiType}, which is only used if there are multiple learners specified by the \code{learner} argument. The two supported values for \code{multiType} are \code{"crossprod"} (the default) and \code{"divisor"}. The \code{"crossprod"} type will train each of the learners on each of the subsets. For the \code{"divisor"} type, the length of the \code{learners} vector must be a divisor of the number of subsets. If \code{length(learner)} equals the number of subsets, each learner will be applied to a single subset. If \code{length(learner)} is a divisor of the number of subsets, then the learners will be repeated as necessary (to equal the number of subsets).
}
\item{genControl}{
A list of general control parameters. Currently, the only parameter is \code{saveFits}, which defaults to \code{TRUE}. If set to \code{FALSE}, then the \code{subfits} and \code{metafit} output objects will be set to \code{NULL}. This can be used if you want to train and test in one step and do not want to waste disk space storing all the models.
}
\item{id}{
Optional cluster identification variable. Passed to the \code{learner} algorithm.
}
\item{obsWeights}{
Optional observation weights vector. As with \code{id} above, \code{obsWeights} is passed to the prediction and screening algorithms, but many of the built in learner wrappers ignore (or can't use) the information. If you are using observation weights, make sure the learner you specify uses the information, or the weights will be ignored.
}
\item{seed}{
A random seed to be set (integer); defaults to 1. If \code{NULL}, then a random seed will not be set.
}
\item{parallel}{
A character string specifying optional parallelization. Use \code{"seq"} for sequential computation (the default). Use \code{"multicore"} to perform the k-fold (internal) cross-validation step as well as the learning across subsets in parallel over all available cores. Or \code{parallel} can be a snow cluster object. Both parallel options use the built-in functionality of the core "parallel" package.
}
}
\value{
\item{subfits}{
A list of predictive models, each of which are fit on a subset of the (rows of) data, \code{x}. For \code{learnControl$multiType="crossprod"}, the length of this list is equal to the number of subsets times the number of learners in the \code{learner} argument. For \code{learnControl$multiType="divisor"}, the length of this list is equal to the number of subsets.
}
\item{metafit}{
The predictive model which is learned by regressing \code{y} on \code{Z} (see description of \code{Z} below). The type of model is specified using the \code{metalearner} argument.
}
\item{subpred}{
A data.frame with the predicted values from each sublearner algorithm for the rows in \code{newx}. If we have L unique learners and there are J subsets of data, then there will be L x J columns when \code{learnControl$multiType=="crossprod"} (default) and J columns when \code{learnControl$multiType=="divisor"}.
}
\item{pred}{
A vector containing the predicted values from the subsemble for the rows in \code{newx}.
}
\item{Z}{
The Z matrix (the cross-validated predicted values for each sublearner).
}
\item{cvRisk}{
A numeric vector with the k-fold cross-validated risk estimate for each algorithm in learning library. Note that this does not contain the CV risk estimate for the Subsemble, only the individual models in the library. (Not enabled yet, set to \code{NULL}.)
}
\item{family}{
Returns the \code{family} argument from above.
}
\item{subControl}{
Returns the \code{subControl} argument from above.
}
\item{cvControl}{
Returns the \code{cvControl} argument from above.
}
\item{learnControl}{
Returns the \code{learnControl} argument from above.
}
\item{subsets}{
The list of subsets, which is a list of vectors of row indices. The length of this list equals the number of subsets.
}
\item{subCVsets}{
The list of subsets, further broken down into the cross-validation folds that were used. Each subset (top level list element) is partitioned into V cross-validation folds.
}
\item{ylim}{
Returns range of \code{y}.
}
\item{seed}{
An integer. Returns \code{seed} argument from above.
}
\item{runtime}{
A list of runtimes for various steps of the algorithm. The list contains \code{cv}, \code{metalearning}, \code{sublearning} and \code{total} elements. The \code{cv} element is the time it takes to create the \code{Z} matrix (see above). The \code{metalearning} element is the training time for the metalearning step. The \code{sublearning} element is a list of training times for each of the models in the ensemble. The time to run the entire \code{subsemble} function is given in \code{total}.
}
}
\references{
LeDell, E. (2015) Scalable Ensemble Learning and Computationally Efficient Variance Estimation (Doctoral Dissertation). University of California, Berkeley, USA.\cr
\url{https://github.com/ledell/phd-thesis/blob/main/ledell-phd-thesis.pdf}\cr
\cr
Stephanie Sapp, Mark J. van der Laan & John Canny. (2014) Subsemble: An ensemble method for combining subset-specific algorithm fits. Journal of Applied Statistics, 41(6):1247-1259\cr
\url{https://www.tandfonline.com/doi/abs/10.1080/02664763.2013.864263}\cr
\url{https://biostats.bepress.com/ucbbiostat/paper313/}
}
\author{
Erin LeDell \email{oss@ledell.org}
}
\seealso{
\code{\link[SuperLearner:listWrappers]{listWrappers}}, \code{\link[SuperLearner:SuperLearner]{SuperLearner}}
}
\examples{
\donttest{
# Load some example data.
library(subsemble)
library(cvAUC) # >= version 1.0.1
data(admissions)
# Training data.
x <- subset(admissions, select = -c(Y))[1:400,]
y <- admissions$Y[1:400]
# Test data.
newx <- subset(admissions, select = -c(Y))[401:500,]
newy <- admissions$Y[401:500]
# Set up the Subsemble.
learner <- c("SL.randomForest", "SL.glm")
metalearner <- "SL.glm"
subsets <- 2
# Train and test the model.
# With learnControl$multiType="crossprod" (the default),
# we ensemble 4 models (2 subsets x 2 learners).
fit <- subsemble(x = x, y = y, newx = newx, family = binomial(),
learner = learner, metalearner = metalearner,
subsets = subsets)
# Evaluate the model by calculating AUC on the test set.
auc <- AUC(predictions = fit$pred, labels = newy)
print(auc) # Test set AUC is: 0.937
# We can also use the predict method to generate predictions on new data afterwards.
pred <- predict(fit, newx)
auc <- AUC(predictions = pred$pred, labels = newy)
print(auc) # Test set AUC is: 0.937
# Modify the learnControl argument and train/eval a new Subsemble.
# With learnControl$multiType="divisor",
# we ensemble only 2 models (one for each subset).
fit <- subsemble(x = x, y = y, newx = newx, family = binomial(),
learner = learner, metalearner = metalearner,
subsets = subsets,
learnControl = list(multiType = "divisor"))
auc <- AUC(predictions = fit$pred, labels = newy)
print(auc) # Test set AUC is: 0.922
# An example using a single learner.
# In this case there are 3 subsets and 1 learner,
# for a total of 3 models in the ensemble.
learner <- c("SL.randomForest")
metalearner <- "SL.glmnet"
subsets <- 3
fit <- subsemble(x = x, y = y, newx = newx, family = binomial(),
learner = learner, metalearner = metalearner,
subsets = subsets)
auc <- AUC(predictions = fit$pred, labels = newy)
print(auc) # Test set AUC is: 0.925
# An example using the full data (i.e. subsets = 1).
# Here, we have an ensemble of 2 models (one for each of the 2 learners).
# This is equivalent to the Super Learner algorithm.
learner <- c("SL.randomForest", "SL.glm")
metalearner <- "SL.glm"
subsets <- 1
fit <- subsemble(x = x, y = y, newx = newx, family = binomial(),
learner = learner, metalearner = metalearner,
subsets = subsets)
auc <- AUC(predictions = fit$pred, labels = newy)
print(auc) # Test set AUC is: 0.935
# Multicore subsemble via the "parallel" package.
# To perform the cross-validation and training steps using all available cores,
# use the parallel = "multicore" option.
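# For instance (sketch, not run; reuses the objects defined above):
# fit <- subsemble(x = x, y = y, newx = newx, family = binomial(),
#                  learner = learner, metalearner = metalearner,
#                  subsets = subsets, parallel = "multicore")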
# More examples and information at: https://github.com/ledell/subsemble
}
}
\keyword{models}
|
/man/subsemble.Rd
|
permissive
|
ledell/subsemble
|
R
| false | false | 12,416 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compose_email.R
\name{compose_email}
\alias{compose_email}
\title{Create the email message}
\usage{
compose_email(body = NULL, footer = NULL, .preheader_text = NULL,
.title = NULL, .envir = parent.frame(), ...)
}
\arguments{
\item{body}{the main body of text for
the email message. Markdown can be used here
(along with string interpolation via curly
braces and named arguments) to construct the
main text.}
\item{footer}{the footer text for the email
message. As with the \code{body}, Markdown
and string interpolation can be used here.}
\item{.preheader_text}{text that appears
before the subject in some email clients.
This must be plaintext.}
\item{.title}{the title of the email message.
This is not the subject but the HTML title
text which may appear in limited
circumstances.}
\item{.envir}{allows for setting the
environment.}
\item{...}{expression strings for string
interpolation for the \code{body},
\code{footer}, and \code{preheader_text}
string data.}
}
\value{
an \code{email_message} object,
which can be used for previewing with
the \code{preview_email()} function or
for sending out actual emails with the
\code{send_email_out()} function.
}
\description{
Create an email message. String
interpolation is possible for the text
comprising the email body, footer, and
preheader text. This is done by using
curly braces to enclose R code chunks.
Variables can be specified in the function
call (using named arguments with \code{...}),
and any variables not found in \code{...}
will be searched for in the global
environment.
}
\examples{
# Create a simple email message using
# Markdown formatting
email <-
compose_email(
body = "
Hello!
## This a section heading
We can use Markdown formatting \\\\
to **embolden** text or to add \\\\
*emphasis*. This is exciting, \\\\
right?
Cheers")
# The email message can always be
# previewed using `preview_email()`
preview_email(email = email)
# We can use string interpolation to
# add in R code or strings assigned
# to variables; variables can be
# obtained from the global workspace
# or from temporary variables in the
# function call
sender_name <- "Mike"
email <-
compose_email(
body = "
Hello!
I just wanted to let you \\\\
know that the {thing} that \\\\
asked me for is ready to \\\\
pick up. So, come over and \\\\
do that.
Cheers,
{sender_name}",
thing = "report")
}
|
/man/compose_email.Rd
|
permissive
|
jcheng5/blastula
|
R
| false | true | 2,471 |
rd
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Summarize Observations by Year}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{The years to be read in}
}
\value{
\code{fars_summarize_years} will return a data frame.
}
\description{
\code{fars_summarize_years} will read in multiple Fatality Analysis Reporting
System data files based on the years provided and summarise the number of
observations by year and month.
}
\examples{
fars_summarize_years(2013:2015)
}
|
/man/fars_summarize_years.Rd
|
no_license
|
goodamr/rfars
|
R
| false | true | 593 |
rd
|
|
##library
library(tidyverse)
library(doBy)
library(readxl)
library(ggplot2)
library(plotrix)
library(psych)
library(rlang)
library(purrr)
library(forcats)
library(viridis)
library(reshape)
library(rgdal)
library(xlsx)
library(lubridate)
library(gganimate)
library(magick)
library(grid)
library(ggjoy)
library(ggridges)
library(here)
##load the raw RVCAT data file
raw.data<-read.csv(here('Data','RVCAT.csv'))
##change date into usable form
raw.data$date<-dmy(raw.data$OP_DATE)
###Calculate mid lat and long for each trawl
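###Backfill missing begin/end coordinates from the opposite end of the tow so that
###midpoints can still be computed when only one endpoint was recorded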
raw.data[is.na(raw.data[,"END_LATITUDE_DD"]), "END_LATITUDE_DD"] <- raw.data[is.na(raw.data[, "END_LATITUDE_DD"]),"BEG_LATITUDE_DD"]
raw.data[is.na(raw.data[,"BEG_LATITUDE_DD"]), "BEG_LATITUDE_DD"] <- raw.data[is.na(raw.data[, "BEG_LATITUDE_DD"]),"END_LATITUDE_DD"]
raw.data[is.na(raw.data[,"END_LONGITUDE_DD"]), "END_LONGITUDE_DD"] <- raw.data[is.na(raw.data[, "END_LONGITUDE_DD"]),"BEG_LONGITUDE_DD"]
raw.data[is.na(raw.data[,"BEG_LONGITUDE_DD"]), "BEG_LONGITUDE_DD"] <- raw.data[is.na(raw.data[, "BEG_LONGITUDE_DD"]),"END_LONGITUDE_DD"]
raw.data$Mid.Lat.DD<-(raw.data$BEG_LATITUDE_DD+raw.data$END_LATITUDE_DD)/2
raw.data$Mid.Long.DD<-(raw.data$BEG_LONGITUDE_DD+raw.data$END_LONGITUDE_DD)/2
raw.data$YearClass<-raw.data$YEAR-1
##Select minimum number of fields of interest
data1<-select(raw.data,1,32,4,5,8,33,34,35)
###########################
##load Fish Lengths file into R
raw.data<-read.csv(here('Data','LENGTHS_RVCAT.csv'))
raw.data<-subset(raw.data, EXP_N>0)
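##Expand the length records so each measured fish appears once per expanded count:
##rep() repeats each row index EXP_N times before the rows are re-assembled below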
reprows<-rep(1:nrow(raw.data), raw.data$EXP_N)
data2 <- raw.data[reprows,] %>%
as.data.frame()
data2 <-select(data2, 1,4:6)
###JOIN TRAWL EFFORT TO LENGTH DATA
data3 <- inner_join(data2,unique(data1))
describe(data3)
#########################################################################################
#########################################################################################
plot_theme<-theme(axis.line=element_line(size=1, color='black'),
panel.background = element_rect(NA),
axis.text=element_text(size=20, family='serif'),
axis.title=element_text(size=20, family='serif'),
plot.margin = margin(.5,.5,.5,.5,"cm"),
legend.text=element_text(size=16, family='serif'),
axis.ticks=element_line(size=1, color='black'),
plot.title=element_text(size=24, family='serif'),
plot.subtitle=element_text(size=16, family='serif'),
plot.caption=element_text(size=16, family='serif'),
legend.background = element_blank(),
legend.key = element_blank(),
strip.text=element_text(size=16, family='serif'))
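##plot_theme is added to every ggplot below so the figures share fonts, margins, and axis styling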
###Subset data for a particular species, target, location, year, etc...
sumdata1 <- data3 %>%
filter(SPECIES=="202",TARGET==2, YEAR >=1989)
###########Length Frequency Joy Plots
sumdata1$YEARf<-as.factor(sumdata1$YEAR)
ggplot(sumdata1, aes(x = LENGTH, y = YEARf)) +
geom_joy_gradient(scale = 3, rel_min_height = 0.01) +
scale_x_continuous(limits = c(0,500)) +
scale_y_discrete(expand = c(0.01, 0)) +
plot_theme +
theme_joy(font_size = 16, grid = TRUE) +
labs(title='Lake Superior Cisco Length Frequency',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Total length (mm)",
y = "Year")
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Cisco_Joyplot.png'), dpi = 300, width = 40, height = 20, units = "cm")
#################################################################################################
#################################################################################################
#################################################################################################
##Annual mean lengths MULTIPLE SPECIES - Nearshore - offshore Ciscoes
###Subset data for a particular species, target, location, year, etc...
sumdata2 <- data3 %>%
filter(SPECIES=="204" & TARGET==2 & YEAR >=1989 |
SPECIES=="206" & TARGET==118 & YEAR >=1989 |
SPECIES=="206" & TARGET==117 & YEAR >=1989 |
SPECIES=="202" & TARGET==2 & YEAR >=1989)
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
sumdata2$SPECIES[sumdata2$SPECIES == "202"] <- "Cisco"
sumdata2$SPECIES[sumdata2$SPECIES == "204"] <- "Bloater"
sumdata2$SPECIES[sumdata2$SPECIES == "206"] <- "Kiyi"
sumdata3 <-sumdata2 %>%
group_by(YEAR, SPECIES) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH), min_L=min(LENGTH), max_L=max(LENGTH))
sumdata4 <-sumdata3 %>%
group_by(SPECIES) %>%
  summarize(median_L=median(median_L), mean_L=mean(mean_L), min_L=min(min_L), max_L=max(max_L))
ggplot(sumdata2, aes(x=YEARf,LENGTH)) +
stat_summary(fun.y = mean, geom = "point", size=2) +
geom_line() +
geom_hline(data=sumdata4, aes(yintercept=mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,350)) +
plot_theme +
labs(title='Lake Superior Ciscoes Mean Annual Lengths',
subtitle='Near- and Offshore bottom trawl survey collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Mean length (mm)") +
facet_grid(SPECIES ~.)+
scale_x_discrete(breaks=seq(1989,2019, by=2))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Ciscoe_Means.png'), dpi = 300, width = 40, height = 20, units = "cm")
#################################################################################################
##Annual mean lengths MULTIPLE SPECIES - Nearshore SCULPINS
###Subset data for a particular species, target, location, year, etc...
sumdata2 <- data3 %>%
filter(SPECIES=="902" & TARGET==2 & YEAR >=1989 |
SPECIES=="903" & TARGET==2 & YEAR >=1989 |
SPECIES=="904" & TARGET==2 & YEAR >=1989)
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
sumdata2$SPECIES[sumdata2$SPECIES == "902"] <- "Slimy Sculpin"
sumdata2$SPECIES[sumdata2$SPECIES == "903"] <- "Spoonhead Sculpin"
sumdata2$SPECIES[sumdata2$SPECIES == "904"] <- "Deepwater Sculpin"
sumdata3 <-sumdata2 %>%
group_by(YEAR, SPECIES) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH), min_L=min(LENGTH), max_L=max(LENGTH))
sumdata4 <-sumdata3 %>%
group_by(SPECIES) %>%
  summarize(median_L=median(median_L), mean_L=mean(mean_L), min_L=min(min_L), max_L=max(max_L))
ggplot(sumdata2, aes(x=YEARf,LENGTH)) +
stat_summary(fun.y = mean, geom = "point", size=2) +
geom_line() +
geom_hline(data=sumdata4, aes(yintercept=mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,150)) +
plot_theme +
labs(title='Lake Superior Sculpin Mean Annual Lengths',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Mean length (mm)") +
facet_grid(SPECIES ~.)+
scale_x_discrete(breaks=seq(1989,2019, by=2))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Sculpins_Means.png'), dpi = 300, width = 40, height = 20, units = "cm")
#################################################################################################
##Annual mean lengths MULTIPLE SPECIES - Nearshore Ninespine Stickleback and Trout-Perch
###Subset data for a particular species, target, location, year, etc...
sumdata2 <- data3 %>%
filter(SPECIES=="130" & TARGET==2 & YEAR >=1989 |
SPECIES=="131" & TARGET==2 & YEAR >=1989)
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
sumdata2$SPECIES[sumdata2$SPECIES == "130"] <- "Ninespine Stickleback"
sumdata2$SPECIES[sumdata2$SPECIES == "131"] <- "Trout-Perch"
sumdata3 <-sumdata2 %>%
group_by(YEAR, SPECIES) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH), min_L=min(LENGTH), max_L=max(LENGTH))
sumdata4 <-sumdata3 %>%
group_by(SPECIES) %>%
  summarize(median_L=median(median_L), mean_L=mean(mean_L), min_L=min(min_L), max_L=max(max_L))
ggplot(sumdata2, aes(x=YEARf,LENGTH)) +
stat_summary(fun.y = mean, geom = "point", size=2) +
geom_line() +
geom_hline(data=sumdata4, aes(yintercept=mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,150)) +
plot_theme +
labs(title='Lake Superior Ninespine Stickleback and Trout-Perch Mean Annual Lengths',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Mean length (mm)") +
facet_grid(SPECIES ~.)+
scale_x_discrete(breaks=seq(1989,2019, by=2))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_TP-NS_Means.png'), dpi = 300, width = 40, height = 20, units = "cm")
#################################################################################################
##Annual mean lengths SINGLE SPECIES Rainbow Smelt
###Subset data for a particular species, target, location, year, etc...
sumdata2 <- data3 %>%
filter(SPECIES=="109" & TARGET==2 & YEAR >=1989)
sumdata3 <-sumdata2 %>%
group_by(YEAR, SPECIES) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH), min_L=min(LENGTH), max_L=max(LENGTH))
sumdata4 <-sumdata3 %>%
group_by(SPECIES) %>%
  summarize(median_L=median(median_L), mean_L=mean(mean_L), min_L=min(min_L), max_L=max(max_L))
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
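##Note: the reference line in this and the following single-species plots is mean(sumdata3$mean_L),
##i.e. the mean of the annual means with each year weighted equally, not the grand mean of all fish.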
ggplot(sumdata2, aes(x=YEARf,LENGTH)) +
stat_summary(fun.y = mean, geom = "point", size=2) +
geom_line() +
geom_hline(yintercept=mean(sumdata3$mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,250)) +
plot_theme +
labs(title='Lake Superior Rainbow Smelt Mean Annual Length',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Mean length (mm)") +
scale_x_discrete(breaks=seq(1989,2019, by=2))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_RBS_Means.png'), dpi = 300, width = 40, height = 20, units = "cm")
##Annual mean lengths SINGLE SPECIES Burbot
###Subset data for a particular species, target, location, year, etc...
sumdata2 <- data3 %>%
filter(SPECIES=="127" & TARGET==2 & YEAR >=1989)
sumdata3 <-sumdata2 %>%
group_by(YEAR, SPECIES) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH), min_L=min(LENGTH), max_L=max(LENGTH))
sumdata4 <-sumdata3 %>%
group_by(SPECIES) %>%
  summarize(median_L=median(median_L), mean_L=mean(mean_L), min_L=min(min_L), max_L=max(max_L))
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
ggplot(sumdata2, aes(x=YEARf,LENGTH)) +
stat_summary(fun.y = mean, geom = "point", size=2) +
geom_line() +
geom_hline(yintercept=mean(sumdata3$mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,750)) +
plot_theme +
labs(title='Lake Superior Burbot Mean Annual Length',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Mean length (mm)") +
scale_x_discrete(breaks=seq(1989,2019, by=2))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Burbot_Means.png'), dpi = 300, width = 40, height = 20, units = "cm")
##Annual mean lengths SINGLE SPECIES Longnose Sucker
###Subset data for a particular species, target, location, year, etc...
sumdata2 <- data3 %>%
filter(SPECIES=="404" & TARGET==2 & YEAR >=1989)
sumdata3 <-sumdata2 %>%
group_by(YEAR, SPECIES) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH), min_L=min(LENGTH), max_L=max(LENGTH))
sumdata4 <-sumdata3 %>%
group_by(SPECIES) %>%
  summarize(median_L=median(median_L), mean_L=mean(mean_L), min_L=min(min_L), max_L=max(max_L))
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
ggplot(sumdata2, aes(x=YEARf,LENGTH)) +
stat_summary(fun.y = mean, geom = "point", size=2) +
geom_line() +
geom_hline(yintercept=mean(sumdata3$mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,500)) +
plot_theme +
labs(title='Lake Superior Longnose Sucker Mean Annual Length',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Mean length (mm)") +
scale_x_discrete(breaks=seq(1989,2019, by=2))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_LNS_Means.png'), dpi = 300, width = 40, height = 20, units = "cm")
##Annual mean lengths SINGLE SPECIES Siscowet Lake Trout
###Subset data for a particular species, target, location, year, etc...
sumdata2 <- data3 %>%
filter(SPECIES=="308" & TARGET==118 & YEAR >=2011 |
SPECIES=="308" & TARGET==117 & YEAR >=1989)
sumdata3 <-sumdata2 %>%
group_by(YEAR) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH))
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
ggplot(sumdata2, aes(x=YEARf,LENGTH)) +
stat_summary(fun.y = mean, geom = "point", size=2) +
geom_line() +
geom_hline(yintercept=mean(sumdata3$mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,750)) +
plot_theme +
labs(title='Lake Superior Siscowet Lake Trout Mean Annual Length',
subtitle='Offshore summer bottom trawl collections, 2011-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Mean length (mm)") +
scale_x_discrete(breaks=seq(2011,2019, by=1))
ggsave(here('Plots and Tables/Lengths','os_Lengths_Siscowet_Means.png'), dpi = 300, width = 40, height = 20, units = "cm")
###############################################################################################
###############################################################################################
###############################################################################################
##vertical histograms
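##Each block below facets the histogram by YEAR with the facet strips switched to the bottom and then
##coord_flip()s the panels, so every year becomes a vertical length-frequency column; the geom_vline
##marks a fixed reference length for each species.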
##CISCO--------------------------------------------------------------------------------------------------------------------
cisco <- data3 %>%
filter(SPECIES=="202",TARGET==2, YEAR >=1989)
##filter(SPECIES=="202", YEAR >=1960)
ggplot(cisco, aes(x=LENGTH))+
geom_histogram(binwidth = 10)+
plot_theme+
facet_grid(.~YEAR, switch='both', scales='free')+
coord_flip()+
geom_hline(yintercept=0, color='black', size=.5)+
theme(strip.placement = 'inside',
strip.background = element_blank(),
strip.text=element_text(size=12))+
labs(y='Year', x='Total Length (mm)',
title='Lake Superior Cisco Length Frequency',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0')+
scale_y_continuous(breaks=NULL)+
geom_vline(xintercept=140, size=1)
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Cisco_Vhistogram.png'), dpi = 300, width = 40, height = 20, units = "cm")
##Bloater-----------------------------------------------------------------------------------------------------------
bloater <- data3 %>%
filter(SPECIES=="204",TARGET==2, YEAR >=1989)
ggplot(bloater, aes(x=LENGTH))+
geom_histogram(binwidth = 10)+
plot_theme+
facet_grid(.~YEAR, switch='both', scales='free')+
coord_flip()+
geom_hline(yintercept=0, color='black', size=.5)+
theme(strip.placement = 'inside',
strip.background = element_blank(),
strip.text=element_text(size=12))+
labs(y='Year', x='Total Length (mm)',
title='Lake Superior Bloater Length Frequency',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0')+
scale_y_continuous(breaks=NULL)+
geom_vline(xintercept=130, size=1)
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Bloater_Vhistogram.png'), dpi = 300, width = 40, height = 20, units = "cm")
##Lake Whitefish----------------------------------------------------------------------------------------------------
lwf <- data3 %>%
filter(SPECIES=="203",TARGET==2, YEAR >=1989)%>%
  filter(LENGTH<501) ##capped at 500 mm because some Lake Whitefish lengths appear mis-entered; the best cutoff length still needs to be decided
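##A quick tail check like the one below (illustrative only) can help justify the 500 mm cutoff by
##comparing the upper percentiles of the unfiltered Lake Whitefish lengths with the recorded maximum:
lwf_tail_check <- data3 %>%
  filter(SPECIES=="203", TARGET==2, YEAR >=1989) %>%
  summarize(q95_L=quantile(LENGTH, 0.95, na.rm=TRUE),
            q99_L=quantile(LENGTH, 0.99, na.rm=TRUE),
            max_L=max(LENGTH, na.rm=TRUE))
lwf_tail_check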
ggplot(lwf, aes(x=LENGTH))+
geom_histogram(binwidth = 10)+
plot_theme+
facet_grid(.~YEAR, switch='both', scales='free')+
coord_flip()+
geom_hline(yintercept=0, color='black', size=.5)+
theme(strip.placement = 'inside',
strip.background = element_blank(),
strip.text=element_text(size=12))+
labs(y='Year', x='Total Length (mm)',
title='Lake Superior Lake Whitefish (<500 mm) Length Frequency',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0')+
scale_y_continuous(breaks=NULL)+
geom_vline(xintercept=160, size=1)+
scale_x_continuous(breaks=seq(0,600,by=100))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_LWF_Vhistogram.png'), dpi = 300, width = 40, height = 20, units = "cm")
##rainbow smelt------------------------------------------------------------------------------------------------------
rbs <- data3 %>%
filter(SPECIES=="109",TARGET==2, YEAR >=1989)
ggplot(rbs, aes(x=LENGTH))+
geom_histogram(binwidth = 10)+
plot_theme+
facet_grid(.~YEAR, switch='both', scales='free')+
coord_flip()+
geom_hline(yintercept=0, color='black', size=.5)+
theme(strip.placement = 'inside',
strip.background = element_blank(),
strip.text=element_text(size=12))+
labs(y='Year', x='Total Length (mm)',
title='Lake Superior Rainbow Smelt Length Frequency',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0')+
scale_y_continuous(breaks=NULL)+
geom_vline(xintercept=100, size=1)+
scale_x_continuous(breaks=seq(0,300, by=50), limits=c(0,NA), expand=c(0,0))
ggsave(here('Plots and Tables/Lengths','ns_Lengths_RBS_Vhistogram.png'), dpi = 300, width = 40, height = 20, units = "cm")
##kiyi----------------------------------------------------------------------------------------------------------------
kiyi <- data3 %>%
filter(SPECIES=="206",TARGET==117|TARGET==118, YEAR >=2011)
ggplot(kiyi, aes(x=LENGTH))+
geom_histogram(binwidth = 10)+
plot_theme+
facet_grid(.~YEAR, switch='both', scales='free')+
coord_flip()+
geom_hline(yintercept=0, color='black', size=.5)+
theme(strip.placement = 'inside',
strip.background = element_blank(),
strip.text=element_text(size=12))+
labs(y='Year', x='Total Length (mm)',
title='Lake Superior Kiyi Length Frequency',
subtitle='Offshore summer bottom trawl collections, 2011-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0')+
scale_y_continuous(breaks=NULL)+
geom_vline(xintercept=130, size=1)
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Kiyi_Vhistogram.png'), dpi = 300, width = 40, height = 20, units = "cm")
##SISCOWET--------------------------------------------------------------------------------------------------------------------
siscowet <- data3 %>%
filter(SPECIES=="308",TARGET==117|TARGET==118, YEAR >=2011) %>%
subset(LENGTH<=800)
ggplot(siscowet, aes(x=LENGTH))+
geom_histogram(binwidth = 10)+
plot_theme+
facet_grid(.~YEAR, switch='both', scales='free')+
coord_flip()+
geom_hline(yintercept=0, color='black', size=.5)+
theme(strip.placement = 'inside',
strip.background = element_blank(),
strip.text=element_text(size=12))+
labs(y='Year', x='Total Length (mm)',
title='Lake Superior Siscowet Length Frequency',
subtitle='Offshore summer bottom trawl collections, 2011-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0')+
  scale_y_continuous(breaks=NULL)
## geom_vline(xintercept=130, size=1)
ggsave(here('Plots and Tables/Lengths','ns_Lengths_Siscowet_Vhistogram.png'), dpi = 300, width = 40, height = 20, units = "cm")
###############################################################################################
#################################################################################################
###Miscellaneous plots below
###############################################################################################
###############################################################################################
cisco <- data3 %>%
filter(SPECIES=="202",TARGET==2, YEAR >=1989)
ggplot(cisco, aes(x=date, y=LENGTH))+
geom_point() +
plot_theme+
#facet_grid(.~YEAR, switch='both', scales='free')+
#coord_flip()+
# geom_hline(yintercept=0, color='black', size=.5)+
# theme(strip.placement = 'inside',
# strip.background = element_blank(),
# strip.text=element_text(size=12))+
labs(x = 'Year', y='Total Length (mm)',
title='Lake Superior Cisco Length Frequency',
subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0')+
scale_y_continuous(breaks=NULL)
# geom_vline(xintercept=140, size=1)
##Simple length frequency histogram all individuals
ggplot(sumdata1, aes(x = LENGTH)) +
geom_histogram() +
scale_x_continuous(limits = c(0,350), expand=c(0,0)) +
scale_y_continuous(expand=c(0,0))+
theme(axis.line=element_line(size=1),
panel.background=element_blank(),
axis.text=element_text(size=16, family='serif'),
axis.title=element_text(size=16, family='serif'),
plot.title=element_text(size=20, family='serif'),
plot.subtitle=element_text(size=20, family='serif'),
plot.caption=element_text(size=12, family='serif'),
legend.text=element_text(size=20, family='serif'),
legend.title=element_text(size=20, family='serif')) +
  labs(title='Lake Superior Cisco Length Frequency',
       subtitle='Nearshore spring bottom trawl collections, 1989-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Total length (mm)",
y = "Count")
ggsave(here('Plots and Tables/Lengths','ns_lengthfreq.png'),dpi = 300, width = 40, height = 20, units = "cm")
################################################################################
##Annual length plots with all fish - jittered
sumdata2 <- data3 %>%
filter(SPECIES=="308" & TARGET==118 & YEAR >=2011 |
SPECIES=="308" & TARGET==117 & YEAR >=2011)
sumdata3 <-sumdata2 %>%
group_by(YEAR) %>%
summarize(median_L=median(LENGTH), mean_L=mean(LENGTH))
sumdata2$YEARf<-as.factor(sumdata2$YEAR)
ggplot(sumdata2, aes(x=YEARf,y = LENGTH)) +
geom_jitter(alpha=.1) +
## geom_point(mean(LENGTH)) +
geom_hline(yintercept=mean(sumdata3$mean_L), color='black', size=.5)+
scale_y_continuous(limits = c(0,750)) +
scale_x_discrete(breaks=seq(2011,2019, by=1)) +
plot_theme +
labs(title='Lake Superior Siscowet Lake Trout Lengths',
subtitle='Offshore summer bottom trawl collections, 2011-2019',
caption = 'Data: U.S. Geological Survey, doi.org/10.5066/F75M63X0',
x = "Year",
y = "Total length (mm)")
ggsave(here('Plots and Tables/Lengths','ns_annual_lengths.png'), dpi = 300, width = 40, height = 20, units = "cm")
###################################################################################################
###################################################################################################
###Excel annual summary table of lengths by species and targets (2-nearshore, 117/118-offshore)
#########################################################
#ADDED BY MARK 5/6/2020
##SOME CODE REPEATS FROM ABOVE
##load the raw RVCAT data file
raw.data<-read.csv(here('Data','RVCAT.csv'))
##change date into usable form
raw.data$date<-dmy(raw.data$OP_DATE)
###Calculate mid lat and long for each trawl
raw.data[is.na(raw.data[,"END_LATITUDE_DD"]), "END_LATITUDE_DD"] <- raw.data[is.na(raw.data[, "END_LATITUDE_DD"]),"BEG_LATITUDE_DD"]
raw.data[is.na(raw.data[,"BEG_LATITUDE_DD"]), "BEG_LATITUDE_DD"] <- raw.data[is.na(raw.data[, "BEG_LATITUDE_DD"]),"END_LATITUDE_DD"]
raw.data[is.na(raw.data[,"END_LONGITUDE_DD"]), "END_LONGITUDE_DD"] <- raw.data[is.na(raw.data[, "END_LONGITUDE_DD"]),"BEG_LONGITUDE_DD"]
raw.data[is.na(raw.data[,"BEG_LONGITUDE_DD"]), "BEG_LONGITUDE_DD"] <- raw.data[is.na(raw.data[, "BEG_LONGITUDE_DD"]),"END_LONGITUDE_DD"]
raw.data$Mid.Lat.DD<-(raw.data$BEG_LATITUDE_DD+raw.data$END_LATITUDE_DD)/2
raw.data$Mid.Long.DD<-(raw.data$BEG_LONGITUDE_DD+raw.data$END_LONGITUDE_DD)/2
raw.data$YearClass<-raw.data$YEAR-1
##Select minimum number of fields of interest
data1<-select(raw.data,1,32,4,5,8,33,34,35)
###########################
##load Fish Lengths file into R
raw.data<-read.csv(here('Data','LENGTHS_RVCAT.csv'))
raw.data<-subset(raw.data, EXP_N>0)
reprows<-rep(1:nrow(raw.data), raw.data$EXP_N)
data2 <- raw.data[reprows,] %>%
as.data.frame()
data2 <-select(data2, 1,4:6)
###########################
###JOIN TRAWL EFFORT TO LENGTH DATA
data3 <- inner_join(data2,unique(data1))
###########################
##Filter targets and summarize by SPECIES, YEAR, TARGET
##Write Excel file
sumtable1 <- data3 %>%
filter(TARGET==2 & YEAR >=1978 | TARGET==118 & YEAR >=2011 | TARGET==117 & YEAR >=2011 | TARGET==106 & YEAR >=1973)
sumtable2 <- sumtable1 %>%
group_by(SPECIES,YEAR,TARGET) %>%
  summarize(fish_count=n(), min_L_mm=min(LENGTH), max_L_mm=max(LENGTH), median_L_mm=median(LENGTH), mean_L_mm=mean(LENGTH))
openxlsx::write.xlsx(sumtable2, here('Plots and Tables/Lengths','Length_AnnualSumry_byTarget_1978-present.xlsx'), rowNames=FALSE)
|
/R Scripts/FishLengths.R
|
no_license
|
carolinerosinski/RVCAT
|
R
| false | false | 25,534 |
r
|
library(dplyr)
# load data; these are the data from Lesson 5 where we combined Claims and Policies into one dataset
dt_pol_w_claims <- readRDS("C:\\Users\\Jakub\\Documents\\GeneralInsurance_Class\\Data\\lesson6_dt_pol_w_claims.rds")
set.seed(58742) # to fix randomizer
ind <- sample(2, nrow(dt_pol_w_claims), replace=TRUE, prob=c(0.80, 0.20)) # generate random indicator to split by
# If I understood the task correctly, we are supposed to improve our model from homework 5; the data come from the exercise class.
#model0 <- glm(data = dt_pol_w_claims %>% filter(Burning_Cost != 0, Burning_Cost < 100),
#              formula = Burning_Cost ~ D_age,
#              family = Gamma())
# This time the model is fitted on the training data only and, as in the previous homework, the outliers are removed,
# because the data are easier to model without them.
# Keep the train/validation indicator aligned with the rows that survive the outlier filter.
keep_rows <- which(dt_pol_w_claims$Burning_Cost != 0 & dt_pol_w_claims$Burning_Cost < 100)
dt_pol_w_claims <- dt_pol_w_claims %>% filter(Burning_Cost != 0, Burning_Cost < 100)
ind <- ind[keep_rows]
dt_pol_w_claims <- mutate(dt_pol_w_claims,
                          data_status = ifelse(ind == 1,
                                               "Training",
                                               ifelse(ind == 2,
                                                      "Validation",
                                                      "Unseen")
                          )
)
train <- dt_pol_w_claims %>% filter(data_status == "Training")
val <- dt_pol_w_claims %>% filter(data_status == "Validation")
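# Roughly 80 % of the policies end up in 'train' and 20 % in 'val'; the "Unseen" label never
# actually occurs here, because sample(2, ...) only returns the values 1 and 2.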
mse <- function(prediction, actual){
return(sum((prediction-actual)^2, na.rm = TRUE)/length(prediction))
}
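# Mean squared error on the response scale: used below to compare candidate models on the
# training rows (in-sample fit) and on the validation rows (out-of-sample performance).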
model1 <- glm(data = train,
formula = Burning_Cost ~ D_age,
family = Gamma())
summary(model1)
#compute the errors for this model
mse(predict(model1, train, type = "response"), train$Burning_Cost)
mse(predict(model1, val, type = "response"), val$Burning_Cost)
#198.8526 and 284.1642
#add more variables: Construct_year, which I used in the last homework (even though the one-way
#analysis did not flag it as significant), and the variable we used in the lecture/exercise
model2 <- glm(data = train,
formula = Burning_Cost ~ D_age + Veh_type2 + Construct_year,
family = Gamma())
summary(model2)
mse(predict(model2, train, type = "response"), train$Burning_Cost)
mse(predict(model2, val, type = "response"), val$Burning_Cost)
# 193.9859 and 285.856
#the model got worse at predicting, but we will keep it and try to improve it
#try removing the variable Veh_type2
model2a <- glm(data = train,
formula = Burning_Cost ~ D_age + Construct_year,
family = Gamma())
summary(model2a)
mse(predict(model2a, train, type = "response"), train$Burning_Cost)
mse(predict(model2a, val, type = "response"), val$Burning_Cost)
#198.7277 and 284.02
#worse on the training data but better on validation,
#so we continue with this model
#as in class, plot the dependence of Burning_Cost (BC) on Construct_year (CY)
library(gridExtra)
source("Support/emb_chart.R")
emblem_graph(
dt.frm = train %>% cbind(data.frame(pred = predict(model2a, train, type = "response"))),
x_var = "Construct_year",
target = "Burning_Cost",
prediction = "pred"
)
# in the lecture we capped everything from the first year up to 2005, but looking at the graph,
# merging all of those years would create a very populous level, so we try a smaller cap
#I also tried capping at 2005, but it gave the same results, so I concluded we can skip that
#the graph shows that years above 2001 still carry non-negligible amounts of data
train <- train %>%
mutate(Construct_year = ifelse(Construct_year <= 2001, 2001, Construct_year))
model4 <- glm(data = train,
formula = Burning_Cost ~ D_age + Construct_year,
family = Gamma())
mse(predict(model4, train, type = "response"), train$Burning_Cost)
mse(predict(model4, val, type = "response"), val$Burning_Cost)
#we do not cap val, because in my view the model should also handle years below 2001
#198.6779 and 284.0205
#the model did not improve, rather the opposite
#also try grouping the years between 2001 and 2005 into 2003, the midpoint (it may fit better)
train <- train %>%
mutate(Construct_year = ifelse(Construct_year > 2001 & Construct_year <= 2005, 2003, Construct_year))
model5 <- glm(data = train,
formula = Burning_Cost ~ D_age + Construct_year ,
family = Gamma())
mse(predict(model5, train, type = "response"), train$Burning_Cost)
mse(predict(model5, val, type = "response"), val$Burning_Cost)
# as before, the model should in my view also distinguish the years in between, so I did not group the years in val
mse(predict(model5, val %>% mutate(Construct_year = ifelse(Construct_year > 2001 & Construct_year <= 2005, 2003, Construct_year)), type = "response"), val$Burning_Cost)
#I also tried it with the grouping applied to val
#errors 198.6768 and 284.0049
#this model is no improvement over model2a
#try adding one more variable to model2a
#after trying many candidate variables, adding Veh_type1 finally produced an improved model
model7 <- glm(data = train,
formula = Burning_Cost ~ D_age + Veh_type1 + Construct_year,
family = Gamma())
mse(predict(model7, train, type = "response"), train$Burning_Cost)
mse(predict(model7, val, type = "response"), val$Burning_Cost)
#190.5903 and 283.5625
#this is the best model so far
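# A minimal sketch of automating the variable search described above, assuming the candidate
# columns exist in both train and val and that val has no factor levels unseen in train
# (otherwise predict() would fail):
candidates <- c("Veh_type1", "Veh_type2", "Construct_year")
val_mse <- sapply(candidates, function(v){
  m <- glm(as.formula(paste("Burning_Cost ~ D_age +", v)), data = train, family = Gamma())
  mse(predict(m, val, type = "response"), val$Burning_Cost)
})
sort(val_mse)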
library(gridExtra)
source("Support/emb_chart.R")
emblem_graph(
dt.frm = train %>% cbind(data.frame(pred = predict(model7, train, type = "response"))),
x_var = "Veh_type1",
target = "Burning_Cost",
prediction = "pred"
)
#the plot shows the fit is quite good except for one region
|
/Lessons/Lesson6/du6.R
|
no_license
|
osifali/GeneralInsurance_Class
|
R
| false | false | 5,532 |
r
|
plotData <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings="?")
## Setting Time Variable
finalData <- plotData[plotData$Date %in% c("1/2/2007","2/2/2007"),]
SetTime <-strptime(paste(finalData$Date, finalData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
finalData <- cbind(SetTime, finalData)
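## SetTime combines Date and Time into a POSIXlt datetime; plot 1 itself only needs the histogram,
## but the column is presumably reused by the time-based plots elsewhere in this assignment.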
## Plot 1
hist(finalData$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
## Save file
dev.copy(png,"plot1.png", width=480, height=480)
dev.off()
|
/Plot 1.R
|
no_license
|
Shyvah/ExData_Plotting1
|
R
| false | false | 525 |
r
|
## ---- echo=FALSE---------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE,
fig.width=6,
fig.height=6)
## ------------------------------------------------------------------------
library(sensemakr)
data("darfur")
## ---- eval = FALSE-------------------------------------------------------
# devtools::install_github("chadhazlett/sensemakr", build_vignettes = TRUE)
## ------------------------------------------------------------------------
lm.out <- lm(peacefactor ~ directlyharmed + age + farmer_dar + herder_dar +
pastvoted + hhsize_darfur + female + village, data = darfur)
## ------------------------------------------------------------------------
sense.out <- sensemakr(model=lm.out, treatment="directlyharmed")
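# sense.out bundles the sensitivity results for the 'directlyharmed' treatment; summary() below
# prints them, and the subsequent plot() calls visualize the same object.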
## ------------------------------------------------------------------------
summary(sense.out)
## ------------------------------------------------------------------------
worstcaseinterpret(sense.out, scenarios=c(.1,.3), q=.5)
## ------------------------------------------------------------------------
plot(sense.out)
## ------------------------------------------------------------------------
plot(sense.out, showvars = list("pastvoted","female"))
## ------------------------------------------------------------------------
plot(sense.out, showvars = list("pastvoted","female"), contour="t-value")
## ------------------------------------------------------------------------
plot(sense.out, showvars = list("pastvoted","female"), contour="upper-limit", lim=.35)
plot(sense.out, showvars = list("pastvoted","female"), contour="lower-limit", lim=.35)
## ------------------------------------------------------------------------
plot(sense.out, type="worst-case",lim=0.5)
## ------------------------------------------------------------------------
sense.out$treat.stats
## ------------------------------------------------------------------------
ls(sense.out$benchmarks)
## ------------------------------------------------------------------------
round(sense.out$benchmarks$benchmark_masked,4)
## ------------------------------------------------------------------------
sense.out$benchmarks$benchmark_group
## ------------------------------------------------------------------------
sense.grp.out = sensemakr(lm.out, treatment="directlyharmed",
group_list = list(c("farmer_dar","herder_dar")))
## ------------------------------------------------------------------------
sense.grp.out$benchmarks$benchmark_group
plot(sense.grp.out, showvars=list("farmer_dar,herder_dar", "female"))
## ------------------------------------------------------------------------
# turn off default labels
plot_out = contourplot(sense.out,ptlab=FALSE,lim=0.02)
head(plot_out$labels) # contains original labels and positions
# overlay new algorithimically spaced labels
library(maptools)
# ?maptools::pointLabel
# "SANN" simulated annealing
with(plot_out$labels,
maptools::pointLabel(x, y,
labels = labels,
method='SANN',
offset = 1, cex = .8))
# compare to default labels of plot(sensemakr)
# contourplot(sense.out,ptlab=TRUE,lim=0.02)
## other "GA" genetic algorithm option
# with(plot_out$labels,
# maptools::pointLabel(x, y,
# labels = labels,
# method='GA',
# offset = 1, cex = .8))
## ----eval=FALSE----------------------------------------------------------
# # locator() method eg handpick placements
#
# # First, use default ptlab=TRUE to show default labels
# # human book-keep, figure out order which points are which,
# plot(sense,type='contour',ptlab=TRUE,lim=0.02)
#
# # one strategy: top to bottom along y-axis
# # age pastvoted framerdar hhsize_darfur herder_dar
#
# # end-user handpicks x-y locations via ?locator()
# # click plot device sequentially according to user's strategy
# pts_pick = locator()
#
# str((plot_out$labels)$labels)
#
# # get index position of '(plot_out$labels)$labels'
# # that matches ranking criteria: y-axis top to bottom
# ind_name_order = c(2,3,6,5,4)
# lab_order = ((plot_out$labels)$labels)[ind_name_order]
#
# # create dataframe of handpicked positions and labels
# lab_pos_manual = cbind(data.frame(pts_pick),lab_order)
#
# # 1) now plot() but toggle off ptlab
# # 2) add text() using end-user created df 'lab_pos_manual'
# plot(sense,type='contour',ptlab=FALSE,lim=0.02)
# with(lab_pos_manual,text(x,y,lab_order,cex=0.6))
|
/vignettes/sensemakr.R
|
no_license
|
guhjy/sensemakr
|
R
| false | false | 4,550 |
r
|
## ---- echo=FALSE---------------------------------------------------------
knitr::opts_chunk$set(echo = TRUE,
fig.width=6,
fig.height=6)
## ------------------------------------------------------------------------
library(sensemakr)
data("darfur")
## ---- eval = FALSE-------------------------------------------------------
# devtools::install_github("chadhazlett/sensemakr", build_vignettes = TRUE)
## ------------------------------------------------------------------------
lm.out <- lm(peacefactor ~ directlyharmed + age + farmer_dar + herder_dar +
pastvoted + hhsize_darfur + female + village, data = darfur)
## ------------------------------------------------------------------------
sense.out <- sensemakr(model=lm.out, treatment="directlyharmed")
## ------------------------------------------------------------------------
summary(sense.out)
## ------------------------------------------------------------------------
worstcaseinterpret(sense.out, scenarios=c(.1,.3), q=.5)
## ------------------------------------------------------------------------
plot(sense.out)
## ------------------------------------------------------------------------
plot(sense.out, showvars = list("pastvoted","female"))
## ------------------------------------------------------------------------
plot(sense.out, showvars = list("pastvoted","female"), contour="t-value")
## ------------------------------------------------------------------------
plot(sense.out, showvars = list("pastvoted","female"), contour="upper-limit", lim=.35)
plot(sense.out, showvars = list("pastvoted","female"), contour="lower-limit", lim=.35)
## ------------------------------------------------------------------------
plot(sense.out, type="worst-case",lim=0.5)
## ------------------------------------------------------------------------
sense.out$treat.stats
## ------------------------------------------------------------------------
ls(sense.out$benchmarks)
## ------------------------------------------------------------------------
round(sense.out$benchmarks$benchmark_masked,4)
## ------------------------------------------------------------------------
sense.out$benchmarks$benchmark_group
## ------------------------------------------------------------------------
sense.grp.out = sensemakr(lm.out, treatment="directlyharmed",
group_list = list(c("farmer_dar","herder_dar")))
## ------------------------------------------------------------------------
sense.grp.out$benchmarks$benchmark_group
plot(sense.grp.out, showvars=list("farmer_dar,herder_dar", "female"))
## ------------------------------------------------------------------------
# turn off default labels
plot_out = contourplot(sense.out,ptlab=FALSE,lim=0.02)
head(plot_out$labels) # contains original labels and positions
# overlay new algorithimically spaced labels
library(maptools)
# ?maptools::pointLabel
# "SANN" simulated annealing
with(plot_out$labels,
maptools::pointLabel(x, y,
labels = labels,
method='SANN',
offset = 1, cex = .8))
# compare to default labels of plot(sensemakr)
# contourplot(sense.out,ptlab=TRUE,lim=0.02)
## other "GA" genetic algorithm option
# with(plot_out$labels,
# maptools::pointLabel(x, y,
# labels = labels,
# method='GA',
# offset = 1, cex = .8))
## ----eval=FALSE----------------------------------------------------------
# # locator() method eg handpick placements
#
# # First, use default ptlab=TRUE to show default labels
# # human book-keep, figure out order which points are which,
# plot(sense,type='contour',ptlab=TRUE,lim=0.02)
#
# # one strategy: top to bottom along y-axis
# # age pastvoted framerdar hhsize_darfur herder_dar
#
# # end-user handpicks x-y locations via ?locator()
# # click plot device sequentially according to user's strategy
# pts_pick = locator()
#
# str((plot_out$labels)$labels)
#
# # get index position of '(plot_out$labels)$labels'
# # that matches ranking criteria: y-axis top to bottom
# ind_name_order = c(2,3,6,5,4)
# lab_order = ((plot_out$labels)$labels)[ind_name_order]
#
# # create dataframe of handpicked positions and labels
# lab_pos_manual = cbind(data.frame(pts_pick),lab_order)
#
# # 1) now plot() but toggle off ptlab
# # 2) add text() using end-user created df 'lab_pos_manual'
# plot(sense,type='contour',ptlab=FALSE,lim=0.02)
# with(lab_pos_manual,text(x,y,lab_order,cex=0.6))
|
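The last chunks of the sensemakr vignette above spread contour-plot labels with maptools::pointLabel() or by hand via locator(); that overlay can be folded into a small helper. This is only a sketch built from the calls shown above, and it assumes maptools is still installed (the package has since been archived on CRAN, so newer setups may need a different label-placement routine).
# Sketch: wrap the contourplot + pointLabel overlay from the vignette above
plot_with_spread_labels <- function(sense, lim = 0.02, label_method = "SANN") {
  plot_out <- contourplot(sense, ptlab = FALSE, lim = lim)   # suppress default point labels
  with(plot_out$labels,
       maptools::pointLabel(x, y, labels = labels,
                            method = label_method, offset = 1, cex = 0.8))
  invisible(plot_out)
}
# plot_with_spread_labels(sense.out)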
## setup environment
iris <- NULL
iris_raw <- NULL
compNames <- NULL
iris_colNames <- NULL
## load data
bootstrap <- function() {
## iris data
iris_tmp <- read.csv("data/iRIS_ALL.csv", header=T)
iris_raw <<- iris_tmp
iris_colNames <<- names(iris_tmp)
## integer columns
intCols <- union(
union(iris_colNames[grep("OS",iris_colNames)],
iris_colNames[grep("EUR",iris_colNames)]),
union(iris_colNames[grep("franchisees",iris_colNames)],
iris_colNames[grep("conversion",iris_colNames)])
)
iris_tmp[,intCols] <- sapply(iris_tmp[,intCols], function(x) as.numeric(gsub(",","",as.character(x))))
iris <<- iris_tmp
##
## scrambled company names
compNames <<- sample(read.csv("data/dummyNames.csv", header=T))
compNames[,2] <<- apply(compNames, 1, function(x) RandomStr())
}
## scramble the company names and numbers
scramble <- function(names = NULL, pct = 1) {
## setup chain name scrambling
iris_chainNames <- as.vector(unique(iris$Chain))
## chain scramble vector
iris_match <- match(iris$Chain, iris_chainNames)
## scramble chain names
iris$Chain <<- compNames$Company.Names[iris_match]
## scramble all numeric columns
numCols <- sapply(iris,class)
numCols <- names(numCols[numCols=="numeric"])
iris[,numCols] <<- iris[,numCols]*pct
}
## create company code
RandomStr <- function(n=1, length=3)
{
randomString <- c(1:n) # initialize vector
for (i in 1:n)
{
randomString[i] <- paste(sample(c(LETTERS),
length, replace=TRUE),
collapse="")
}
return(randomString)
}
|
/bootstrap.R
|
no_license
|
pzacho/GV_iRis
|
R
| false | false | 1,678 |
r
|
## setup environment
iris <- NULL
iris_raw <- NULL
compNames <- NULL
iris_colNames <- NULL
## load data
bootstrap <- function() {
## iris data
iris_tmp <- read.csv("data/iRIS_ALL.csv", header=T)
iris_raw <<- iris_tmp
iris_colNames <<- names(iris_tmp)
## integer columns
intCols <- union(
union(iris_colNames[grep("OS",iris_colNames)],
iris_colNames[grep("EUR",iris_colNames)]),
union(iris_colNames[grep("franchisees",iris_colNames)],
iris_colNames[grep("conversion",iris_colNames)])
)
iris_tmp[,intCols] <- sapply(iris_tmp[,intCols], function(x) as.numeric(gsub(",","",as.character(x))))
iris <<- iris_tmp
##
## scrambled company names
compNames <<- sample(read.csv("data/dummyNames.csv", header=T))
compNames[,2] <<- apply(compNames, 1, function(x) RandomStr())
}
## scramble the company names and numbers
scramble <- function(names = NULL, pct = 1) {
## setup chain name scrambling
iris_chainNames <- as.vector(unique(iris$Chain))
## chain scramble vector
iris_match <- match(iris$Chain, iris_chainNames)
## scramble chain names
iris$Chain <<- compNames$Company.Names[iris_match]
## scramble all numeric columns
numCols <- sapply(iris,class)
numCols <- names(numCols[numCols=="numeric"])
iris[,numCols] <<- iris[,numCols]*pct
}
## create company code
RandomStr <- function(n=1, length=3)
{
randomString <- c(1:n) # initialize vector
for (i in 1:n)
{
randomString[i] <- paste(sample(c(LETTERS),
length, replace=TRUE),
collapse="")
}
return(randomString)
}
|
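RandomStr() in the bootstrap script above grows its result inside an explicit for loop; the same three-letter codes can be produced in a single vectorised call. A sketch with the same defaults and behaviour as the original:
# Sketch: loop-free equivalent of RandomStr()
RandomStrVec <- function(n = 1, length = 3) {
  vapply(seq_len(n),
         function(i) paste(sample(LETTERS, length, replace = TRUE), collapse = ""),
         character(1))
}
# RandomStrVec(5)  # five random 3-letter codes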
library(smallarea)
### Name: fayherriot
### Title: Estimate of the variance component in Fay Herriot Model using
### Fay Herriot Method
### Aliases: fayherriot
### Keywords: fay Herriot small area estimation variance component
### ** Examples
response=c(1,2,3,4,5)
designmatrix=cbind(c(1,1,1,1,1),c(1,2,4,4,1),c(2,1,3,1,5))
randomeffect.var=c(0.5,0.7,0.8,0.4,0.5)
fayherriot(response,designmatrix,randomeffect.var)
|
/data/genthat_extracted_code/smallarea/examples/fayherriot.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 424 |
r
|
library(smallarea)
### Name: fayherriot
### Title: Estimate of the variance component in Fay Herriot Model using
### Fay Herriot Method
### Aliases: fayherriot
### Keywords: fay Herriot small area estimation variance component
### ** Examples
response=c(1,2,3,4,5)
designmatrix=cbind(c(1,1,1,1,1),c(1,2,4,4,1),c(2,1,3,1,5))
randomeffect.var=c(0.5,0.7,0.8,0.4,0.5)
fayherriot(response,designmatrix,randomeffect.var)
|
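The fayherriot() example above estimates the area-level variance component; a common next step is to plug that value into hand-rolled EBLUP shrinkage weights. The sketch below is illustrative only: the element name estimate on the returned object is an assumption (check str(fh) against the installed smallarea version), and the synthetic part uses plain OLS rather than the GLS fit a full Fay-Herriot analysis would use.
fh <- fayherriot(response, designmatrix, randomeffect.var)
psi.hat <- fh$estimate                                  # element name assumed; inspect str(fh)
gamma   <- psi.hat / (psi.hat + randomeffect.var)       # shrinkage weight per small area
beta.ols <- solve(t(designmatrix) %*% designmatrix,
                  t(designmatrix) %*% response)         # OLS synthetic part (illustration only)
eblup   <- gamma * response + (1 - gamma) * as.vector(designmatrix %*% beta.ols)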
## File Name: cdm_print_summary_data_frame.R
## File Version: 0.072
cdm_print_summary_data_frame <- function(obji, from=NULL, to=NULL, digits=3,
rownames_null=FALSE)
{
if (is.vector(obji)){
obji <- round(obji, digits)
} else {
if (is.null(from)){
from <- 1
}
if (is.null(to)){
to <- ncol(obji)
}
ind <- seq( from, to )
for (vv in ind){
obji_vv <- obji[,vv]
if ( is.numeric(obji_vv) ){
obji[, vv ] <- round( obji_vv, digits )
}
}
}
if (rownames_null){
rownames(obji) <- NULL
}
print(obji)
}
|
/R/cdm_print_summary_data_frame.R
|
no_license
|
cran/CDM
|
R
| false | false | 713 |
r
|
## File Name: cdm_print_summary_data_frame.R
## File Version: 0.072
cdm_print_summary_data_frame <- function(obji, from=NULL, to=NULL, digits=3,
rownames_null=FALSE)
{
if (is.vector(obji)){
obji <- round(obji, digits)
} else {
if (is.null(from)){
from <- 1
}
if (is.null(to)){
to <- ncol(obji)
}
ind <- seq( from, to )
for (vv in ind){
obji_vv <- obji[,vv]
if ( is.numeric(obji_vv) ){
obji[, vv ] <- round( obji_vv, digits )
}
}
}
if (rownames_null){
rownames(obji) <- NULL
}
print(obji)
}
|
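A quick usage sketch of cdm_print_summary_data_frame() above with a toy data frame (values are made up): columns 2 to 3 are numeric, so they are rounded to the requested number of digits before printing, while the character column is left untouched.
# Sketch: toy call of the helper above
obji <- data.frame(item = c("I1", "I2"),
                   est  = c(0.123456, 1.987654),
                   se   = c(0.0123456, 0.0456789))
cdm_print_summary_data_frame(obji, from = 2, to = 3, digits = 3, rownames_null = TRUE)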
# f24_across_periods.R
# CAN RUN THIS ON SERVER AS IS TAKES 2.5 HOURS
# Jake Yeung
# 2015-09-22
# library("devtools")
# dev_mode()
#
# install("~/projects/f24") # use jake branch
# library(f24.R2.cycling)
library(dplyr)
library(ggplot2)
setwd("/home/yeung/projects/tissue-specificity")
# Source ------------------------------------------------------------------
source("scripts/functions/FitRhythmic.R")
source("scripts/functions/GetClockGenes.R")
source("scripts/functions/PlotGeneAcrossTissues.R")
source("scripts/functions/FitRhythmicAcrossPeriods.R")
# Load data ---------------------------------------------------------------
load("Robjs/dat.long.fixed_rik_genes.Robj")
# load("Robjs/dat.long.Robj")
# load("Robjs/dat.fit.scan_periods.genome_wide.Robj", verbose=T)
load("/home/yeung/projects/tissue-specificity/Robjs/dat.fit.scan_periods.genome_wide.10_to_30.Robj", verbose=T)
dat.fit.periods.genome_wide.10_to_30 <- dat.fit.periods.genome_wide
load("/home/yeung/projects/tissue-specificity/Robjs/dat.fit.scan_periods.genome_wide.5_10.Robj", verbose=T)
dat.fit.periods.genome_wide <- rbind(dat.fit.periods.genome_wide, dat.fit.periods.genome_wide.10_to_30)
dat.fit.periods.genome_wide.min <- dat.fit.periods.genome_wide %>%
group_by(gene, tissue) %>%
filter(period == period[which.min(ssq.residuals)])
# dat.fit.periods.min.check <- subset(dat.fit.periods.genome_wide.min, gene %in% unique(dat.fit.periods.min$gene)) # from .min from clockgenes
dat.fit.periods.sub <- subset(dat.fit.periods.genome_wide.min, amp > 0.5 & pval < 1e-5)
ggplot(subset(dat.fit.periods.sub, tissue != "WFAT"), aes(x = period)) + geom_histogram(binwidth = diff(range(dat.fit.periods.genome_wide.min$period))/100) + facet_wrap(~tissue)
ggplot(subset(dat.fit.periods.sub, tissue != "WFAT"), aes(x = period)) +
geom_histogram(binwidth = diff(range(dat.fit.periods.genome_wide.min$period))/100) +
geom_vline(xintercept=24, linetype="dotted") + geom_vline(xintercept=12, linetype="dotted")
tiss <- "BFAT"; gen <- "Myf6"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "Adr"; gen <- "Elovl3"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "BFAT"; gen <- "Ampd1"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "BFAT"; gen <- "Ampd1"; exper="rnaseq"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "Liver"; gen <- "Thrsp"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "Mus"; gen <- "Tnni1"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
|
/scripts/fourier/f24_across_periods.genome_wide.analyze.10_to_30.R
|
no_license
|
jakeyeung/Yeung_et_al_2018_TissueSpecificity
|
R
| false | false | 3,655 |
r
|
# f24_across_periods.R
# CAN RUN THIS ON SERVER AS IS TAKES 2.5 HOURS
# Jake Yeung
# 2015-09-22
# library("devtools")
# dev_mode()
#
# install("~/projects/f24") # use jake branch
# library(f24.R2.cycling)
library(dplyr)
library(ggplot2)
setwd("/home/yeung/projects/tissue-specificity")
# Source ------------------------------------------------------------------
source("scripts/functions/FitRhythmic.R")
source("scripts/functions/GetClockGenes.R")
source("scripts/functions/PlotGeneAcrossTissues.R")
source("scripts/functions/FitRhythmicAcrossPeriods.R")
# Load data ---------------------------------------------------------------
load("Robjs/dat.long.fixed_rik_genes.Robj")
# load("Robjs/dat.long.Robj")
# load("Robjs/dat.fit.scan_periods.genome_wide.Robj", verbose=T)
load("/home/yeung/projects/tissue-specificity/Robjs/dat.fit.scan_periods.genome_wide.10_to_30.Robj", verbose=T)
dat.fit.periods.genome_wide.10_to_30 <- dat.fit.periods.genome_wide
load("/home/yeung/projects/tissue-specificity/Robjs/dat.fit.scan_periods.genome_wide.5_10.Robj", verbose=T)
dat.fit.periods.genome_wide <- rbind(dat.fit.periods.genome_wide, dat.fit.periods.genome_wide.10_to_30)
dat.fit.periods.genome_wide.min <- dat.fit.periods.genome_wide %>%
group_by(gene, tissue) %>%
filter(period == period[which.min(ssq.residuals)])
# dat.fit.periods.min.check <- subset(dat.fit.periods.genome_wide.min, gene %in% unique(dat.fit.periods.min$gene)) # from .min from clockgenes
dat.fit.periods.sub <- subset(dat.fit.periods.genome_wide.min, amp > 0.5 & pval < 1e-5)
ggplot(subset(dat.fit.periods.sub, tissue != "WFAT"), aes(x = period)) + geom_histogram(binwidth = diff(range(dat.fit.periods.genome_wide.min$period))/100) + facet_wrap(~tissue)
ggplot(subset(dat.fit.periods.sub, tissue != "WFAT"), aes(x = period)) +
geom_histogram(binwidth = diff(range(dat.fit.periods.genome_wide.min$period))/100) +
geom_vline(xintercept=24, linetype="dotted") + geom_vline(xintercept=12, linetype="dotted")
tiss <- "BFAT"; gen <- "Myf6"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "Adr"; gen <- "Elovl3"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "BFAT"; gen <- "Ampd1"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "BFAT"; gen <- "Ampd1"; exper="rnaseq"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "Liver"; gen <- "Thrsp"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
tiss <- "Mus"; gen <- "Tnni1"; exper="array"
dat.sub <- subset(dat.long, gene == gen & tissue == tiss)
period.min <- subset(dat.fit.periods.genome_wide.min, tissue == tiss & gene == gen)$period
PlotFitTwoPeriods(dat.sub, period1 = 24, period2 = period.min, tiss, gen, exper)
|
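The group_by()/filter(period == period[which.min(ssq.residuals)]) step in the script above keeps, for every gene and tissue, the scanned period with the smallest residual sum of squares. With dplyr >= 1.0 the same selection can be written with slice_min(); a sketch assuming the same column names:
# Sketch: best-period selection via slice_min (dplyr >= 1.0)
dat.fit.periods.genome_wide.min <- dat.fit.periods.genome_wide %>%
  group_by(gene, tissue) %>%
  slice_min(ssq.residuals, n = 1, with_ties = FALSE) %>%
  ungroup()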
# Siwei 02 Jul 2021
# plot read size distribution of the 20 lines
# init
library(readr)
library(ggplot2)
library(RColorBrewer)
library(stringr)
# load data
size.data.files <-
list.files(path = "size_dist/",
pattern = "*.txt")
list.size.data <- vector(mode = "list", length = length(size.data.files))
i <- 1
for (i in 1:length(size.data.files)) {
list.size.data[[i]] <-
read_delim(paste0("size_dist/",
size.data.files[i]),
delim = "\t",
escape_double = FALSE,
trim_ws = TRUE,
skip = 10)
}
names(list.size.data) <-
str_replace_all(str_remove_all(size.data.files, pattern = "\\.txt"),
pattern = "Glut_rapid_neuron20_",
replacement = "Cell line ")
# add percentage to list.size.data
i <- 1
for (i in 1:length(size.data.files)) {
list.size.data[[i]]$percentage <-
list.size.data[[i]]$All_Reads.fr_count * 100 / sum(list.size.data[[i]]$All_Reads.fr_count)
list.size.data[[i]]$max.percent <-
list.size.data[[i]]$All_Reads.fr_count * 100 / max(list.size.data[[i]]$All_Reads.fr_count)
}
# make plot
colour.list <- RColorBrewer::brewer.pal(10, "Paired")
##
i <- 1
for (i in 1:10) {
if (i == 1) {
plot.size.dist <-
ggplot() +
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i],
alpha = 0.7)
} else {
plot.size.dist <-
plot.size.dist +
# ggplot() + # note no new ggplot() is required here
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i],
alpha = 0.7)
}
}
plot.size.dist <-
plot.size.dist +
theme_classic() +
xlab("Insert size") +
ylab(("Percentage")) +
xlim(0, 800)
plot.size.dist
##
rm(plot.size.dist)
i <- 11
for (i in 11:20) {
print(i)
if (i == 11) {
plot.size.dist <-
ggplot() +
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i - 10],
alpha = 0.7)
} else {
plot.size.dist <-
plot.size.dist +
# ggplot() + # note no new ggplot() is required here
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i - 10],
alpha = 0.7)
}
}
plot.size.dist <-
plot.size.dist +
theme_classic() +
  xlab("Insert size") +
  ylab("Percentage") +
xlim(0, 800)
plot.size.dist
|
/Siwei_analysis/code_R/VPS45/code_02Jul2021_plot_size_dist.R
|
no_license
|
endeneon/VPS45_repo
|
R
| false | false | 2,713 |
r
|
# Siwei 02 Jul 2021
# plot read size distribution of the 20 lines
# init
library(readr)
library(ggplot2)
library(RColorBrewer)
library(stringr)
# load data
size.data.files <-
list.files(path = "size_dist/",
pattern = "*.txt")
list.size.data <- vector(mode = "list", length = length(size.data.files))
i <- 1
for (i in 1:length(size.data.files)) {
list.size.data[[i]] <-
read_delim(paste0("size_dist/",
size.data.files[i]),
delim = "\t",
escape_double = FALSE,
trim_ws = TRUE,
skip = 10)
}
names(list.size.data) <-
str_replace_all(str_remove_all(size.data.files, pattern = "\\.txt"),
pattern = "Glut_rapid_neuron20_",
replacement = "Cell line ")
# add percentage to list.size.data
i <- 1
for (i in 1:length(size.data.files)) {
list.size.data[[i]]$percentage <-
list.size.data[[i]]$All_Reads.fr_count * 100 / sum(list.size.data[[i]]$All_Reads.fr_count)
list.size.data[[i]]$max.percent <-
list.size.data[[i]]$All_Reads.fr_count * 100 / max(list.size.data[[i]]$All_Reads.fr_count)
}
# make plot
colour.list <- RColorBrewer::brewer.pal(10, "Paired")
##
i <- 1
for (i in 1:10) {
if (i == 1) {
plot.size.dist <-
ggplot() +
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i],
alpha = 0.7)
} else {
plot.size.dist <-
plot.size.dist +
# ggplot() + # note no new ggplot() is required here
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i],
alpha = 0.7)
}
}
plot.size.dist <-
plot.size.dist +
theme_classic() +
xlab("Insert size") +
ylab(("Percentage")) +
xlim(0, 800)
plot.size.dist
##
rm(plot.size.dist)
i <- 11
for (i in 11:20) {
print(i)
if (i == 11) {
plot.size.dist <-
ggplot() +
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i - 10],
alpha = 0.7)
} else {
plot.size.dist <-
plot.size.dist +
# ggplot() + # note no new ggplot() is required here
geom_line(data = list.size.data[[i]],
aes(x = insert_size,
y = max.percent),
colour = colour.list[i - 10],
alpha = 0.7)
}
}
plot.size.dist <-
plot.size.dist +
theme_classic() +
  xlab("Insert size") +
  ylab("Percentage") +
xlim(0, 800)
plot.size.dist
|
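The two for loops in the script above add one geom_line() layer per cell line; the more idiomatic ggplot2 pattern is to bind the per-sample tables into one long data frame and map colour to the sample name, which also yields a legend automatically. A sketch assuming list.size.data is the named list built above:
# Sketch: single-layer replacement for the per-cell-line loops
library(dplyr)
size.long <- bind_rows(list.size.data, .id = "cell_line")
ggplot(size.long, aes(x = insert_size, y = max.percent, colour = cell_line)) +
  geom_line(alpha = 0.7) +
  theme_classic() +
  xlab("Insert size") +
  ylab("Percentage") +
  xlim(0, 800)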
function(input, output, session){
# Combine the selected variables into a new data frame
selectedData <- reactive({
iris[,c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
|
/server.R
|
no_license
|
Anshul-Arya/K_Mean-Using-Shiny
|
R
| false | false | 616 |
r
|
function(input, output, session){
# Combine the selected variables into a new data frame
selectedData <- reactive({
iris[,c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
|
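The Shiny server above expects inputs named xcol, ycol and clusters and an output slot plot1, but the matching UI is not part of this record. The sketch below is a hypothetical ui.R in the style of the standard k-means example; only the input/output ids are fixed by the server, everything else is an assumption.
# Hypothetical ui.R matching the server function above
library(shiny)
fluidPage(
  titlePanel("Iris k-means clustering"),
  sidebarLayout(
    sidebarPanel(
      selectInput("xcol", "X Variable", names(iris)[1:4]),
      selectInput("ycol", "Y Variable", names(iris)[1:4], selected = names(iris)[2]),
      numericInput("clusters", "Cluster count", 3, min = 1, max = 9)
    ),
    mainPanel(plotOutput("plot1"))
  )
)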
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
######################################################################################
# This pyunit test is written to ensure that the AUCPR metric can restrict the model training time.
# See PUBDEV-6684.
######################################################################################
pubdev_6684_test <-
function() {
# random forest
training1_data <- h2o.importFile(locate("smalldata/junit/cars_20mpg.csv"))
y_index <- "economy_20mpg"
x_indices <- c(3:8)
training1_data["economy_20mpg"] <- as.factor(training1_data["economy_20mpg"])
print("************* starting max_runtime_test for Random Forest")
model <- h2o.randomForest(y=y_index, x=x_indices, training_frame=training1_data,
seed=12345, stopping_rounds=5, stopping_metric="AUCPR", stopping_tolerance=0.1)
numTreesEarlyStop <- model@model$model_summary$number_of_trees
print("number of trees built with AUCPR early-stopping is")
print(numTreesEarlyStop)
model2 <- h2o.randomForest(y=y_index, x=x_indices, training_frame=training1_data, seed=12345)
numTrees <- model2@model$model_summary$number_of_trees
print("number of trees built without AUCPR early-stopping is")
print(numTrees)
expect_true(numTrees >= numTreesEarlyStop)
}
doTest("Perform the test for pubdev 6684: use AUCPR as an early stopping metric", pubdev_6684_test)
|
/h2o-r/tests/testdir_jira/runit_pubdev_6684_AUCPR_early_stop.R
|
permissive
|
h2oai/h2o-3
|
R
| false | false | 1,469 |
r
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
######################################################################################
# This pyunit test is written to ensure that the AUCPR metric can restrict the model training time.
# See PUBDEV-6684.
######################################################################################
pubdev_6684_test <-
function() {
# random forest
training1_data <- h2o.importFile(locate("smalldata/junit/cars_20mpg.csv"))
y_index <- "economy_20mpg"
x_indices <- c(3:8)
training1_data["economy_20mpg"] <- as.factor(training1_data["economy_20mpg"])
print("************* starting max_runtime_test for Random Forest")
model <- h2o.randomForest(y=y_index, x=x_indices, training_frame=training1_data,
seed=12345, stopping_rounds=5, stopping_metric="AUCPR", stopping_tolerance=0.1)
numTreesEarlyStop <- model@model$model_summary$number_of_trees
print("number of trees built with AUCPR early-stopping is")
print(numTreesEarlyStop)
model2 <- h2o.randomForest(y=y_index, x=x_indices, training_frame=training1_data, seed=12345)
numTrees <- model2@model$model_summary$number_of_trees
print("number of trees built without AUCPR early-stopping is")
print(numTrees)
expect_true(numTrees >= numTreesEarlyStop)
}
doTest("Perform the test for pubdev 6684: use AUCPR as an early stopping metric", pubdev_6684_test)
|
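To see where the AUCPR-based stopping in the test above actually halted tree building, the scoring histories of the two models can be compared. A sketch, assuming the models from the test are still in scope and that h2o.scoreHistory() returns a data-frame-like table (available columns vary a little across h2o releases):
# Sketch: compare scoring histories of the early-stopped and default models
sh_early   <- h2o.scoreHistory(model)    # stopping_metric = "AUCPR"
sh_default <- h2o.scoreHistory(model2)   # no early stopping
print(colnames(sh_early))                # inspect which metrics this h2o version records
print(nrow(sh_early) <= nrow(sh_default))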
print.summary.cmulti <-
function (x, digits, ...)
{
if (missing(digits))
digits <- max(3, getOption("digits") - 3)
cat("\nCall:", deparse(x$call,
width.cutoff = floor(getOption("width") * 0.85)), "", sep = "\n")
if (x$type == "dis")
cat("Distance Sampling (half-normal, circular area)\n")
if (x$type == "rem")
cat("Removal Sampling (homogeneous singing rate)\n")
if (x$type == "mix")
cat("Removal Sampling (heterogeneous singing rate)\n")
cat(paste("Conditional Maximum Likelihood estimates\n\n", sep = ""))
cat(paste("Coefficients:\n", sep = ""))
printCoefmat(x$coefficients, digits = digits, signif.legend = FALSE)
if (!any(is.na(array(x$coefficients)))) {
if (getOption("show.signif.stars") & any(x$coefficients[,4] < 0.1))
cat("---\nSignif. codes: ", "0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1", "\n")
}
cat("\nLog-likelihood:", formatC(x$loglik, digits = digits),
"\nBIC =", formatC(x$bic, digits = digits), "\n")
cat("\n")
invisible(x)
}
|
/detect/R/print.summary.cmulti.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 1,071 |
r
|
print.summary.cmulti <-
function (x, digits, ...)
{
if (missing(digits))
digits <- max(3, getOption("digits") - 3)
cat("\nCall:", deparse(x$call,
width.cutoff = floor(getOption("width") * 0.85)), "", sep = "\n")
if (x$type == "dis")
cat("Distance Sampling (half-normal, circular area)\n")
if (x$type == "rem")
cat("Removal Sampling (homogeneous singing rate)\n")
if (x$type == "mix")
cat("Removal Sampling (heterogeneous singing rate)\n")
cat(paste("Conditional Maximum Likelihood estimates\n\n", sep = ""))
cat(paste("Coefficients:\n", sep = ""))
printCoefmat(x$coefficients, digits = digits, signif.legend = FALSE)
if (!any(is.na(array(x$coefficients)))) {
if (getOption("show.signif.stars") & any(x$coefficients[,4] < 0.1))
cat("---\nSignif. codes: ", "0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1", "\n")
}
cat("\nLog-likelihood:", formatC(x$loglik, digits = digits),
"\nBIC =", formatC(x$bic, digits = digits), "\n")
cat("\n")
invisible(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_filter_barcodes.R
\name{.sc_filter_barcodes_single_sample}
\alias{.sc_filter_barcodes_single_sample}
\title{Compute barcode filters for a single sample}
\usage{
.sc_filter_barcodes_single_sample(sce, barcode_filter)
}
\arguments{
\item{sce}{SingleCellExperiment object to compute filters for}
}
\value{
}
\description{
Compute barcode filters for a single sample
}
\examples{
NULL
}
|
/man/dot-sc_filter_barcodes_single_sample.Rd
|
no_license
|
keshav-motwani/tregPaper
|
R
| false | true | 465 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_filter_barcodes.R
\name{.sc_filter_barcodes_single_sample}
\alias{.sc_filter_barcodes_single_sample}
\title{Compute barcode filters for a single sample}
\usage{
.sc_filter_barcodes_single_sample(sce, barcode_filter)
}
\arguments{
\item{sce}{SingleCellExperiment object to compute filters for}
}
\value{
}
\description{
Compute barcode filters for a single sample
}
\examples{
NULL
}
|