content (large_string, lengths 0-6.46M) | path (large_string, lengths 3-331) | license_type (large_string, 2 values) | repo_name (large_string, lengths 5-125) | language (large_string, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 values) | text (string, lengths 0-6.46M) |
---|---|---|---|---|---|---|---|---|---|
library(tidyverse)
input = scan("solutions/day09/input")
# Part I ----
check_sum = function(preamble_length = 5, current_location = preamble_length + 1){
result = input[current_location]
stopifnot(current_location <= length(input))
# look back at exactly the preceding preamble_length numbers
leading = input[(current_location - preamble_length):(current_location - 1)]
addends = leading[(result - leading) %in% leading]
# exclude addends that are exactly half of the result
final_addends = addends[addends != result/2]
# do we have a result?
if (length(final_addends) == 0){return(result)}
# if not, go again:
check_sum(preamble_length, current_location + 1)
}
check_sum(25)
# Part II ----
# using global variables instead of function arguments:
# (otherwise the recursion exceeds the stack limit when these are passed as arguments)
starting_location = 1
current_location = starting_location + 1
result = check_sum(25)
addends_sum = 0
check_contiguous = function(){
print(paste("starting_location:", starting_location))
print(paste("current_location:", current_location))
# note the <<- for assignment into the global environment
# this ensures each recursive call does not add its own copy of this variable to the stack
addends_sum <<- sum(input[starting_location:current_location])
if (addends_sum == result){
return(sum(range(input[starting_location:current_location])))
} else if (addends_sum < result){
starting_location <<- starting_location
current_location <<- current_location + 1
check_contiguous()
} else if (addends_sum > result){
starting_location <<- starting_location + 1
check_contiguous()
} else{
stop("okou, this shouldn't happen")
}
}
check_contiguous()
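# A non-recursive alternative sketch for Part II (not from the original solution;
# it assumes the same global `input` and `result` as above): a two-pointer scan
# that avoids deep recursion and the <<- bookkeeping entirely.
check_contiguous_iterative = function(target = result){
  lo = 1
  hi = 2  # window of at least two numbers
  repeat {
    window_sum = sum(input[lo:hi])
    if (window_sum == target){
      return(sum(range(input[lo:hi])))  # min + max of the run, as above
    } else if (window_sum < target){
      hi = hi + 1
      if (hi > length(input)) stop("no contiguous range found")
    } else {
      lo = lo + 1
      if (lo >= hi) hi = lo + 1  # keep at least two numbers in the window
    }
  }
}
# check_contiguous_iterative()  # should agree with check_contiguous()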
|
/solutions/day09/encoding_error.R
|
no_license
|
riinuots/advent2020
|
R
| false | false | 1,687 |
r
|
|
####################
# 1 - query database for bbs data
#
# REQUIRES PASSWORD - USE PROVIDED .rds OBJECT TO RECREATE DEPENDENT ANALYSES
#
# Creates:
# * bbs_query_5930_2018-10-30.rds -> bbs results from query
####################
# top-level dir --------------------------------------------------------------
dir <- '~/Google_Drive/R/'
# Load packages -----------------------------------------------------------
library(dplyr)
library(RPostgreSQL)
library(DBI)
# set wd ------------------------------------------------------------------
setwd(paste0(dir, 'Example_BBS/Data/'))
# access DB ---------------------------------------------------------------
pass <- readLines('db_pass.txt')
pg <- DBI::dbDriver("PostgreSQL")
cxn <- DBI::dbConnect(pg,
user = "cyoungflesh",
password = pass,
host = "35.221.16.125",
port = 5432,
dbname = "sightings")
# Get all routes run in 2016 ----------------------------------------------------
# All unique surveys
# Just 2016
# Only rpid = 101 and routetypedetailid = 1
# see metadata here: https://github.com/phenomismatch/sightings-database/blob/master/docs/metadata/bbs/RunType.pdf
# need to zero fill as 0 counts are meaningful
data <- DBI::dbGetQuery(cxn, paste0("SELECT DISTINCT ON (event_id) event_id, year, day,
lng, lat,
(place_json ->> 'countrynum')::int AS country_number,
(place_json ->> 'statenum')::int AS state_number,
(place_json ->> 'route')::int AS route_number,
(place_json ->> 'routename') AS route_name,
(event_json ->> 'rpid')::int AS rpid,
(event_json ->> 'runtype')::int AS runtype,
(place_json ->> 'routetypedetailid')::int AS routetypedetailid
FROM places
JOIN events USING (place_id)
JOIN counts USING (event_id)
JOIN taxons USING (taxon_id)
WHERE dataset_id = 'bbs'
AND (place_json ->> 'routetypedetailid')::int = 1
AND (event_json ->> 'rpid')::int = 101
AND year = 2016;
"))
#add target species count column
data[c('target_count')] <- NA
#Northern Cardinal AOU
AOU <- '5930'
#query just cardinal
temp <- DBI::dbGetQuery(cxn, paste0("SELECT event_id, year, day,
lng, lat, count, common_name,
(place_json ->> 'countrynum')::int AS country_number,
(place_json ->> 'statenum')::int AS state_number,
(place_json ->> 'route')::int AS route_number,
(place_json ->> 'routename') AS route_name,
(event_json ->> 'rpid')::int AS rpid,
(event_json ->> 'runtype')::int AS runtype,
(count_json ->> 'aou') AS aou,
(place_json ->> 'routetypedetailid')::int AS routetypedetailid
FROM places
JOIN events USING (place_id)
JOIN counts USING (event_id)
JOIN taxons USING (taxon_id)
WHERE dataset_id = 'bbs'
AND (place_json ->> 'routetypedetailid')::int = 1
AND (event_json ->> 'rpid')::int = 101
AND (count_json ->> 'aou') ~ '\\y",AOU,"\\y'
AND year = 2016;
"))
#indices in data that match species event ids
ind <- which(data$event_id %in% temp$event_id)
#align counts by event_id rather than relying on row order
data[ind,'target_count'] <- temp$count[match(data$event_id[ind], temp$event_id)]
#indices to fill with 0s (observations of species not made for these events)
n_ind <- (1:NROW(data))[-ind]
data[n_ind,'target_count'] <- 0
query_rds_name <- paste0('bbs_query_', AOU, '_', Sys.Date(), '.rds')
saveRDS(data, paste0(query_rds_name))
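#quick sanity check of the saved query (a sketch added here, not in the original
#script): re-load the .rds written above and confirm the zero-fill worked
if (file.exists(query_rds_name)) {
  check <- readRDS(query_rds_name)
  print(table(zero_filled = check$target_count == 0))
}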
|
/Example_BBS/Scripts/1-query-db.R
|
no_license
|
MortonArb-ForestEcology/MSB_Non-Stationarity
|
R
| false | false | 4,506 |
r
|
|
get_annual_temp_peaks = function(wtr){
temp_max = apply(wtr[,-1], 1, max, na.rm=TRUE)
years = as.POSIXlt(wtr$DateTime)$year + 1900
uyears = unique(years)
output = data.frame(year=uyears, max_date=as.POSIXct(Sys.Date()), max_val=NA)
for(i in 1:nrow(output)){
if(diff(range(wtr$DateTime[years==output$year[i]])) < as.difftime(364,units="days")){
output$max_date[i] = NA
output$max_val[i] = NA
next
}
max_indx = which.max(temp_max[years==output$year[i]])
output$max_date[i] = wtr$DateTime[years==output$year[i]][max_indx]
output$max_val[i] = temp_max[years==output$year[i]][max_indx]
}
return(output)
}
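# Minimal usage sketch (not part of the original file); it assumes `wtr` is a
# data.frame with a POSIXct DateTime column followed by one temperature column
# per depth, which is what the function above expects.
dates <- seq(as.Date("2010-01-01"), as.Date("2011-12-31"), by = "day")
wtr_demo <- data.frame(
  DateTime = as.POSIXct(dates),
  temp_0m  = 10 + 8 * sin(seq(0, 4 * pi, length.out = length(dates))),
  temp_5m  =  8 + 6 * sin(seq(0, 4 * pi, length.out = length(dates)))
)
get_annual_temp_peaks(wtr_demo)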
|
/R/get_annual_temp_peaks.R
|
permissive
|
USGS-R/mda.lakes
|
R
| false | false | 645 |
r
|
|
# required packages
# install.packages("Rstem")
# install.packages("tm")
# install.packages("https://cran.r-project.org/src/contrib/Archive/sentiment/sentiment_0.2.tar.gz", repo=NULL, type="source")
# install.packages("twitteR")
# install.packages("ROAuth")
# install.packages(c("devtools", "rjson", "bit64", "httr"))
#install.packages("rjson")
#install.packages("MASS")
library(twitteR)
library(sentiment)
library(plyr)
library(ggplot2)
library(wordcloud)
library(RColorBrewer)
library("ROAuth")
library("rjson")
library("httr")
library("devtools")
library("bit64")
#install.packages("rjson")
# Download "cacert.pem" file
download.file(url="http://curl.haxx.se/ca/cacert.pem",destfile="cacert.pem")
# Twitter API credentials for the OAuth handshake (setup_twitter_oauth) below
consumerKey<-"p6KNwzh98FnSGEYehRgLumT2u"
consumerSecret<-"NHft3vE3qjDyParpX1cOVsRxGYLRJDZG9EWVHleyYM08rIvVAs"
requestURL<-"https://api.twitter.com/oauth/request_token"
accessURL<-"https://api.twitter.com/oauth/access_token"
authURL<-"https://api.twitter.com/oauth/authorize"
access_token<-"456361928-ZdLwRJx4Ljbvhh8oBKOW6sGV2avTolb6ceb2QwnA"
access_secret<-"qWtW3JxRzhFho3JOf0d1k733veajV7Zp2F99GtqIOjdiz"
setup_twitter_oauth(consumerKey, consumerSecret, access_token, access_secret)
searchResults <- searchTwitter("#parisAttack", n=1500, since = as.character(Sys.Date()-30), until = as.character(Sys.Date()))
#head(searchResults)
some_txt = sapply(searchResults, function(x) x$getText())
#head(some_txt)
# remove retweet entities
some_txt = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", some_txt)
# remove at people
some_txt = gsub("@\\w+", "", some_txt)
# remove punctuation
some_txt = gsub("[[:punct:]]", "", some_txt)
# remove numbers
some_txt = gsub("[[:digit:]]", "", some_txt)
# remove html links
some_txt = gsub("http\\w+", "", some_txt)
# collapse runs of spaces/tabs into a single space
some_txt = gsub("[ \t]{2,}", " ", some_txt)
some_txt = gsub("^\\s+|\\s+$", "", some_txt)
try.error = function(x)
{
# create missing value
y = NA
# tryCatch error
try_error = tryCatch(tolower(x), error=function(e) e)
# if not an error
if (!inherits(try_error, "error"))
y = tolower(x)
# result
return(y)
}
# lower case using try.error with sapply
some_txt = sapply(some_txt, try.error)
# remove NAs in some_txt
some_txt = some_txt[!is.na(some_txt)]
names(some_txt) = NULL
# classify emotion
class_emo = classify_emotion(some_txt, algorithm="bayes", prior=1.0)
# get emotion best fit
#View(class_emo)
emotion = class_emo[,7]
# substitute NA's by "unknown"
emotion[is.na(emotion)] = "unknown"
# classify polarity
class_pol = classify_polarity(some_txt, algorithm="bayes")
# get polarity best fit
polarity = class_pol[,4]
# data frame with results
sent_df = data.frame(text=some_txt, emotion=emotion,
polarity=polarity, stringsAsFactors=FALSE)
# sort data frame
sent_df = within(sent_df,
emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
# plot distribution of emotions
ggplot(sent_df, aes(x=emotion)) +
geom_bar(aes(y=..count.., fill=emotion)) +
scale_fill_brewer(palette="Dark2") +
labs(x="emotion categories", y="number of tweets")
ggplot(sent_df, aes(x=polarity)) +
geom_bar(aes(y=..count.., fill=polarity)) +
scale_fill_brewer(palette="RdGy") +
labs(x="polarity categories", y="number of tweets")
# separating text by emotion
emos = levels(factor(sent_df$emotion))
nemo = length(emos)
emo.docs = rep("", nemo)
for (i in 1:nemo)
{
tmp = some_txt[emotion == emos[i]]
emo.docs[i] = paste(tmp, collapse=" ")
}
# remove stopwords
emo.docs = removeWords(emo.docs, stopwords("english"))
# create corpus
corpus = Corpus(VectorSource(emo.docs))
tdm = TermDocumentMatrix(corpus)
tdm = as.matrix(tdm)
colnames(tdm) = emos
# comparison word cloud
comparison.cloud(tdm, colors = brewer.pal(nemo, "Dark2"),
scale = c(3,.5), random.order = FALSE, title.size = 1.5)
|
/Twitter/Sentiment Analysis.R
|
no_license
|
navd/Sentiment-Analysis
|
R
| false | false | 3,961 |
r
|
|
#' Identify differentially expressed genes.
#'
#' This function makes two statistics available for the VR process
#' (1) 'linearSelection' will automatically be chosen if there is only one group in the data
#' (2) 'wilcox' a C++ re-implementation of the Seurat::FindAllMarkers function (default)
#'
#' @name getDifferentials
#' @docType methods
#' @description Calculates the statistics for one CellexalVR selection file.
#' @param x, cellexalvrR object
#' @param cellidfile file containing cell IDs
#' @param deg.method The method to use to find DEGs (only 'wilcox' supported at the moment)
#' @param num.sig number of differential genes to return (250)
#' @param Log log the results (default=TRUE)
#' @param logfc.threshold the Seurat logfc.threshold option (default here 1 vs 0.25 in Seurat)
#' @param minPct the minimum percent of expressing cells in a group (default 0.1)
#' @param onlyPos select only genes showing a higher expression in the group (default =T)
#' @param report4genes a list of genes you want to get a report on.
#' @param text an optional additional text to show in the log
#' @param bulkStats apply stats to pseudo bulk samples instead of cells? (default TRUE)
#' @param nameAdd one string to add to the cluster id in both the stats table and the ToppGene table.
#' @param showIDs plot the IDs on the drc plots. This will cause problems if there are many IDs (default TRUE)
#' @keywords DEGs
#' @title main Statistics function for cellexalvrR
#' @examples
#' \dontrun{
#' getDifferentials( x, cellidfile= 'User.group.2', deg.method='wilcox')@usedObj$deg.genes
#' }
#' @return the cellexalvrR object with the stats table stored in x@usedObj$sigGeneLists$Cpp[[x@usedObj$lastGroup]]
#' and significant genes can be accessed in the x@usedObj$deg.genes slot.
#' @export
#if ( ! isGeneric('getDifferentials') ){
setGeneric('getDifferentials', ## Name
function (x,cellidfile, deg.method='wilcox', num.sig=250, Log=TRUE,
logfc.threshold = .5, minPct=0.2, onlyPos=TRUE, report4genes= NULL,
text = NULL, bulkStats = TRUE, nameAdd=NULL, showIDs=TRUE ) {
standardGeneric('getDifferentials')
}
)
#}
#' @rdname getDifferentials
setMethod('getDifferentials', signature = c ('cellexalvrR', "character"),
definition = function (x,cellidfile, deg.method='wilcox', num.sig=250, Log=TRUE,
logfc.threshold = 0.5, minPct=0.2, onlyPos=TRUE, report4genes= NULL,
text = NULL, bulkStats = TRUE, nameAdd=NULL, showIDs=TRUE ) {
x <- loadObject(x) #function definition in file 'lockedSave.R'
x= check(x)
if ( nrow( x@data) <= 1000 ){
logfc.threshold = minPct = 0
}
x <- userGrouping(x, cellidfile) #function definition in file 'userGrouping.R'
cellidfile = x@usedObj$lastGroup
num.sig <- as.numeric( num.sig )
accepted = c('wilcox','Seurat_wilcox', 'bimod', 'roc', 't', 'tobit', 'poisson', 'negbinom', 'MAST', 'DESeq2', 'anova')
if ( sum(unlist(lapply( accepted, function(ok) { return ( ok == deg.method )} ))) != 1 ) {
stop( paste('The deg.method',deg.method, 'is not supported' ) )
}
ok <- which(!is.na(x@userGroups[,cellidfile]))
if ( length(ok) > 0) {
loc <- reduceTo (x, what='col', to=colnames(x@data)[ ok ] ) #function definition in file 'reduceTo.R'
}else {
loc <- x
}
if ( ! is.na(match(paste(cellidfile, 'order'), colnames(x@userGroups))) ){
## at some time we had a problem in the creation of order column names:
possible = c( paste(cellidfile, c(' order','.order'), sep=""))
gname = possible[which(!is.na(match(possible, colnames(loc@userGroups))))]
#browser()
loc <- reorderSamples ( loc, gname ) #function definition in file 'reorder.obj.R'
}
info <- groupingInfo( loc, cellidfile ) #function definition in file 'groupingInfo.R'
if ( class(info)[1] == "cellexalvrR"){
loc = info
info <- groupingInfo( loc, cellidfile )
}
rem.ind <- which(Matrix::rowSums(loc@data)==0)
grp.vec <- info@grouping
col.tab <- info@col
if(length(rem.ind)>0){
loc = reduceTo(loc, what='row', to=rownames(loc@data)[-rem.ind]) #function definition in file 'reduceTo.R'
grp.vec = grp.vec[-rem.ind]
}
deg.genes <- NULL
if ( is.null(x@usedObj$sigGeneLists))
x@usedObj$sigGeneLists = list()
if(deg.method=='anova'){
message('anova gene stats is deprecated - using wilcox instead!')
deg.method= 'wilcox'
}
if( length(table(info@grouping)) == 1 ){
deg.method = 'Linear'
#stop( "Please selecting more than one group!")
message('cor.stat linear gene stats linearSelection')
if ( is.null( info@drc )) {
message(paste("The linear stats has not gotten the drc information -- choosing the first possible" , names(loc@drc )[1] ))
info@drc = names(loc@drc )[1]
}
info = groupingInfo(x, info@gname ) ## get the info for the big object
if ( class(info)[1] == "cellexalvrR"){
x = info
info <- groupingInfo( x, cellidfile )
}
drc = x@drc[[ info@drc ]]
if ( is.null(drc) ){
message(paste("the drc info",info@drc, "can not be found in the data! (", paste(collapse=", ", names(loc@drc)) ))
message(paste("The linear stats has not gotten the drc information -- choosing the first possible" , names(loc@drc )[1] ))
info@drc = names(x@drc )[1] ## for the log!
drc = x@drc[[ 1 ]]
}
cellexalLinear = NULL
if ( nrow(info@linarObj@dat) == 0 ){
message('defining time')
x = pseudotimeTest3D( x, grouping= info@gname )
info = groupingInfo( x, info@gname ) ## load changes
if ( class(info)[1] == "cellexalvrR"){
x = info
info <- groupingInfo( x, cellidfile )
}
if ( empty( info@linarObj )){
info = groupingInfo( x, x@usedObj$lastGroup )
}
cellexalLinear = info@linarObj
}
else {
cellexalLinear = info@linarObj
}
message('creating reports')
x = createStats( cellexalLinear, x, num.sig= num.sig, bulkStats=bulkStats )
ret = createReport(cellexalLinear,
reduceTo(x, what='row', to = x@usedObj$deg.genes),
info = groupingInfo( x, info@gname )
)
timeN = cellexalLinear@gname
x@usedObj$linearSelections[['lastEntry']] = x@usedObj$linearSelections[[timeN]] =
ret$cellexalObj@usedObj$linearSelections[[timeN]]
deg.genes = x@usedObj$deg.genes
}else if ( deg.method == 'wilcox') {
## use the faster Rcpp implementation
CppStats <- function( n ) {
OK = which(grp.vec == n )
BAD= which(grp.vec != n )
r = NULL
if ( length(OK) > 0 & length(BAD) >0){
#try({
r = as.data.frame(
FastWilcoxTest::StatTest(
Matrix::t( loc@data),
as.vector(BAD), as.vector(OK),
# as.vector(OK), as.vector(BAD),
logfc.threshold, minPct, onlyPos=onlyPos )
)
r= r[order( r[,'p.value']),]
r = cbind( r, cluster= rep(n,nrow(r) ), gene=rownames(loc@data)[r[,1]] )
#})
}
r
}
all_markers = NULL;
#tmp = loc
if ( bulkStats ){
## here is where we need the pseudo bulk approach.
## something similar is implemented in the compactTime function
x@usedObj$pseudoBulk = TRUE # 'cellexalObj' here referenced an undefined/global object; 'x' is the object being processed
loc = pseudoBulk ( info, loc )
toGSEA( loc,
ofile=file.path( loc@usedObj$sessionPath, paste(sep=".", "PseudoBulk", info@gname,".RData" ) ),
info@gname
)
save( loc, file=file.path( x@usedObj$sessionPath, paste(sep=".", "PseudoBulk", info@gname,".RData" ) ) )
grp.vec = groupingInfo( loc, info@gname )@grouping
x@groupSelectedFrom[[paste(info@gname, 'pseudoBulk')]] = groupingInfo( loc, info@gname )
}
for ( n in unique( sort(grp.vec)) ) {
all_markers = rbind( all_markers, CppStats(n) )
}
if ( !is.null( nameAdd ) ) {
all_markers[,'cluster'] = paste( sep="",nameAdd, all_markers[,'cluster'] )
}
#all_markers <- all_markers[ order( all_markers[,'p.value']),]
try ( {
x = logStatResult( x,method='Cpp', data= all_markers, col='p.value', text=text, showIDs=showIDs)
## And get additional info about the genes
}) #function definition in file 'logStatResult.R'
if ( is.null(x@usedObj$sigGeneLists$Cpp))
x@usedObj$sigGeneLists$Cpp = list()
x@usedObj$sigGeneLists$Cpp[[x@usedObj$lastGroup]] = all_markers
}
else {
stop(paste('The stats method', deg.method, "is not supported by this version of cellexalvrR"))
}
### get the top genes
if ( deg.method != 'Linear' ) {
genes_list <- split( as.vector(all_markers[,'gene']), all_markers[,'cluster'] )
ret_genes = ceiling(num.sig / length(table(grp.vec)))
if ( ret_genes < 1)
ret_genes = 1
top_genes <- function( x ) {
if ( length(x) == 0) {
NA
}
else if ( length(x) < ret_genes ) {
x
}else {
x[1:ret_genes]
}
}
## likely not the best approach..
deg.genes = NULL
ret_genes = ret_genes -1
i = 0
while ( length( deg.genes ) < num.sig ) {
ret_genes = ret_genes +1
i = i+1
deg.genes = unique(unlist( lapply( genes_list,top_genes ) ))
bad = which(is.na(deg.genes))
if ( length(bad) > 0)
deg.genes = deg.genes[-bad]
if ( i > 20)
break
}
deg.genes = rownames(x@data)[ match( make.names(deg.genes), make.names( rownames( x@data) ) )]
loc = reduceTo(loc, what='row', to=deg.genes) #function definition in file 'reduceTo.R'
#tab <- as.matrix(Matrix::t(loc@data))
if ( length(which(is.na( loc@userGroups[, loc@usedObj$lastGroup]) )) > 0 ) {
## NA group entries would break the collapse below - drop those cells
loc = reduceTo(loc, what='col', to= which(is.na( loc@userGroups[, x@usedObj$lastGroup]) ==F) ) #function definition in file 'reduceTo.R'
}
tab <- t(FastWilcoxTest::collapse( loc@data, as.numeric(factor( as.vector(loc@userGroups[, loc@usedObj$lastGroup]) ) ), 1 )) ## simple sum up the data
tab[which(tab == -Inf)] = 0
bad = which( apply(tab, 2, var) == 0 )
bad.genes = NULL
if ( length(bad) > 0 ){
tab = tab[, -bad]
bad.genes = deg.genes[bad]
deg.genes = deg.genes[-bad]
loc =reduceTo( loc, what='row', to=deg.genes)
message(paste(length(bad), "genes had zero variance across groups in this comparison"))
}
hc <- stats::hclust(stats::as.dist( 1- stats::cor(tab, method='pearson') ),method = 'ward.D2')
deg.genes = c(rownames(loc@data)[hc$order], bad.genes)
deg.genes = deg.genes[which(!is.na(deg.genes))]
}
if ( length(deg.genes) == 0){
message('deg.genes no entries - fix that')
if ( interactive() ) {
message ( 'no significant genes detected! - help needed: (exit with Q)' )
browser()
}else {
message ( 'no significant genes detected!' )
}
}
#promise <- future(lockedSave(x), evaluator = plan('multiprocess') ) #function definition in file 'lockedSave.R'
## we only need to store the stats object here.
## and as that is part of the usedObj we will store that ;-)
## lockedSave(x) ## too much overhead! #function definition in file 'lockedSave.R'
if ( ! interactive() ) { ## likely the VR scripts
#print( paste('Do we reach this point?', 'usedObj', x@outpath ) )
savePart( x, 'usedObj'); #function definition in file 'integrateParts.R'
#print( 'And this - Do we reach this point, too?')
}
if ( length(deg.genes ) < 10){
if(interactive()) {
print("no deg genes - please check why!")
browser()
}
}
x@usedObj$deg.genes = deg.genes
invisible( x )
}
)
#' @rdname getDifferentials
setMethod('getDifferentials', signature = c ('character'),
definition = function (x,cellidfile,
deg.method='wilcox',
num.sig=250, Log=TRUE, logfc.threshold = 1, minPct=0.1, text = NULL) {
x <- loadObject(x) #function definition in file 'lockedSave.R'
getDifferentials( x,cellidfile,deg.method,num.sig, Log=Log) #function definition in file 'getDifferentials.R'
}
)
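# For orientation (a sketch, not part of the package): the 'wilcox' branch above
# is described as a re-implementation of Seurat::FindAllMarkers, so one
# CppStats(n) call roughly corresponds to a Seurat call such as
#   Seurat::FindMarkers(seurat_obj, ident.1 = n, logfc.threshold = 0.5,
#                       min.pct = 0.2, only.pos = TRUE, test.use = "wilcox")
# assuming `seurat_obj` is a hypothetical Seurat object holding the same cells
# with this grouping set as the active idents.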
|
/R/getDifferentials.R
|
no_license
|
sonejilab/cellexalvrR
|
R
| false | false | 12,305 |
r
|
|
#===========================
# Econ 210
# Problem Set 1
# Problem 5 (c)
# evanliao@uchicago.edu
#===========================
# set random seed
set.seed(210)
# Generate U,V, and W
U <- rnorm(40,0,1)
V <- rnorm(40,0,1)
W <- rnorm(40,0,1)
# Construct X,Y
X <- (2*U + V + 1)
Y <- (-U + 3*W + 3)
# Draw a scatterplot
# install.packages("car")
library(car)
scatterplot(X~Y)
# Compute all of our desired moments
#################
#### part vi ####
#################
mean(X)
mean(Y)
##################
#### part vii ####
##################
var(X)
var(Y)
##################
#### part viii ###
##################
cov(X,Y)
cor(X,Y)
#################
#### part ix ####
#################
mean(X+Y)
var(X+Y)
#################
##### part x ####
#################
cov(X+Y,Y)
cor(X+Y,Y)
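# Theoretical moments implied by the construction above (added for comparison;
# derived from X = 2U + V + 1, Y = -U + 3W + 3 with U, V, W iid N(0,1)):
#   E[X] = 1,  Var(X) = 4 + 1 = 5
#   E[Y] = 3,  Var(Y) = 1 + 9 = 10
#   Cov(X,Y) = Cov(2U, -U) = -2,  Cor(X,Y) = -2/sqrt(50) ~ -0.28
#   E[X+Y] = 4,  Var(X+Y) = 5 + 10 + 2*(-2) = 11
#   Cov(X+Y, Y) = Cov(X,Y) + Var(Y) = 8,  Cor(X+Y, Y) = 8/sqrt(110) ~ 0.76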
|
/Classes/ECON 21000 (Autumn 15) Econometrics A/Assignments/ PS1_Code.R
|
no_license
|
shugamoe/CSJAN
|
R
| false | false | 880 |
r
|
|
#Austin Dickerson
#InClass 12_1
library(ROCR)
library(rpart)
library(class)
names(mtcars)
head(mtcars$am)
mtcars$response <- mtcars$am > 0
install.packages('kernlab')
library(kernlab)
# 'selVars' is not defined in this snippet; assuming the predictors are the
# remaining mtcars columns so the formula can be built before fitting:
selVars <- setdiff(names(mtcars), c('am', 'response'))
f <- paste('response ~ ',paste(selVars,collapse=' + '),sep='')
mSVMV <- ksvm(as.formula(f),data = mtcars, kernel = 'vanilladot') #bad kernel function
svm_pred <- predict(mSVMV, newdata = mtcars, type = 'response')
head(svm_pred)
eval <- prediction(svm_pred, mtcars$response)
auc_calc <- performance(eval,'auc')
auc_calc@y.values
mSVMV1 <- ksvm(as.formula(f),data = mtcars, kernel = 'rbfdot')
svm_pred1 <- predict(mSVMV1, newdata = mtcars, type = 'response')
head(svm_pred1)
eval <- prediction(svm_pred1, mtcars$response)
auc_calc <- performance(eval,'auc')
auc_calc@y.values
|
/ADickersonInclass12_1.R
|
no_license
|
abdickerson/CMDA
|
R
| false | false | 769 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Swrappers.R
\name{rowaggregate.DTSg}
\alias{rowaggregate.DTSg}
\alias{rowaggregate}
\alias{raggregate}
\title{Aggregate values row-wise}
\usage{
\method{rowaggregate}{DTSg}(
x,
resultCols,
fun,
...,
cols = self$cols(class = "numeric"),
clone = getOption("DTSgClone")
)
}
\arguments{
\item{x}{A \code{\link{DTSg}} object (S3 method only).}
\item{resultCols}{A character vector either of length one (names of \code{fun} are
appended in the case one or more functions are provided) or the same length
as \code{fun} specifying the column names for the return values of \code{fun}.}
\item{fun}{A summary function, (named) \code{\link{list}} of summary functions or
(named) character vector specifying summary functions applied row-wise to
all the values of the specified \code{cols}. The return value(s) must be of
length one. See corresponding section for further information.}
\item{...}{Further arguments passed on to \code{fun}.}
\item{cols}{A character vector specifying the columns to apply \code{fun} to.
Another possibility is a character string containing either comma separated
column names, for example, \code{"x,y,z"}, or the start and end column separated
by a colon, for example, \code{"x:z"}.}
\item{clone}{A logical specifying if the object shall be modified in place or
if a deep clone (copy) shall be made beforehand.}
}
\value{
Returns a \code{\link{DTSg}} object.
}
\description{
Applies one or more provided summary functions row-wise to selected columns
of a \code{\link{DTSg}} object.
}
\section{Summary functions}{
Some examples for \code{fun} are as follows:
\itemize{
\item \code{\link{mean}}
\item \code{\link{list}(min = \link{min}, max = \link{max})}
\item \code{c(sd = "sd", var = "var")}
}
}
\examples{
# new DTSg object
DT <- data.table::data.table(
date = flow$date,
flow1 = flow$flow - abs(rnorm(nrow(flow))),
flow2 = flow$flow,
flow3 = flow$flow + abs(rnorm(nrow(flow)))
)
x <- DTSg$new(values = DT)
# mean and standard deviation of multiple measurements per timestamp
## R6 method
x$rowaggregate(
resultCols = "flow",
fun = list(mean = mean, sd = sd)
)$print()
## 'raggregate()' is a "hidden" R6 alias for 'rowaggregate()'
x$raggregate(
resultCols = "flow",
fun = list(mean = mean, sd = sd)
)$print()
## S3 method
print(rowaggregate(
x = x,
resultCols = "flow",
fun = list(mean = mean, sd = sd)
))
}
\seealso{
\code{\link{cols}}, \code{\link{getOption}}
}
|
/man/rowaggregate.DTSg.Rd
|
permissive
|
gisler/DTSg
|
R
| false | true | 2,510 |
rd
|
|
library(igraph)
#friendship_df = read.csv("/home/dheeraj/Desktop/Lecture/6th_sem_Academics/SocialNetworkAnalysis/HandsOn/karate.txt", sep = " ")
graph <- watts.strogatz.game(2,100,5, 0.05)
plot(graph, layout = layout_on_grid)
V(graph)
E(graph)
transitivity(graph)
diameter(graph)
#random graph
make_random <- function(num, prob){
g6 = make_empty_graph(n = num)
for(i in 1:(num-1)){
for(j in (i+1):num){
r = runif(1)
if(r<=prob){
g6 = g6+edge(i,j)
}
}
}
return (g6)
}
g6 = make_random(200, 0.3)
g6
plot(g6)
deg_vec = igraph::degree(g6)
deg_vec
plot(table(deg_vec))
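# Cross-check (not in the original exercise): igraph also ships a built-in
# Erdos-Renyi G(n,p) generator, so the hand-rolled make_random() above can be
# compared against it with the same n and p.
g7 = sample_gnp(200, 0.3)
plot(table(igraph::degree(g7)))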
|
/SocialNetworkAnalysis/HandsOn/Lectures/Lecture4.R
|
no_license
|
dheeraj-2000/6th_sem_Academics
|
R
| false | false | 610 |
r
|
|
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
reps<-50
outerreps<-1000
size<-rev(round(10^seq(2, 5, 0.25)))[
3
]
nc<-12
plan(strategy=multisession, workers=nc)
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[14]], l=-1, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD14","l",-1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
print(Sys.time()-start)
})
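# Note on the parallel setup (an assumption about intent, not original code):
# purrr::map() itself runs sequentially, so the multisession plan above only
# helps if checkplot_inf() uses futures internally. To parallelise this outer
# loop directly, furrr offers a near drop-in replacement, e.g.:
# library(furrr)
# future_map(rev(1:outerreps), function(x){
#   out <- checkplot_inf(flatten(flatten(SADs_list))[[14]], l=-1, inds=size, reps=reps)
#   write.csv(out, paste("/scratch/mr984/SAD14","l",-1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
# }, .options = furrr_options(seed = TRUE))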
|
/scripts/checkplots_for_parallel_amarel/asy_207.R
|
no_license
|
dushoff/diversity_metrics
|
R
| false | false | 537 |
r
|
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
reps<-50
outerreps<-1000
size<-rev(round(10^seq(2, 5, 0.25)))[
3
]
nc<-12
plan(strategy=multisession, workers=nc)
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[14]], l=-1, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD14","l",-1,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
print(Sys.time()-start)
})
|
#! /usr/bin/Rscript
library(httr)
library(jsonlite)
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
# 2. To make your own application, register at
# https://github.com/settings/applications. Use any URL for the homepage URL
# (http://github.com is fine) and http://localhost:1410 as the callback url
#
# Replace your key and secret below.
myapp <- oauth_app("rgetcleandata", key = "6601cff8a84cba8810a0", secret = "a7510a05e93f25f53a785e826b2fc267b4cc4dec")
# 3. Get OAuth credentials
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API
gtoken <- config(token = github_token)
req <- with_config(gtoken, GET("https://api.github.com/users/jtleek/repos"))
stop_for_status(req)
# content now contains info about the repositories
content <- content(req)
# convert this to JSON so we can read it using jsonlite
data <- fromJSON(toJSON(content))
# get the index of the repository
index <- which(data$name == "datasharing")
# get create time for this repository
data$created_at[index]
|
/getcleandata/week2/q1.R
|
no_license
|
cszikszoy/datasciencecoursera
|
R
| false | false | 1,083 |
r
|
|
# Simulate loadings.
load <- matrix(1, nrow = N, ncol = R)
# Only the first loading is generated from a standard normal distribution; the others are constant.
load[, 1] <- rnorm(N)
# Simulate factors.
fac <- matrix(1, nrow = T, ncol = R)
# Only the second factor is generated from a standard normal distribution; the others are constant.
fac[, 2] <- rnorm(T)
# Set up container to store regressors.
X <- array(data = NA, dim = c(T, N, length(param)))
# Compute regressors according to model specification in simulation part.
X[,,1] <- 1 + fac %*% t(load) + matrix(1, nrow = T, ncol = R) %*% t(load) + fac %*% matrix(1, nrow = R, ncol = N) + matrix(rnorm(T * N), nrow = T, ncol = N)
X[,,2] <- 1 + fac %*% t(load) + matrix(1, nrow = T, ncol = R) %*% t(load) + fac %*% matrix(1, nrow = R, ncol = N) + matrix(rnorm(T * N), nrow = T, ncol = N)
# Accordingly compute dependent variable.
Y <- X[,,1] * param[1] + X[,,2] * param[2] + fac %*% t(load) + epsilon
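# Model being simulated (read off the code above; N, T, R, param = (beta_1, beta_2)
# and epsilon are defined elsewhere in the simulation setup):
#   X_itk = 1 + lambda_i' f_t + lambda_i' 1 + 1' f_t + eta_itk   for k = 1, 2
#   Y_it  = beta_1 * X_it1 + beta_2 * X_it2 + lambda_i' f_t + eps_it
# i.e. an interactive (factor) fixed effects structure that enters both the
# regressors and the outcome.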
|
/mc_simulations/data_generating/data_generating_additive.R
|
no_license
|
mmaeh/Research-Module-Econometrics-Interactive-Fixed-Effects
|
R
| false | false | 922 |
r
|
|
shinyUI(fluidPage(
titlePanel('How many games SHOULD you have won?'),
sidebarLayout(
sidebarPanel(
helpText("Find out your expected win total in Fantasy Football this year.
Were you besieged by bad luck or graced by good fortune?"),
numericInput('leagueSize',
label = 'How many teams are in your league?',
value = 0, min = 8, max = 16, step = 1),
numericInput('actualWins',
label = 'How many games did you manage to win this year?',
value = 0, min = 0, max = 32, step = 1),
numericInput('playoffWins',
label = 'How many total wins would you need to make the playoffs?',
value = 0, min = 0, max = 32, step = 1),
helpText("Look back at all the weeks of your league and jot down your scoring rank for each individual week.
In the boxes below, enter the number of times you achieved each rank. For ranks higher than the
number of teams in your league, leave the value at 0."),
numericInput('rank1',
label = 'Rank 1',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank2',
label = '2nd',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank3',
label = '3rd',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank4',
label = '4th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank5',
label = '5th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank6',
label = '6th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank7',
label = '7th',
value = 0, min = 0, max = 32, step = 1),
      numericInput('rank8',
label = '8th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank9',
label = '9th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank10',
label = '10th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank11',
label = '11th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank12',
label = '12th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank13',
label = '13th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank14',
label = '14th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank15',
label = '15th',
value = 0, min = 0, max = 32, step = 1),
numericInput('rank16',
label = '16th',
value = 0, min = 0, max = 32, step = 1),
submitButton('Submit')
),
mainPanel(
tabsetPanel(
tabPanel("Main", textOutput('teamtext'),
plotOutput('winHist'),
textOutput('min'),
textOutput('max'),
textOutput('playoffs'),
textOutput('winTotal')
),
tabPanel("About Standard Score", textOutput('summary'))
)
)
)
))
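## ---- Hedged companion sketch (added; not part of the original ui.R) ----
## The output ids used above (teamtext, winHist, min, max, playoffs, winTotal,
## summary) must be filled in by the app's server.R, which is not shown here.
## The skeleton below only illustrates that contract with placeholder content.
if (FALSE) {
  shinyServer(function(input, output) {
    output$teamtext <- renderText(paste("League size:", input$leagueSize))
    output$winHist  <- renderPlot(hist(rbinom(1000, input$leagueSize, 0.5)))  # placeholder histogram
    output$winTotal <- renderText(paste("Actual wins entered:", input$actualWins))
    output$summary  <- renderText("Explanation of the standard score goes here.")
  })
}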
|
/ui.r
|
no_license
|
blockee/Bad-Luck-FF
|
R
| false | false | 4,175 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{commute}
\alias{commute}
\title{Greater Boston Area Commuting Data}
\format{
A tibble with 648 rows and 14 columns.
}
\usage{
commute
}
\description{
A subset of the `acs_raw` tibble containing commuting data for Middlesex, Norfolk, and Suffolk Counties.
}
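% NOTE (added by the editor, not generated by roxygen2): minimal illustrative example.
\examples{
# Assumes the package is attached so the lazy-loaded tibble is available.
head(commute)
dim(commute)  # 648 rows, 14 columns, matching the format block above
}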
\keyword{datasets}
|
/man/commute.Rd
|
permissive
|
JosiahParry/uitk
|
R
| false | true | 382 |
rd
|
"SpatDensReg" <- function (formula, data, na.action, prior=NULL, state=NULL,
mcmc=list(nburn=3000, nsave=2000, nskip=0, ndisplay=500),
permutation=TRUE, fix.theta=TRUE) {
#########################################################################################
# call parameters
#########################################################################################
Call <- match.call(); # save a copy of the call
indx <- match(c("formula", "data", "na.action", "truncation_time", "subject.num"),
names(Call), nomatch=0)
if (indx[1] ==0) stop("A formula argument is required");
temp <- Call[c(1,indx)] # only keep the arguments we wanted
temp[[1L]] <- quote(stats::model.frame)
special <- c("baseline", "frailtyprior", "truncation_time", "subject.num", "bspline")
temp$formula <- if (missing(data))
terms(formula, special)
else terms(formula, special, data = data)
if (is.R())
m <- eval(temp, parent.frame())
else m <- eval(temp, sys.parent())
Terms <- attr(m, 'terms')
if(any(names(m)=="(truncation_time)")){
truncation_time = m[,"(truncation_time)"]
}else{
truncation_time = NULL
}
if(any(names(m)=="(subject.num)")){
subject.num = m[,"(subject.num)"]
}else{
subject.num = NULL
}
Y <- model.extract(m, "response")
if (!inherits(Y, "Surv")) stop("Response must be a survival object")
baseline0 <- attr(Terms, "specials")$baseline
frailtyprior0<- attr(Terms, "specials")$frailtyprior
bspline0<- attr(Terms, "specials")$bspline
if (length(frailtyprior0)) {
temp <- survival::untangle.specials(Terms, 'frailtyprior', 1)
dropfrail <- c(temp$terms)
frail.terms <- m[[temp$vars]]
}else{
dropfrail <- NULL
frail.terms <- NULL;
}
if (length(baseline0)) {
temp <- survival::untangle.specials(Terms, 'baseline', 1)
dropXtf <- c(temp$terms)
Xtf <- m[[temp$vars]]
}else{
dropXtf <- NULL
Xtf <- NULL
}
if (length(bspline0)) {
temp <- survival::untangle.specials(Terms, 'bspline', 1)
#dropx <- c(dropx, temp$terms);
X.bs = NULL;
n.bs = rep(0, length(temp$vars));
for(ii in 1:length(temp$vars)){
X.bs = cbind(X.bs, m[[temp$vars[ii]]]);
n.bs[ii] = ncol(m[[temp$vars[ii]]]);
}
}else{
X.bs <- NULL;
n.bs <- NULL;
}
dropx <- c(dropfrail, dropXtf)
if (length(dropx)) {
newTerms <- Terms[-dropx]
# R (version 2.7.1) adds intercept=T anytime you drop something
if (is.R()) attr(newTerms, 'intercept') <- attr(Terms, 'intercept')
} else newTerms <- Terms
X <- model.matrix(newTerms, m);
if (is.R()) {
assign <- lapply(survival::attrassign(X, newTerms)[-1], function(x) x-1)
xlevels <- .getXlevels(newTerms, m)
contr.save <- attr(X, 'contrasts')
}else {
assign <- lapply(attr(X, 'assign')[-1], function(x) x -1)
xvars <- as.character(attr(newTerms, 'variables'))
xvars <- xvars[-attr(newTerms, 'response')]
if (length(xvars) >0) {
xlevels <- lapply(m[xvars], levels)
xlevels <- xlevels[!unlist(lapply(xlevels, is.null))]
if(length(xlevels) == 0)
xlevels <- NULL
} else xlevels <- NULL
contr.save <- attr(X, 'contrasts')
}
# drop the intercept after the fact, and also drop baseline if necessary
adrop <- 0 #levels of "assign" to be dropped; 0= intercept
Xatt <- attributes(X)
xdrop <- Xatt$assign %in% adrop #columns to drop (always the intercept)
X <- X[, !xdrop, drop=FALSE]
attr(X, "assign") <- Xatt$assign[!xdrop]
n <- nrow(X)
p <- ncol(X)
if(p==0){
stop("covariate is required; you could creat a working covariate and set phi=0")
}
Sinv = solve(var(X));
  # find the maximum Mahalanobis (M) distance between X[i,] and colMeans(X)
distseq = rep(0, n);
Xbar = colMeans(X);
for(i in 1:n) distseq[i] = sqrt(as.vector((X[i,]-Xbar)%*%Sinv%*%(X[i,]-Xbar)))
maxdist = max(distseq)
phi0 = (-log(0.001))/maxdist;
#########################################################################################
# data structure
#########################################################################################
y1 = Y[,1]; y2 = Y[,1];
type <- attr(Y, "type")
exactsurv <- Y[,ncol(Y)] ==1
if (any(exactsurv)) {
y1[exactsurv]=Y[exactsurv,1];
y2[exactsurv]=Y[exactsurv,1];
}
if (type== 'counting') stop ("Invalid survival type")
if (type=='interval') {
intsurv <- Y[,3]==3;
if (any(intsurv)){
y1[intsurv]=Y[intsurv,1];
y2[intsurv]=Y[intsurv,2];
}
}
delta = Y[,ncol(Y)];
if (!all(is.finite(Y))) {
stop("Invalid survival times for this distribution")
} else {
if (type=='left') delta <- 2- delta;
}
#########################################################################################
# initial MLE analysis and mcmc parameters
#########################################################################################
fit0 <- survival::survreg(formula = survival::Surv(y1, y2, type="interval2")~1, dist="gaussian");
theta1 = fit0$coefficients[1];
theta2 = log(fit0$scale);
theta0 = c(theta1, theta2); theta_prior = c(theta1, theta2);
Vhat0 = as.matrix(fit0$var[c(1,2),c(1,2)]);
#########################################################################################
# priors and initial values
#########################################################################################
alpha=state$alpha; if(is.null(alpha)) alpha=1;
theta=state$theta; if(is.null(theta)) theta=c(theta1, theta2);
phi = state$phi; if(is.null(phi)) phi=phi0;
y <- state$y;
if(is.null(y)){
y <- rep(0, n);
for(i in 1:n){
if(delta[i]==0) y[i] = y1[i]+sd(y);
if(delta[i]==1) y[i] = y1[i];
if(delta[i]==2) y[i] = y2[i]-sd(y);
if(delta[i]==3) y[i] = mean(c(y1[i], y2[i]));
}
}
nburn <- mcmc$nburn;
nsave <- mcmc$nsave;
nskip <- mcmc$nskip;
ndisplay <- mcmc$ndisplay;
maxL <- prior$maxL; if(is.null(maxL)) maxL<-5;
a0=prior$a0; if(is.null(a0)) a0=5;
b0=prior$b0; if(is.null(b0)) b0=1;
if(fix.theta){
V0_prior = diag(0, 2);
}else{
V0_prior = 10*Vhat0;
}
theta0 <- prior$theta0; if(is.null(theta0)) theta0 <- theta_prior;
V0 <- prior$V0; if(is.null(V0)) V0 <- V0_prior;
if(sum(abs(V0))==0){
V0inv <- diag(c(Inf,Inf));
}else {
V0inv <- solve(V0);
}
Vhat <- prior$Vhat; if(is.null(Vhat)) Vhat <- Vhat0;
phiq0 = prior$phiq0; if(is.null(phiq0)) phiq0=0.5;
phia0 = prior$phia0; if(is.null(phia0)) phia0=2;
phib0 = prior$phib0; if(is.null(phib0)) phib0=1/phi0;
## save to output list
mcmc = list(nburn=nburn, nsave=nsave, nskip=nskip, ndisplay=ndisplay)
theta_initial=theta+0;
alpha_initial=alpha+0;
phi_initial = phi+0;
initial.values = list(alpha=alpha_initial, theta=theta_initial, phi=phi_initial);
prior = list(maxL=maxL, a0=a0, b0=b0, theta0=theta0, V0=V0, Vhat=Vhat,
phiq0=phiq0, phia0=phia0, phib0=phib0);
#########################################################################################
# calling the c++ code and # output
#########################################################################################
y1new=y1; y2new=y2;
for(i in 1:n){
if(delta[i]==0) y2new[i] = Inf;
if(delta[i]==2) y1new[i] = -Inf;
}
model.name <- "Spatially Smoothed Polya Tree Density Estimation:";
foo <- .Call("SpatDens", nburn_=nburn, nsave_=nsave, nskip_=nskip, ndisplay_=ndisplay,
y_=y, y1_=y1new, y2_=y2new, type_=delta, X_=t(X), theta_=theta, maxJ_=maxL,
cpar_=alpha, a0_=a0, b0_=b0, theta0_=theta0, V0inv_=V0inv, Vhat_=Vhat,
l0_=round(min(1000,nburn/2)), adapter_=2.38^2, Sinv_=Sinv, phi_=phi, q0phi_=phiq0,
a0phi_=phia0, b0phi_=phib0, perm_=permutation+0, PACKAGE = "spBayesSurv");
## Bayes Factor for the spatial model vs. the exchangeable model
q.bar = mean(foo$phi==0);
BF = (phiq0*(1-q.bar))/((1-phiq0)*q.bar);
#########################################################################################
# save to a list
#########################################################################################
output <- list(modelname=model.name,
terms=m,
call=Call,
prior=prior,
mcmc=mcmc,
n=n,
p=p,
Surv=survival::Surv(y1, y2, type="interval2"),
X = X,
alpha = foo$cpar,
theta = foo$theta,
phi = foo$phi,
y = foo$y,
maxL = maxL,
ratec = foo$ratec,
ratetheta = foo$ratetheta,
ratephi = foo$ratephi,
ratey = foo$ratey,
initial.values=initial.values,
BF = BF);
class(output) <- c("SpatDensReg")
output
}
#### empirical BF and p-value for the spatial model vs. the exchangeable model
"BF.SpatDensReg" <- function (y, X, prior=NULL, nperm=100, c_seq=NULL, phi_seq=NULL) {
n = length(y);
X = cbind(X);
Sinv = solve(var(X));
  # find the maximum Mahalanobis (M) distance between X[i,] and colMeans(X)
distseq = rep(0, n);
Xbar = colMeans(X);
for(i in 1:n) distseq[i] = sqrt(as.vector((X[i,]-Xbar)%*%Sinv%*%(X[i,]-Xbar)))
maxdist = max(distseq)
phi0 = (-log(0.001))/maxdist;
#########################################################################################
# initial MLE analysis
#########################################################################################
fit0 <- survival::survreg(formula = survival::Surv(y)~1, dist="gaussian");
theta1 = fit0$coefficients[1];
theta2 = log(fit0$scale);
theta = c(theta1, theta2);
#########################################################################################
# priors and initial values
#########################################################################################
maxL <- prior$maxL; if(is.null(maxL)) maxL<-5;
a0=prior$a0; if(is.null(a0)) a0=5;
b0=prior$b0; if(is.null(b0)) b0=1;
phiq0 = prior$phiq0; if(is.null(phiq0)) phiq0=0.5;
phia0 = prior$phia0; if(is.null(phia0)) phia0=2;
phib0 = prior$phib0; if(is.null(phib0)) phib0=1/phi0;
if(is.null(c_seq)) c_seq=c(0.001, 0.01, 0.1, 0.5, 1, 5, 10, 50, 100, 1000);
if(is.null(phi_seq)) phi_seq = qgamma((1:10)/11, phia0, phib0)
#########################################################################################
# calling the c++ code and # output
#########################################################################################
foo <- .Call("SpatDens_BF", y_=y, X_=t(X), Sinv_=Sinv, theta_=theta, maxJ_=maxL,
cpar_=c_seq, a0_=a0, b0_=b0, phi_=phi_seq, q0phi_=phiq0,
a0phi_=phia0, b0phi_=phib0, nperm_=nperm, PACKAGE = "spBayesSurv");
## Bayes Factor for the spatial model vs. the exchangeable model
BF = foo$BF;
pvalue = sum(foo$BFperm>foo$BF)/nperm;
output <- list(BF = BF,
pvalue = pvalue);
output
}
#### print, summary, plot
"print.SpatDensReg" <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat(x$modelname,"\nCall:\n", sep = "")
print(x$call)
cat(paste("\nBayes Factor for the spatial model vs. the exchangeable model:", sep=""), x$BF);
cat("\nn=",x$n, "\n", sep="")
invisible(x)
}
"plot.SpatDensReg" <- function (x, xnewdata, ygrid=NULL, CI=0.95, PLOT=TRUE, ...) {
if(is.null(ygrid)) ygrid = seq(min(x$Surv[,1], na.rm=T)-sd(x$Surv[,1], na.rm=T),
max(x$Surv[,2], na.rm=T)+sd(x$Surv[,2], na.rm=T), length.out=200);
if(missing(xnewdata)){
stop("please specify xnewdata")
}else{
rnames = row.names(xnewdata)
m = x$terms
Terms = attr(m, 'terms')
baseline0 <- attr(Terms, "specials")$baseline
frailtyprior0<- attr(Terms, "specials")$frailtyprior
dropx <- NULL
if (length(frailtyprior0)) {
temp <- survival::untangle.specials(Terms, 'frailtyprior', 1)
dropx <- c(dropx, temp$terms)
frail.terms <- m[[temp$vars]]
}else{
frail.terms <- NULL;
}
if (length(baseline0)) {
temp <- survival::untangle.specials(Terms, 'baseline', 1)
dropx <- c(dropx, temp$terms)
Xtf <- m[[temp$vars]]
}else{
Xtf <- NULL;
}
if (length(dropx)) {
newTerms <- Terms[-dropx]
# R (version 2.7.1) adds intercept=T anytime you drop something
if (is.R()) attr(newTerms, 'intercept') <- attr(Terms, 'intercept')
} else newTerms <- Terms
newTerms <- delete.response(newTerms)
mnew <- model.frame(newTerms, xnewdata, na.action = na.omit, xlev = .getXlevels(newTerms, m))
Xnew <- model.matrix(newTerms, mnew);
if (is.R()) {
assign <- lapply(survival::attrassign(Xnew, newTerms)[-1], function(x) x-1)
xlevels <- .getXlevels(newTerms, mnew)
contr.save <- attr(Xnew, 'contrasts')
}else {
assign <- lapply(attr(Xnew, 'assign')[-1], function(x) x -1)
xvars <- as.character(attr(newTerms, 'variables'))
xvars <- xvars[-attr(newTerms, 'response')]
if (length(xvars) >0) {
xlevels <- lapply(mnew[xvars], levels)
xlevels <- xlevels[!unlist(lapply(xlevels, is.null))]
if(length(xlevels) == 0)
xlevels <- NULL
} else xlevels <- NULL
contr.save <- attr(Xnew, 'contrasts')
}
# drop the intercept after the fact, and also drop baseline if necessary
adrop <- 0 #levels of "assign" to be dropped; 0= intercept
Xatt <- attributes(Xnew)
xdrop <- Xatt$assign %in% adrop #columns to drop (always the intercept)
Xnew <- Xnew[, !xdrop, drop=FALSE]
attr(Xnew, "assign") <- Xatt$assign[!xdrop]
xpred = Xnew
if(ncol(xpred)!=x$p) stop("please make sure the number of columns matches!");
}
xpred = cbind(xpred);
nxpred = nrow(xpred);
Sinv = solve(var(x$X));
estimates <- .Call("SpatDens_plots", ygrid, t(xpred), x$theta, x$alpha, x$phi, x$maxL,
x$y, t(x$X), Sinv, CI, PACKAGE = "spBayesSurv");
if(PLOT){
par(cex=1.5,mar=c(4.1,4.1,1,1),cex.lab=1.4,cex.axis=1.1)
plot(ygrid, estimates$fhat[,1], "l", lwd=3, xlab="log time", ylab="density",
xlim=c(min(ygrid), max(ygrid)), ylim=c(0,max(estimates$fhatup)));
for(i in 1:nxpred){
polygon(x=c(rev(ygrid),ygrid),
y=c(rev(estimates$fhatlow[,i]),estimates$fhatup[,i]),
border=NA,col="lightgray");
}
for(i in 1:nxpred){
lines(ygrid, estimates$fhat[,i], lty=i, lwd=3, col=i);
}
legend("topright", rnames, col = 1:nxpred, lty=1:nxpred, ...)
}
estimates$ygrid=ygrid;
invisible(estimates)
}
"summary.SpatDensReg" <- function(object, CI.level=0.95, ...) {
ans <- c(object[c("call", "modelname")])
### Baseline Information
mat <- as.matrix(object$theta)
coef.p <- apply(mat, 1, mean); names(coef.p)=c("location", "log(scale)");
coef.m <- apply(mat, 1, median)
coef.sd <- apply(mat, 1, sd)
limm <- apply(mat, 1, function(x) as.vector(quantile(x, probs=c((1-CI.level)/2, 1-(1-CI.level)/2))) )
coef.l <- limm[1,]
coef.u <- limm[2,]
coef.table <- cbind(coef.p, coef.m, coef.sd, coef.l , coef.u)
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.",
paste(CI.level*100, "%CI-Low", sep=""),
paste(CI.level*100, "%CI-Upp", sep="")))
ans$theta.var <- coef.table
### Precision parameter
if(object$prior$a0<=0){
ans$alpha.var <- NULL
}else{
mat <- object$alpha
coef.p <- mean(mat); names(coef.p)="alpha";
coef.m <- median(mat)
coef.sd <- sd(mat)
limm <- as.vector(quantile(mat, probs=c((1-CI.level)/2, 1-(1-CI.level)/2)))
coef.l <- limm[1]
coef.u <- limm[2]
coef.table <- cbind(coef.p, coef.m, coef.sd, coef.l , coef.u)
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.",
paste(CI.level*100, "%CI-Low", sep=""),
paste(CI.level*100, "%CI-Upp", sep="")))
ans$alpha.var <- coef.table
}
### phi parameter
mat <- object$phi
coef.p <- mean(mat); names(coef.p)="range";
coef.m <- median(mat)
coef.sd <- sd(mat)
limm <- as.vector(quantile(mat, probs=c((1-CI.level)/2, 1-(1-CI.level)/2)))
coef.l <- limm[1]
coef.u <- limm[2]
coef.table <- cbind(coef.p, coef.m, coef.sd, coef.l , coef.u)
dimnames(coef.table) <- list(names(coef.p), c("Mean", "Median", "Std. Dev.",
paste(CI.level*100, "%CI-Low", sep=""),
paste(CI.level*100, "%CI-Upp", sep="")))
ans$phi.var <- coef.table;
ans$BF <- object$BF
ans$n <- object$n
ans$p <- object$p
ans$prior <- object$prior
### acceptance rates
ans$ratetheta = object$ratetheta;
ans$ratephi = object$ratephi;
ans$ratey = object$ratey;
ans$ratec = object$ratec;
class(ans) <- "summary.SpatDensReg"
return(ans)
}
"print.summary.SpatDensReg"<-function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat(x$modelname,"\nCall:\n", sep = "")
print(x$call)
if(x$theta.var[1,3]==0){
cat("\nCentering distribution parameters are fixed at:\n")
cat("location=", x$theta.var[1,1], ", log(scale)=", x$theta.var[2,1], "\n", sep="")
}else{
cat("\nPosterior inference of centering distribution parameters\n")
cat("(Adaptive M-H acceptance rate: ", x$ratetheta, "):\n", sep="")
print.default(format(x$theta.var, digits = digits), print.gap = 2,
quote = FALSE)
}
if (!is.null(x$alpha.var)) {
cat("\nPosterior inference of precision parameter\n")
cat("(Adaptive M-H acceptance rate: ", x$ratec, "):\n", sep="")
print.default(format(x$alpha.var, digits = digits), print.gap = 2,
quote = FALSE)
}
cat("\nPosterior inference of distance function range phi\n")
cat("(Adaptive M-H acceptance rate: ", x$ratephi, "):\n", sep="")
print.default(format(x$phi.var, digits = digits), print.gap = 2,
quote = FALSE)
cat(paste("\nBayes Factor for the spatial model vs. the exchangeable model:", sep=""), x$BF)
cat("\nNumber of subjects: n=", x$n, "\n", sep="")
invisible(x)
}
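## ---- Hedged usage sketch (added; not part of the package source) ----
## Minimal call pattern implied by the definitions above. The simulated data,
## the short MCMC run and the prior settings are illustrative assumptions only;
## see the package documentation for real examples.
if (FALSE) {
  library(spBayesSurv)
  library(survival)
  set.seed(1)
  n <- 100
  x1 <- rnorm(n)                              # one covariate (p >= 1 is required above)
  tobs <- exp(0.5 * x1 + rnorm(n))            # fully observed (uncensored) outcomes
  d <- data.frame(tobs = tobs, x1 = x1)
  fit <- SpatDensReg(Surv(tobs) ~ x1, data = d,
                     prior = list(maxL = 4),
                     mcmc = list(nburn = 500, nsave = 500, nskip = 0, ndisplay = 100))
  summary(fit)
  plot(fit, xnewdata = data.frame(x1 = c(-1, 1)))
}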
|
/R/SpatDensReg.R
|
no_license
|
cran/spBayesSurv
|
R
| false | false | 18,416 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proj.R
\name{Proj}
\alias{Proj}
\title{Projection of Vector y on columns of X}
\usage{
Proj(y, X, list = FALSE)
}
\arguments{
\item{y}{a vector, treated as a one-column matrix}
\item{X}{a vector or matrix. Number of rows of \code{y} and \code{X} must match}
\item{list}{logical; if FALSE, return just the projected vector; otherwise returns a list}
}
\value{
the projection of \code{y} on \code{X} (if \code{list=FALSE}) or a list with elements \code{y} and \code{P}
}
\description{
Fitting a linear model, \code{lm(y ~ X)}, by least squares can be thought of geometrically as the orthogonal projection of
\code{y} on the column space of \code{X}. This function is designed to allow exploration of projections
and orthogonality.
}
\details{
The projection is defined as \eqn{P y} where \eqn{P = X (X'X)^- X'}
and \eqn{(X'X)^-} is a generalized inverse of \eqn{X'X}.
}
\examples{
X <- matrix( c(1, 1, 1, 1, 1, -1, 1, -1), 4,2, byrow=TRUE)
y <- 1:4
Proj(y, X[,1]) # project y on unit vector
Proj(y, X[,2])
Proj(y, X)
# orthogonal complements
yp <-Proj(y, X, list=TRUE)
yp$y
P <- yp$P
IP <- diag(4) - P
yc <- c(IP \%*\% y)
crossprod(yp$y, yc)
# P is idempotent: P P = P
P \%*\% P
all.equal(P, P \%*\% P)
}
\seealso{
Other vector diagrams:
\code{\link{arc}()},
\code{\link{arrows3d}()},
\code{\link{circle3d}()},
\code{\link{corner}()},
\code{\link{plot.regvec3d}()},
\code{\link{pointOnLine}()},
\code{\link{regvec3d}()},
\code{\link{vectors3d}()},
\code{\link{vectors}()}
}
\author{
Michael Friendly
}
\concept{vector diagrams}
|
/man/Proj.Rd
|
no_license
|
aarora87/matlib
|
R
| false | true | 1,595 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/order_sm.R
\name{order_sm}
\alias{order_sm}
\title{Random Sampling of k-th Order Statistics from a Singh-Maddala Distribution}
\usage{
order_sm(size, k, shape1, shape2, scale, n, alpha = 0.05, ...)
}
\arguments{
\item{size}{numeric, represents the size of the sample.}
\item{k}{numeric, represents the Kth smallest value from a sample.}
\item{shape1}{numeric, represents a first shape parameter value. Must be strictly positive.}
\item{shape2}{numeric, represents a second shape parameter value. Must be strictly positive.}
\item{scale}{numeric, represents scale parameter values. Must be strictly positive.}
\item{n}{numeric, represents the size of the sample to compute the order statistic from.}
\item{alpha}{numeric, (1 - alpha) represents the confidence of an interval for the population median of the distribution of the k-th order statistic. Default value is 0.05.}
\item{...}{represents other parameters of a Singh-Maddala distribution.}
}
\value{
A list with a random sample of order statistics from a Singh-Maddala Distribution, the value of its joint probability density function evaluated at the random sample and
a (1 - alpha) confidence interval for the population median of the distribution of the k-th order statistic.
}
\description{
\code{order_sm} is used to obtain a random sample of the k-th order statistic from a Singh-Maddala distribution and some associated quantities of interest.
}
\examples{
library(orders)
# A sample of size 10 of the 3rd order statistic from a Singh-Maddala Distribution
order_sm(size=10,shape1=1,shape2=2,scale=1,k=3,n=50,alpha=0.02)
}
\references{
Gentle, J, Computational Statistics, First Edition. Springer - Verlag, 2009.
Kleiber, C. and Kotz, S. (2003). Statistical Size Distributions in Economics and Actuarial Sciences, Hoboken, NJ, USA: Wiley-Interscience.
}
\author{
Carlos Alberto Cardozo Delgado <cardozorpackages@gmail.com>.
}
|
/man/order_sm.Rd
|
no_license
|
cran/orders
|
R
| false | true | 1,976 |
rd
|
numPerPatch147000 <- c(2599,2401)
|
/NatureEE-data-archive/Run203121/JAFSdata/JAFSnumPerPatch147000.R
|
no_license
|
flaxmans/NatureEE2017
|
R
| false | false | 34 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_objects.R
\name{GoogleCloudMlV1beta1__HyperparameterOutput}
\alias{GoogleCloudMlV1beta1__HyperparameterOutput}
\title{GoogleCloudMlV1beta1__HyperparameterOutput Object}
\usage{
GoogleCloudMlV1beta1__HyperparameterOutput(GoogleCloudMlV1beta1__HyperparameterOutput.hyperparameters = NULL,
allMetrics = NULL, finalMetric = NULL, hyperparameters = NULL,
trialId = NULL)
}
\arguments{
\item{GoogleCloudMlV1beta1__HyperparameterOutput.hyperparameters}{The \link{GoogleCloudMlV1beta1__HyperparameterOutput.hyperparameters} object or list of objects}
\item{allMetrics}{All recorded object metrics for this trial}
\item{finalMetric}{The final objective metric seen for this trial}
\item{hyperparameters}{The hyperparameters given to this trial}
\item{trialId}{The trial id for these results}
}
\value{
GoogleCloudMlV1beta1__HyperparameterOutput object
}
\description{
GoogleCloudMlV1beta1__HyperparameterOutput Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Represents the result of a single hyperparameter tuning trial from a training job. The TrainingOutput object that is returned on successful completion of a training job with hyperparameter tuning includes a list of HyperparameterOutput objects, one for each successful trial.
}
\seealso{
Other GoogleCloudMlV1beta1__HyperparameterOutput functions: \code{\link{GoogleCloudMlV1beta1__HyperparameterOutput.hyperparameters}}
}
|
/googlemlv1beta1.auto/man/GoogleCloudMlV1beta1__HyperparameterOutput.Rd
|
permissive
|
GVersteeg/autoGoogleAPI
|
R
| false | true | 1,501 |
rd
|
####
#### Size measurements for Cophosaurus texanus lizards. For each of 25 lizards, we have
#### data on three variables: mass in grams, snout-vent length (SVL) in millimeters, and
#### hind limb span (HLS) in millimeters. We also know the gender of each lizard.
####
## Input data.
dta_0 <- read.delim("T1-3.DAT", header = FALSE, sep = " ")
n <- nrow(dta_0)
dta <- matrix(NA, nrow = n, ncol = 3)
colnames(dta) <- c("Mass", "SVL", "HLS")
for(i in 1:n)
dta[i, ] <- as.numeric(dta_0[i, !is.na(dta_0[i, ])])
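## ---- Hedged follow-up sketch (added; not part of the original script) ----
## Quick numerical checks on the assembled matrix; the three columns are the
## size variables named above (gender is not read into dta here).
if (FALSE) {
  head(dta)      # first few lizards
  colMeans(dta)  # sample mean vector
  var(dta)       # sample covariance matrix
  pairs(dta)     # scatterplot matrix of mass, SVL and HLS
}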
|
/STAT_636/R Scripts/Data-Specific/Textbook/T1-3.r
|
no_license
|
mauliasavana/Statistics-Masters
|
R
| false | false | 514 |
r
|
#' Find sensor overlap bounds
#'
#' \code{i_find_sensor_overlap_bounds} finds the overlap bounds between sensors
#'
#' @param x band vector
#' @param idx boolean. return indices? defaults to TRUE
#' @return data.frame with sensor bounds
#'
#' @keywords internal
#' @author Jose Eduardo Meireles
i_find_sensor_overlap_bounds = function(x, idx = TRUE){
decrease = which(diff(x) < 0.0)
n_decreases = length(decrease) + 1
dimnames = list(c("begin", "end"),
paste("sensor", seq(n_decreases), sep = "_"))
bounds = matrix(data = c( c(1, decrease + 1), c(decrease, length(x)) ),
ncol = n_decreases,
byrow = TRUE,
dimnames = dimnames)
if(!idx){
bounds["begin", ] = x[ bounds["begin", ] ]
bounds["end", ] = x[ bounds["end", ] ]
}
as.data.frame(bounds)
}
#' Trim sensor overlap
#'
#' @param x spectra object
#' @param splice_at bands where to splice sensors. suggests where the
#' beginning of sensors 2 and 3 should be.
#' @return spectra object
#'
#' @keywords internal
#' @author Jose Eduardo Meireles
i_trim_sensor_overlap = function(x, splice_at){
w = bands(x)
b = i_find_sensor_overlap_bounds(w)
if(ncol(b) == 1){
message("No overlap regions were found. Returning spectra unmodified...")
return(x)
}
if(length(splice_at) != ncol(b) - 1){
stop("number of cut_points must be equal to the number of overlaps.")
}
s = lapply(b, function(y){
w[ seq.int(y[[1]], y[[2]]) ]
})
## trim band lists
for(i in 1:length(splice_at) ){
right = which(s[[i + 1]] >= splice_at[i])
s[[i + 1]] = s[[i + 1]][ right ]
s[[i]] = s[[i]][ s[[i]] < min(s[[i + 1]]) ]
}
list("spectra" = x[ , unlist(s) ],
"sensor" = rep(names(s), sapply(s, length)))
}
#' Match spectra at sensor transitions
#'
#' \code{match_sensors} scales the values of sensors 1 (vis) and 3 (swir2)
#'
#' Splice_at has no default because sensor transition points vary between vendors
#' and individual instruments. It is an important parameter though, so you should
#' visually inspect your spectra before assigning it.
#' Typical values in our own individual instruments were:
#' SVC ~ c(990, 1900),
#' ASD ~ c(1001, 1801).
#'
#' If the factors used to match spectra are unreasonable, \code{match_sensors}
#' will throw. Unreasonable factors (f) are defined as 0.5 > f > 3 or NaN,
#' which happens when the value for the right sensor is 0.
#'
#' @param x spectra object
#' @param splice_at bands that serve as splice points, i.e the beginnings
#' of the rightmost sensor. Must be length 1 or 2 (max 3 sensors)
#' @param fixed_sensor sensor to keep fixed. Can be 1 or 2 if matching 2 sensors.
#' If matching 3 sensors, `fixed_sensor` must be 2 (default).
#' @param interpolate_wvl extent around splice_at values over which the splicing
#' factors will be calculated. Defaults to 5
#' @param factor_range range of acceptable correction factors (min, max).
#' Defaults to c(0.5, 3)
#' @return spectra object
#'
#' @author Jose Eduardo Meireles and Anna Schweiger
#' @export
match_sensors = function(x,
splice_at,
fixed_sensor = 2,
interpolate_wvl = 5,
factor_range = c(0.5, 3) ){
UseMethod("match_sensors")
}
#' @describeIn match_sensors Match sensor overlap regions
#' @export
match_sensors.spectra = function(x,
splice_at,
fixed_sensor = 2,
interpolate_wvl = 5,
factor_range = c(0.5, 3)){
# message("Warning: feature under development!")
# message("match_sensors: should not be used in poduction code.")
# message("match_sensors: API will change.")
x = x
w = bands(x)
splice_at = unlist(splice_at)
if(length(splice_at) > 2){
stop("matching more than 3 sensors not implemented.")
}
if(length(unlist(fixed_sensor)) != 1 | ! fixed_sensor %in% c(1, 2) ){
stop("fixed_sensor must be 1 or 2")
}
fixed_sensor = ifelse( length(splice_at) == 2, 2, fixed_sensor)
if( ! i_is_increasing(x = w) ){
y = i_trim_sensor_overlap(x = x, splice_at = splice_at)
x = y$spectra # reassign x
w = bands(x) # reassign w
s = split(w, y$sensor)
} else {
s = cut(x = w,
breaks = c(min(w), splice_at, max(w)),
include.lowest = TRUE,
right = FALSE,
labels = paste("sensor", seq(length(splice_at) + 1), sep = "_") )
s = split(w, s)
}
interpolate_wvl = rep(interpolate_wvl, length.out = length(splice_at))
  ## Pick bands by sensor to compute factors
wl_picks = lapply(seq_along(splice_at), function(z){
low = splice_at[z] - interpolate_wvl[z]
high = splice_at[z] + interpolate_wvl[z]
left = s[[ z ]][ s[[ z ]] >= low ]
right = s[[z + 1L ]][ s[[z + 1L ]] <= high ]
# solve issues if any of the picks are empty
if(length(left) == 0){
left = max(s[[ z ]])
}
if(length(right) == 0){
right = min(s[[ z + 1L ]])
}
list("left" = left, "right" = right)
})
names(wl_picks) = splice_at
## compute splicing factors
splice_factors = lapply(seq_along(wl_picks), function(z){
y = setNames(c(z, z + 1), c("left", "right"))
m = names(y[match(fixed_sensor, y)])
if(m == "right"){
fixed = wl_picks[[z]]$left
scaled = wl_picks[[z]]$right
} else {
fixed = wl_picks[[z]]$right
scaled = wl_picks[[z]]$left
}
rowMeans(value(x[ , scaled, simplify = FALSE])) /
rowMeans(value(x[ , fixed, simplify = FALSE]))
})
## Verify if factors for splicing are reasonable
lapply(splice_factors, function(z){
crap = which( z < factor_range[[1]] | z > factor_range[[2]] | is.nan(z))
if(length(crap) > 0 ){
stop("Factors to match sensors are either NaN or are outside the bounds chosen by `factor_range`:",
paste(crap, sep = " "))
}
})
## Compute the factor matrices
## These functions need to be empirically derived. Current implementation
## is just a hack and should not be used in production code
s[fixed_sensor] = NULL
factor_mat = lapply(seq_along(splice_factors), function(z){
#seq(1.0, y, length.out = length(s$sensor_1))
l = length(s[[z]])
sapply(splice_factors[[z]], function(w){
seq(w, w, length.out = l)
})
})
## Transform data
for(i in seq_along(factor_mat)){
x[ , s[[i]]] = value(x[ , s[[i]] ] ) * t( factor_mat[[i]] )
}
x
}
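## ---- Hedged usage sketch (added; not part of the package source) ----
## Call pattern implied by the roxygen block above. `my_spectra` stands in for
## an existing spectra object with sensor-overlap regions; the splice points are
## the SVC-style values quoted in the documentation, not package defaults.
if (FALSE) {
  spliced <- match_sensors(my_spectra, splice_at = c(990, 1900),
                           fixed_sensor = 2, interpolate_wvl = 5)
  plot(spliced)  # visually check the sensor transitions after matching
}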
|
/R/match_sensors.R
|
no_license
|
williamwzt/spectrolab
|
R
| false | false | 7,137 |
r
|
#' Find sensor overlap bounds
#'
#' \code{i_find_sensor_overlap_bounds} finds the overlap bounds between sensors
#'
#' @param x band vector
#' @param idx boolean. return indices? defaults to TRUE
#' @return data.frame with sensor bounds
#'
#' @keywords internal
#' @author Jose Eduardo Meireles
i_find_sensor_overlap_bounds = function(x, idx = TRUE){
decrease = which(diff(x) < 0.0)
n_decreases = length(decrease) + 1
dimnames = list(c("begin", "end"),
paste("sensor", seq(n_decreases), sep = "_"))
bounds = matrix(data = c( c(1, decrease + 1), c(decrease, length(x)) ),
ncol = n_decreases,
byrow = TRUE,
dimnames = dimnames)
if(!idx){
bounds["begin", ] = x[ bounds["begin", ] ]
bounds["end", ] = x[ bounds["end", ] ]
}
as.data.frame(bounds)
}
#' Trim sensor overlap
#'
#' @param x spectra object
#' @param splice_at bands where to splice sensors. suggests where the
#' beginning of sensors 2 and 3 should be.
#' @return spectra object
#'
#' @keywords internal
#' @author Jose Eduardo Meireles
i_trim_sensor_overlap = function(x, splice_at){
w = bands(x)
b = i_find_sensor_overlap_bounds(w)
if(ncol(b) == 1){
message("No overlap regions were found. Returning spectra unmodified...")
return(x)
}
if(length(splice_at) != ncol(b) - 1){
stop("number of cut_points must be equal to the number of overlaps.")
}
s = lapply(b, function(y){
w[ seq.int(y[[1]], y[[2]]) ]
})
## trim band lists
for(i in 1:length(splice_at) ){
right = which(s[[i + 1]] >= splice_at[i])
s[[i + 1]] = s[[i + 1]][ right ]
s[[i]] = s[[i]][ s[[i]] < min(s[[i + 1]]) ]
}
list("spectra" = x[ , unlist(s) ],
"sensor" = rep(names(s), sapply(s, length)))
}
#' Match spectra at sensor transitions
#'
#' \code{match_sensors} scales value values of sensors 1 (vis) and 3 (swir2)
#'
#' Splice_at has no default because sensor transition points vary between vendors
#' and individual instruments. It is an important parameter though, so you should
#' visually inspect your spectra before assigning it.
#' Typical values in our own individual instruments were:
#' SVC ~ c(990, 1900),
#' ASD ~ c(1001, 1801).
#'
#' If the factors used to match spectra are unreasonable, \code{match_sensors}
#' will throw. Unreasonable factors (f) are defined as 0.5 > f > 3 or NaN,
#' which happens when the value value for the right sensor is 0.
#'
#' @param x spectra object
#' @param splice_at bands that serve as splice points, i.e the beginnings
#' of the rightmost sensor. Must be length 1 or 2 (max 3 sensors)
#' @param fixed_sensor sensor to keep fixed. Can be 1 or 2 if matching 2 sensors.
#' If matching 3 sensors, `fixed_sensor` must be 2 (default).
#' @param interpolate_wvl extent around splice_at values over which the splicing
#' factors will be calculated. Defaults to 5
#' @param factor_range range of acceptable correction factors (min, max).
#' Defaults to c(0.5, 3)
#' @return spectra object
#'
#' @author Jose Eduardo Meireles and Anna Schweiger
#' @export
match_sensors = function(x,
splice_at,
fixed_sensor = 2,
interpolate_wvl = 5,
factor_range = c(0.5, 3) ){
UseMethod("match_sensors")
}
#' @describeIn match_sensors Match sensor overlap regions
#' @export
match_sensors.spectra = function(x,
splice_at,
fixed_sensor = 2,
interpolate_wvl = 5,
factor_range = c(0.5, 3)){
# message("Warning: feature under development!")
# message("match_sensors: should not be used in poduction code.")
# message("match_sensors: API will change.")
x = x
w = bands(x)
splice_at = unlist(splice_at)
if(length(splice_at) > 2){
stop("matching more than 3 sensors not implemented.")
}
if(length(unlist(fixed_sensor)) != 1 | ! fixed_sensor %in% c(1, 2) ){
stop("fixed_sensor must be 1 or 2")
}
fixed_sensor = ifelse( length(splice_at) == 2, 2, fixed_sensor)
if( ! i_is_increasing(x = w) ){
y = i_trim_sensor_overlap(x = x, splice_at = splice_at)
x = y$spectra # reassign x
w = bands(x) # reassign w
s = split(w, y$sensor)
} else {
s = cut(x = w,
breaks = c(min(w), splice_at, max(w)),
include.lowest = TRUE,
right = FALSE,
labels = paste("sensor", seq(length(splice_at) + 1), sep = "_") )
s = split(w, s)
}
interpolate_wvl = rep(interpolate_wvl, length.out = length(splice_at))
## Pick bands by sensor to computer factors
wl_picks = lapply(seq_along(splice_at), function(z){
low = splice_at[z] - interpolate_wvl[z]
high = splice_at[z] + interpolate_wvl[z]
left = s[[ z ]][ s[[ z ]] >= low ]
right = s[[z + 1L ]][ s[[z + 1L ]] <= high ]
# solve issues if any of the picks are empty
if(length(left) == 0){
left = max(s[[ z ]])
}
if(length(right) == 0){
right = min(s[[ z + 1L ]])
}
list("left" = left, "right" = right)
})
names(wl_picks) = splice_at
## compute splicing factors
splice_factors = lapply(seq_along(wl_picks), function(z){
y = setNames(c(z, z + 1), c("left", "right"))
m = names(y[match(fixed_sensor, y)])
if(m == "right"){
fixed = wl_picks[[z]]$left
scaled = wl_picks[[z]]$right
} else {
fixed = wl_picks[[z]]$right
scaled = wl_picks[[z]]$left
}
rowMeans(value(x[ , scaled, simplify = FALSE])) /
rowMeans(value(x[ , fixed, simplify = FALSE]))
})
## Verify if factors for splicing are reasonable
lapply(splice_factors, function(z){
crap = which( z < factor_range[[1]] | z > factor_range[[2]] | is.nan(z))
if(length(crap) > 0 ){
stop("Factors to match sensors are either NaN or are outside the bounds chosen by `factor_range`:",
paste(crap, sep = " "))
}
})
## Compute the factor matrices
## These functions need to be empirically derived. Current implementation
## is just a hack and should not be used in production code
s[fixed_sensor] = NULL
factor_mat = lapply(seq_along(splice_factors), function(z){
#seq(1.0, y, length.out = length(s$sensor_1))
l = length(s[[z]])
sapply(splice_factors[[z]], function(w){
seq(w, w, length.out = l)
})
})
## Transform data
for(i in seq_along(factor_mat)){
x[ , s[[i]]] = value(x[ , s[[i]] ] ) * t( factor_mat[[i]] )
}
x
}
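## Minimal usage sketch (not from the original file; assumes `spec` is a
## `spectra` object already read into R, and the splice points below are
## hypothetical -- inspect your own spectra before choosing them):
## spec_matched = match_sensors(spec, splice_at = c(990, 1900), fixed_sensor = 2)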
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_prop_identified.R
\name{get_prop_identified}
\alias{get_prop_identified}
\title{Calculate the proportion isolated and quarantined}
\usage{
get_prop_identified(dqc)
}
\arguments{
\item{dqc}{Numeric vector of Detected-Quarantine-Community proportions.
The components of this vector should sum to one. The order of the
components should be: \code{Ds}, \code{Da}, \code{Qcds}, \code{Qcda}, \code{Qhds}, \code{Qhda},
\code{Qq}, \code{Cs}, \code{Ca}. See \code{\link[=init_dqc]{init_dqc()}} to generate this vector.}
}
\value{
Numeric value. Proportion identified.
}
\description{
Calculates the proportion isolated and quarantined from a
Detected-Quarantine-Community vector
}
|
/man/get_prop_identified.Rd
|
permissive
|
HopkinsIDD/tti
|
R
| false | true | 755 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_prop_identified.R
\name{get_prop_identified}
\alias{get_prop_identified}
\title{Calculate the proportion isolated and quarantined}
\usage{
get_prop_identified(dqc)
}
\arguments{
\item{dqc}{Numeric vector of Detected-Quarantine-Community proportions.
The components of this vector should sum to one. The order of the
components should be: \code{Ds}, \code{Da}, \code{Qcds}, \code{Qcda}, \code{Qhds}, \code{Qhda},
\code{Qq}, \code{Cs}, \code{Ca}. See \code{\link[=init_dqc]{init_dqc()}} to generate this vector.}
}
\value{
Numeric value. Proportion identified.
}
\description{
Calculates the proportion isolated and quarantined from a
Detected-Quarantine-Community vector
}
|
library(nat)
### Name: as.data.frame.neuronlist
### Title: Get or set the attached data.frame of a neuronlist
### Aliases: as.data.frame.neuronlist data.frame<- data.frame<-.neuronlist
### ** Examples
head(as.data.frame(kcs20))
# add additional variables
str(as.data.frame(kcs20, i=seq(kcs20), abc=LETTERS[seq(kcs20)]))
# stop character columns being turned into factors
newdf <- as.data.frame(kcs20, i=seq(kcs20), abc=LETTERS[seq(kcs20)],
stringsAsFactors=FALSE)
str(newdf)
data.frame(kcs20)=newdf
|
/data/genthat_extracted_code/nat/examples/get-set-neuronlist-data.frame.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 510 |
r
|
library(nat)
### Name: as.data.frame.neuronlist
### Title: Get or set the attached data.frame of a neuronlist
### Aliases: as.data.frame.neuronlist data.frame<- data.frame<-.neuronlist
### ** Examples
head(as.data.frame(kcs20))
# add additional variables
str(as.data.frame(kcs20, i=seq(kcs20), abc=LETTERS[seq(kcs20)]))
# stop character columns being turned into factors
newdf <- as.data.frame(kcs20, i=seq(kcs20), abc=LETTERS[seq(kcs20)],
stringsAsFactors=FALSE)
str(newdf)
data.frame(kcs20)=newdf
|
/PCA_Atricutos Geomorfo_ESP.R
|
no_license
|
Dariana713/Datosdeanalisis_R
|
R
| false | false | 2,949 |
r
| ||
# This script was written by Arjun, and tidied by Travis on 20210617
# Load packages -----------------------------------------------------------
library(tidyverse)
# Read in pathway hierarchy and pathway names -----------------------------
pathway_hierarchy <- read_tsv(
"https://reactome.org/download/current/ReactomePathwaysRelation.txt",
col_names = c("higher", "pathway_id")
) %>%
filter(str_detect(higher, "^R-HSA"))
all_pathways <- read_tsv(
"https://reactome.org/download/current/ReactomePathways.txt",
col_names = c("pathway_id", "description", "organism")
) %>%
filter(str_detect(pathway_id, "^R-HSA")) %>%
select(-organism)
# Function to join and rename ---------------------------------------------
pathway_join <- function(input, name) {
nam <- name
p <- left_join(input, pathway_hierarchy, by = "pathway_id") %>%
rename(!!name := "pathway_id", pathway_id = "higher")
return(p)
}
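# Illustration (hypothetical IDs): after the first call with name = "enr_pathway",
# a row with pathway_id "R-HSA-123" whose parent is "R-HSA-456" becomes
# enr_pathway = "R-HSA-123", pathway_id = "R-HSA-456", so the next join can climb
# one more level of the hierarchy.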
# Initial join of all pathways to their parent ----------------------------
full_hierarchy <- all_pathways %>%
select(pathway_id) %>%
pathway_join(name = "enr_pathway")
# Run successive joins to fully expand the hierarchy ----------------------
for (i in 1:11) {
names <- as.character(english::as.english(i))
full_hierarchy <- full_hierarchy %>% pathway_join(name = names)
}
# All entries in "pathway_id" column are NA, so remove that column
length(na.omit(full_hierarchy$pathway_id))
full_hierarchy <- full_hierarchy %>% select(-pathway_id)
# Reduce hierarchy --------------------------------------------------------
# For each term, we want its highest and second-highest level parent
enr_pathway_high_level <- data.frame()
for (row in 1:nrow(full_hierarchy)) {
p <- na.omit(as.character(full_hierarchy[row, ]))
how_deep <- length(p)
if (how_deep >= 3) {
p <- data.frame(
enr_pathway = p[1],
one_lower_level = p[length(p) - 1],
top_level = p[length(p)]
)
enr_pathway_high_level <- bind_rows(enr_pathway_high_level, p)
} else {
p <- data.frame(
enr_pathway = p[1],
one_lower_level = p[1],
top_level = p[length(p)]
)
enr_pathway_high_level <- bind_rows(enr_pathway_high_level, p)
}
}
# Add descriptions --------------------------------------------------------
pathways_higher_levels_description <- enr_pathway_high_level %>%
# Base level
left_join(all_pathways, by = c("enr_pathway" = "pathway_id")) %>%
rename("enr_description" = description) %>%
# Second-highest level
left_join(all_pathways, by = c("one_lower_level" = "pathway_id")) %>%
rename("one_lower_level_description" = description) %>%
# Highest level
left_join(all_pathways, by = c("top_level" = "pathway_id")) %>%
rename("top_level_description" = description)
# Save the results --------------------------------------------------------
write_csv(
full_hierarchy,
"data/reactome_pathway_hierarchy_full.csv"
)
write_csv(
pathways_higher_levels_description,
"data/reactome_pathway_hierarchy_top.csv"
)
|
/R_scripts/get_reactome_categories_Arjun.R
|
no_license
|
hancockinformatics/misc_R_scripts
|
R
| false | false | 3,040 |
r
|
# This script was written by Arjun, and tidied by Travis on 20210617
# Load packages -----------------------------------------------------------
library(tidyverse)
# Read in pathway hierarchy and pathway names -----------------------------
pathway_hierarchy <- read_tsv(
"https://reactome.org/download/current/ReactomePathwaysRelation.txt",
col_names = c("higher", "pathway_id")
) %>%
filter(str_detect(higher, "^R-HSA"))
all_pathways <- read_tsv(
"https://reactome.org/download/current/ReactomePathways.txt",
col_names = c("pathway_id", "description", "organism")
) %>%
filter(str_detect(pathway_id, "^R-HSA")) %>%
select(-organism)
# Function to join and rename ---------------------------------------------
pathway_join <- function(input, name) {
nam <- name
p <- left_join(input, pathway_hierarchy, by = "pathway_id") %>%
rename(!!name := "pathway_id", pathway_id = "higher")
return(p)
}
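# Illustration (hypothetical IDs): after the first call with name = "enr_pathway",
# a row with pathway_id "R-HSA-123" whose parent is "R-HSA-456" becomes
# enr_pathway = "R-HSA-123", pathway_id = "R-HSA-456", so the next join can climb
# one more level of the hierarchy.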
# Initial join of all pathways to their parent ----------------------------
full_hierarchy <- all_pathways %>%
select(pathway_id) %>%
pathway_join(name = "enr_pathway")
# Run successive joins to fully expand the hierarchy ----------------------
for (i in 1:11) {
names <- as.character(english::as.english(i))
full_hierarchy <- full_hierarchy %>% pathway_join(name = names)
}
# All entries in "pathway_id" column are NA, so remove that column
length(na.omit(full_hierarchy$pathway_id))
full_hierarchy <- full_hierarchy %>% select(-pathway_id)
# Reduce hierarchy --------------------------------------------------------
# For each term, we want its highest and second-highest level parent
enr_pathway_high_level <- data.frame()
for (row in 1:nrow(full_hierarchy)) {
p <- na.omit(as.character(full_hierarchy[row, ]))
how_deep <- length(p)
if (how_deep >= 3) {
p <- data.frame(
enr_pathway = p[1],
one_lower_level = p[length(p) - 1],
top_level = p[length(p)]
)
enr_pathway_high_level <- bind_rows(enr_pathway_high_level, p)
} else {
p <- data.frame(
enr_pathway = p[1],
one_lower_level = p[1],
top_level = p[length(p)]
)
enr_pathway_high_level <- bind_rows(enr_pathway_high_level, p)
}
}
# Add descriptions --------------------------------------------------------
pathways_higher_levels_description <- enr_pathway_high_level %>%
# Base level
left_join(all_pathways, by = c("enr_pathway" = "pathway_id")) %>%
rename("enr_description" = description) %>%
# Second-highest level
left_join(all_pathways, by = c("one_lower_level" = "pathway_id")) %>%
rename("one_lower_level_description" = description) %>%
# Highest level
left_join(all_pathways, by = c("top_level" = "pathway_id")) %>%
rename("top_level_description" = description)
# Save the results --------------------------------------------------------
write_csv(
full_hierarchy,
"data/reactome_pathway_hierarchy_full.csv"
)
write_csv(
pathways_higher_levels_description,
"data/reactome_pathway_hierarchy_top.csv"
)
|
## This file contains functions makeCacheMatrix and cacheSolve
## that compute and cache the inverse of a matrix
## The makeCacheMatrix function creates a special "matrix" object
## that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The cacheSolve function computes the inverse of the special "matrix"
## returned by makeCacheMatrix
## If the inverse has already been calculated (and the matrix has not changed)
## then cacheSolve retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
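## Example usage (small invertible matrix, not part of the assignment code):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m) # computes and caches the inverse, diag(0.5, 2)
## cacheSolve(m) # prints "getting cached data" and returns the cached inverse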
|
/cachematrix.R
|
no_license
|
burubaxair/ProgrammingAssignment2
|
R
| false | false | 1,009 |
r
|
## This file contains functions makeCacheMatrix and cacheSolve
## that compute and cache the inverse of a matrix
## The makeCacheMatrix function creates a special "matrix" object
## that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The cacheSolve function computes the inverse of the special "matrix"
## returned by makeCacheMatrix
## If the inverse has already been calculated (and the matrix has not changed)
## then cacheSolve retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
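## Example usage (small invertible matrix, not part of the assignment code):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m) # computes and caches the inverse, diag(0.5, 2)
## cacheSolve(m) # prints "getting cached data" and returns the cached inverse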
|
library(glmnet)
mydata = read.table("./TrainingSet/RF/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.03,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/bone/bone_016.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Classifier/bone/bone_016.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 345 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/RF/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.03,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/bone/bone_016.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
GetColourHex <- function(colourName) {
# This function takes the name of a colour and outputs its hex code
# Define colours
colourList <- list(
# http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/
"black" = "#000000",
"grey" = "#999999",
"gold" = "#E69F00",
"light blue" = "#56B4E9",
"green" = "#009E73",
"yellow" = "#F0E442",
"dark blue" = "#0072B2",
"orange" = "#D55E00",
"pink" = "#CC79A7",
# https://www.datanovia.com/en/blog/ggplot-colors-best-tricks-you-will-love/
"yellow2" = "#FFDB6D",
"gold2" = "#C4961A",
"cream" = "#F4EDCA",
"rust" = "#D16103",
"light green" = "#C3D7A4",
"dark green" = "#52854C",
"light blue 2" = "#4E84C4",
"dark blue 2" = "#293352"
)
# Get colour
colourHex <- colourList[[colourName]]
return(colourHex)
}
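# Example usage (the name must be one of the entries in colourList):
# GetColourHex("dark blue") # returns "#0072B2"
# GetColourHex("rust") # returns "#D16103"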
|
/BC-big-game-harvest/src/GetColourHex.R
|
no_license
|
hlmore/big-game-hunting
|
R
| false | false | 840 |
r
|
GetColourHex <- function(colourName) {
# This function takes the name of a colour and outputs its hex code
# Define colours
colourList <- list(
# http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/
"black" = "#000000",
"grey" = "#999999",
"gold" = "#E69F00",
"light blue" = "#56B4E9",
"green" = "#009E73",
"yellow" = "#F0E442",
"dark blue" = "#0072B2",
"orange" = "#D55E00",
"pink" = "#CC79A7",
# https://www.datanovia.com/en/blog/ggplot-colors-best-tricks-you-will-love/
"yellow2" = "#FFDB6D",
"gold2" = "#C4961A",
"cream" = "#F4EDCA",
"rust" = "#D16103",
"light green" = "#C3D7A4",
"dark green" = "#52854C",
"light blue 2" = "#4E84C4",
"dark blue 2" = "#293352"
)
# Get colour
colourHex <- colourList[[colourName]]
return(colourHex)
}
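# Example usage (the name must be one of the entries in colourList):
# GetColourHex("dark blue") # returns "#0072B2"
# GetColourHex("rust") # returns "#D16103"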
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GroupSize.R
\name{GroupRate}
\alias{GroupRate}
\alias{GroupSize}
\title{Schaetz- und Fehlerrechnungsfunktionen.}
\usage{
GroupRate(x, TFstring, TFstring2 = NULL, each = NULL, byeach = TRUE,
thousands_separator = TRUE, digits = 2, replicates = FALSE)
GroupSize(x, TFstring = NULL, each = NULL,
thousands_separator = TRUE, digits = 2, replicates = FALSE)
}
\arguments{
\item{x}{MZ Daten - Output von Funktion \link{ImportData}.}
\item{TFstring}{Character oder NULL: Logische Einschraenkung der Gruppe, im Fall der Anteile (\code{GroupRate}):
Einschraenkung fuer Zaehler. Falls NULL, gilt keine Einschraenkung.}
\item{TFstring2}{Character oder NULL: Logische Einschraenkung fuer den Nenner.
Falls NULL, gilt keine Einschraenkung fuer den Nenner ausser wenn \code{each} ungleich NULL und \code{byeach=TRUE}.}
\item{each}{Character oder NULL: Name der Variable nach der getrennt berechnet werden soll.
Hier koennen auch mehrere Variablen angegeben werden und zwar in einem character string, getrennt durch ein +, siehe Examples.}
\item{byeach}{Logical: Vorgabe fuer den Nenner. \code{byeach} bezieht sich auf den
Parameter \code{each} und gibt an, ob bei \code{GroupRate} zusaetzlich zu
\code{TFstring2} auch die jeweilige durch \code{each} definierte Einschraenkung
in den Nenner kommen soll, also ob die Raten jeweils bezogen auf die Auspraegungen
bzw. Auspraegungskombinationen von \code{each} berechnet werden sollen (\code{byeach=TRUE})
oder rein bezogen auf \code{TFstring2} (\code{byeach=FALSE}).}
\item{thousands_separator}{Logical: Wenn TRUE, werden Tausendertrennzeichen
angezeigt.}
\item{digits}{Numerischer Wert: Anzahl der Nachkommastellen im angezeigten Ergebnis. Default
ist 2.}
\item{replicates}{Füge einen Vektor aus Schätzwerten zum Output hinzu? Die Anzahl der Schätzwerte
pro Gruppe in \code{each} entspricht der Anzahl der Bootstrapreplikate (typischerweise 500).
Siehe auch \link{getReplicates}.}
}
\value{
Output ist ein Objekt der Klasse \code{mzR}.
}
\description{
Funktion berechnet absolute (\code{GroupSize}) oder relative
(\code{GroupRate}) Schaetzwerte und die zugehoerigen Fehler.
}
\details{
Wiedergegeben wird der Schaetzer \code{est}, der Stichprobenfehler
\code{sd}, der Variationskoeffizient \code{cv} und die untere/obere Grenze
des 95\% Konfidenzintervalls \code{cil_2.5\%}/\code{ciu_97.5\%}.
Die Fehler werden mit Hilfe von Replikationsgewichten aus einem Bootstrapverfahren
berechnet, d.h. \code{sd} entspricht der Standardabweichung der mit den Bootstrapgewichten
berechneten Schaetzwerte, \code{cil_2.5\%} und \code{ciu_97.5\%} sind die
entsprechenden 2.5\% und 97.5\% Quantile und \code{cv=sd/est}.
}
\examples{
# Daten laden (oder Daten einlesen mit ImportData() bzw. IndivImportData())
data(mzTestData)
# Arbeitslosenzahlen: Absolutwerte und Veraenderung
GroupSize(mzTestData,TFstring="xerwstat==2&balt>=15&balt<=74")
# Arbeitslosenquoten: Prozentwerte und Veraenderung
GroupRate(mzTestData,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74")
# Oesterreichische Bevoelkerung nach Bundesland und Geschlecht
GroupSize(mzTestData,TFstring=NULL,each="xnuts2+bsex")
\dontrun{
############################################################################################
# Zusaetzliche Beispiele fuer DatennutzerInnen der Mikrozensus-Arbeitskraefteerhebung: #
############################################################################################
# Quartal und zugehoeriges Vorjahrsquartal einlesen (Funktion fuer STAT-interne Nutzer)
dat <- ImportData(year=2014,quarter=4, comp_diff_lag=4)
# Oesterreichische Bevoelkerung nach Bundesland und Geschlecht
GroupSize(dat,TFstring=NULL,each="xnuts2+bsex")
# Arbeitslosenzahlen: Absolutwerte und Veraenderung
GroupSize(dat,TFstring="xerwstat==2&balt>=15&balt<=74")
# Arbeitslosenquoten: Prozentwerte und Veraenderung
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74")
# Arbeitslosenzahl vom aktuelleren der beiden Quartale
GroupSize(dat[1],TFstring="xerwstat==2&balt>=15&balt<=74")
# Arbeitslosenquote vom weniger aktuellen Quartal
GroupRate(dat[2],TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74")
# Absolutwerte und Veraenderung fuer jede Auspraegung von xerwstat eingeschraenkt auf 15-74-Jaehrige
GroupSize(dat,TFstring="balt>=15&balt<=74",each="xerwstat")
# Prozentwerte (bezogen auf Gesamtbevoelkerung) und Veraenderung
# fuer jede Auspraegung von xerwstat eingeschraenkt auf 15-74-Jaehrige
GroupRate(dat,TFstring="balt>=15&balt<=74",each="xerwstat")
# Arbeitslosenzahlen: Absolutwerte und Veraenderung fuer jedes Bundesland
GroupSize(dat,TFstring="xerwstat==2&balt>=15&balt<=74",each="xnuts2")
# Arbeitslosenquote: Prozentwerte und Veraenderung pro Bundesland
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74",each="xnuts2")
# Arbeitslosenquote: Prozentwerte und Veraenderung pro Geschlecht
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74",each="bsex")
# Arbeitslosenquote: Prozentwerte und Veraenderung pro Bundesland X Geschlecht
GroupSize(dat,TFstring="xerwstat==2&balt>=15&balt<=74",each="xnuts2+bsex")
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74",each="xnuts2+bsex")
# Haushalte: Quartal und Vorquartal einlesen.
dat <- ImportData(year=2014,quarter=4, comp_diff_lag=1, hh=TRUE)
# Absolutwerte: Anzahl der Hauptmietwohnungen ohne gueltiger Kostenangabe.
GroupSize(dat,TFstring="wrecht==3")
GroupSize(dat,TFstring="wrecht2\%in\%c(1:3)")
}
}
\seealso{
\code{\link{ImportData},\link{IndivImportData},\link{ImportAndMerge},\link{GetLabels},\link{Total},\link{Mean},\link{export}}
}
|
/man/GroupSize.Rd
|
no_license
|
statistikat/mzR
|
R
| false | true | 5,922 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GroupSize.R
\name{GroupRate}
\alias{GroupRate}
\alias{GroupSize}
\title{Schaetz- und Fehlerrechnungsfunktionen.}
\usage{
GroupRate(x, TFstring, TFstring2 = NULL, each = NULL, byeach = TRUE,
thousands_separator = TRUE, digits = 2, replicates = FALSE)
GroupSize(x, TFstring = NULL, each = NULL,
thousands_separator = TRUE, digits = 2, replicates = FALSE)
}
\arguments{
\item{x}{MZ Daten - Output von Funktion \link{ImportData}.}
\item{TFstring}{Character oder NULL: Logische Einschraenkung der Gruppe, im Fall der Anteile (\code{GroupRate}):
Einschraenkung fuer Zaehler. Falls NULL, gilt keine Einschraenkung.}
\item{TFstring2}{Character oder NULL: Logische Einschraenkung fuer den Nenner.
Falls NULL, gilt keine Einschraenkung fuer den Nenner ausser wenn \code{each} ungleich NULL und \code{byeach=TRUE}.}
\item{each}{Character oder NULL: Name der Variable nach der getrennt berechnet werden soll.
Hier koennen auch mehrere Variablen angegeben werden und zwar in einem character string, getrennt durch ein +, siehe Examples.}
\item{byeach}{Logical: Vorgabe fuer den Nenner. \code{byeach} bezieht sich auf den
Parameter \code{each} und gibt an, ob bei \code{GroupRate} zusaetzlich zu
\code{TFstring2} auch die jeweilige durch \code{each} definierte Einschraenkung
in den Nenner kommen soll, also ob die Raten jeweils bezogen auf die Auspraegungen
bzw. Auspraegungskombinationen von \code{each} berechnet werden sollen (\code{byeach=TRUE})
oder rein bezogen auf \code{TFstring2} (\code{byeach=FALSE}).}
\item{thousands_separator}{Logical: Wenn TRUE, werden Tausendertrennzeichen
angezeigt.}
\item{digits}{Numerischer Wert: Anzahl der Nachkommastellen im angezeigten Ergebnis. Default
ist 2.}
\item{replicates}{Füge einen Vektor aus Schätzwerten zum Output hinzu? Die Anzahl der Schätzwerte
pro Gruppe in \code{each} entspricht der Anzahl der Bootstrapreplikate (typischerweise 500).
Siehe auch \link{getReplicates}.}
}
\value{
Output ist ein Objekt der Klasse \code{mzR}.
}
\description{
Funktion berechnet absolute (\code{GroupSize}) oder relative
(\code{GroupRate}) Schaetzwerte und die zugehoerigen Fehler.
}
\details{
Wiedergegeben wird der Schaetzer \code{est}, der Stichprobenfehler
\code{sd}, der Variationskoeffizient \code{cv} und die untere/obere Grenze
des 95\% Konfidenzintervalls \code{cil_2.5\%}/\code{ciu_97.5\%}.
Die Fehler werden mit Hilfe von Replikationsgewichten aus einem Bootstrapverfahren
berechnet, d.h. \code{sd} entspricht der Standardabweichung der mit den Bootstrapgewichten
berechneten Schaetzwerte, \code{cil_2.5\%} und \code{ciu_97.5\%} sind die
entsprechenden 2.5\% und 97.5\% Quantile und \code{cv=sd/est}.
}
\examples{
# Daten laden (oder Daten einlesen mit ImportData() bzw. IndivImportData())
data(mzTestData)
# Arbeitslosenzahlen: Absolutwerte und Veraenderung
GroupSize(mzTestData,TFstring="xerwstat==2&balt>=15&balt<=74")
# Arbeitslosenquoten: Prozentwerte und Veraenderung
GroupRate(mzTestData,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74")
# Oesterreichische Bevoelkerung nach Bundesland und Geschlecht
GroupSize(mzTestData,TFstring=NULL,each="xnuts2+bsex")
\dontrun{
############################################################################################
# Zusaetzliche Beispiele fuer DatennutzerInnen der Mikrozensus-Arbeitskraefteerhebung: #
############################################################################################
# Quartal und zugehoeriges Vorjahrsquartal einlesen (Funktion fuer STAT-interne Nutzer)
dat <- ImportData(year=2014,quarter=4, comp_diff_lag=4)
# Oesterreichische Bevoelkerung nach Bundesland und Geschlecht
GroupSize(dat,TFstring=NULL,each="xnuts2+bsex")
# Arbeitslosenzahlen: Absolutwerte und Veraenderung
GroupSize(dat,TFstring="xerwstat==2&balt>=15&balt<=74")
# Arbeitslosenquoten: Prozentwerte und Veraenderung
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74")
# Arbeitslosenzahl vom aktuelleren der beiden Quartale
GroupSize(dat[1],TFstring="xerwstat==2&balt>=15&balt<=74")
# Arbeitslosenquote vom weniger aktuellen Quartal
GroupRate(dat[2],TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74")
# Absolutwerte und Veraenderung fuer jede Auspraegung von xerwstat eingeschraenkt auf 15-74-Jaehrige
GroupSize(dat,TFstring="balt>=15&balt<=74",each="xerwstat")
# Prozentwerte (bezogen auf Gesamtbevoelkerung) und Veraenderung
# fuer jede Auspraegung von xerwstat eingeschraenkt auf 15-74-Jaehrige
GroupRate(dat,TFstring="balt>=15&balt<=74",each="xerwstat")
# Arbeitslosenzahlen: Absolutwerte und Veraenderung fuer jedes Bundesland
GroupSize(dat,TFstring="xerwstat==2&balt>=15&balt<=74",each="xnuts2")
# Arbeitslosenquote: Prozentwerte und Veraenderung pro Bundesland
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74",each="xnuts2")
# Arbeitslosenquote: Prozentwerte und Veraenderung pro Geschlecht
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74",each="bsex")
# Arbeitslosenquote: Prozentwerte und Veraenderung pro Bundesland X Geschlecht
GroupSize(dat,TFstring="xerwstat==2&balt>=15&balt<=74",each="xnuts2+bsex")
GroupRate(dat,TFstring="xerwstat==2&balt>=15&balt<=74",
TFstring2="xerwstat\%in\%c(1,2)&balt>=15&balt<=74",each="xnuts2+bsex")
# Haushalte: Quartal und Vorquartal einlesen.
dat <- ImportData(year=2014,quarter=4, comp_diff_lag=1, hh=TRUE)
# Absolutwerte: Anzahl der Hauptmietwohnungen ohne gueltiger Kostenangabe.
GroupSize(dat,TFstring="wrecht==3")
GroupSize(dat,TFstring="wrecht2\%in\%c(1:3)")
}
}
\seealso{
\code{\link{ImportData},\link{IndivImportData},\link{ImportAndMerge},\link{GetLabels},\link{Total},\link{Mean},\link{export}}
}
|
########################
##Trees Classification##
########################
### Author: Yandong Xiong
### Project 3
Tree <- function(dat_train, dat_test, run_tree = FALSE){
# installing and loading required packages
if(!require("rpart")){
install.packages("rpart")
}
if(!require("pROC")){
install.packages("pROC")
}
require(rpart)
require(pROC)
  # run Tree if run_tree == TRUE, otherwise load "tree.RData"
if(run_tree){
# Fit a decision tree using rpart
# Note: when you fit a tree using rpart, the fitting routine automatically
# performs 10-fold CV and stores the errors for later use
# (such as for pruning the tree)
tm_tree <- system.time({tree = rpart(as.factor(label) ~ ., data = dat_train, method = "class")})
save(tree, file = "../output/tree.RData")
save(tm_tree, file = "../output/tm_tree.RData")
}else{
load(file = "../output/tree.RData")
load(file = "../output/tm_tree.RData")
}
# find best value of cost complexity
min_cp = tree$cptable[which.min(tree$cptable[,"xerror"]),"CP"]
# pruning tree using best cp
tree_prune = prune(tree, cp = min_cp)
#
tree_pred.train = predict(tree_prune, dat_train, type = "class")
# make predictions
tm_tree_pred <- system.time({tree_pred = predict(tree_prune, dat_test, type = "class")})
tree_prob_pred = predict(tree_prune, dat_test, type = "prob")
treeROC = roc(dat_test$label, tree_prob_pred[,1])
return(list(tree = tree, tm_tree = tm_tree, min_cp = min_cp, tree_prune = tree_prune, tree_pred.train = tree_pred.train,
tree_pred = tree_pred, tm_tree_pred = tm_tree_pred, tree_prob_pred = tree_prob_pred, treeROC = treeROC))
}
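## Example call (assumes dat_train and dat_test are data frames with a `label`
## column plus feature columns, and that the ../output/ directory exists):
## fit = Tree(dat_train, dat_test, run_tree = TRUE)
## fit$treeROC$auc # AUC of the pruned tree on dat_test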
|
/lib/Trees_Classification.R
|
no_license
|
TZstatsADS/Spring2021-Project3-group1
|
R
| false | false | 1,726 |
r
|
########################
##Trees Classification##
########################
### Author: Yandong Xiong
### Project 3
Tree <- function(dat_train, dat_test, run_tree = FALSE){
# installing and loading required packages
if(!require("rpart")){
install.packages("rpart")
}
if(!require("pROC")){
install.packages("pROC")
}
require(rpart)
require(pROC)
  # run Tree if run_tree == TRUE, otherwise load "tree.RData"
if(run_tree){
# Fit a decision tree using rpart
# Note: when you fit a tree using rpart, the fitting routine automatically
# performs 10-fold CV and stores the errors for later use
# (such as for pruning the tree)
tm_tree <- system.time({tree = rpart(as.factor(label) ~ ., data = dat_train, method = "class")})
save(tree, file = "../output/tree.RData")
save(tm_tree, file = "../output/tm_tree.RData")
}else{
load(file = "../output/tree.RData")
load(file = "../output/tm_tree.RData")
}
# find best value of cost complexity
min_cp = tree$cptable[which.min(tree$cptable[,"xerror"]),"CP"]
# pruning tree using best cp
tree_prune = prune(tree, cp = min_cp)
#
tree_pred.train = predict(tree_prune, dat_train, type = "class")
# make predictions
tm_tree_pred <- system.time({tree_pred = predict(tree_prune, dat_test, type = "class")})
tree_prob_pred = predict(tree_prune, dat_test, type = "prob")
treeROC = roc(dat_test$label, tree_prob_pred[,1])
return(list(tree = tree, tm_tree = tm_tree, min_cp = min_cp, tree_prune = tree_prune, tree_pred.train = tree_pred.train,
tree_pred = tree_pred, tm_tree_pred = tm_tree_pred, tree_prob_pred = tree_prob_pred, treeROC = treeROC))
}
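## Example call (assumes dat_train and dat_test are data frames with a `label`
## column plus feature columns, and that the ../output/ directory exists):
## fit = Tree(dat_train, dat_test, run_tree = TRUE)
## fit$treeROC$auc # AUC of the pruned tree on dat_test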
|
library(data.table)
library(sqldf)
library(ggplot2)
library(maps)
# Load Dataset ####
setwd("/sscc/home/l/lsl575/airline")
fileNames = list.files(path = getwd(), pattern = "*.csv")
alldata = rbindlist(lapply(fileNames, function(x) {
xx = fread(x, header = TRUE, sep = ",", na.strings=c("NA", ''))
}))
str(alldata)
head(alldata)
dim(alldata)
names(alldata)
class(alldata)
# Transform the Dataset ####
alldata = data.frame(alldata)
drop = c("FlightNum", "TailNum", "ActualElapsedTime", "CRSElapsedTime", "AirTime", "Distance",
"TaxiIn", "TaxiOut", "DepTime", "ArrTime")
# Drop variables
data = alldata[, -which(names(alldata) %in% drop)]
names(data)
# Subset data
dataSub=sqldf("SELECT *
FROM data
WHERE (Origin='ORD' OR Origin = 'MDW')
AND (Dest = 'MCO' OR Dest = 'LAX')")
# Save as R dataset
save(dataSub,file="dataSub.Rda")
load("~/airline/dataSub.Rda")
data = dataSub
# Save as csv
write.csv(dataSub, file = "dataSub.csv", row.names = FALSE)
########################## Data Processing#####
#delete NA's
data[is.na(data)] = 0
mydata = data[1:100,]
#data = within(data, dateTime = paste(Year, Month, DayofMonth, sep="-"))
data$date = paste(data$Year, data$Month, data$DayofMonth, sep= "-")
data$date = as.Date(data$date)
data$month = months(data$date)
data$dayofweek = weekdays(data$date)
str(data)
# cancellation code
# reason for cancellation
# (A = carrier, B = weather, C = NAS, D = security)
table(data$CancellationCode)
data[data[,"CancellationCode"] == "0","CancellationCode"] = "NA"
data[data[,"CancellationCode"] == "0","CancellationCode"] = "NA"
data$CancellationCode[data$CancellationCode =="0"] = "NA"
data$CancellationCode[data$CancellationCode =="A"] = "Carrier"
data$CancellationCode[data$CancellationCode =="B"] = "Weather"
data$CancellationCode[data$CancellationCode =="C"] = "NAS"
data$CancellationCode = as.factor(data$CancellationCode)
levels(data$CancellationCode)
unique(data$CancellationCode)
table(data$Cancelled, data$CancellationCode)
table(data$Dest, data$UniqueCarrier)
table(data$Origin, data$UniqueCarrier)
# Subset data
data1 = sqldf("SELECT *
FROM data
WHERE Dest = 'MCO' AND UniqueCarrier <> 'DL'")
str(data1)
table(data1$UniqueCarrier)
data2 =sqldf(" SELECT *, (CancelCount/TotalCount) AS pctCancel
FROM (SELECT Year, Month,UniqueCarrier, cast(SUM(Cancelled) as real) as CancelCount, COUNT(*) as TotalCount
FROM data1
GROUP BY Year, Month,UniqueCarrier)")
table(data2$UniqueCarrier)
head(data1)
ggplot(data2, aes(x=UniqueCarrier, y=pctCancel)) +
geom_boxplot(aes(fill = UniqueCarrier)) +
labs(y = "Percentage of Flighs Cancelled", x = "Airline") +
theme(panel.background = element_rect(fill = "#d8e7eb")) +
guides(fill=FALSE) +
theme(axis.text=element_text(size=18), axis.title=element_text(size=18)) +
theme(strip.text.x = element_text(size = 18), strip.text.y = element_text(size = 18))
############################### Check for later ####
mydata = data[1:100,]
# Create datetime variable
timeVars = c("Year", "Month", "DayofMonth", "DayOfWeek")
for(i in 1:nrow(mydata)){
mydata$dateTime[i]=paste(time2$year[i], "-",time2$month[i], "-1", sep="")
}
# dataSub2$dateTime =
x = ISOdatetime(year = 2014, month = 10, day = 12, hour = 21, min = 12, sec = 0)
weekdays(x)
month(x)
strftime(x, format="%H:%M")
as.Date(x)
as.POSIXct(x)
i = sapply(dataSub2[,c("Year", "DayofMonth")], function(x) class(x))
class(i)
i
# date format
# http://www.statmethods.net/input/dates.html
# http://www.ats.ucla.edu/stat/r/faq/string_dates.htm
# http://stackoverflow.com/questions/9216138/find-the-day-of-a-week-in-r
# http://stackoverflow.com/questions/9749598/r-obtaining-month-and-year-from-a-date
# http://www.quantlego.com/howto/introduction-dates-and-times-in-r/
# convert to dates
i = sapply(mydata[var_date], is.factor)
mydata[,var_date] = as.character(mydata[,var_date])
mydata[,var_date] = as.Date(mydata[,var_date],"%m/%d/%Y")
# create additional variables based on dates
mydata$PurchWeekDay = factor(weekdays(mydata[,var_date]),
levels = c("Monday", "Tuesday",
"Wednesday","Thursday",
"Friday", "Saturday",
"Sunday"),ordered=T)
mydata$PurchMonth = factor(months(mydata[,var_date]),
levels=c("January","February","March",
"April","May","June","July","August","September",
"October","November","December"),ordered=TRUE)
mydata$PurchQuarter = factor(quarters(mydata[,var_date]),
levels = c("Q1","Q2","Q3","Q4"),
ordered=TRUE)
mydata$PurchYear= as.numeric(format(mydata[,var_date], "%Y"))
table(dataSub$CancellationCode)
x = "2100"
y = strptime(x,"%H%M")
strftime(y, format="%H:%M")
str(dataSub)
str(dataSub)
################################### Xiang ####
setwd("/sscc/home/x/xlf278/airline")
origin_mon = read.csv("eastWestOrigins_Mon.csv",header = TRUE, na.strings=c("NA", ''))
origin_mon = origin_mon[,-c(1,11,12,21,22)]
str(origin_mon)
dele_na = origin_mon[,c(21:25)]
na = origin_mon[,-c(21:25)]
dele_na[is.na(dele_na)] <- 0
origin_mon = cbind(na,dele_na)
origin_mon = origin_mon[,-c(19)]
dest_thu = read.csv("/sscc/home/s/swr386/airline/eastWestDest_Thu.csv",header = TRUE, na.strings=c("NA", ''))
dest_thu = dest_thu[,-c(1,11,12,21,22,24)]
str(dest_thu)
dele_na = dest_thu[,c(20:24)]
na = dest_thu[,-c(20:24)]
dele_na[is.na(dele_na)] <- 0
dest_thu = cbind(na,dele_na)
save(dest_thu, file = "dest_thu.RData")
save(origin_mon, file = "origin_mon.RData")
write.csv(dest_thu,"dest_thu.csv")
write.csv(origin_mon,"origin_mon.csv")
################################### Alejandro ####
library(data.table)
library(sqldf)
library(ggplot2)
library(maps)
# Load Dataset ####
setwd("/sscc/home/l/lsl575/airline")
fileNames <- list.files(path = getwd(), pattern = "*.csv")
alldata <- rbindlist(lapply(fileNames, function(x) {
xx <- fread(x, header = TRUE, sep = ",", na.strings=c("NA", ''))
}))
# Transform the Dataset ####
data<-sqldf("SELECT *
FROM alldata
WHERE (Origin='ORD' OR Origin = 'MDW')
AND (Dest = 'MCO' OR Dest = 'LAX')")
head(data,20)
head(test1,20)
backup<-data
data<-backup
#delete NA's
data[is.na(data)] <- 0
dataLate15<-sqldf("SELECT *
FROM data
WHERE ArrDelay>15")
head(dataLate15,10)
test3<-sqldf("select
case
when (CarrierDelay > WeatherDelay AND CarrierDelay > NASDelay AND CarrierDelay > SecurityDelay AND CarrierDelay > LateAircraftDelay) THEN 'CarrierDelay'
WHEN (WeatherDelay > CarrierDelay AND WeatherDelay > NASDelay AND WeatherDelay > SecurityDelay AND WeatherDelay > LateAircraftDelay) THEN 'WeatherDelay'
WHEN (NASDelay > CarrierDelay AND NASDelay > WeatherDelay AND NASDelay > SecurityDelay AND NASDelay > LateAircraftDelay) THEN 'NASDelay'
WHEN (SecurityDelay > CarrierDelay AND SecurityDelay > WeatherDelay AND SecurityDelay > NASDelay AND SecurityDelay > LateAircraftDelay) THEN 'SecurityDelay'
WHEN (LateAircraftDelay > CarrierDelay AND LateAircraftDelay > WeatherDelay AND LateAircraftDelay > SecurityDelay AND LateAircraftDelay >= NASDelay) THEN 'LateAircraftDelay'
ELSE 'NoDelay'
END AS DelayReason
FROM dataLate15")
unique(test3)
dataLate15$DelayReason<-as.matrix(test3)
dataLate15$DelayReason<-as.factor(dataLate15$DelayReason)
# take out the NoDelay ones that have problems (they should not have been there because we removed the NoDelay - probably they had NA's)
update<-sqldf("SELECT *
FROM dataLate15
WHERE DelayReason <> 'NoDelay'")
ggplot(data = update, aes(x = as.factor(Month), fill=factor(DelayReason))) +
geom_bar() +
facet_wrap(~ UniqueCarrier)
# DelayReason from Chicago to LAX ####
dataLate15CHItoLAX<-sqldf("SELECT *
FROM dataLate15
WHERE DelayReason <> 'NoDelay'
AND Dest = 'LAX'")
ggplot(data = dataLate15CHItoLAX, aes(x = as.factor(Month), fill=factor(DelayReason))) +
geom_bar() +
facet_wrap(~ UniqueCarrier)
# DelayReason from MDW to MCO ####
dataLate15CHItoMCO<-sqldf("SELECT *
FROM dataLate15
WHERE DelayReason <> 'NoDelay'
AND Dest = 'MCO'
AND Origin = 'MDW'")
ggplot(data = dataLate15CHItoMCO, aes(x = as.factor(Month), fill=factor(DelayReason))) +
geom_bar(aes(y=..count../sum(..count..))) +
facet_wrap(~ UniqueCarrier) +
theme(panel.background = element_rect(fill = "#d8e7eb"))
# Count of flights from Chicago to LAX and MCO ####
count<-data
count[is.na(count)] <- 0
test1<-sqldf("select
case
WHEN (CarrierDelay > WeatherDelay AND CarrierDelay > NASDelay AND CarrierDelay > SecurityDelay AND CarrierDelay > LateAircraftDelay) THEN 'CarrierDelay'
WHEN (WeatherDelay > CarrierDelay AND WeatherDelay > NASDelay AND WeatherDelay > SecurityDelay AND WeatherDelay > LateAircraftDelay) THEN 'WeatherDelay'
WHEN (NASDelay > CarrierDelay AND NASDelay > WeatherDelay AND NASDelay > SecurityDelay AND NASDelay > LateAircraftDelay) THEN 'NASDelay'
WHEN (SecurityDelay > CarrierDelay AND SecurityDelay > WeatherDelay AND SecurityDelay > NASDelay AND SecurityDelay > LateAircraftDelay) THEN 'SecurityDelay'
WHEN (LateAircraftDelay > CarrierDelay AND LateAircraftDelay > WeatherDelay AND LateAircraftDelay > SecurityDelay AND LateAircraftDelay >= NASDelay) THEN 'LateAircraftDelay'
ELSE 'NoDelay'
END AS DelayReason
FROM count")
count$DelayReason<-as.matrix(test1)
count$DelayReason<-as.factor(count$DelayReason)
test2<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM count")
count$DummyDelay<-as.matrix(test2)
count$DummyDelay<-as.numeric(count$DummyDelay)
unique(count$DummyDelay)
ggplot(data = count, aes(x = as.factor(Dest), fill=factor(DummyDelay))) +
geom_bar(position = "dodge")
# Time series for delay in every airport ####
#By day
timeSeries<-data.frame(data)
test5<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM timeSeries")
timeSeries$DummyDelay<-as.matrix(test5)
timeSeries$DummyDelay<-as.numeric(timeSeries$DummyDelay)
time<-sqldf("select *, (delay/total) as percenDelayed
from (
select Origin, Dest, year, month, DayofMonth, count(*) as total, sum(DummyDelay) as delay
from timeSeries
group by origin, dest, year, month, DayofMonth
)")
unique(time$year)
for(i in 1:nrow(time)){
time$date[i]<-paste(time$year[i], "-",time$month[i], "-", time$DayofMonth[i], sep="")
}
time$date<-as.Date(time$date)
ggplot(data=time, aes(x=date, y=percenDelayed, colour=factor(Dest))) +
geom_line() +
stat_smooth()
# too crowded, so try by month
timeSeries2<-data.frame(data)
test6<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM timeSeries2")
timeSeries2$DummyDelay<-as.matrix(test6)
timeSeries2$DummyDelay<-as.numeric(timeSeries2$DummyDelay)
time2<-sqldf("select *, (delay/total) as percenDelayed
from (
select Origin, Dest, year, month, count(*) as total, sum(DummyDelay) as delay
from timeSeries2
group by origin, dest, year, month
)")
unique(time2$year)
for(i in 1:nrow(time2)){
time2$date[i]<-paste(time2$year[i], "-",time2$month[i], "-1", sep="")
}
time2$date<-as.Date(time2$date)
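# Vectorized alternative to the loop above (same result, no explicit loop):
# time2$date <- as.Date(paste(time2$year, time2$month, 1, sep = "-"))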
ggplot(data=time2, aes(x=date, y=percenDelayed, colour=factor(Dest))) +
geom_line() +
stat_smooth()
#Separating for every route
for(i in 1:nrow(time2)){
time2$route[i]<-paste("From: ",time2$Origin[i], " To: ",time2$Dest[i], sep="")
}
time2$route<-as.factor(time2$route)
ggplot(data=time2, aes(x=date, y=percenDelayed, colour=factor(route))) +
geom_line() +
stat_smooth() +
ylim(0,0.5)
ggplot(data=time2, aes(x=date, y=percenDelayed, colour=factor(route))) +
stat_smooth(se=F, size=2) +
ylim(0,0.4) +
labs(y = "Percentage of Flighs Delayed",
x = "Time") +
theme(panel.background = element_rect(fill = "#d8e7eb")) +
theme(legend.key = element_rect(fill = "#d8e7eb")) +
scale_colour_manual(values = c("#c163c1","#800080","#FFA500","#FF4040"),
breaks = c("From: ORD To: LAX", "From: ORD To: MCO",
"From: MDW To: LAX", "From: MDW To: MCO"),
name = "Flight Routes") +
theme(axis.text=element_text(size=18),
axis.title=element_text(size=18))
# Maps ####
#load us map data
all_states <- map_data("state")
#plot all states with ggplot
p <- ggplot()
p <- p + geom_polygon( data=all_states, aes(x=long, y=lat, group = group),colour="white", fill="grey10" )
p
mydata <- read.csv("/sscc/home/l/lsl575/airline/geo/geo.csv", header=TRUE, row.names=1, sep=",")
mapData1<-data
mapData2<-sqldf("select Origin, Dest, count(*) AS count
from mapData1
where (Origin = 'ORD' AND Dest = 'MCO')
OR (Origin = 'ORD' AND Dest = 'LAX')
OR (Origin = 'MDW' AND Dest = 'MCO')
OR (Origin = 'MDW' AND Dest = 'LAX')
Group by Origin, Dest
Order by Dest")
#plot all the states and the data
p <- ggplot()
p <- p + geom_polygon( data=all_states, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long, y=lat, size = (NumFlights)*5), color="coral1") + scale_size(name="Number of Flights")
p <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long, y=lat, label=label), colour="gold2", size=6 )
p
# for individual states
CA <- subset(all_states, region %in% 'california' )
p <- ggplot()
p <- p + geom_polygon( data=CA, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long[4], y=lat[4], size = enrollment[4]), color="coral1") + scale_size(name="Total enrollment")
p1 <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long[4], y=lat[4], label=label[4]), colour="gold2", size=4 )
p1
FL <- subset(all_states, region %in% 'florida' )
p <- ggplot()
p <- p + geom_polygon( data=FL, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long[1], y=lat[1], size = enrollment[1]), color="coral1") + scale_size(name="Total enrollment")
p2 <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long[1], y=lat[1], label=label[1]), colour="gold2", size=4 )
p2
IL <- subset(all_states, region %in% 'illinois' )
p <- ggplot()
p <- p + geom_polygon( data=IL, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long[2:3], y=lat[2:3], size = enrollment[2:3]), color="coral1") + scale_size(name="Total enrollment")
p3 <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long[2:3], y=lat[2:3], label=label[2:3]), colour="gold2", size=4 )
p3
# Distribution of ArrDelay in both MCO and LAX ####
distribution <- sqldf("select *
from data
where Dest = 'MCO' OR Dest='LAX'") #same as the data
distribution <- data
test8<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
WHEN (ArrDelay<0) THEN 2
ELSE 0
END AS DummyDelay
FROM distribution")
distribution$DummyDelay<-as.matrix(test8)
distribution$DummyDelay<-as.numeric(distribution$DummyDelay)
ggplot(distribution, aes(x=ArrDelay, fill = factor(DummyDelay))) +
geom_histogram(aes(y=..count../sum(..count..)),binwidth = 1) +
geom_vline(xintercept = 15, colour="black", linetype = "longdash") +
geom_vline(xintercept = 0, colour="red") +
facet_grid(Origin ~ Dest) +
xlim(-50,250) +
theme(panel.background = element_rect(fill = "#d8e7eb")) +
scale_fill_manual(values=c("#408740", "#df2c2c", "#56B4E9"),
name="Legend",
breaks=c("0", "1", "2"),
labels=c("On-time", "Delayed Arrival", "Early Arrival")) +
labs(y = "Percentage of counts",
x = "Time (in minutes)") +
theme(axis.text=element_text(size=18),
axis.title=element_text(size=18)) +
theme(strip.text.x = element_text(size = 18),
strip.text.y = element_text(size = 18))
ggplot(distribution, aes(x=ArrDelay, fill = factor(DummyDelay))) +
geom_density() +
geom_vline(xintercept = 15, colour="green", linetype = "longdash") +
geom_vline(xintercept = 0, colour="red") +
facet_grid(Origin ~ Dest) +
xlim(-100,250)
#preserving marginal densities (aka count)
#ggplot(distribution, aes(x=ArrDelay, y=..count.., fill = factor(DummyDelay))) +
ggplot(distribution, aes(x=ArrDelay, y=..scale.., fill = factor(DummyDelay))) +
geom_density() +
geom_vline(xintercept = 15, colour="green", linetype = "longdash") +
geom_vline(xintercept = 0, colour="red") +
facet_grid(Origin ~ Dest) +
xlim(-100,250)
#check<-sqldf("select * from distribution where ArrDelay >= 250")
# Distribution of ArrDelay in LAX ####
distributionLAX<-sqldf("select *
from distribution
where Dest = 'LAX'")
ggplot(distributionLAX, aes(x=ArrDelay)) +
geom_histogram(binwidth = 1) +
geom_vline(xintercept = 15, colour="green", linetype = "longdash")
# Distribution of ArrDelay in MCO ####
distributionMCO<-sqldf("select *
from distribution
where Dest = 'MCO'")
ggplot(distributionMCO, aes(x=ArrDelay, fill = factor(DummyDelay))) +
geom_histogram(binwidth = 3) +
geom_vline(xintercept = 15, colour="green", linetype = "longdash") +
xlim(-50,300)
# Calendar ####
#From Midway to MCO
calendar<-distribution
test9<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM calendar")
calendar$DummyDelay<-as.matrix(test9)
calendar$DummyDelay<-as.numeric(calendar$DummyDelay)
totalDelay<-sqldf("select count(*) AS total
from calendar
where Origin = 'MDW' AND Dest = 'MCO'
AND Year>= 2003
group by Month, DayOfWeek")
calendarDelay<-sqldf("select Month, DayOfWeek, count(DummyDelay) AS CountDelay
from calendar
where Origin = 'MDW' AND Dest = 'MCO'
AND Year>= 2003
AND DummyDelay=1
group by Month, DayOfWeek")
calendarDelay$Total<-as.matrix(totalDelay)
calendarDelay$Total<-as.numeric(calendarDelay$Total)
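# Note: attaching the totals this way assumes both queries return the
# Month x DayOfWeek groups in the same order; joining on Month and DayOfWeek
# would be more robust.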
percDelay<-sqldf("select (CountDelay/Total) AS percDelay
from calendarDelay")
calendarDelay$percDelay<-as.matrix(percDelay)
calendarDelay$percDelay<-as.numeric(calendarDelay$percDelay)
ggplot(calendarDelay, aes(factor(Month), DayOfWeek)) +
geom_tile(aes(fill = percDelay), colour = "white") +
scale_fill_gradient(low = "#F7FBFF", high = "#08306B", name = "Percentage of\nDelayed Flights") +
labs(x = "Month",
y = "Day of the Week") +
theme(panel.background = element_rect(fill = "white")) +
scale_x_discrete(breaks=c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"),
labels=c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December")) +
scale_y_discrete(breaks=c("1", "2", "3", "4", "5", "6", "7"),
labels=c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")) +
theme(axis.title.x = element_text(size=20),
axis.text.x = element_text(size = 18, angle=90, vjust=0.5),
axis.title.y = element_text(size=20),
axis.text.y = element_text(size = 18)) +
theme(legend.text = element_text(size = 16),
legend.title = element_text(size = 16))
|
/airline/airline.R
|
no_license
|
linshiu/projects
|
R
| false | false | 20,461 |
r
|
library(data.table)
library(sqldf)
library(ggplot2)
library(maps)
# Load Dataset ####
setwd("/sscc/home/l/lsl575/airline")
fileNames = list.files(path = getwd(), pattern = "*.csv")
alldata = rbindlist(lapply(fileNames, function(x) {
xx = fread(x, header = TRUE, sep = ",", na.strings=c("NA", ''))
}))
str(alldata)
head(alldata)
dim(alldata)
names(alldata)
class(alldata)
# Transform the Dataset ####
alldata = data.frame(alldata)
drop = c("FlightNum", "TailNum", "ActualElapsedTime", "CRSElapsedTime", "AirTime", "Distance",
"TaxiIn", "TaxiOut", "DepTime", "ArrTime")
# Drop variables
data = alldata[, -which(names(alldata) %in% drop)]
names(data)
# Subset data
dataSub=sqldf("SELECT *
FROM data
WHERE (Origin='ORD' OR Origin = 'MDW')
AND (Dest = 'MCO' OR Dest = 'LAX')")
# Save as R dataset
save(dataSub,file="dataSub.Rda")
load("~/airline/dataSub.Rda")
data = dataSub
# Save as csv
write.csv(dataSub, file = "dataSub.csv", row.names = FALSE)
########################## Data Processing#####
#delete NA's
data[is.na(data)] = 0
mydata = data[1:100,]
#data = within(data, dateTime = paste(Year, Month, DayofMonth, sep="-"))
data$date = paste(data$Year, data$Month, data$DayofMonth, sep= "-")
data$date = as.Date(data$date)
data$month = months(data$date)
data$dayofweek = weekdays(data$date)
str(data)
# cancellation code
# reason for cancellation
# (A = carrier, B = weather, C = NAS, D = security)
table(data$CancellationCode)
data[data[,"CancellationCode"] == "0","CancellationCode"] = "NA"
data[data[,"CancellationCode"] == "0","CancellationCode"] = "NA"
data$CancellationCode[data$CancellationCode =="0"] = "NA"
data$CancellationCode[data$CancellationCode =="A"] = "Carrier"
data$CancellationCode[data$CancellationCode =="B"] = "Weather"
data$CancellationCode[data$CancellationCode =="C"] = "NAS"
data$CancellationCode = as.factor(data$CancellationCode)
levels(data$CancellationCode)
unique(data$CancellationCode)
table(data$Cancelled, data$CancellationCode)
table(data$Dest, data$UniqueCarrier)
table(data$Origin, data$UniqueCarrier)
# Subset data
data1 = sqldf("SELECT *
FROM data
WHERE Dest = 'MCO' AND UniqueCarrier <> 'DL'")
str(data1)
table(data1$UniqueCarrier)
data2 =sqldf(" SELECT *, (CancelCount/TotalCount) AS pctCancel
FROM (SELECT Year, Month,UniqueCarrier, cast(SUM(Cancelled) as real) as CancelCount, COUNT(*) as TotalCount
FROM data1
GROUP BY Year, Month,UniqueCarrier)")
table(data2$UniqueCarrier)
head(data1)
ggplot(data2, aes(x=UniqueCarrier, y=pctCancel)) +
geom_boxplot(aes(fill = UniqueCarrier)) +
labs(y = "Percentage of Flighs Cancelled", x = "Airline") +
theme(panel.background = element_rect(fill = "#d8e7eb")) +
guides(fill=FALSE) +
theme(axis.text=element_text(size=18), axis.title=element_text(size=18)) +
theme(strip.text.x = element_text(size = 18), strip.text.y = element_text(size = 18))
############################### Check for later ####
mydata = data[1:100,]
# Create datetime variable
timeVars = c("Year", "Month", "DayofMonth", "DayOfWeek")
for(i in 1:nrow(mydata)){
mydata$dateTime[i]=paste(time2$year[i], "-",time2$month[i], "-1", sep="")
}
# dataSub2$dateTime =
x = ISOdatetime(year = 2014, month = 10, day = 12, hour = 21, min = 12, sec = 0)
weekdays(x)
month(x)
strftime(x, format="%H:%M")
as.Date(x)
as.POSIXct(x)
i = sapply(dataSub2[,c("Year", "DayofMonth")], function(x) class(x))
class(i)
i
# date format
# http://www.statmethods.net/input/dates.html
# http://www.ats.ucla.edu/stat/r/faq/string_dates.htm
# http://stackoverflow.com/questions/9216138/find-the-day-of-a-week-in-r
# http://stackoverflow.com/questions/9749598/r-obtaining-month-and-year-from-a-date
# http://www.quantlego.com/howto/introduction-dates-and-times-in-r/
# convert to dates
i = sapply(mydata[var_date], is.factor)
mydata[,var_date] = as.character(mydata[,var_date])
mydata[,var_date] = as.Date(mydata[,var_date],"%m/%d/%Y")
# create additional variables based on dates
mydata$PurchWeekDay = factor(weekdays(mydata[,var_date]),
levels = c("Monday", "Tuesday",
"Wednesday","Thursday",
"Friday", "Saturday",
"Sunday"),ordered=T)
mydata$PurchMonth = factor(months(mydata[,var_date]),
levels=c("January","February","March",
"April","May","June","July","August","September",
"October","November","December"),ordered=TRUE)
mydata$PurchQuarter = factor(quarters(mydata[,var_date]),
levels = c("Q1","Q2","Q3","Q4"),
ordered=TRUE)
mydata$PurchYear= as.numeric(format(mydata[,var_date], "%Y"))
table(dataSub$CancellationCode)
x = "2100"
y = strptime(x,"%H%M")
strftime(y, format="%H:%M")
str(dataSub)
str(dataSub)
################################### Xiang ####
setwd("/sscc/home/x/xlf278/airline")
origin_mon = read.csv("eastWestOrigins_Mon.csv",header = TRUE, na.strings=c("NA", ''))
origin_mon = origin_mon[,-c(1,11,12,21,22)]
str(origin_mon)
dele_na = origin_mon[,c(21:25)]
na = origin_mon[,-c(21:25)]
dele_na[is.na(dele_na)] <- 0
origin_mon = cbind(na,dele_na)
origin_mon = origin_mon[,-c(19)]
dest_thu = read.csv("/sscc/home/s/swr386/airline/eastWestDest_Thu.csv",header = TRUE, na.strings=c("NA", ''))
dest_thu = dest_thu[,-c(1,11,12,21,22,24)]
str(dest_thu)
dele_na = dest_thu[,c(20:24)]
na = dest_thu[,-c(20:24)]
dele_na[is.na(dele_na)] <- 0
dest_thu = cbind(na,dele_na)
save(dest_thu, file = "dest_thu.RData")
save(origin_mon, file = "origin_mon.RData")
write.csv(dest_thu,"dest_thu.csv")
write.csv(origin_mon,"origin_mon.csv")
################################### Alejandro ####
library(data.table)
library(sqldf)
library(ggplot2)
library(maps)
# Load Dataset ####
setwd("/sscc/home/l/lsl575/airline")
fileNames <- list.files(path = getwd(), pattern = "*.csv")
alldata <- rbindlist(lapply(fileNames, function(x) {
xx <- fread(x, header = TRUE, sep = ",", na.strings=c("NA", ''))
}))
# Transform the Dataset ####
data<-sqldf("SELECT *
FROM alldata
WHERE (Origin='ORD' OR Origin = 'MDW')
AND (Dest = 'MCO' OR Dest = 'LAX')")
head(data,20)
head(test1,20)
backup<-data
data<-backup
#delete NA's
data[is.na(data)] <- 0
dataLate15<-sqldf("SELECT *
FROM data
WHERE ArrDelay>15")
head(dataLate15,10)
test3<-sqldf("select
case
when (CarrierDelay > WeatherDelay AND CarrierDelay > NASDelay AND CarrierDelay > SecurityDelay AND CarrierDelay > LateAircraftDelay) THEN 'CarrierDelay'
WHEN (WeatherDelay > CarrierDelay AND WeatherDelay > NASDelay AND WeatherDelay > SecurityDelay AND WeatherDelay > LateAircraftDelay) THEN 'WeatherDelay'
WHEN (NASDelay > CarrierDelay AND NASDelay > WeatherDelay AND NASDelay > SecurityDelay AND NASDelay > LateAircraftDelay) THEN 'NASDelay'
WHEN (SecurityDelay > CarrierDelay AND SecurityDelay > WeatherDelay AND SecurityDelay > NASDelay AND SecurityDelay > LateAircraftDelay) THEN 'SecurityDelay'
WHEN (LateAircraftDelay > CarrierDelay AND LateAircraftDelay > WeatherDelay AND LateAircraftDelay > SecurityDelay AND LateAircraftDelay >= NASDelay) THEN 'LateAircraftDelay'
ELSE 'NoDelay'
END AS DelayReason
FROM dataLate15")
unique(test3)
dataLate15$DelayReason<-as.matrix(test3)
dataLate15$DelayReason<-as.factor(dataLate15$DelayReason)
# take out the NoDelay ones that have problems (they should not have been there because we removed the NoDelay - probably they had NA's)
update<-sqldf("SELECT *
FROM dataLate15
WHERE DelayReason <> 'NoDelay'")
ggplot(data = update, aes(x = as.factor(Month), fill=factor(DelayReason))) +
geom_bar() +
facet_wrap(~ UniqueCarrier)
# DelayReason from Chicago to LAX ####
dataLate15CHItoLAX<-sqldf("SELECT *
FROM dataLate15
WHERE DelayReason <> 'NoDelay'
AND Dest = 'LAX'")
ggplot(data = dataLate15CHItoLAX, aes(x = as.factor(Month), fill=factor(DelayReason))) +
geom_bar() +
facet_wrap(~ UniqueCarrier)
# DelayReason from MDW to MCO ####
dataLate15CHItoMCO<-sqldf("SELECT *
FROM dataLate15
WHERE DelayReason <> 'NoDelay'
AND Dest = 'MCO'
AND Origin = 'MDW'")
ggplot(data = dataLate15CHItoMCO, aes(x = as.factor(Month), fill=factor(DelayReason))) +
geom_bar(aes(y=..count../sum(..count..))) +
facet_wrap(~ UniqueCarrier) +
theme(panel.background = element_rect(fill = "#d8e7eb"))
# Count of flights from Chicago to LAX and MCO ####
count<-data
count[is.na(count)] <- 0
test1<-sqldf("select
case
WHEN (CarrierDelay > WeatherDelay AND CarrierDelay > NASDelay AND CarrierDelay > SecurityDelay AND CarrierDelay > LateAircraftDelay) THEN 'CarrierDelay'
WHEN (WeatherDelay > CarrierDelay AND WeatherDelay > NASDelay AND WeatherDelay > SecurityDelay AND WeatherDelay > LateAircraftDelay) THEN 'WeatherDelay'
WHEN (NASDelay > CarrierDelay AND NASDelay > WeatherDelay AND NASDelay > SecurityDelay AND NASDelay > LateAircraftDelay) THEN 'NASDelay'
WHEN (SecurityDelay > CarrierDelay AND SecurityDelay > WeatherDelay AND SecurityDelay > NASDelay AND SecurityDelay > LateAircraftDelay) THEN 'SecurityDelay'
WHEN (LateAircraftDelay > CarrierDelay AND LateAircraftDelay > WeatherDelay AND LateAircraftDelay > SecurityDelay AND LateAircraftDelay >= NASDelay) THEN 'LateAircraftDelay'
ELSE 'NoDelay'
END AS DelayReason
FROM count")
count$DelayReason<-as.matrix(test1)
count$DelayReason<-as.factor(count$DelayReason)
test2<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM count")
count$DummyDelay<-as.matrix(test2)
count$DummyDelay<-as.numeric(count$DummyDelay)
unique(update2$DummyDelay)
ggplot(data = count, aes(x = as.factor(Dest), fill=factor(DummyDelay))) +
geom_bar(position = "dodge")
# Time series for delay in every airport ####
#By day
timeSeries<-data.frame(data)
test5<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM timeSeries")
timeSeries$DummyDelay<-as.matrix(test5)
timeSeries$DummyDelay<-as.numeric(timeSeries$DummyDelay)
time<-sqldf("select *, (delay/total) as percenDelayed
from (
select Origin, Dest, year, month, DayofMonth, count(*) as total, sum(DummyDelay) as delay
from timeSeries
group by origin, dest, year, month, DayofMonth
)")
unique(time$year)
for(i in 1:nrow(time)){
time$date[i]<-paste(time$year[i], "-",time$month[i], "-", time$DayofMonth[i], sep="")
}
time$date<-as.Date(time$date)
ggplot(data=time, aes(x=date, y=percenDelayed, colour=factor(Dest))) +
geom_line() +
stat_smooth()
#too crowded, so try by month
timeSeries2<-data.frame(data)
test6<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM timeSeries2")
timeSeries2$DummyDelay<-as.matrix(test6)
timeSeries2$DummyDelay<-as.numeric(timeSeries2$DummyDelay)
time2<-sqldf("select *, (delay/total) as percenDelayed
from (
select Origin, Dest, year, month, count(*) as total, sum(DummyDelay) as delay
from timeSeries2
group by origin, dest, year, month
)")
unique(time2$year)
for(i in 1:nrow(time2)){
time2$date[i]<-paste(time2$year[i], "-",time2$month[i], "-1", sep="")
}
time2$date<-as.Date(time2$date)
ggplot(data=time2, aes(x=date, y=percenDelayed, colour=factor(Dest))) +
geom_line() +
stat_smooth()
#Separating for every route
for(i in 1:nrow(time2)){
time2$route[i]<-paste("From: ",time2$Origin[i], " To: ",time2$Dest[i], sep="")
}
time2$route<-as.factor(time2$route)
ggplot(data=time2, aes(x=date, y=percenDelayed, colour=factor(route))) +
geom_line() +
stat_smooth() +
ylim(0,0.5)
ggplot(data=time2, aes(x=date, y=percenDelayed, colour=factor(route))) +
stat_smooth(se=F, size=2) +
ylim(0,0.4) +
  labs(y = "Percentage of Flights Delayed",
x = "Time") +
theme(panel.background = element_rect(fill = "#d8e7eb")) +
theme(legend.key = element_rect(fill = "#d8e7eb")) +
scale_colour_manual(values = c("#c163c1","#800080","#FFA500","#FF4040"),
breaks = c("From: ORD To: LAX", "From: ORD To: MCO",
"From: MDW To: LAX", "From: MDW To: MCO"),
name = "Flight Routes") +
theme(axis.text=element_text(size=18),
axis.title=element_text(size=18))
# Maps ####
#load us map data
all_states <- map_data("state")
#plot all states with ggplot
p <- ggplot()
p <- p + geom_polygon( data=all_states, aes(x=long, y=lat, group = group),colour="white", fill="grey10" )
p
mydata <- read.csv("/sscc/home/l/lsl575/airline/geo/geo.csv", header=TRUE, row.names=1, sep=",")
mapData1<-data
mapData2<-sqldf("select Origin, Dest, count(*) AS count
from mapData1
where (Origin = 'ORD' AND Dest = 'MCO')
OR (Origin = 'ORD' AND Dest = 'LAX')
OR (Origin = 'MDW' AND Dest = 'MCO')
OR (Origin = 'MDW' AND Dest = 'LAX')
Group by Origin, Dest
Order by Dest")
#plot all the states and the data
p <- ggplot()
p <- p + geom_polygon( data=all_states, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long, y=lat, size = (NumFlights)*5), color="coral1") + scale_size(name="Number of Flights")
p <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long, y=lat, label=label), colour="gold2", size=6 )
p
# for individual states
CA <- subset(all_states, region %in% 'california' )
p <- ggplot()
p <- p + geom_polygon( data=CA, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long[4], y=lat[4], size = enrollment[4]), color="coral1") + scale_size(name="Total enrollment")
p1 <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long[4], y=lat[4], label=label[4]), colour="gold2", size=4 )
p1
FL <- subset(all_states, region %in% 'florida' )
p <- ggplot()
p <- p + geom_polygon( data=FL, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long[1], y=lat[1], size = enrollment[1]), color="coral1") + scale_size(name="Total enrollment")
p2 <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long[1], y=lat[1], label=label[1]), colour="gold2", size=4 )
p2
IL <- subset(all_states, region %in% 'illinois' )
p <- ggplot()
p <- p + geom_polygon( data=IL, aes(x=long, y=lat, group = group),colour="white" )
p <- p + geom_point( data=mydata, aes(x=long[2:3], y=lat[2:3], size = enrollment[2:3]), color="coral1") + scale_size(name="Total enrollment")
p3 <- p + geom_text( data=mydata, hjust=0.5, vjust=-0.5, aes(x=long[2:3], y=lat[2:3], label=label[2:3]), colour="gold2", size=4 )
p3
# Distribution of ArrDelay in both MCO and LAX ####
distribution <- sqldf("select *
from data
where Dest = 'MCO' OR Dest='LAX'") #same as the data
distribution <- data
test8<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
WHEN (ArrDelay<0) THEN 2
ELSE 0
END AS DummyDelay
FROM distribution")
distribution$DummyDelay<-as.matrix(test8)
distribution$DummyDelay<-as.numeric(distribution$DummyDelay)
ggplot(distribution, aes(x=ArrDelay, fill = factor(DummyDelay))) +
geom_histogram(aes(y=..count../sum(..count..)),binwidth = 1) +
geom_vline(xintercept = 15, colour="black", linetype = "longdash") +
geom_vline(xintercept = 0, colour="red") +
facet_grid(Origin ~ Dest) +
xlim(-50,250) +
theme(panel.background = element_rect(fill = "#d8e7eb")) +
scale_fill_manual(values=c("#408740", "#df2c2c", "#56B4E9"),
name="Legend",
breaks=c("0", "1", "2"),
labels=c("On-time", "Delayed Arrival", "Early Arrival")) +
labs(y = "Percentage of counts",
x = "Time (in minutes)") +
theme(axis.text=element_text(size=18),
axis.title=element_text(size=18)) +
theme(strip.text.x = element_text(size = 18),
strip.text.y = element_text(size = 18))
ggplot(distribution, aes(x=ArrDelay, fill = factor(DummyDelay))) +
geom_density() +
geom_vline(xintercept = 15, colour="green", linetype = "longdash") +
geom_vline(xintercept = 0, colour="red") +
facet_grid(Origin ~ Dest) +
xlim(-100,250)
#preserving marginal densities (aka count)
#ggplot(distribution, aes(x=ArrDelay, y=..count.., fill = factor(DummyDelay))) +
ggplot(distribution, aes(x=ArrDelay, y=..scale.., fill = factor(DummyDelay))) +
geom_density() +
geom_vline(xintercept = 15, colour="green", linetype = "longdash") +
geom_vline(xintercept = 0, colour="red") +
facet_grid(Origin ~ Dest) +
xlim(-100,250)
#check<-sqldf("select * from distribution where ArrDelay >= 250")
# Distribution of ArrDelay in LAX ####
distributionLAX<-sqldf("select *
from distribution
where Dest = 'LAX'")
ggplot(distributionLAX, aes(x=ArrDelay)) +
geom_histogram(binwidth = 1) +
geom_vline(xintercept = 15, colour="green", linetype = "longdash")
# Distribution of ArrDelay in MCO ####
distributionMCO<-sqldf("select *
from distribution
where Dest = 'MCO'")
ggplot(distributionMCO, aes(x=ArrDelay, fill = factor(DummyDelay))) +
geom_histogram(binwidth = 3) +
geom_vline(xintercept = 15, colour="green", linetype = "longdash") +
xlim(-50,300)
# Calendar ####
#From Midway to MCO
calendar<-distribution
test9<-sqldf("select
case
WHEN (ArrDelay>15) THEN 1
ELSE 0
END AS DummyDelay
FROM calendar")
calendar$DummyDelay<-as.matrix(test9)
calendar$DummyDelay<-as.numeric(calendar$DummyDelay)
totalDelay<-sqldf("select count(*) AS total
from calendar
where Origin = 'MDW' AND Dest = 'MCO'
AND Year>= 2003
group by Month, DayOfWeek")
calendarDelay<-sqldf("select Month, DayOfWeek, count(DummyDelay) AS CountDelay
from calendar
where Origin = 'MDW' AND Dest = 'MCO'
AND Year>= 2003
AND DummyDelay=1
group by Month, DayOfWeek")
calendarDelay$Total<-as.matrix(totalDelay)
calendarDelay$Total<-as.numeric(calendarDelay$Total)
percDelay<-sqldf("select (CountDelay/Total) AS percDelay
from calendarDelay")
calendarDelay$percDelay<-as.matrix(percDelay)
calendarDelay$percDelay<-as.numeric(calendarDelay$percDelay)
ggplot(calendarDelay, aes(factor(Month), DayOfWeek)) +
geom_tile(aes(fill = percDelay), colour = "white") +
scale_fill_gradient(low = "#F7FBFF", high = "#08306B", name = "Percentage of\nDelayed Flights") +
labs(x = "Month",
y = "Day of the Week") +
theme(panel.background = element_rect(fill = "white")) +
scale_x_discrete(breaks=c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"),
labels=c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December")) +
scale_y_discrete(breaks=c("1", "2", "3", "4", "5", "6", "7"),
labels=c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday")) +
theme(axis.title.x = element_text(size=20),
axis.text.x = element_text(size = 18, angle=90, vjust=0.5),
axis.title.y = element_text(size=20),
axis.text.y = element_text(size = 18)) +
theme(legend.text = element_text(size = 16),
legend.title = element_text(size = 16))
|
## this code was made as part of a peer-graded project
# load the data
rm(list=ls())
#leave that alone
setwd("E:/career/Dataa/github/Exploratory-ana-project-R")
getwd()
path <- getwd()
list.files(path)
if (file.exists("data/household_power_consumption.txt")){
print("you are good to go")
} else {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
}
library("data.table")
#read in the data
## df <- read.table("data/household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?")
df <- data.table::fread(input = "data/household_power_consumption.txt", na.strings="?")
head(df)
#We will only be using data from the dates 2007-02-01 and 2007-02-02. One alternative is to read the data from
#just those dates rather than reading in the entire dataset and subsetting to those dates.
#You may find it useful to convert the Date and Time variables to Date/Time classes in R using the strptime() and as.Date() functions.
df$datetime <- paste(df$Date,df$Time)
df$Date <- as.Date(df$Date, "%d/%m/%Y")
df <- subset(df, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
df$datetime <- as.POSIXct(df$datetime,format = "%d/%m/%Y %H:%M:%S" )
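# Alternative sketch (added, not part of the original script): read only the two needed
# days at load time instead of reading the full file and subsetting afterwards. This
# assumes a system where data.table::fread can pipe through grep (Linux/macOS); the
# column names are re-attached from the header row.
# header <- names(data.table::fread("data/household_power_consumption.txt", nrows = 0))
# df_small <- data.table::fread(cmd = "grep '^[12]/2/2007' data/household_power_consumption.txt",
#                               sep = ";", header = FALSE, na.strings = "?", col.names = header)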
#######################
png("plot1.png", width = 480, height = 480 , units = "px")
hist(df$Global_active_power, col = "red", xlab="Global Active Power (kilowatts)")
dev.off()
################
png("plot2.png", width = 480, height = 480 , units = "px")
plot(x = df$datetime, y = df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
##################
png("plot3.png", width = 480, height = 480 , units = "px")
plot(df$datetime, df$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(df$datetime, df$Sub_metering_2,col="red")
lines(df$datetime, df$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"),legend = c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
dev.off()
##############
png("plot4.png", width = 480, height = 480 , units = "px")
par(mfrow = c(2,2))
plot(x = df$datetime, y = df$Global_active_power, type="l", xlab="", ylab="Global Active Power")
plot(x = df$datetime, y = df$Voltage , type="l" , xlab="datetime", ylab="Voltage" )
plot(df$datetime, df$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(df$datetime, df$Sub_metering_2,col="red")
lines(df$datetime, df$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"),legend = c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
plot(x = df$datetime, y = df$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/coursera Data Science Specialization/4- Exploratory Data Analysis/Exploratory-ana-project-R/project 1/R code.R
|
no_license
|
Ibn-mohey/Practice-R
|
R
| false | false | 2,819 |
r
|
## Get Data Quiz 1
## Q1
library(data.table)
setwd("C:/downloads/Coursera/getdata-032")
if(!file.exists("data")){dir.create("data")}
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
## Windows version of download
download.file(fileURL,destfile = "./data/UScommunities2006microdata.csv",method="wininet")
dateDownloaded <- date()
dateDownloaded
list.files("data")
idaho2<- fread("./data/UScommunities2006microdata.csv")
class(idaho2)
## Q3
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
## Windows version of download
download.file(fileURL,destfile = "./data/NatGasAquProg.xlsx",mode='wb')
library(xlsx)
library(XLConnect)
dateDownloaded <- date()
dateDownloaded
list.files("data")
dat <- readWorksheetFromFile("./data/NatGasAquProg.xlsx",sheet=1,startRow=18,endCol=15)
dat <- read.xlsx("./data/NatGasAquProg.xlsx",sheetIndex=1,rowIndex=18:23,colIndex=7:15)
class(dat)
dat <- as.data.table(dat)
class(dat)
##install.packages("xlsx")
q3<-sum(dat$Zip*dat$Ext,na.rm=T)
q3
## ---------------- Question 4
##Baltimore Restaurants
fileurl4="https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
download.file(fileurl4,destfile = "./data/restaurants.xml",mode='wb')
rest <- "./data/restaurants.xml"
## install.packages("XML",dependencies = TRUE)
library(XML)
doc1<-xmlParse(rest)
rootNode <- xmlRoot(doc1)
xmlName(rootNode)
zipcodes<-xpathSApply(rootNode,"//zipcode",xmlValue)
count = zipcodes[zipcodes=="21231"]
length(count)
## Q5 ----
fileURL5 = "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
DT <- fread(fileURL5)
system.time({rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]})
system.time({mean(DT[DT$SEX==1,]$pwgtp15); mean(DT[DT$SEX==2,]$pwgtp15)})
system.time(tapply(DT$pwgtp15,DT$SEX,mean))
system.time(DT[,mean(pwgtp15),by=SEX])
system.time(mean(DT$pwgtp15,by=DT$SEX))
system.time(sapply(split(DT$pwgtp15,DT$SEX),mean))
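## Note (added): of the timing options above, DT[,mean(pwgtp15),by=SEX] and the
## tapply()/split() versions compute the grouped means directly; system.time() is
## used here only to compare how fast each approach runs.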
|
/Non-Project/quiz1.R
|
no_license
|
n4ybn/getdata-032
|
R
| false | false | 1,961 |
r
|
derived.amps <- function (b.x,b.y,retention){
ampx <- b.x
ampy <- sqrt(b.y^2+retention^2)
split.angle <- atan2(sqrt(ampy^2-retention^2),ampx)
split.angle <- split.angle*180/pi
hysteresis.y <- retention/b.y
hysteresis.x <- 1/sqrt(1+(b.y/retention)^2)
return(c(split.angle,hysteresis.x,hysteresis.y,ampx,ampy))
}
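# Example usage (added sketch with made-up values, not from the package docs):
# derived.amps(b.x = 2, b.y = 1, retention = 0.5)
# returns c(split.angle, hysteresis.x, hysteresis.y, ampx, ampy) for those inputs.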
|
/hysteresis/R/derived.amps.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 334 |
r
|
#Problem37
library(primes)
left_truncatable = function(x){
x = as.character(x)
if(nchar(x)==1&is_prime(as.numeric(x))) TRUE
else if(is_prime(as.numeric(x))) left_truncatable(substr(x, 2, nchar(x)))
else FALSE
}
right_truncatable = function(x){
x = as.character(x)
if(nchar(x)==1&is_prime(as.numeric(x))) TRUE
else if(is_prime(as.numeric(x))) right_truncatable(substr(x, 1, nchar(x)-1))
else FALSE
}
primes = generate_primes(10, 1000000)
truncs = NULL
for(i in 1:length(primes)){
print(primes[i])
if(left_truncatable(primes[i])&right_truncatable(primes[i])) truncs = c(truncs, primes[i])
if(length(truncs)==11) break
}
print(truncs)
print(sum(truncs))
|
/scripts/37.R
|
no_license
|
GrahamQuee/ProjectEuler
|
R
| false | false | 677 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/esWinsorize.R
\name{esWinsorize}
\alias{esWinsorize}
\title{esWinsorize}
\usage{
esWinsorize(es, subtractcovar = FALSE, log = FALSE, covar, base = "",
intervals = c(0.1, 0.9))
}
\arguments{
\item{es}{expression set %% ~~Describe \code{es} here~~}
\item{subtractcovar}{If the base is specified and subtractcovar is set to
TRUE, the function finds the base level of the selected covariate, applies the
normalization for that covariate, and then scales the entire assay data
(expression values) to the reference base on a log scale.}
\item{log}{if log is set to TRUE, the data are transformed to 2 raised to
the power of the expression values}
\item{covar}{the covariate of interest}
\item{base}{the level used as a reference for scaling}
\item{intervals}{the winsorizing process caps outliers at the selected
interval bounds, where 0.1 is the 10th percentile value and 0.9 is the 90th
percentile value; these act as the floor and ceiling}
}
\description{
Winsorization creates upper and lower bounds to reduce the influence of
outliers: if a value lies past either bound, it is given the constant value at
that bound rather than its real, outlying value.
}
\examples{
#results <- esWinsorize(eset, subtractcovar = FALSE, log = FALSE, covar="group_of_interest", intervals = c(0.1, 0.9))
#results
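# Generic illustration of winsorizing a numeric vector at the 10th/90th percentiles
# (added sketch in base R; it does not call esWinsorize itself):
# x <- rnorm(100)
# bounds <- quantile(x, c(0.1, 0.9))
# x_wins <- pmin(pmax(x, bounds[1]), bounds[2])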
}
\references{
"An Introduction to Bioconductor's ExpressionSet Class" \cr Seth
Falcon, Martin Morgan, and Robert Gentleman \cr 6 October, 2006; revised 9
February, 2007 \cr
}
\author{
Shahab Asgharzadeh
}
\keyword{~kwd1}
\keyword{~kwd2}
|
/man/esWinsorize.Rd
|
no_license
|
genomelab/esFunctions
|
R
| false | true | 1,646 |
rd
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 3.62489632210691e-217, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613111927-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 257 |
r
|
#! /usr/bin/Rscript --vanilla --default-packages=base,datasets,utils,stats,edgeR,grDevices,graphics
library(edgeR)
args <- commandArgs(TRUE)
if (length(args) < 5)
{
cat("\nERROR: Insufficient command line arguments. 5 arguments required\n\n")
cat("edgeR_single_factor datafile c1 g1 c2 g2\n\n")
cat(" datafile = CSV file: sample IDs in row 1, protein IDs in column 1.\n")
cat(" Samples grouped by treatment.\n")
cat(" c1 = first column for treatment group 1\n")
cat(" g1 = number of treatment in treatment group 1\n")
cat(" c2 = first column for treatment group 2\n")
cat(" g2 = number of treatment in treatment group 2\n")
quit()
}
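#Example invocation (added, hypothetical file name and column layout):
# Rscript edger_singlefactor.r counts.csv 2 3 5 3
# i.e. treatment group 1 in columns 2-4 and treatment group 2 in columns 5-7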
#Parse Command Line Arguments
filename <- args[1]
c1 <- as.numeric(args[2])
g1 <- as.numeric(args[3])
c2 <- as.numeric(args[4])
g2 <- as.numeric(args[5])
#Read Data from formatted CSV file
datafile <- read.csv(file=filename, head = TRUE)
datafile.counts = cbind(datafile[, c1:(c1+g1-1)],datafile[, c2:(c2+g2-1)])
datafile.genes = datafile[, 1]
#create vector with labels for each sample group ID
#create DGEList
datafile.dgelist <- DGEList(counts=datafile.counts, group=c(rep(0,g1),rep(1,g2)), genes = datafile.genes)
#filter data so that a row must have >= 5 counts
#cat(paste("\nProteins before filtering = ",dim(datafile.dgelist),"\n"))
datafile.dgelist <- datafile.dgelist[rowSums(datafile.dgelist$counts) >= 5, ]
#cat(paste("Proteins after filtering = ",dim(datafile.dgelist),"\n"))
#Normalize data
cat("Normalizing data\n")
datafile.dgelist <- calcNormFactors(datafile.dgelist)
#datafile.dgelist$samples
#Estimate Common Dispersion
datafile.dgelist <- estimateCommonDisp(datafile.dgelist)
#datafile.dgelist$samples$lib.size
#datafile.dgelist$common.lib.size
cat(paste("common dispersion =", datafile.dgelist$common.dispersion,"\n"))
#create MDS plot
MDSPDFfile <- paste(filename,date(),"MDS.pdf")
pdf(file=MDSPDFfile)
plotMDS(datafile.dgelist, main="MDS Plot")
dev.off()
#perform exact test to determine proteins differentially expressed
datafile.dgelist.results <- exactTest(datafile.dgelist)
#format tags for SMEAR MVA plot
options(digits = 4)
#topTags(datafile.dgelist.results )
#perform multiple comparison correction
BH.mt.adj <- decideTestsDGE(datafile.dgelist.results , adjust.method="BH", p.value=0.05)
#BH.mt.adj
#create SMEAR plot
top <- topTags(datafile.dgelist.results , n = sum(datafile.dgelist.results $table$PValue < 0.05))
detags <- rownames(top$table)
smearPDFfile <- paste(filename,date(),"smear.pdf")
pdf(file=smearPDFfile )
plotSmear(datafile.dgelist.results , de.tags = detags, main = "FC Smear Plot",cex=0.5)
abline(h = c(-1, 1), col = "dodgerblue")
dev.off()
#format final results for output
final_results = cbind(datafile.dgelist$counts,datafile.dgelist.results$table,BH.mt.adj)
rownames(final_results) <- datafile.dgelist.results$genes[,1]
final_results.mtcorrected = final_results[final_results$BH.mt.adj != 0 , ]
#Create MV plot
MVPDFfile <- paste(filename,date(),"mv.pdf")
pdf(file=MVPDFfile )
plotMeanVar(datafile.dgelist)
dev.off()
#output final results
outfile = paste(filename,date(),".csv")
write.table(final_results[order(final_results$PValue) ,] , file=outfile,sep=",")
#create Heatmap for top protein IDs
head(as.matrix(final_results.mtcorrected[, 1:(g1+g2)]))
HMPDFfile <- paste(filename,date(),"HM.pdf")
pdf(file=HMPDFfile )
heatmap(as.matrix(final_results.mtcorrected[, 1:(g1+g2)]))
abline(h = c(-1, 1), col = "dodgerblue")
dev.off()
|
/Resources/r/edger_singlefactor.r
|
no_license
|
DebianDan/Proteomics_Workbench
|
R
| false | false | 3,534 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_gp_impute_core.R
\name{generate_gp_ppg_predictions}
\alias{generate_gp_ppg_predictions}
\title{Internal \code{ibiVizEdit} utility for selecting maximum a posteriori estimates of corrupted PPG data}
\usage{
generate_gp_ppg_predictions(model_outputs = NULL, gp_driver = NULL)
}
\arguments{
\item{model_outputs}{output object generated from the imputation run}
\item{gp_driver}{a \code{list} object used to define the imputation problem and includes hyperparameters}
}
\value{
output \code{data.frame} that upsamples based on the Gaussian process imputation model
}
\description{
Internal \code{ibiVizEdit} utility for selecting maximum a posteriori estimates of corrupted PPG data
}
|
/man/generate_gp_ppg_predictions.Rd
|
no_license
|
dr-consulting/ibi_VizEdit
|
R
| false | true | 764 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cigar_partition.R
\name{cigar_partition}
\alias{cigar_partition}
\title{cigar partitioning}
\usage{
cigar_partition(bamfile)
}
\arguments{
\item{bamfile}{the path to the BAM file}
}
\description{
cigar partitioning
}
|
/man/cigar_partition.Rd
|
no_license
|
compbiocore/qckitalign
|
R
| false | true | 295 |
rd
|
library(shiny)
library(shinydashboard) # assumed: box() used below comes from shinydashboard
library(leaflet)
library(rgdal)
library(ggplot2)
#in this page, we'll clean up data and assign it to variables that can be referenced in the server and ui R pages.
poli16 <- read.csv("1976-2016-president.csv")
poli20 <- read.csv("1976-2016-president.csv")
#Here, we set up for 2020 election results
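# NOTE (added): poli20 above currently re-reads the same 1976-2016 file as poli16;
# swap in the 2020 results file here once it is available (filename not given in the original).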
years <- list(year2016 = poli16,
year2020 = poli20)
politicalclimate <-
fluidRow(
box(
width = 12,
leafletOutput("PoliticalClimateOvertime"),
selectInput("electionyear", "Election Year", c("year2016", "year2020"))
)
)
|
/political-climate.R
|
no_license
|
WL-Biol185-ShinyProjects/COVID-and-Race-analysis
|
R
| false | false | 581 |
r
|
library(shiny)
library(xml2)
library(ggplot2)
library(plotly)
getData <- function (year, bond1_input, bond2_input){
baseUrl = "https://home.treasury.gov/resource-center/data-chart-center/interest-rates/pages/xml?data=daily_treasury_yield_curve"
url <- paste0(baseUrl,"&field_tdr_date_value=",toString(year))
download_xml(url, file = "TYC.xml")
doc <- read_xml("TYC.xml")
entries <- xml_find_all(doc, ".//m:properties")
param1 = paste("d:BC_", bond1_input, sep="")
param2 = paste("d:BC_", bond2_input, sep="")
date <- xml_text((xml_find_first(entries, "d:NEW_DATE")))
return <- xml_text((xml_find_first(entries, param1)))
df1 <- data.frame(date, return)
df1$date <- as.Date(df1$date)
df1$return <- as.numeric(as.character(df1$return))
return <- xml_text((xml_find_first(entries, param2)))
df2 <- data.frame(date, return)
df2$date <- as.Date(df2$date)
df2$return <- as.numeric(as.character(df2$return))
d <- merge(df1, df2, by="date")
colnames(d) <- c("Date", "Bond1", "Bond2")
d
}
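# Example usage (added sketch; the maturity codes are assumed to match the Treasury
# feed's BC_ field suffixes, e.g. "1MONTH", "2YEAR", "10YEAR"):
# d <- getData(2020, "2YEAR", "10YEAR")
# head(d)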
function (input, output) {
df <- reactive({
bond_data <- getData(input$year, input$bond1, input$bond2)
})
output$stats<- renderPlotly({
plot_ly(df(), x= ~Date, y= ~Bond1, type='scatter', mode = 'lines', name=input$bond1) %>%
add_trace(y = ~Bond2, name = input$bond2) %>%
layout(xaxis = list(title="DATE"), yaxis = list(title="RATE"))
})
}
|
/server.R
|
no_license
|
soitknows/Treasury-Yield-Curve
|
R
| false | false | 1,403 |
r
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/block_matrix.R
\name{block_matrix}
\alias{block_matrix}
\title{Block matrix}
\usage{
block_matrix(x, s)
}
\arguments{
\item{x}{a square matrix}
\item{s}{the dimensions (width = height) of the block matrix being selected over the diagonal of matrix x}
}
\description{
Block matrix
}
|
/man/block_matrix.Rd
|
no_license
|
memed01/gvc
|
R
| false | false | 375 |
rd
|
#create a dev app for the twitter api to get credentials for access at dev.twitter.com/apps first
#load the httr library
library(httr)
myApp = oauth_app("twitter",
key="keyfromapp", secret="secretfromapp")
sig = sign_oauth1.0(myApp, token=
"tokenfromapp", token_secret="secretfromapp")
homeTL = GET("https://api.twitter.com/1.1/statuses/home_timeline.json", sig)
#load the jsonlite library to read the json file
library(jsonlite)
json1 = content(homeTL)
json2 = jsonlite::fromJSON(toJSON(json1))
json2[1,1:4]
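# Added note: once parsed, individual fields can be inspected, e.g. the tweet text
# (assuming the standard home_timeline payload includes a "text" field):
# head(json2$text)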
|
/Read_From_API.R
|
no_license
|
rdewolf127/Tinker-R
|
R
| false | false | 576 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/labelOverlapMeasures.R
\name{labelOverlapMeasures}
\alias{labelOverlapMeasures}
\title{labelOverlapMeasures}
\usage{
labelOverlapMeasures(sourceLabelImage, targetLabelImage)
}
\arguments{
\item{sourceLabelImage}{label image for source image.}
\item{targetLabelImage}{label image for target/reference image.}
}
\value{
data frame with overlap measures
}
\description{
Wrapper for the ANTs function LabelOverlapMeasures. More documentation
available here:
}
\details{
\url{https://www.insight-journal.org/browse/publication/707}
}
\examples{
sourceImage <- antsImageRead( getANTsRData( "r16" ), 2 )
sourceSegmentation <- kmeansSegmentation( sourceImage, 3 )$segmentation
referenceImage <- antsImageRead( getANTsRData( "r16" ), 2 )
referenceSegmentation <- kmeansSegmentation( referenceImage, 3 )$segmentation
overlap <- labelOverlapMeasures( sourceSegmentation, referenceSegmentation )
}
\author{
Avants BB, Tustison NJ
}
|
/man/labelOverlapMeasures.Rd
|
permissive
|
ANTsX/ANTsR
|
R
| false | true | 1,001 |
rd
|
#######################################################################################################
# #
#               OutilsCN: Definition of the functions specific to notional accounts ("comptes notionnels")             #
# #
#######################################################################################################
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsMS.R")) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsPensIPP.R")) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/DefVarRetr_Destinie.R")) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsLeg.R")) )
source( (paste0(cheminsource,"Modele/Outils/OutilsRetraite/OutilsRetr.R")) )
###### Function UseOptCN
# Specifies the additional options when the "CN" option has been requested with UseOpt.
# There are two arguments. The first is the starting year of the switchover.
# The second is a character string, following the same principles as in UseOpt,
# which may contain the following elements:
# Examples:
#UseOptCN(2010,c("immediat", "rg"))
UseOptCN <- function(liste=c())
{
OptionsCN <<- c()
for (i in 1:length(liste))
{
OptionsCN[i] <<- tolower(liste[i])
if (!(is.element(OptionsCN[i],c("valocot","discount","noassimilcn","noavpfcn","nomdacn","nobonifcn","nomccn"))))
{
print (paste("Attention : option '",OptionsCN[i],"' inconnue"))
}
}
}
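# Example call (added sketch, matching the current one-argument signature and the
# option names checked above):
# UseOptCN(c("valocot","nomdacn"))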
##### Function UseConv
# Updates the conversion coefficients when the scheme uses notional accounts.
# There are two calling modes. The first mode should be used when one wants
# conversion coefficients based on current life expectancy.
# Computes the conversion coefficients between agemin and agemax (equal to 60 and 70
# years) on the basis of the mortality of year t.
# Example: UseConv(60,70,t)
setwd((paste0(cheminsource,"Modele/Parametres/Demographie")))
age_max <- 121
quotient <- array(0,dim=c(2,121,200))
survie <- array(0,dim=c(2,121,200))
survie[1,,1:160] <- read_ap("SHnew.csv")
survie[2,,1:160] <- read_ap("SFnew.csv")
quotient[1,,1:160] <- read_ap("QHnew.csv")
quotient[2,,1:160] <- read_ap("QFnew.csv")
UseConv <- function(agemin,agemax,t)
{
  # if (t>=AnneeDepartCN) # Removed: did not seem useful
{
    # Coefficients defined by mortality (assumes that the prospective revaluation
    # coefficients are defined at least up to 2050)
    taux_croi <- RendementCNPrev[t]-(RevaloCN[t]-1) # NB: Revalo and the rendement are not expressed in the same unit!
for (a in (agemin:agemax))
{
CoeffConv[a] <<-0
for (u in (a:120))
{
CoeffConv[a] <<- CoeffConv[a] + ((1+taux_croi)**(-u+a))*(survie[1,u,t]+survie[2,u,t])
}
CoeffConv[a] <<- (survie[1,40,t]+survie[2,40,t])/CoeffConv[a]
#print (c(a,CoeffConv[a]))
}
    # Coefficients outside the bounds
CoeffConv[0:(agemin-1)] <<- 0
CoeffConv[(agemax+1):120] <<- CoeffConv[agemax]
}
}
##### Function PointsCN
# Computes the points credited to the individual account acquired by individual i at date t.
PointsCN <- function(i,t,plafond)
{
points_cn_pri <<- 0
points_cn_fp <<- 0
points_cn_ind <<- 0
points_cn_nc <<- 0
points_mccn <<- 0
  # Compute the cumulative CN points, depending on the scope of the new scheme
if (t>=AnneeDepartCN)
{
for (a in (30:t))
{
# print(c("a",a,points_cn_pri ))
      # Apply the rate of return to the contributions credited to the account: TO BE DISCUSSED
points_cn_pri <<- points_cn_pri*(1+RendementCN[a-1])
points_cn_ind <<- points_cn_ind*(1+RendementCN[a-1])
points_cn_fp <<- points_cn_fp* (1+RendementCN[a-1])
points_cn_nc <<- points_cn_nc* (1+RendementCN[a-1])
if (is.element("valocot",OptionsCN))
{
if (a < AnneeDepartCN) {points_cn_pri <<- points_cn_pri + CotRetTotAnn(i,a)}
        # added to the private-sector points by default (is a breakdown by scheme needed?)
}
if (is.element("discount",OptionsCN))
{
if (a==(AnneeDepartCN+1))
{
# print (c("a",i,a,points_cn_pri))
points_cn_pri <<- points_cn_pri*0.95
points_cn_ind <<- points_cn_ind*0.95
points_cn_fp <<- points_cn_fp*0.95
points_cn_nc <<- points_cn_nc*0.95
# print (c("b",i,a,points_cn_pri))
}
}
      # CN points for periods in employment.
if (statut[i,a]%in% c(cadreCN,non_cadreCN))
{
points_cn_pri <<- points_cn_pri + TauxCotCN[a]*min(salaire[i,a],plafond*PlafondSS[a])
# print(c(plafond,min(salaire[i,a],plafond*PlafondSS[a])))
}
else if (statut[i,a]%in% indepCN)
{
points_cn_ind<<-points_cn_ind + TauxCotCN[a]*min(salaire[i,a],plafond*PlafondSS[a])
}
else if (statut[i,a]%in% c(fonct_aCN,fonct_sCN))
{
points_cn_fp <<- points_cn_fp+ TauxCotCN[a]*min(salaire[i,a],plafond*PlafondSS[a])
}
      # Non-contributory CN benefits
if ((statut[i,a]==chomeurCN) & (!(is.element("noassimilcn",OptionsCN))))
{
#print(c("chom",i,a,points_cn_nc ))
        # Contribution base: last wage or the SMIC, capped at 4 SMIC.
liste <- which(salaire[i,1:(a-1)]>0)
salref <- salaire[i,liste[length(liste)]]
salref <- min(max(salref,SMIC[a]),4*PlafondSS[a])
#print(c("chom1",a, points_cn_nc ))
points_cn_nc <<- points_cn_nc+ TauxCotCN[115]*salref
#print(c("chom2",i,a, points_cn_nc ))
}
# AVPF
if ((statut[i,a]%in% avpfCN) & (!(is.element("noavpfcn",OptionsCN))))
{
#print(c("avpf",a))
        # Contribution base: SMIC of the current year.
points_cn_nc <<- points_cn_nc+ TauxCotCN[115]*SMIC[a]
}
      # MDA (when a birth occurs in year a)
if ((sexe[i]==2) &(is.element(a,t_naiss[enf[i,]])) & (!(is.element("nomdacn",OptionsCN))))
{
#print(c(1,"mda",a,points_cn_nc))
        # Contribution base: average of the wages of the previous years or the SMIC, capped at 4 SMIC.
#liste <- which(is.element(statut[i,1:a],codes_occCN))
# salref<-min(max(mean(salaire[i,liste]),SMIC[a]),4*SMIC[a])
salref <- min(max(salaire[i,(a-1)],salaire[i,(a-2)],SMIC[a]),4*PlafondSS[a])
points_cn_nc <<- points_cn_nc + TauxCotCN[115]*salref*2
#print(c(2,"mda",a,points_cn_nc))
}
#print(c(points_cn_nc,a))
    } # End of the loop over a
    #Pension bonus (bonification pour pension):
if (!(is.element("nobonifcn",OptionsCN)))
{
if (n_enf[i]>2)
{
points_cn_pri<<- 1.10*points_cn_pri
points_cn_fp <<- 1.10*points_cn_fp
points_cn_ind<<- 1.10*points_cn_ind
points_cn_nc <<- 1.10*points_cn_nc
}
}
    # Computation of the MICO:
points_cn_cont <- points_cn_pri + points_cn_fp + points_cn_ind
    seuil<-MinVieil1[t]/CoeffConv[60] # seuil: number of points needed to reach the minimum old-age pension
    pfd <- SMIC[t]/CoeffConv[60] # pfd: ceiling, number of points at which the MICO equals 0
txmc <- 0
txmc<-affn(points_cn_cont,c(0,seuil,pfd),c(0.1,0.1,0))
points_mccn<<-txmc*points_cn_cont
}
}
|
/Modele/Outils/OutilsRetraite/OutilsCN.R
|
no_license
|
philippechataignon/pensipp
|
R
| false | false | 7,036 |
r
|
"train" <-
function(x, ...){
UseMethod("train")
}
train.default <- function(x, y,
method = "rf",
preProcess = NULL,
...,
weights = NULL,
metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric == "RMSE", FALSE, TRUE),
trControl = trainControl(),
tuneGrid = NULL,
tuneLength = 3) {
startTime <- proc.time()
if(is.list(method)) {
minNames <- c("library", "type", "parameters", "grid",
"fit", "predict", "prob")
nameCheck <- minNames %in% names(method)
if(!all(nameCheck)) stop(paste("some required components are missing:",
paste(minNames[!nameCheck], collapse = ", ")))
models <- method
method <- "custom"
} else {
models <- getModelInfo(method, regex = FALSE)[[1]]
if (length(models) == 0)
stop(paste("Model", method, "is not in caret's built-in library"))
}
checkInstall(models$library)
for(i in seq(along = models$library)) do.call("require", list(package = models$library[i]))
paramNames <- as.character(models$parameters$parameter)
funcCall <- match.call(expand.dots = TRUE)
modelType <- if(is.factor(y)) "Classification" else "Regression"
if(!(modelType %in% models$type)) stop(paste("wrong model type for", tolower(modelType)))
if(grepl("^svm", method) & grepl("String$", method)) {
if(is.vector(x) && is.character(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.matrix(x) && is.numeric(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.data.frame(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
}
if(modelType != "Classification" & !is.null(trControl$sampling))
stop("sampling methods are only implemented for classification problems")
if(!is.null(trControl$sampling)) {
trControl$sampling <- parse_sampling(trControl$sampling)
}
if(any(class(x) == "data.table")) x <- as.data.frame(x)
stopifnot(length(y) > 1)
stopifnot(nrow(x) > 1)
stopifnot(nrow(x) == length(y))
## TODO add check method and execute here
## Some models that use RWeka start multiple threads and this conflicts with multicore:
if(any(search() == "package:doMC") && getDoParRegistered() && "RWeka" %in% models$library)
warning("Models using Weka will not work with parallel processing with multicore/doMC")
flush.console()
if(!is.null(preProcess) && !(all(preProcess %in% ppMethods)))
stop(paste('pre-processing methods are limited to:', paste(ppMethods, collapse = ", ")))
if(modelType == "Classification") {
## We should get and save the class labels to ensure that predictions are coerced
## to factors that have the same levels as the original data. This is especially
## important with multiclass systems where one or more classes have low sample sizes
## relative to the others
classLevels <- levels(y)
if(trControl$classProbs && any(classLevels != make.names(classLevels))) {
warning(paste("At least one of the class levels are not valid R variables names;",
"This may cause errors if class probabilities are generated because",
"the variables names will be converted to:",
paste(make.names(classLevels), collapse = ", ")))
}
if(metric %in% c("RMSE", "Rsquared"))
stop(paste("Metric", metric, "not applicable for classification models"))
if(trControl$classProbs) {
if(!is.function(models$prob)) {
warning("Class probabilities were requested for a model that does not implement them")
trControl$classProbs <- FALSE
}
}
} else {
if(metric %in% c("Accuracy", "Kappa"))
stop(paste("Metric", metric, "not applicable for regression models"))
classLevels <- NA
if(trControl$classProbs) {
warning("cannnot compute class probabilities for regression")
trControl$classProbs <- FALSE
}
}
if(trControl$method == "oob" & !(method %in% oob_mods))
stop(paste("for oob error rates, model bust be one of:",
paste(oob_mods, sep = "", collapse = ", ")))
## If they don't exist, make the data partitions for the resampling iterations.
if(is.null(trControl$index)) {
if(trControl$method == "custom")
stop("'custom' resampling is appropriate when the `trControl` argument `index` is used")
trControl$index <- switch(tolower(trControl$method),
oob = NULL,
none = list(seq(along = y)),
alt_cv =, cv = createFolds(y, trControl$number, returnTrain = TRUE),
repeatedcv =, adaptive_cv = createMultiFolds(y, trControl$number, trControl$repeats),
loocv = createFolds(y, length(y), returnTrain = TRUE),
boot =, boot632 =, adaptive_boot = createResample(y, trControl$number),
test = createDataPartition(y, 1, trControl$p),
adaptive_lgocv =, lgocv = createDataPartition(y, trControl$number, trControl$p),
timeslice = createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$train,
subsemble = subsemble_index(y, V = trControl$number, J = trControl$repeats))
} else {
index_types <- unlist(lapply(trControl$index, is.integer))
if(!isTRUE(all(index_types)))
stop("`index` should be lists of integers.")
if(!is.null(trControl$indexOut)) {
index_types <- unlist(lapply(trControl$indexOut, is.integer))
if(!isTRUE(all(index_types)))
stop("`indexOut` should be lists of integers.")
}
}
if(trControl$method == "subsemble") {
if(!trControl$savePredictions) trControl$savePredictions <- TRUE
trControl$indexOut <- trControl$index$holdout
trControl$index <- trControl$index$model
}
  ## Create hold-out indices
if(is.null(trControl$indexOut) & trControl$method != "oob"){
if(tolower(trControl$method) != "timeslice") {
trControl$indexOut <- lapply(trControl$index,
function(training, allSamples) allSamples[-unique(training)],
allSamples = seq(along = y))
names(trControl$indexOut) <- prettySeq(trControl$indexOut)
} else {
trControl$indexOut <- createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$test
}
}
if(trControl$method != "oob" & is.null(trControl$index)) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$index))) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$indexOut))) names(trControl$indexOut) <- prettySeq(trControl$indexOut)
# if(!is.data.frame(x)) x <- as.data.frame(x)
## Gather all the pre-processing info. We will need it to pass into the grid creation
## code so that there is a concordance between the data used for modeling and grid creation
if(!is.null(preProcess)) {
ppOpt <- list(options = preProcess)
if(length(trControl$preProcOptions) > 0) ppOpt <- c(ppOpt,trControl$preProcOptions)
} else ppOpt <- NULL
## If no default training grid is specified, get one. We have to pass in the formula
## and data for some models (rpart, pam, etc - see manual for more details)
if(is.null(tuneGrid)) {
if(!is.null(ppOpt) && length(models$parameters$parameter) > 1 && as.character(models$parameters$parameter) != "parameter") {
pp <- list(method = ppOpt$options)
if("ica" %in% pp$method) pp$n.comp <- ppOpt$ICAcomp
if("pca" %in% pp$method) pp$thresh <- ppOpt$thresh
if("knnImpute" %in% pp$method) pp$k <- ppOpt$k
pp$x <- x
ppObj <- do.call("preProcess", pp)
tuneGrid <- models$grid(predict(ppObj, x), y, tuneLength)
rm(ppObj, pp)
} else tuneGrid <- models$grid(x, y, tuneLength)
}
dotNames <- hasDots(tuneGrid, models)
if(dotNames) colnames(tuneGrid) <- gsub("^\\.", "", colnames(tuneGrid))
## Check tuning parameter names
tuneNames <- as.character(models$parameters$parameter)
goodNames <- all.equal(sort(tuneNames), sort(names(tuneGrid)))
if(!is.logical(goodNames) || !goodNames) {
stop(paste("The tuning parameter grid should have columns",
paste(tuneNames, collapse = ", ", sep = "")))
}
if(trControl$method == "none" && nrow(tuneGrid) != 1)
stop("Only one model should be specified in tuneGrid with no resampling")
## In case prediction bounds are used, compute the limits. For now,
## store these in the control object since that gets passed everywhere
trControl$yLimits <- if(is.numeric(y)) extendrange(y) else NULL
if(trControl$method != "none") {
##------------------------------------------------------------------------------------------------------------------------------------------------------#
## For each tuning parameter combination, we will loop over them, fit models and generate predictions.
## We only save the predictions at this point, not the models (and in the case of method = "oob" we
    ## only save the prediction summaries at this stage).
## trainInfo will hold the information about how we should loop to train the model and what types
## of parameters are used.
## There are two types of methods to build the models: "basic" means that each tuning parameter
    ## combination requires its own model fit and "seq" where a single model fit can be used to
## get predictions for multiple tuning parameters.
    ## The tuneScheme() function is in misc.R and it helps define the following:
## - A data frame called "loop" with columns for parameters and a row for each model to be fit.
## For "basic" models, this is the same as the tuning grid. For "seq" models, it is only
## the subset of parameters that need to be fit
## - A list called "submodels". If "basic", it is NULL. For "seq" models, it is a list. Each list
## item is a data frame of the parameters that need to be varied for the corresponding row of
## the loop object.
##
## For example, for a gbm model, our tuning grid might be:
## .interaction.depth .n.trees .shrinkage
## 1 50 0.1
## 1 100 0.1
## 2 50 0.1
## 2 100 0.1
## 2 150 0.1
##
## For this example:
##
## loop:
## .interaction.depth .shrinkage .n.trees
## 1 0.1 100
## 2 0.1 150
##
## submodels:
## [[1]]
## .n.trees
## 50
##
## [[2]]
## .n.trees
## 50
## 100
##
## A simplified version of predictionFunction() would have the following gbm section:
##
## # First get the predictions with the value of n.trees as given in the current
## # row of loop
## out <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = modelFit$tuneValue$.n.trees)
##
## # param is the current value of submodels. In normal prediction mode (i.e
## # when using predict.train), param = NULL. When called within train()
## # with this model, it will have the other values for n.trees.
## # In this case, the output of the function is a list of predictions
## # These values are deconvoluted in workerTasks() in misc.R
## if(!is.null(param))
## {
## tmp <- vector(mode = "list", length = nrow(param) + 1)
## tmp[[1]] <- out
##
## for(j in seq(along = param$.n.trees))
## {
    ##     tmp[[j + 1]] <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = param$.n.trees[j])
## }
## out <- tmp
##
# paramCols <- paste(".", as.character(models$parameters$parameter), sep = "")
if(is.function(models$loop) && nrow(tuneGrid) > 1){
trainInfo <- models$loop(tuneGrid)
if(!all(c("loop", "submodels") %in% names(trainInfo)))
stop("The 'loop' function should produce a list with elements 'loop' and 'submodels'")
} else trainInfo <- list(loop = tuneGrid)
## Set or check the seeds when needed
if(is.null(trControl$seeds)) {
seeds <- vector(mode = "list", length = length(trControl$index))
seeds <- lapply(seeds, function(x) sample.int(n = 1000000, size = nrow(trainInfo$loop)))
seeds[[length(trControl$index) + 1]] <- sample.int(n = 1000000, size = 1)
trControl$seeds <- seeds
} else {
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) {
## check versus number of tasks
numSeeds <- unlist(lapply(trControl$seeds, length))
badSeed <- (length(trControl$seeds) < length(trControl$index) + 1) ||
(any(numSeeds[-length(numSeeds)] < nrow(trainInfo$loop)))
if(badSeed) stop(paste("Bad seeds: the seed object should be a list of length",
length(trControl$index) + 1, "with",
length(trControl$index), "integer vectors of size",
nrow(trainInfo$loop), "and the last list element having a",
"single integer"))
}
}
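    ## Illustrative sketch (not from the original source): a seeds object that passes the
    ## check above for, e.g., 10 resamples and a 3-row tuning grid could be built as
    ##   seeds <- c(lapply(1:10, function(i) sample.int(1e6, 3)), list(sample.int(1e6, 1)))
    ##   trainControl(method = "cv", number = 10, seeds = seeds)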
if(trControl$method == "oob") {
## delay this test until later
perfNames <- metric
} else {
## run some data thru the summary function and see what we get
testSummary <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(testSummary)
}
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
if(trControl$method == "oob"){
tmp <- oobTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp
perfNames <- colnames(performance)
perfNames <- perfNames[!(perfNames %in% as.character(models$parameters$parameter))]
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
} else {
if(trControl$method == "LOOCV"){
tmp <- looTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
} else {
if(!grepl("adapt", trControl$method)){
tmp <- nominalTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
resampleResults <- tmp$resample
} else {
tmp <- adaptiveWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess,
ctrl = trControl,
lev = classLevels,
metric = metric,
maximize = maximize,
...)
performance <- tmp$performance
resampleResults <- tmp$resample
}
}
}
## TODO we used to give resampled results for LOO
if(!(trControl$method %in% c("LOOCV", "oob"))) {
if(modelType == "Classification" && length(grep("^\\cell", colnames(resampleResults))) > 0) {
resampledCM <- resampleResults[, !(names(resampleResults) %in% perfNames)]
resampleResults <- resampleResults[, -grep("^\\cell", colnames(resampleResults))]
#colnames(resampledCM) <- gsub("^\\.", "", colnames(resampledCM))
} else resampledCM <- NULL
} else resampledCM <- NULL
if(trControl$verboseIter) {
cat("Aggregating results\n")
flush.console()
}
perfCols <- names(performance)
perfCols <- perfCols[!(perfCols %in% paramNames)]
if(all(is.na(performance[, metric]))) {
cat(paste("Something is wrong; all the", metric, "metric values are missing:\n"))
print(summary(performance[, perfCols[!grepl("SD$", perfCols)], drop = FALSE]))
stop("Stopping")
}
## Sort the tuning parameters from least complex to most complex
if(!is.null(models$sort)) performance <- models$sort(performance)
if(any(is.na(performance[, metric])))
warning("missing values found in aggregated results")
if(trControl$verboseIter && nrow(performance) > 1) {
cat("Selecting tuning parameters\n")
flush.console()
}
## select the optimal set
selectClass <- class(trControl$selectionFunction)[1]
## Select the "optimal" tuning parameter.
if(grepl("adapt", trControl$method)) {
perf_check <- subset(performance, .B == max(performance$.B))
} else perf_check <- performance
## Make adaptive only look at parameters with B = max(B)
if(selectClass == "function") {
bestIter <- trControl$selectionFunction(x = perf_check,
metric = metric,
maximize = maximize)
}
else {
if(trControl$selectionFunction == "oneSE") {
bestIter <- oneSE(perf_check,
metric,
length(trControl$index),
maximize)
} else {
bestIter <- do.call(trControl$selectionFunction,
list(x = perf_check,
metric = metric,
maximize = maximize))
}
}
if(is.na(bestIter) || length(bestIter) != 1) stop("final tuning parameters could not be determined")
if(grepl("adapt", trControl$method)) {
best_perf <- perf_check[bestIter,as.character(models$parameters$parameter),drop = FALSE]
performance$order <- 1:nrow(performance)
bestIter <- merge(performance, best_perf)$order
performance$order <- NULL
}
## Based on the optimality criterion, select the tuning parameter(s)
bestTune <- performance[bestIter, paramNames, drop = FALSE]
} else {
bestTune <- tuneGrid
performance <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(performance)
performance <- as.data.frame(t(performance))
performance <- cbind(performance, tuneGrid)
performance <- performance[-1,,drop = FALSE]
tmp <- resampledCM <- NULL
}
## Save some or all of the resampling summary metrics
if(!(trControl$method %in% c("LOOCV", "oob", "none"))) {
byResample <- switch(trControl$returnResamp,
none = NULL,
all = {
out <- resampleResults
colnames(out) <- gsub("^\\.", "", colnames(out))
out
},
final = {
out <- merge(bestTune, resampleResults)
out <- out[,!(names(out) %in% names(tuneGrid)), drop = FALSE]
out
})
} else {
byResample <- NULL
}
# names(bestTune) <- paste(".", names(bestTune), sep = "")
## Reorder rows of performance
orderList <- list()
for(i in seq(along = paramNames)) orderList[[i]] <- performance[,paramNames[i]]
names(orderList) <- paramNames
performance <- performance[do.call("order", orderList),]
if(trControl$verboseIter) {
bestText <- paste(paste(names(bestTune), "=",
format(bestTune, digits = 3)),
collapse = ", ")
if(nrow(performance) == 1) bestText <- "final model"
cat("Fitting", bestText, "on full training set\n")
flush.console()
}
## Make the final model based on the tuning results
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) set.seed(trControl$seeds[[length(trControl$seeds)]][1])
finalTime <- system.time(
finalModel <- createModel(x = x, y = y, wts = weights,
method = models,
tuneValue = bestTune,
obsLevels = classLevels,
pp = ppOpt,
last = TRUE,
classProbs = trControl$classProbs,
sampling = trControl$sampling,
...))
if(trControl$trim && !is.null(models$trim)) {
if(trControl$verboseIter) old_size <- object.size(finalModel$fit)
finalModel$fit <- models$trim(finalModel$fit)
if(trControl$verboseIter) {
new_size <- object.size(finalModel$fit)
reduction <- format(old_size - new_size, units = "Mb")
if(reduction == "0 Mb") reduction <- "< 0 Mb"
p_reduction <- (unclass(old_size) - unclass(new_size))/unclass(old_size)*100
p_reduction <- if(p_reduction < 1) "< 1%" else paste0(round(p_reduction, 0), "%")
cat("Final model footprint reduced by", reduction, "or", p_reduction, "\n")
}
}
## get pp info
pp <- finalModel$preProc
finalModel <- finalModel$fit
## Remove this and check for other places it is reference
## replaced by tuneValue
if(method == "pls") finalModel$bestIter <- bestTune
## To use predict.train and automatically use the optimal lambda,
## we need to save it
if(method == "glmnet") finalModel$lambdaOpt <- bestTune$lambda
if(trControl$returnData) {
outData <- if(!is.data.frame(x)) try(as.data.frame(x), silent = TRUE) else x
if(class(outData)[1] == "try-error") {
warning("The training data could not be converted to a data frame for saving")
outData <- NULL
} else outData$.outcome <- y
} else outData <- NULL
## In the case of pam, the data will need to be saved differently
if(trControl$returnData & method == "pam") {
finalModel$xData <- x
finalModel$yData <- y
}
endTime <- proc.time()
times <- list(everything = endTime - startTime,
final = finalTime)
out <- structure(list(method = method,
modelInfo = models,
modelType = modelType,
results = performance,
pred = tmp$predictions,
bestTune = bestTune,
call = funcCall,
dots = list(...),
metric = metric,
control = trControl,
finalModel = finalModel,
preProcess = pp,
trainingData = outData,
resample = byResample,
resampledCM = resampledCM,
perfNames = perfNames,
maximize = maximize,
yLimits = trControl$yLimits,
times = times),
class = "train")
trControl$yLimits <- NULL
if(trControl$timingSamps > 0) {
pData <- lapply(x, function(x, n) sample(x, n, replace = TRUE), n = trControl$timingSamps)
pData <- as.data.frame(pData)
out$times$prediction <- system.time(predict(out, pData))
} else out$times$prediction <- rep(NA, 3)
out
}
train.formula <- function (form, data, ..., weights, subset, na.action = na.fail, contrasts = NULL) {
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval.parent(m$data))) m$data <- as.data.frame(data)
m$... <- m$contrasts <- NULL
m[[1]] <- as.name("model.frame")
m <- eval.parent(m)
if(nrow(m) < 1) stop("Every row has at least one missing value were found")
Terms <- attr(m, "terms")
x <- model.matrix(Terms, m, contrasts, na.action = na.action)
cons <- attr(x, "contrast")
xint <- match("(Intercept)", colnames(x), nomatch = 0)
if (xint > 0) x <- x[, -xint, drop = FALSE]
y <- model.response(m)
w <- as.vector(model.weights(m))
res <- train(x, y, weights = w, ...)
res$terms <- Terms
res$coefnames <- colnames(x)
res$call <- match.call()
res$na.action <- attr(m, "na.action")
res$contrasts <- cons
res$xlevels <- .getXlevels(Terms, m)
if(!is.null(res$trainingData)) {
res$trainingData <- data
isY <- names(res$trainingData) %in% as.character(form[[2]])
if(any(isY)) colnames(res$trainingData)[isY] <- ".outcome"
}
class(res) <- c("train", "train.formula")
res
}
summary.train <- function(object, ...) summary(object$finalModel, ...)
residuals.train <- function(object, ...) {
if(object$modelType != "Regression") stop("train() only produces residuals on numeric outcomes")
resid <- residuals(object$finalModel, ...)
if(is.null(resid)) {
if(!is.null(object$trainingData)) {
resid <- object$trainingData$.outcome - predict(object, object$trainingData[, names(object$trainingData) != ".outcome",drop = FALSE])
} else stop("The training data must be saved to produce residuals")
}
resid
}
fitted.train <- function(object, ...) {
prd <- fitted(object$finalModel)
if(is.null(prd)) {
if(!is.null(object$trainingData)) {
prd <- predict(object, object$trainingData[, names(object$trainingData) != ".outcome",drop = FALSE])
} else stop("The training data must be saved to produce fitted values")
}
prd
}
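## ---------------------------------------------------------------------------------------
## Minimal usage sketch of the train() interface defined above (added for illustration,
## not part of the original source). The call is wrapped in `if (FALSE)` so that sourcing
## this file stays side-effect free; it assumes the randomForest package is installed.
if (FALSE) {
  set.seed(1)
  fit <- train(Species ~ ., data = iris,
               method = "rf",
               trControl = trainControl(method = "cv", number = 5),
               tuneLength = 3)
  fit$bestTune             # mtry value selected by cross-validation
  predict(fit, head(iris)) # predictions from the final model
}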
|
/pkg/caret/R/train.default.R
|
no_license
|
bleutner/caret
|
R
| false | false | 27,798 |
r
|
"train" <-
function(x, ...){
UseMethod("train")
}
train.default <- function(x, y,
method = "rf",
preProcess = NULL,
...,
weights = NULL,
metric = ifelse(is.factor(y), "Accuracy", "RMSE"),
maximize = ifelse(metric == "RMSE", FALSE, TRUE),
trControl = trainControl(),
tuneGrid = NULL,
tuneLength = 3) {
startTime <- proc.time()
if(is.list(method)) {
minNames <- c("library", "type", "parameters", "grid",
"fit", "predict", "prob")
nameCheck <- minNames %in% names(method)
if(!all(nameCheck)) stop(paste("some required components are missing:",
paste(minNames[!nameCheck], collapse = ", ")))
models <- method
method <- "custom"
} else {
models <- getModelInfo(method, regex = FALSE)[[1]]
if (length(models) == 0)
stop(paste("Model", method, "is not in caret's built-in library"))
}
checkInstall(models$library)
for(i in seq(along = models$library)) do.call("require", list(package = models$library[i]))
paramNames <- as.character(models$parameters$parameter)
funcCall <- match.call(expand.dots = TRUE)
modelType <- if(is.factor(y)) "Classification" else "Regression"
if(!(modelType %in% models$type)) stop(paste("wrong model type for", tolower(modelType)))
if(grepl("^svm", method) & grepl("String$", method)) {
if(is.vector(x) && is.character(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.matrix(x) && is.numeric(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
if(is.data.frame(x)) {
stop("'x' should be a character matrix with a single column for string kernel methods")
}
}
if(modelType != "Classification" & !is.null(trControl$sampling))
stop("sampling methods are only implemented for classification problems")
if(!is.null(trControl$sampling)) {
trControl$sampling <- parse_sampling(trControl$sampling)
}
if(any(class(x) == "data.table")) x <- as.data.frame(x)
stopifnot(length(y) > 1)
stopifnot(nrow(x) > 1)
stopifnot(nrow(x) == length(y))
## TODO add check method and execute here
## Some models that use RWeka start multiple threads and this conflicts with multicore:
if(any(search() == "package:doMC") && getDoParRegistered() && "RWeka" %in% models$library)
warning("Models using Weka will not work with parallel processing with multicore/doMC")
flush.console()
if(!is.null(preProcess) && !(all(preProcess %in% ppMethods)))
stop(paste('pre-processing methods are limited to:', paste(ppMethods, collapse = ", ")))
if(modelType == "Classification") {
## We should get and save the class labels to ensure that predictions are coerced
## to factors that have the same levels as the original data. This is especially
## important with multiclass systems where one or more classes have low sample sizes
## relative to the others
classLevels <- levels(y)
if(trControl$classProbs && any(classLevels != make.names(classLevels))) {
warning(paste("At least one of the class levels are not valid R variables names;",
"This may cause errors if class probabilities are generated because",
"the variables names will be converted to:",
paste(make.names(classLevels), collapse = ", ")))
}
if(metric %in% c("RMSE", "Rsquared"))
stop(paste("Metric", metric, "not applicable for classification models"))
if(trControl$classProbs) {
if(!is.function(models$prob)) {
warning("Class probabilities were requested for a model that does not implement them")
trControl$classProbs <- FALSE
}
}
} else {
if(metric %in% c("Accuracy", "Kappa"))
stop(paste("Metric", metric, "not applicable for regression models"))
classLevels <- NA
if(trControl$classProbs) {
warning("cannnot compute class probabilities for regression")
trControl$classProbs <- FALSE
}
}
if(trControl$method == "oob" & !(method %in% oob_mods))
stop(paste("for oob error rates, model bust be one of:",
paste(oob_mods, sep = "", collapse = ", ")))
## If they don't exist, make the data partitions for the resampling iterations.
if(is.null(trControl$index)) {
if(trControl$method == "custom")
stop("'custom' resampling is appropriate when the `trControl` argument `index` is used")
trControl$index <- switch(tolower(trControl$method),
oob = NULL,
none = list(seq(along = y)),
alt_cv =, cv = createFolds(y, trControl$number, returnTrain = TRUE),
repeatedcv =, adaptive_cv = createMultiFolds(y, trControl$number, trControl$repeats),
loocv = createFolds(y, length(y), returnTrain = TRUE),
boot =, boot632 =, adaptive_boot = createResample(y, trControl$number),
test = createDataPartition(y, 1, trControl$p),
adaptive_lgocv =, lgocv = createDataPartition(y, trControl$number, trControl$p),
timeslice = createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$train,
subsemble = subsemble_index(y, V = trControl$number, J = trControl$repeats))
} else {
index_types <- unlist(lapply(trControl$index, is.integer))
if(!isTRUE(all(index_types)))
stop("`index` should be lists of integers.")
if(!is.null(trControl$indexOut)) {
index_types <- unlist(lapply(trControl$indexOut, is.integer))
if(!isTRUE(all(index_types)))
stop("`indexOut` should be lists of integers.")
}
}
if(trControl$method == "subsemble") {
if(!trControl$savePredictions) trControl$savePredictions <- TRUE
trControl$indexOut <- trControl$index$holdout
trControl$index <- trControl$index$model
}
## Create hold--out indicies
if(is.null(trControl$indexOut) & trControl$method != "oob"){
if(tolower(trControl$method) != "timeslice") {
trControl$indexOut <- lapply(trControl$index,
function(training, allSamples) allSamples[-unique(training)],
allSamples = seq(along = y))
names(trControl$indexOut) <- prettySeq(trControl$indexOut)
} else {
trControl$indexOut <- createTimeSlices(seq(along = y),
initialWindow = trControl$initialWindow,
horizon = trControl$horizon,
fixedWindow = trControl$fixedWindow)$test
}
}
if(trControl$method != "oob" & is.null(trControl$index)) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$index))) names(trControl$index) <- prettySeq(trControl$index)
if(trControl$method != "oob" & is.null(names(trControl$indexOut))) names(trControl$indexOut) <- prettySeq(trControl$indexOut)
# if(!is.data.frame(x)) x <- as.data.frame(x)
## Gather all the pre-processing info. We will need it to pass into the grid creation
## code so that there is a concordance between the data used for modeling and grid creation
if(!is.null(preProcess)) {
ppOpt <- list(options = preProcess)
if(length(trControl$preProcOptions) > 0) ppOpt <- c(ppOpt,trControl$preProcOptions)
} else ppOpt <- NULL
## If no default training grid is specified, get one. We have to pass in the formula
## and data for some models (rpart, pam, etc - see manual for more details)
if(is.null(tuneGrid)) {
if(!is.null(ppOpt) && length(models$parameters$parameter) > 1 && as.character(models$parameters$parameter) != "parameter") {
pp <- list(method = ppOpt$options)
if("ica" %in% pp$method) pp$n.comp <- ppOpt$ICAcomp
if("pca" %in% pp$method) pp$thresh <- ppOpt$thresh
if("knnImpute" %in% pp$method) pp$k <- ppOpt$k
pp$x <- x
ppObj <- do.call("preProcess", pp)
tuneGrid <- models$grid(predict(ppObj, x), y, tuneLength)
rm(ppObj, pp)
} else tuneGrid <- models$grid(x, y, tuneLength)
}
dotNames <- hasDots(tuneGrid, models)
if(dotNames) colnames(tuneGrid) <- gsub("^\\.", "", colnames(tuneGrid))
## Check tuning parameter names
tuneNames <- as.character(models$parameters$parameter)
goodNames <- all.equal(sort(tuneNames), sort(names(tuneGrid)))
if(!is.logical(goodNames) || !goodNames) {
stop(paste("The tuning parameter grid should have columns",
paste(tuneNames, collapse = ", ", sep = "")))
}
if(trControl$method == "none" && nrow(tuneGrid) != 1)
stop("Only one model should be specified in tuneGrid with no resampling")
## In case prediction bounds are used, compute the limits. For now,
## store these in the control object since that gets passed everywhere
trControl$yLimits <- if(is.numeric(y)) extendrange(y) else NULL
if(trControl$method != "none") {
##------------------------------------------------------------------------------------------------------------------------------------------------------#
## For each tuning parameter combination, we will loop over them, fit models and generate predictions.
## We only save the predictions at this point, not the models (and in the case of method = "oob" we
## only save the prediction summaries at this stage.
## trainInfo will hold the information about how we should loop to train the model and what types
## of parameters are used.
## There are two types of methods to build the models: "basic" means that each tuning parameter
## combination requires it's own model fit and "seq" where a single model fit can be used to
## get predictions for multiple tuning parameters.
## The tuneScheme() function is in miscr.R and it helps define the following:
## - A data frame called "loop" with columns for parameters and a row for each model to be fit.
## For "basic" models, this is the same as the tuning grid. For "seq" models, it is only
## the subset of parameters that need to be fit
## - A list called "submodels". If "basic", it is NULL. For "seq" models, it is a list. Each list
## item is a data frame of the parameters that need to be varied for the corresponding row of
## the loop object.
##
## For example, for a gbm model, our tuning grid might be:
## .interaction.depth .n.trees .shrinkage
## 1 50 0.1
## 1 100 0.1
## 2 50 0.1
## 2 100 0.1
## 2 150 0.1
##
## For this example:
##
## loop:
## .interaction.depth .shrinkage .n.trees
## 1 0.1 100
## 2 0.1 150
##
## submodels:
## [[1]]
## .n.trees
## 50
##
## [[2]]
## .n.trees
## 50
## 100
##
## A simplified version of predictionFunction() would have the following gbm section:
##
## # First get the predictions with the value of n.trees as given in the current
## # row of loop
## out <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = modelFit$tuneValue$.n.trees)
##
## # param is the current value of submodels. In normal prediction mode (i.e
## # when using predict.train), param = NULL. When called within train()
## # with this model, it will have the other values for n.trees.
## # In this case, the output of the function is a list of predictions
## # These values are deconvoluted in workerTasks() in misc.R
## if(!is.null(param))
## {
## tmp <- vector(mode = "list", length = nrow(param) + 1)
## tmp[[1]] <- out
##
## for(j in seq(along = param$.n.trees))
## {
## tmp[[j]] <- predict(modelFit,
## newdata,
## type = "response",
## n.trees = param$.n.trees[j])
## }
## out <- tmp
##
# paramCols <- paste(".", as.character(models$parameters$parameter), sep = "")
if(is.function(models$loop) && nrow(tuneGrid) > 1){
trainInfo <- models$loop(tuneGrid)
if(!all(c("loop", "submodels") %in% names(trainInfo)))
stop("The 'loop' function should produce a list with elements 'loop' and 'submodels'")
} else trainInfo <- list(loop = tuneGrid)
## Set or check the seeds when needed
if(is.null(trControl$seeds)) {
seeds <- vector(mode = "list", length = length(trControl$index))
seeds <- lapply(seeds, function(x) sample.int(n = 1000000, size = nrow(trainInfo$loop)))
seeds[[length(trControl$index) + 1]] <- sample.int(n = 1000000, size = 1)
trControl$seeds <- seeds
} else {
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) {
## check versus number of tasks
numSeeds <- unlist(lapply(trControl$seeds, length))
badSeed <- (length(trControl$seeds) < length(trControl$index) + 1) ||
(any(numSeeds[-length(numSeeds)] < nrow(trainInfo$loop)))
if(badSeed) stop(paste("Bad seeds: the seed object should be a list of length",
length(trControl$index) + 1, "with",
length(trControl$index), "integer vectors of size",
nrow(trainInfo$loop), "and the last list element having a",
"single integer"))
}
}
if(trControl$method == "oob") {
## delay this test until later
perfNames <- metric
} else {
## run some data thru the summary function and see what we get
testSummary <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(testSummary)
}
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
if(trControl$method == "oob"){
tmp <- oobTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp
perfNames <- colnames(performance)
perfNames <- perfNames[!(perfNames %in% as.character(models$parameters$parameter))]
if(!(metric %in% perfNames)){
oldMetric <- metric
metric <- perfNames[1]
warning(paste("The metric \"",
oldMetric,
"\" was not in ",
"the result set. ",
metric,
" will be used instead.",
sep = ""))
}
} else {
if(trControl$method == "LOOCV"){
tmp <- looTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
} else {
if(!grepl("adapt", trControl$method)){
tmp <- nominalTrainWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess, ctrl = trControl, lev = classLevels, ...)
performance <- tmp$performance
resampleResults <- tmp$resample
} else {
tmp <- adaptiveWorkflow(x = x, y = y, wts = weights,
info = trainInfo, method = models,
ppOpts = preProcess,
ctrl = trControl,
lev = classLevels,
metric = metric,
maximize = maximize,
...)
performance <- tmp$performance
resampleResults <- tmp$resample
}
}
}
## TODO we used to give resampled results for LOO
if(!(trControl$method %in% c("LOOCV", "oob"))) {
if(modelType == "Classification" && length(grep("^\\cell", colnames(resampleResults))) > 0) {
resampledCM <- resampleResults[, !(names(resampleResults) %in% perfNames)]
resampleResults <- resampleResults[, -grep("^\\cell", colnames(resampleResults))]
#colnames(resampledCM) <- gsub("^\\.", "", colnames(resampledCM))
} else resampledCM <- NULL
} else resampledCM <- NULL
if(trControl$verboseIter) {
cat("Aggregating results\n")
flush.console()
}
perfCols <- names(performance)
perfCols <- perfCols[!(perfCols %in% paramNames)]
if(all(is.na(performance[, metric]))) {
cat(paste("Something is wrong; all the", metric, "metric values are missing:\n"))
print(summary(performance[, perfCols[!grepl("SD$", perfCols)], drop = FALSE]))
stop("Stopping")
}
## Sort the tuning parameters from least complex to most complex
if(!is.null(models$sort)) performance <- models$sort(performance)
if(any(is.na(performance[, metric])))
warning("missing values found in aggregated results")
if(trControl$verboseIter && nrow(performance) > 1) {
cat("Selecting tuning parameters\n")
flush.console()
}
## select the optimal set
selectClass <- class(trControl$selectionFunction)[1]
## Select the "optimal" tuning parameter.
if(grepl("adapt", trControl$method)) {
perf_check <- subset(performance, .B == max(performance$.B))
} else perf_check <- performance
## Make adaptive only look at parameters with B = max(B)
if(selectClass == "function") {
bestIter <- trControl$selectionFunction(x = perf_check,
metric = metric,
maximize = maximize)
}
else {
if(trControl$selectionFunction == "oneSE") {
bestIter <- oneSE(perf_check,
metric,
length(trControl$index),
maximize)
} else {
bestIter <- do.call(trControl$selectionFunction,
list(x = perf_check,
metric = metric,
maximize = maximize))
}
}
if(is.na(bestIter) || length(bestIter) != 1) stop("final tuning parameters could not be determined")
if(grepl("adapt", trControl$method)) {
best_perf <- perf_check[bestIter,as.character(models$parameters$parameter),drop = FALSE]
performance$order <- 1:nrow(performance)
bestIter <- merge(performance, best_perf)$order
performance$order <- NULL
}
## Based on the optimality criterion, select the tuning parameter(s)
bestTune <- performance[bestIter, paramNames, drop = FALSE]
} else {
bestTune <- tuneGrid
performance <- evalSummaryFunction(y, wts = weights, ctrl = trControl,
lev = classLevels, metric = metric,
method = method)
perfNames <- names(performance)
performance <- as.data.frame(t(performance))
performance <- cbind(performance, tuneGrid)
performance <- performance[-1,,drop = FALSE]
tmp <- resampledCM <- NULL
}
## Save some or all of the resampling summary metrics
if(!(trControl$method %in% c("LOOCV", "oob", "none"))) {
byResample <- switch(trControl$returnResamp,
none = NULL,
all = {
out <- resampleResults
colnames(out) <- gsub("^\\.", "", colnames(out))
out
},
final = {
out <- merge(bestTune, resampleResults)
out <- out[,!(names(out) %in% names(tuneGrid)), drop = FALSE]
out
})
} else {
byResample <- NULL
}
# names(bestTune) <- paste(".", names(bestTune), sep = "")
## Reorder rows of performance
orderList <- list()
for(i in seq(along = paramNames)) orderList[[i]] <- performance[,paramNames[i]]
names(orderList) <- paramNames
performance <- performance[do.call("order", orderList),]
if(trControl$verboseIter) {
bestText <- paste(paste(names(bestTune), "=",
format(bestTune, digits = 3)),
collapse = ", ")
if(nrow(performance) == 1) bestText <- "final model"
cat("Fitting", bestText, "on full training set\n")
flush.console()
}
## Make the final model based on the tuning results
if(!(length(trControl$seeds) == 1 && is.na(trControl$seeds))) set.seed(trControl$seeds[[length(trControl$seeds)]][1])
finalTime <- system.time(
finalModel <- createModel(x = x, y = y, wts = weights,
method = models,
tuneValue = bestTune,
obsLevels = classLevels,
pp = ppOpt,
last = TRUE,
classProbs = trControl$classProbs,
sampling = trControl$sampling,
...))
if(trControl$trim && !is.null(models$trim)) {
if(trControl$verboseIter) old_size <- object.size(finalModel$fit)
finalModel$fit <- models$trim(finalModel$fit)
if(trControl$verboseIter) {
new_size <- object.size(finalModel$fit)
reduction <- format(old_size - new_size, units = "Mb")
if(reduction == "0 Mb") reduction <- "< 0 Mb"
p_reduction <- (unclass(old_size) - unclass(new_size))/unclass(old_size)*100
p_reduction <- if(p_reduction < 1) "< 1%" else paste0(round(p_reduction, 0), "%")
cat("Final model footprint reduced by", reduction, "or", p_reduction, "\n")
}
}
## get pp info
pp <- finalModel$preProc
finalModel <- finalModel$fit
## Remove this and check for other places it is reference
## replaced by tuneValue
if(method == "pls") finalModel$bestIter <- bestTune
## To use predict.train and automatically use the optimal lambda,
## we need to save it
if(method == "glmnet") finalModel$lambdaOpt <- bestTune$lambda
if(trControl$returnData) {
outData <- if(!is.data.frame(x)) try(as.data.frame(x), silent = TRUE) else x
if(class(outData)[1] == "try-error") {
warning("The training data could not be converted to a data frame for saving")
outData <- NULL
} else outData$.outcome <- y
} else outData <- NULL
## In the case of pam, the data will need to be saved differently
if(trControl$returnData & method == "pam") {
finalModel$xData <- x
finalModel$yData <- y
}
endTime <- proc.time()
times <- list(everything = endTime - startTime,
final = finalTime)
out <- structure(list(method = method,
modelInfo = models,
modelType = modelType,
results = performance,
pred = tmp$predictions,
bestTune = bestTune,
call = funcCall,
dots = list(...),
metric = metric,
control = trControl,
finalModel = finalModel,
preProcess = pp,
trainingData = outData,
resample = byResample,
resampledCM = resampledCM,
perfNames = perfNames,
maximize = maximize,
yLimits = trControl$yLimits,
times = times),
class = "train")
trControl$yLimits <- NULL
if(trControl$timingSamps > 0) {
pData <- lapply(x, function(x, n) sample(x, n, replace = TRUE), n = trControl$timingSamps)
pData <- as.data.frame(pData)
out$times$prediction <- system.time(predict(out, pData))
} else out$times$prediction <- rep(NA, 3)
out
}
train.formula <- function (form, data, ..., weights, subset, na.action = na.fail, contrasts = NULL) {
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval.parent(m$data))) m$data <- as.data.frame(data)
m$... <- m$contrasts <- NULL
m[[1]] <- as.name("model.frame")
m <- eval.parent(m)
if(nrow(m) < 1) stop("Every row has at least one missing value were found")
Terms <- attr(m, "terms")
x <- model.matrix(Terms, m, contrasts, na.action = na.action)
cons <- attr(x, "contrast")
xint <- match("(Intercept)", colnames(x), nomatch = 0)
if (xint > 0) x <- x[, -xint, drop = FALSE]
y <- model.response(m)
w <- as.vector(model.weights(m))
res <- train(x, y, weights = w, ...)
res$terms <- Terms
res$coefnames <- colnames(x)
res$call <- match.call()
res$na.action <- attr(m, "na.action")
res$contrasts <- cons
res$xlevels <- .getXlevels(Terms, m)
if(!is.null(res$trainingData)) {
res$trainingData <- data
isY <- names(res$trainingData) %in% as.character(form[[2]])
if(any(isY)) colnames(res$trainingData)[isY] <- ".outcome"
}
class(res) <- c("train", "train.formula")
res
}
summary.train <- function(object, ...) summary(object$finalModel, ...)
residuals.train <- function(object, ...) {
if(object$modelType != "Regression") stop("train() only produces residuals on numeric outcomes")
resid <- residuals(object$finalModel, ...)
if(is.null(resid)) {
if(!is.null(object$trainingData)) {
resid <- object$trainingData$.outcome - predict(object, object$trainingData[, names(object$trainingData) != ".outcome",drop = FALSE])
} else stop("The training data must be saved to produce residuals")
}
resid
}
fitted.train <- function(object, ...) {
prd <- fitted(object$finalModel)
if(is.null(prd)) {
if(!is.null(object$trainingData)) {
prd <- predict(object, object$trainingData[, names(object$trainingData) != ".outcome",drop = FALSE])
} else stop("The training data must be saved to produce fitted values")
}
prd
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cross_levels.R
\name{join}
\alias{join}
\title{Helper function handling specification of which variables to join
cross-classified data on, and what kind of correlation structure is needed.
Correlation structures can only be provided if the underlying call is
a `link_levels()` call.}
\usage{
join(..., rho = 0, sigma = NULL)
}
\arguments{
\item{...}{A series of two or more variable names, unquoted, to join on in
order to create cross-classified data.}
\item{rho}{A fixed (Spearman's rank) correlation coefficient between the
variables being joined on: note that if it is not possible to make a
correlation matrix from this coefficient (e.g. if you are joining on three
or more variables and rho is negative) then the \code{cross_levels()} call
will fail. Do not provide \code{rho} if making panel data.}
\item{sigma}{A matrix with dimensions equal to the number of variables you
are joining on, specifying the correlation for the resulting joined data.
Only one of rho and sigma should be provided. Do not provide \code{sigma} if
making panel data.}
}
\description{
Helper function handling specification of which variables to join
cross-classified data on, and what kind of correlation structure is needed.
Correlation structures can only be provided if the underlying call is
a `link_levels()` call.
}
\examples{
panels <- fabricate(
countries = add_level(N = 150, country_fe = runif(N, 1, 10)),
years = add_level(N = 25, year_shock = runif(N, 1, 10), nest = FALSE),
obs = cross_levels(
by = join(countries, years),
new_variable = country_fe + year_shock + rnorm(N, 0, 2)
)
)
schools_data <- fabricate(
primary_schools = add_level(N = 20, ps_quality = runif(N, 1, 10)),
secondary_schools = add_level(
N = 15,
ss_quality = runif(N, 1, 10),
nest = FALSE),
students = link_levels(
N = 1500,
by = join(primary_schools, secondary_schools),
SAT_score = 800 + 13 * ps_quality + 26 * ss_quality + rnorm(N, 0, 50)
)
)
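# Illustrative only (not from the original documentation): joining on two
# variables with a target rank correlation supplied via `rho`. Wrapped in
# \dontrun{} because it is an added sketch, not a checked package example.
\dontrun{
corr_panel <- fabricate(
  regions = add_level(N = 30, r_quality = runif(N, 1, 10)),
  firms = add_level(N = 40, f_quality = runif(N, 1, 10), nest = FALSE),
  workers = link_levels(
    N = 800,
    by = join(regions, firms, rho = 0.4),
    wage = 10 + r_quality + f_quality + rnorm(N)
  )
)
}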
}
|
/man/join.Rd
|
no_license
|
amirmasoudabdol/fabricatr
|
R
| false | true | 2,044 |
rd
|
## These functions, together, enable a matrix and its inverse to be stored together.
## The matrix and its inverse are stored inside the makeCacheMatrix environment,
## with the makeCacheMatrix function only creating a list of four functions in the
## global environment.
## To use these functions, first create/store the matrix inside makeCacheMatrix
## e.g. x <- makeCacheMatrix(MATRIX)
## Then invert/solve the matrix with cacheSolve()
## e.g. xinv <- cacheSolve(x)
## If the matrix had been previously inverted, cacheSolve() will retrieve the solution
## - it will only revert to actually solving if no solution had been previously found
## makeCacheMatrix creates the special "matrix", in reality a list of four functions
makeCacheMatrix <- function(x = matrix()) {
I <- NULL
set <- function(y) {
x <<- y
I <<- NULL
}
get <- function() x
setinverse <- function(Inverse) I <<- Inverse
getinverse <- function() I
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve finds the inverse of a matrix created/stored with makeCacheMatrix
## If previously solved, cacheSolve will recover and return the inverse from cache
## Otherwise, it gets the original matrix, inverts it,
## stores the inversion in the makeCacheMatrix environment, and returns the result
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
I <- x$getinverse()
if(!is.null(I)) {
message("getting cached data")
return(I)
}
data <- x$get()
I <- solve(data, ...)
x$setinverse(I)
I
}
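## Worked example (added for illustration; the 2x2 matrix below is made up):
## the first cacheSolve() call computes the inverse, the second returns the cached copy.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2))
cacheSolve(cm)               # computes the inverse and stores it in the cache
cacheSolve(cm)               # prints "getting cached data" and reuses the stored inverse
cm$get() %*% cacheSolve(cm)  # returns the 2x2 identity matrix, confirming the inversion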
|
/cachematrix.R
|
no_license
|
camlucas/ProgrammingAssignment2
|
R
| false | false | 1,609 |
r
|
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
# EGOLM
# 2021-04-14
# Gini over time
# Fabian Braesemann
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
#%#%#%#%#%#%#%#%#%#%
# Loading packages
#%#%#%#%#%#%#%#%#%#%
library(tidyverse) # numerous data wrangling packages
library(data.table) # quick data loading
library(lubridate) # Working with dates
library(RColorBrewer) # Fancy colours
library(ggrepel) # Fancy labels in plot
library(scales) # Log-transformed axis labels
library(ggpubr) # Arranging several ggplots
library(ggrepel) # Fancy geom-labels
library(stargazer) # LaTeX regression tables
'%!in%' <- function(x,y)!('%in%'(x,y)) # opposite of %in% command
options(stringsAsFactors = FALSE)
library("reldist") # Package to compute GINI
library(countrycode) # Package to iso-code country names
#%#%#%#%#%#%#%#%#%
# Prepare WB data
#%#%#%#%#%#%#%#%#%
olm_country <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/olm_data/OLM_Country_Data.csv")
wb_country <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/official_statistics/county_wb_data.csv")
wb_country <- wb_country %>% dplyr::select(-long, -country)
wb_country$year <- as.Date(as.character(wb_country$year), format = "%Y")
wb_country$year <- floor_date(wb_country$year, unit = "year")
wb_country <- wb_country %>% filter(short == "WB.POP.TOTL") %>% rename(WB.POP.TOTL = value) %>% select(-short)
olm_country$year <- as.Date(as.character(olm_country$year), format = "%d/%m/%Y")
olm_country <- olm_country %>% select(-country)
olm_country <- merge(olm_country, wb_country, by.x = c("ISO_Code", "year"), by.y = c("iso3", "year"))
olm_country$country <- countrycode(olm_country$ISO_Code, origin = "iso3c", destination = "country.name")
olm_country$region <- countrycode(olm_country$ISO_Code, origin = "iso3c", destination = "region")
olm_country <- olm_country %>% mutate(OLM.PRJ.CNT.PC = project_count / WB.POP.TOTL * 1000000)
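# Worked example of the per-capita normalisation above (illustrative numbers): a country
# with 500 projects and 10,000,000 inhabitants gets 500 / 1e7 * 1e6 = 50 projects per
# million people.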
olm_country_allYears <- olm_country
#%#%#%#%#%#%#%#%#%
# Prepare OECD data
#%#%#%#%#%#%#%#%#%
OECD_stats <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/official_statistics/region_oecd_data.csv")
OECD_stats$iso2 <- sapply(OECD_stats$region_id, function(x) substr(x,1,2)[[1]][1])
OECD_stats$iso2 <- ifelse(OECD_stats$iso2 == "EL", "GR",
ifelse(OECD_stats$iso2 == "UK", "GB", OECD_stats$iso2))
OECD_stats$iso3 <- countrycode(OECD_stats$iso2, origin = "iso2c", destination = "iso3c")
OECD_stats <- OECD_stats %>% filter(iso3 %!in% c("TUN", "PER"))
OECD_stats$year <- as.Date(as.character(OECD_stats$year), format = "%Y")
OECD_stats$year <- floor_date(OECD_stats$year, unit = "year")
OECD_stats %>% filter(long == "Region holds country capital") %>% summarise(sum(OLM.PRJ.CNT))
OECD_capital <- OECD_stats %>% filter(long == "Region holds country capital", year == as.Date("2020-01-01")) %>% dplyr::select(region_id, capital = value)
OECD_stats2 <- merge(OECD_stats, OECD_capital, by = "region_id")
OECD_stats_allYears <- OECD_stats2 %>% filter(long == "Population count") %>% dplyr::select(year, iso3, region_id, region, OLM.PRJ.CNT, OLM.WG.MN, OCD.PPL.CNT = value, capital)
OECD_stats_allYears <- OECD_stats_allYears %>% dplyr::select(region_id, year, OLM.PRJ.CNT) #%>% spread(gdlCode, year, project_count, fil = 0)
OECD_stats_allYears <- OECD_stats_allYears %>% group_by(region_id, year) %>% filter(row_number(OLM.PRJ.CNT) == 1)
OECD_stats_allYears2 <- pivot_wider(OECD_stats_allYears, names_from = year, values_from = OLM.PRJ.CNT, values_fill = 0)
OECD_stats_allYears2 <- OECD_stats_allYears2 %>% gather(year, OLM.PRJ.CNT, - region_id)
OECD_stats_allYears2$iso2 <- sapply(OECD_stats_allYears2$region_id, function(x) substr(x,1,2)[[1]][1])
OECD_stats_allYears2$iso2 <- ifelse(OECD_stats_allYears2$iso2 == "EL", "GR",
ifelse(OECD_stats_allYears2$iso2 == "UK", "GB", OECD_stats_allYears2$iso2))
OECD_stats_allYears2$iso3 <- countrycode(OECD_stats_allYears2$iso2, origin = "iso2c", destination = "iso3c")
OECD_stats_allYears2$iso3 <- ifelse(OECD_stats_allYears2$iso3 == "MNE", "MEX", OECD_stats_allYears2$iso3)
OECD_stats_allYears3 <- merge(OECD_stats_allYears2, OECD_capital, by = "region_id")
OECD_stats_allYears3 <- OECD_stats_allYears3 %>% group_by(region_id, year) %>% filter(row_number(OLM.PRJ.CNT) == 1)
#%#%#%#%#%#%#%#%#%
# Prepare GDL data
#%#%#%#%#%#%#%#%#%
olm_GDL <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/olm_data/OLM_GDL_Data.csv")
olm_GDL$year <- as.Date(olm_GDL$year, format = "%d/%m/%Y")
olm_GDL <- olm_GDL %>% filter(Country %!in% c("Brazil", "Costa Rica", "China", "India", "Indonesia", "Mexico", "South Africa", "Colombia"))
olm_GDL <- olm_GDL %>% mutate(MEX = str_detect(gdlCode, "MEX")) %>% filter(MEX == 0) %>% dplyr::select(-MEX)
olm_GDL <- olm_GDL %>% dplyr::select(year, region_id = gdlCode, mean_wage, region = Region, project_count)
GDL_stats <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/official_statistics/region_gdl_data.csv")
GDL_stats$year <- as.Date(as.character(GDL_stats$year), format = "%Y")
GDL_stats$year <- floor_date(GDL_stats$year, unit = "year")
GDL_stats <- GDL_stats %>% filter(short %in% c("GDL.PPL.CNT", "GDL.CPT.YES")) %>%
dplyr::select(region_id, iso3, year, short, value)
GDL_stats <- GDL_stats %>% filter(iso3 %!in% c("BRA", "CRI", "CHN", "IND", "IDN", "MEX", "ZAF", "COL"))
GDL_stats <- GDL_stats %>% spread(short, value)
GDL_stats <- merge(olm_GDL, GDL_stats, by = c("region_id", "year"))
GDL_stats$GN <- "Global South"
GDL_stats <- GDL_stats %>% dplyr::select(GN, iso3, year, region_id, region, project_count, mean_wage, population = GDL.PPL.CNT, capital = GDL.CPT.YES)
GDL_capital <- GDL_stats %>% dplyr::select(year, region_id, capital)
olm_GDL_allYears <- GDL_stats %>% dplyr::select(region_id, year, project_count)
olm_GDL_allYears <- olm_GDL_allYears %>% group_by(region_id, year) %>% filter(row_number(project_count) == 1)
olm_GDL_allYears2 <- pivot_wider(olm_GDL_allYears, names_from = year, values_from = project_count, values_fill = 0)
olm_GDL_allYears2 <- olm_GDL_allYears2 %>% gather(year, project_count, - region_id)
olm_GDL_allYears2$iso3 <- sapply(olm_GDL_allYears2$region_id, function(x) substr(x,1,3)[[1]][1])
olm_GDL_allYears2 <- olm_GDL_allYears2 %>% mutate(year = as.Date(year, format = "%Y-%m-%d"))
olm_GDL_allYears3 <- merge(olm_GDL_allYears2, GDL_capital, by = c("region_id", "year"), all.x = T)
olm_GDL_allYears3 <- olm_GDL_allYears3 %>% mutate(capital = ifelse(is.na(capital), 0, capital))
#%#%#%#%#%#%#%#%#%
# Between Gini
#%#%#%#%#%#%#%#%#%
# Country
ginis_wb_between <- olm_country_allYears %>% group_by(year) %>% summarise(value = gini(project_count)) %>% mutate(geography = "Global (countries)", key = "Gini coefficient (between countries)")
# OECD
ginis_oecd_between <- OECD_stats_allYears3 %>% group_by(year) %>% summarise(value = gini(OLM.PRJ.CNT)) %>% mutate(geography = "OECD+ (regions)", key = "Gini coefficient (between countries)")
# Check whether all OECD countries are in the data set
gini_oecd_countries <- (OECD_stats_allYears3 %>% group_by(iso3) %>% summarise(count = sum(OLM.PRJ.CNT)))$iso3
# Remove OECD+ countries from GDL list
olm_GDL_allYears3 <- olm_GDL_allYears3 %>% filter(iso3 %!in% c("BRA", "CHN", "COL", "CRI", "IDN", "IND", "MNE", "ZAF"))
ginis_gdl_between <- olm_GDL_allYears3 %>% group_by(year) %>% summarise(value = gini(project_count)) %>% mutate(geography = "GDL", key = "Gini coefficient (between countries)")
# Check whether all GDL countries are in the data set
gini_gdl_countries <- (olm_GDL_allYears3 %>% group_by(iso3) %>% summarise(count = n()))$iso3
gini_df_between <- rbind(ginis_wb_between, ginis_oecd_between, ginis_gdl_between)
gini_df_between <- gini_df_between %>% mutate(geography = ifelse(geography == "GDL", "Global South (regions)",geography),
geography = factor(geography, levels = c("Global (countries)", "OECD+ (regions)", "Global South (regions)")))
# Plot (A) Regression
regression_df <- data.frame(key = "Gini coefficient (between countries)", value = c(0.92,0.76,0.84),
geography = c("Global South (regions)", "OECD+ (regions)", "Global (countries)"), year = as.Date("2019-01-01"),
coefficient = c(
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_gdl_between))$coefficients[,1][2],3), "**", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_oecd_between))$coefficients[,1][2],3), "**", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_wb_between))$coefficients[,1][2],3), "***", sep = "")))
participation_oecd <- OECD_stats_allYears2 %>% group_by(year) %>% filter(OLM.PRJ.CNT > 0) %>% summarise(value = n()) %>% mutate(geography = "OECD+ (regions)", key = "Participating countries / regions")
participation_gdl <- olm_GDL_allYears2 %>% group_by(year) %>% filter(project_count > 0) %>% summarise(value = n()) %>% mutate(geography = "Global South (regions)", key = "Participating countries / regions")
participation_wb <- olm_country_allYears %>% group_by(year) %>% filter(project_count > 0) %>% summarise(value = n()) %>% mutate(geography = "Global (countries)", key = "Participating countries / regions")
gini_df_between <- rbind(gini_df_between, participation_oecd, participation_gdl, participation_wb)
# Plot (B) Regression
regression_df2 <- data.frame(key = "Participating countries / regions", value = c(430,570,220),
geography = c("Global South (regions)", "OECD+ (regions)", "Global (countries)"), year = c(as.Date("2019-01-01"), as.Date("2014-06-01"), as.Date("2019-01-01")),
coefficient = c(
paste("b = ", round(summary(lm(value ~ time(value), data = participation_gdl))$coefficients[,1][2],0), ".", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = participation_oecd))$coefficients[,1][2],0), "", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = participation_wb))$coefficients[,1][2],0), ".", sep = "")))
gini_df_between$key <- factor(gini_df_between$key, levels = c("Participating countries / regions",
"Gini coefficient (between countries)"))
# Plot A
plot1a <- gini_df_between %>% filter(key == "Participating countries / regions") %>%
ggplot(aes(x = year, y = value, colour = geography)) +
facet_wrap(~key, scales = "free_y") +
geom_line(lty = 3, key_glyph = "rect") +
geom_point(shape = 21) +
geom_text(data = regression_df2, aes(label = coefficient)) +
scale_colour_manual(values = c("#ac7921",brewer.pal(11,"RdBu")[10],
brewer.pal(11,"RdBu")[2])) +
geom_smooth(method = "lm", se = F, lwd = 0.5) +
theme_bw() +
labs(colour = "", x = "", y = "") +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(), legend.position = "bottom",
text = element_text(size = 14))
# Plot B
plot1b <- gini_df_between %>% filter(key == "Gini coefficient (between countries)") %>%
ggplot(aes(x = year, y = value, colour = geography)) +
facet_wrap(~key, scales = "free_y") +
geom_line(lty = 3, key_glyph = "rect") +
geom_point(shape = 21) +
geom_text(data = regression_df, aes(label = coefficient)) +
scale_colour_manual(values = c("#ac7921",brewer.pal(11,"RdBu")[10],
brewer.pal(11,"RdBu")[2])) +
geom_smooth(method = "lm", se = F, lwd = 0.5) +
theme_bw() +
labs(colour = "", x = "", y = "") +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(), legend.position = "bottom",
text = element_text(size = 14))
#%#%#%#%#%#%#%#%#%
# Within Gini
#%#%#%#%#%#%#%#%#%
# OECD
ginis_oecd_within <- OECD_stats_allYears3 %>% filter(capital == 0) %>% group_by(iso3, year) %>% summarise(value = gini(OLM.PRJ.CNT)) %>% mutate(geography = "OECD+ (regions)", key = "within")
ginis_oecd_within$year <- as.Date(ginis_oecd_within$year, format = "%Y-%m-%d")
ginis_oecd_within <- data.frame(ginis_oecd_within)
# GDL
# We have to remove those country-years without projects as they do not allow us to compute the GINI (without variation, no GINI)
deleter <- (olm_GDL_allYears3 %>% group_by(iso3, year) %>% summarise(count = n(), project_count = sum(project_count)) %>% filter(count == 1 | project_count == 0) %>% mutate(country_year = paste(iso3, year, sep ="_")))$country_year
ginis_gdl_within <- olm_GDL_allYears3 %>% mutate(country_year = paste(iso3, year, sep ="_")) %>%
filter(country_year %!in% deleter,
capital == 0) %>%
group_by(iso3, year) %>% summarise(value = gini(project_count)) %>% mutate(geography = "GDL", key = "within")
ginis_gdl_within <- data.frame(ginis_gdl_within)
gini_df_within <- rbind(ginis_oecd_within, ginis_gdl_within)
gini_df_within$year <- as.Date(gini_df_within$year, format = "%Y-%m-%d")
gini_df_within <- gini_df_within %>% mutate(geography = ifelse(geography == "GDL", "Within-country Gini coefficient Global South (non-capital regions)", "Within-country Gini coefficient OECD+ (non-capital regions)"),
geography = factor(geography, levels = c("Within-country Gini coefficient OECD+ (non-capital regions)", "Within-country Gini coefficient Global South (non-capital regions)")))
ginis_oecd_within_means <- ginis_oecd_within %>% group_by(year) %>% summarise(value = mean(value, na.rm = T))
ginis_gdl_within_means <- ginis_gdl_within %>% group_by(year) %>% summarise(value = mean(value, na.rm = T))
# Within regression
regression_df3 <- data.frame(value = c(0.33,0.93),
geography = c("Within-country Gini coefficient Global South (non-capital regions)", "Within-country Gini coefficient OECD+ (non-capital regions)"), year = as.Date("2019-01-01"),
coefficient = c(
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_gdl_within_means))$coefficients[,1][2],3), "", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_oecd_within_means))$coefficients[,1][2],3), "", sep = "")))
# Within Gini Plot
plot2 <- gini_df_within %>%
ggplot(aes(x = year, y = value, colour = geography, group = year)) +
geom_boxplot(outlier.alpha = 0, coef = 1) +
facet_wrap(~geography) +
geom_smooth(aes(group = geography), method = "lm", se = F) +
geom_text(data = regression_df3, aes(label = coefficient)) +
theme_bw() +
scale_colour_manual(values = c(brewer.pal(11,"RdBu")[10],
brewer.pal(11,"RdBu")[2])) +
labs(colour = "", x = "", y = "")+#, caption = "(***p < 0.001, **p < 0.01, *p < 0.05, .p < 0.1)") +
theme(panel.grid.minor = element_blank(), plot.caption = element_text(size = 10),
panel.grid.major.x = element_blank(), legend.position = "none",
text = element_text(size = 14))
#%#%#%#%#%#%#%#%#%
# City share plot
#%#%#%#%#%#%#%#%#%
# Calculate share of capital in GDL OECD+
city_share_GDL <- olm_GDL_allYears3 %>% group_by(capital = factor(capital), year) %>% summarise(total_count = sum(project_count)) %>% group_by(year) %>%
mutate(OLM.PRJ.CNT = total_count / sum(total_count)) %>% mutate(key = "Share of projects in capital regions (Global South)") %>% filter(capital == 1)
city_share_OECD <- OECD_stats_allYears3 %>% group_by(capital = factor(capital), year) %>% summarise(total_count = sum(OLM.PRJ.CNT)) %>% group_by(year) %>%
mutate(OLM.PRJ.CNT = total_count / sum(total_count))%>% mutate(key = "Share of projects in capital regions (OECD+)") %>% filter(capital == 1)
city_share_OECD$year <- as.Date(city_share_OECD$year, format = "%Y-%m-%d")
city_share_total <- rbind(city_share_GDL, city_share_OECD)
# Regression for plot (D)
regression_df4 <- data.frame(key = c("Share of projects in capital regions (Global South)", "Share of projects in capital regions (OECD+)"),
OLM.PRJ.CNT = c(0.42,0.2),
year = as.Date("2019-01-01"),
coefficient = c(
paste("b = ", round(summary(lm(OLM.PRJ.CNT ~ time(OLM.PRJ.CNT), data = city_share_GDL))$coefficients[,1][2],3), "", sep = ""),
paste("b = ", round(summary(lm(OLM.PRJ.CNT ~ time(OLM.PRJ.CNT), data = city_share_OECD))$coefficients[,1][2],3), "***", sep = "")))
plot3 <- city_share_total %>%
ggplot(aes(x = as.Date(year, format = "%Y-%m-%d"), y = OLM.PRJ.CNT, colour = factor(key))) +
geom_point(shape = 21) +
geom_line(lty = 3) +
facet_wrap(~factor(key), ncol = 2, scales = "free") + geom_smooth(method = "lm", se = F) +
geom_text(data = regression_df4, aes(label = coefficient)) +
theme_bw() +
scale_colour_manual(values = c(brewer.pal(11,"RdBu")[2],
brewer.pal(11,"RdBu")[10])) +
labs(colour = "", x = "", y = "", caption = "(***p < 0.001, **p < 0.01, *p < 0.05, .p < 0.1)") +
theme(panel.grid.minor = element_blank(), plot.caption = element_text(size = 10),
panel.grid.major.x = element_blank(), legend.position = "none",
text = element_text(size = 14),
strip.text = element_text(size = 12)
)
ggarrange(
ggarrange(plot1a, plot1b, ncol = 2, labels = c("A", "B"), common.legend = T, legend = "bottom"),
plot2, plot3, # First row with line plot
# Second row with box and dot plots
nrow = 3,
labels = c("","C", "D")
)
#%#%#%#%#%#%#%#%#%#%#%
|
/code/SFig_S18_Gini.R
|
no_license
|
Braesemann/remotework
|
R
| false | false | 18,031 |
r
|
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
# EGOLM
# 2021-04-14
# Gini over time
# Fabian Braesemann
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%#%
#%#%#%#%#%#%#%#%#%#%
# Loading packages
#%#%#%#%#%#%#%#%#%#%
library(tidyverse) # numerous data wrangling packages
library(data.table) # quick data loading
library(lubridate) # Working with dates
library(RColorBrewer) # Fancy colours
library(ggrepel) # Fancy labels in plot
library(scales) # Log-transformed axis labels
library(ggpubr) # Arranging several ggplots
library(ggrepel) # Fancy geom-labels
library(stargazer) # LaTeX regression tables
'%!in%' <- function(x,y)!('%in%'(x,y)) # opposite of %in% command
options(stringsAsFactors = FALSE)
library("reldist") # Package to compute GINI
library(countrycode) # Package to iso-code country names
#%#%#%#%#%#%#%#%#%
# Prepare WB data
#%#%#%#%#%#%#%#%#%
olm_country <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/olm_data/OLM_Country_Data.csv")
wb_country <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/official_statistics/county_wb_data.csv")
wb_country <- wb_country %>% dplyr::select(-long, -country)
wb_country$year <- as.Date(as.character(wb_country$year), format = "%Y")
wb_country$year <- floor_date(wb_country$year, unit = "year")
wb_country <- wb_country %>% filter(short == "WB.POP.TOTL") %>% rename(WB.POP.TOTL = value) %>% select(-short)
olm_country$year <- as.Date(as.character(olm_country$year), format = "%d/%m/%Y")
olm_country <- olm_country %>% select(-country)
olm_country <- merge(olm_country, wb_country, by.x = c("ISO_Code", "year"), by.y = c("iso3", "year"))
olm_country$country <- countrycode(olm_country$ISO_Code, origin = "iso3c", destination = "country.name")
olm_country$region <- countrycode(olm_country$ISO_Code, origin = "iso3c", destination = "region")
olm_country <- olm_country %>% mutate(OLM.PRJ.CNT.PC = project_count / WB.POP.TOTL * 1000000)
olm_country_allYears <- olm_country
#%#%#%#%#%#%#%#%#%
# Prepare OECD data
#%#%#%#%#%#%#%#%#%
OECD_stats <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/official_statistics/region_oecd_data.csv")
OECD_stats$iso2 <- sapply(OECD_stats$region_id, function(x) substr(x,1,2)[[1]][1])
OECD_stats$iso2 <- ifelse(OECD_stats$iso2 == "EL", "GR",
ifelse(OECD_stats$iso2 == "UK", "GB", OECD_stats$iso2))
OECD_stats$iso3 <- countrycode(OECD_stats$iso2, origin = "iso2c", destination = "iso3c")
OECD_stats <- OECD_stats %>% filter(iso3 %!in% c("TUN", "PER"))
OECD_stats$year <- as.Date(as.character(OECD_stats$year), format = "%Y")
OECD_stats$year <- floor_date(OECD_stats$year, unit = "year")
OECD_stats %>% filter(long == "Region holds country capital") %>% summarise(sum(OLM.PRJ.CNT))
OECD_capital <- OECD_stats %>% filter(long == "Region holds country capital", year == as.Date("2020-01-01")) %>% dplyr::select(region_id, capital = value)
OECD_stats2 <- merge(OECD_stats, OECD_capital, by = "region_id")
OECD_stats_allYears <- OECD_stats2 %>% filter(long == "Population count") %>% dplyr::select(year, iso3, region_id, region, OLM.PRJ.CNT, OLM.WG.MN, OCD.PPL.CNT = value, capital)
OECD_stats_allYears <- OECD_stats_allYears %>% dplyr::select(region_id, year, OLM.PRJ.CNT) #%>% spread(gdlCode, year, project_count, fil = 0)
OECD_stats_allYears <- OECD_stats_allYears %>% group_by(region_id, year) %>% filter(row_number(OLM.PRJ.CNT) == 1)
OECD_stats_allYears2 <- pivot_wider(OECD_stats_allYears, names_from = year, values_from = OLM.PRJ.CNT, values_fill = 0)
OECD_stats_allYears2 <- OECD_stats_allYears2 %>% gather(year, OLM.PRJ.CNT, - region_id)
OECD_stats_allYears2$iso2 <- sapply(OECD_stats_allYears2$region_id, function(x) substr(x,1,2)[[1]][1])
OECD_stats_allYears2$iso2 <- ifelse(OECD_stats_allYears2$iso2 == "EL", "GR",
ifelse(OECD_stats_allYears2$iso2 == "UK", "GB", OECD_stats_allYears2$iso2))
OECD_stats_allYears2$iso3 <- countrycode(OECD_stats_allYears2$iso2, origin = "iso2c", destination = "iso3c")
OECD_stats_allYears2$iso3 <- ifelse(OECD_stats_allYears2$iso3 == "MNE", "MEX", OECD_stats_allYears2$iso3)
OECD_stats_allYears3 <- merge(OECD_stats_allYears2, OECD_capital, by = "region_id")
OECD_stats_allYears3 <- OECD_stats_allYears3 %>% group_by(region_id, year) %>% filter(row_number(OLM.PRJ.CNT) == 1)
#%#%#%#%#%#%#%#%#%
# Prepare GDL data
#%#%#%#%#%#%#%#%#%
olm_GDL <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/olm_data/OLM_GDL_Data.csv")
olm_GDL$year <- as.Date(olm_GDL$year, format = "%d/%m/%Y")
olm_GDL <- olm_GDL %>% filter(Country %!in% c("Brazil", "Costa Rica", "China", "India", "Indonesia", "Mexico", "South Africa", "Colombia"))
olm_GDL <- olm_GDL %>% mutate(MEX = str_detect(gdlCode, "MEX")) %>% filter(MEX == 0) %>% dplyr::select(-MEX)
olm_GDL <- olm_GDL %>% dplyr::select(year, region_id = gdlCode, mean_wage, region = Region, project_count)
GDL_stats <- read.csv("https://raw.githubusercontent.com/Braesemann/EGOLM/main/data/official_statistics/region_gdl_data.csv")
GDL_stats$year <- as.Date(as.character(GDL_stats$year), format = "%Y")
GDL_stats$year <- floor_date(GDL_stats$year, unit = "year")
GDL_stats <- GDL_stats %>% filter(short %in% c("GDL.PPL.CNT", "GDL.CPT.YES")) %>%
dplyr::select(region_id, iso3, year, short, value)
GDL_stats <- GDL_stats %>% filter(iso3 %!in% c("BRA", "CRI", "CHN", "IND", "IDN", "MEX", "ZAF", "COL"))
GDL_stats <- GDL_stats %>% spread(short, value)
GDL_stats <- merge(olm_GDL, GDL_stats, by = c("region_id", "year"))
GDL_stats$GN <- "Global South"
GDL_stats <- GDL_stats %>% dplyr::select(GN, iso3, year, region_id, region, project_count, mean_wage, population = GDL.PPL.CNT, capital = GDL.CPT.YES)
GDL_capital <- GDL_stats %>% dplyr::select(year, region_id, capital)
olm_GDL_allYears <- GDL_stats %>% dplyr::select(region_id, year, project_count)
olm_GDL_allYears <- olm_GDL_allYears %>% group_by(region_id, year) %>% filter(row_number(project_count) == 1)
olm_GDL_allYears2 <- pivot_wider(olm_GDL_allYears, names_from = year, values_from = project_count, values_fill = 0)
olm_GDL_allYears2 <- olm_GDL_allYears2 %>% gather(year, project_count, - region_id)
olm_GDL_allYears2$iso3 <- sapply(olm_GDL_allYears2$region_id, function(x) substr(x,1,3)[[1]][1])
olm_GDL_allYears2 <- olm_GDL_allYears2 %>% mutate(year = as.Date(year, format = "%Y-%m-%d"))
olm_GDL_allYears3 <- merge(olm_GDL_allYears2, GDL_capital, by = c("region_id", "year"), all.x = T)
olm_GDL_allYears3 <- olm_GDL_allYears3 %>% mutate(capital = ifelse(is.na(capital), 0, capital))
#%#%#%#%#%#%#%#%#%
# Between Gini
#%#%#%#%#%#%#%#%#%
# Country
ginis_wb_between <- olm_country_allYears %>% group_by(year) %>% summarise(value = gini(project_count)) %>% mutate(geography = "Global (countries)", key = "Gini coefficient (between countries)")
# OECD
ginis_oecd_between <- OECD_stats_allYears3 %>% group_by(year) %>% summarise(value = gini(OLM.PRJ.CNT)) %>% mutate(geography = "OECD+ (regions)", key = "Gini coefficient (between countries)")
# Check whether all OECD countries are in the data set
gini_oecd_countries <- (OECD_stats_allYears3 %>% group_by(iso3) %>% summarise(count = sum(OLM.PRJ.CNT)))$iso3
# Remove OECD+ countries from GDL list
olm_GDL_allYears3 <- olm_GDL_allYears3 %>% filter(iso3 %!in% c("BRA", "CHN", "COL", "CRI", "IDN", "IND", "MNE", "ZAF"))
ginis_gdl_between <- olm_GDL_allYears3 %>% group_by(year) %>% summarise(value = gini(project_count)) %>% mutate(geography = "GDL", key = "Gini coefficient (between countries)")
# Check whether all GDL countries are in the data set
gini_gdl_countries <- (olm_GDL_allYears3 %>% group_by(iso3) %>% summarise(count = n()))$iso3
gini_df_between <- rbind(ginis_wb_between, ginis_oecd_between, ginis_gdl_between)
gini_df_between <- gini_df_between %>% mutate(geography = ifelse(geography == "GDL", "Global South (regions)",geography),
geography = factor(geography, levels = c("Global (countries)", "OECD+ (regions)", "Global South (regions)")))
# Plot (A) Regression
regression_df <- data.frame(key = "Gini coefficient (between countries)", value = c(0.92,0.76,0.84),
geography = c("Global South (regions)", "OECD+ (regions)", "Global (countries)"), year = as.Date("2019-01-01"),
coefficient = c(
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_gdl_between))$coefficients[,1][2],3), "**", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_oecd_between))$coefficients[,1][2],3), "**", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_wb_between))$coefficients[,1][2],3), "***", sep = "")))
participation_oecd <- OECD_stats_allYears2 %>% group_by(year) %>% filter(OLM.PRJ.CNT > 0) %>% summarise(value = n()) %>% mutate(geography = "OECD+ (regions)", key = "Participating countries / regions")
participation_gdl <- olm_GDL_allYears2 %>% group_by(year) %>% filter(project_count > 0) %>% summarise(value = n()) %>% mutate(geography = "Global South (regions)", key = "Participating countries / regions")
participation_wb <- olm_country_allYears %>% group_by(year) %>% filter(project_count > 0) %>% summarise(value = n()) %>% mutate(geography = "Global (countries)", key = "Participating countries / regions")
gini_df_between <- rbind(gini_df_between, participation_oecd, participation_gdl, participation_wb)
# Plot (B) Regression
regression_df2 <- data.frame(key = "Participating countries / regions", value = c(430,570,220),
geography = c("Global South (regions)", "OECD+ (regions)", "Global (countries)"), year = c(as.Date("2019-01-01"), as.Date("2014-06-01"), as.Date("2019-01-01")),
coefficient = c(
paste("b = ", round(summary(lm(value ~ time(value), data = participation_gdl))$coefficients[,1][2],0), ".", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = participation_oecd))$coefficients[,1][2],0), "", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = participation_wb))$coefficients[,1][2],0), ".", sep = "")))
gini_df_between$key <- factor(gini_df_between$key, levels = c("Participating countries / regions",
"Gini coefficient (between countries)"))
# Plot A
plot1a <- gini_df_between %>% filter(key == "Participating countries / regions") %>%
ggplot(aes(x = year, y = value, colour = geography)) +
facet_wrap(~key, scales = "free_y") +
geom_line(lty = 3, key_glyph = "rect") +
geom_point(shape = 21) +
geom_text(data = regression_df2, aes(label = coefficient)) +
scale_colour_manual(values = c("#ac7921",brewer.pal(11,"RdBu")[10],
brewer.pal(11,"RdBu")[2])) +
geom_smooth(method = "lm", se = F, lwd = 0.5) +
theme_bw() +
labs(colour = "", x = "", y = "") +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(), legend.position = "bottom",
text = element_text(size = 14))
# Plot B
plot1b <- gini_df_between %>% filter(key == "Gini coefficient (between countries)") %>%
ggplot(aes(x = year, y = value, colour = geography)) +
facet_wrap(~key, scales = "free_y") +
geom_line(lty = 3, key_glyph = "rect") +
geom_point(shape = 21) +
geom_text(data = regression_df, aes(label = coefficient)) +
scale_colour_manual(values = c("#ac7921",brewer.pal(11,"RdBu")[10],
brewer.pal(11,"RdBu")[2])) +
geom_smooth(method = "lm", se = F, lwd = 0.5) +
theme_bw() +
labs(colour = "", x = "", y = "") +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(), legend.position = "bottom",
text = element_text(size = 14))
#%#%#%#%#%#%#%#%#%
# Within Gini
#%#%#%#%#%#%#%#%#%
# OECD
ginis_oecd_within <- OECD_stats_allYears3 %>% filter(capital == 0) %>% group_by(iso3, year) %>% summarise(value = gini(OLM.PRJ.CNT)) %>% mutate(geography = "OECD+ (regions)", key = "within")
ginis_oecd_within$year <- as.Date(ginis_oecd_within$year, format = "%Y-%m-%d")
ginis_oecd_within <- data.frame(ginis_oecd_within)
# GDL
# We have to remove those country-years without projects as they do not allow us to compute the GINI (without variation, no GINI)
deleter <- (olm_GDL_allYears3 %>% group_by(iso3, year) %>% summarise(count = n(), project_count = sum(project_count)) %>% filter(count == 1 | project_count == 0) %>% mutate(country_year = paste(iso3, year, sep ="_")))$country_year
ginis_gdl_within <- olm_GDL_allYears3 %>% mutate(country_year = paste(iso3, year, sep ="_")) %>%
filter(country_year %!in% deleter,
capital == 0) %>%
group_by(iso3, year) %>% summarise(value = gini(project_count)) %>% mutate(geography = "GDL", key = "within")
ginis_gdl_within <- data.frame(ginis_gdl_within)
gini_df_within <- rbind(ginis_oecd_within, ginis_gdl_within)
gini_df_within$year <- as.Date(gini_df_within$year, format = "%Y-%m-%d")
gini_df_within <- gini_df_within %>% mutate(geography = ifelse(geography == "GDL", "Within-country Gini coefficient Global South (non-capital regions)", "Within-country Gini coefficient OECD+ (non-capital regions)"),
geography = factor(geography, levels = c("Within-country Gini coefficient OECD+ (non-capital regions)", "Within-country Gini coefficient Global South (non-capital regions)")))
ginis_oecd_within_means <- ginis_oecd_within %>% group_by(year) %>% summarise(value = mean(value, na.rm = T))
ginis_gdl_within_means <- ginis_gdl_within %>% group_by(year) %>% summarise(value = mean(value, na.rm = T))
# Within regression
regression_df3 <- data.frame(value = c(0.33,0.93),
geography = c("Within-country Gini coefficient Global South (non-capital regions)", "Within-country Gini coefficient OECD+ (non-capital regions)"), year = as.Date("2019-01-01"),
coefficient = c(
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_gdl_within_means))$coefficients[,1][2],3), "", sep = ""),
paste("b = ", round(summary(lm(value ~ time(value), data = ginis_oecd_within_means))$coefficients[,1][2],3), "", sep = "")))
# Within Gini Plot
plot2 <- gini_df_within %>%
ggplot(aes(x = year, y = value, colour = geography, group = year)) +
geom_boxplot(outlier.alpha = 0, coef = 1) +
facet_wrap(~geography) +
geom_smooth(aes(group = geography), method = "lm", se = F) +
geom_text(data = regression_df3, aes(label = coefficient)) +
theme_bw() +
scale_colour_manual(values = c(brewer.pal(11,"RdBu")[10],
brewer.pal(11,"RdBu")[2])) +
labs(colour = "", x = "", y = "")+#, caption = "(***p < 0.001, **p < 0.01, *p < 0.05, .p < 0.1)") +
theme(panel.grid.minor = element_blank(), plot.caption = element_text(size = 10),
panel.grid.major.x = element_blank(), legend.position = "none",
text = element_text(size = 14))
#%#%#%#%#%#%#%#%#%
# City share plot
#%#%#%#%#%#%#%#%#%
# Calculate share of capital in GDL OECD+
city_share_GDL <- olm_GDL_allYears3 %>% group_by(capital = factor(capital), year) %>% summarise(total_count = sum(project_count)) %>% group_by(year) %>%
mutate(OLM.PRJ.CNT = total_count / sum(total_count)) %>% mutate(key = "Share of projects in capital regions (Global South)") %>% filter(capital == 1)
city_share_OECD <- OECD_stats_allYears3 %>% group_by(capital = factor(capital), year) %>% summarise(total_count = sum(OLM.PRJ.CNT)) %>% group_by(year) %>%
mutate(OLM.PRJ.CNT = total_count / sum(total_count))%>% mutate(key = "Share of projects in capital regions (OECD+)") %>% filter(capital == 1)
city_share_OECD$year <- as.Date(city_share_OECD$year, format = "%Y-%m-%d")
city_share_total <- rbind(city_share_GDL, city_share_OECD)
# Regression for plot (D)
regression_df4 <- data.frame(key = c("Share of projects in capital regions (Global South)", "Share of projects in capital regions (OECD+)"),
OLM.PRJ.CNT = c(0.42,0.2),
year = as.Date("2019-01-01"),
coefficient = c(
paste("b = ", round(summary(lm(OLM.PRJ.CNT ~ time(OLM.PRJ.CNT), data = city_share_GDL))$coefficients[,1][2],3), "", sep = ""),
paste("b = ", round(summary(lm(OLM.PRJ.CNT ~ time(OLM.PRJ.CNT), data = city_share_OECD))$coefficients[,1][2],3), "***", sep = "")))
plot3 <- city_share_total %>%
ggplot(aes(x = as.Date(year, format = "%Y-%m-%d"), y = OLM.PRJ.CNT, colour = factor(key))) +
geom_point(shape = 21) +
geom_line(lty = 3) +
facet_wrap(~factor(key), ncol = 2, scales = "free") + geom_smooth(method = "lm", se = F) +
geom_text(data = regression_df4, aes(label = coefficient)) +
theme_bw() +
scale_colour_manual(values = c(brewer.pal(11,"RdBu")[2],
brewer.pal(11,"RdBu")[10])) +
labs(colour = "", x = "", y = "", caption = "(***p < 0.001, **p < 0.01, *p < 0.05, .p < 0.1)") +
theme(panel.grid.minor = element_blank(), plot.caption = element_text(size = 10),
panel.grid.major.x = element_blank(), legend.position = "none",
text = element_text(size = 14),
strip.text = element_text(size = 12)
)
ggarrange(
ggarrange(plot1a, plot1b, ncol = 2, labels = c("A", "B"), common.legend = T, legend = "bottom"),
plot2, plot3, # First row with line plot
# Second row with box and dot plots
nrow = 3,
labels = c("","C", "D")
)
#%#%#%#%#%#%#%#%#%#%#%
|
score <- as.numeric(readline('score: '))
if (score >= 90){
print('A')
}else if (score >= 80){
print('B')
}else{
print('C')
}
|
/교수님_답안/04#12-1.R
|
no_license
|
GoDK36/R
|
R
| false | false | 130 |
r
|
score <- as.numeric(readline('score: '))
if (score >= 90){
print('A')
}else if (score >= 80){
print('B')
}else{
print('C')
}
|
# Return the sum of squares of the two largest of a, b, c (SICP exercise 1.3).
# The branches are chained with else-if so exactly one value is returned;
# ">=" handles ties.
sumtwobiggest <- function(a, b, c){
  if(a >= c & b >= c){          # c is the smallest
    (a * a) + (b * b)
  } else if(a >= b & c >= b){   # b is the smallest
    (a * a) + (c * c)
  } else {                      # a is the smallest
    (b * b) + (c * c)
  }
}
sumtwobiggest(2, 4, 8)
|
/Chapter 1/1.3.R
|
no_license
|
slowpokemc/SICP
|
R
| false | false | 201 |
r
|
# Return the sum of squares of the two largest of a, b, c (SICP exercise 1.3).
# The branches are chained with else-if so exactly one value is returned;
# ">=" handles ties.
sumtwobiggest <- function(a, b, c){
  if(a >= c & b >= c){          # c is the smallest
    (a * a) + (b * b)
  } else if(a >= b & c >= b){   # b is the smallest
    (a * a) + (c * c)
  } else {                      # a is the smallest
    (b * b) + (c * c)
  }
}
sumtwobiggest(2, 4, 8)
|
# Removing objects while keeping functions
remove_objects_keep_functions <- function() {
ENV <- globalenv()
object_list <- ls(envir = ENV)
which_function_to_keep <- numeric()
for(i in 1:length(functions_to_keep)) {
which_function_to_keep <- c(which_function_to_keep, which(object_list == functions_to_keep[i]))
}
rm(list = object_list[-which_function_to_keep], envir = ENV)
}
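# Example usage (illustrative sketch): the function reads a character vector
# `functions_to_keep` from the global environment, so define it before calling.
# The names below are placeholders, not part of the original script:
#   functions_to_keep <- c("remove_objects_keep_functions", "my_other_function")
#   temp_df <- data.frame(x = 1:3)        # throwaway object
#   remove_objects_keep_functions()       # temp_df is removed, listed functions remain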
|
/Script/remove_objects_keep_functions.r
|
permissive
|
david-beauchesne/Interaction_catalog
|
R
| false | false | 397 |
r
|
# Removing objects while keeping functions
remove_objects_keep_functions <- function() {
ENV <- globalenv()
object_list <- ls(envir = ENV)
which_function_to_keep <- numeric()
for(i in 1:length(functions_to_keep)) {
which_function_to_keep <- c(which_function_to_keep, which(object_list == functions_to_keep[i]))
}
rm(list = object_list[-which_function_to_keep], envir = ENV)
}
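# Example usage (illustrative sketch): the function reads a character vector
# `functions_to_keep` from the global environment, so define it before calling.
# The names below are placeholders, not part of the original script:
#   functions_to_keep <- c("remove_objects_keep_functions", "my_other_function")
#   temp_df <- data.frame(x = 1:3)        # throwaway object
#   remove_objects_keep_functions()       # temp_df is removed, listed functions remain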
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{solveRRBLUP}
\alias{solveRRBLUP}
\title{Solve RR-BLUP}
\usage{
solveRRBLUP(y, X, M)
}
\arguments{
\item{y}{a matrix with n rows and 1 column}
\item{X}{a matrix with n rows and x columns}
\item{M}{a matrix with n rows and m columns}
}
\description{
Solves a univariate mixed model of form \eqn{y=X\beta+Mu+e}
}
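\examples{
# Illustrative sketch with simulated inputs sized to match the argument
# descriptions above; the dimensions and effect sizes are arbitrary choices,
# not values taken from the package documentation.
n <- 100                                         # records
m <- 20                                          # markers
M <- matrix(sample(0:2, n * m, replace = TRUE),  # marker dosage matrix
            nrow = n, ncol = m) * 1.0
y <- matrix(M %*% rnorm(m, sd = 0.2) + rnorm(n), ncol = 1)
X <- matrix(1, nrow = n, ncol = 1)               # intercept-only fixed effects
ans <- solveRRBLUP(y, X, M)
}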
|
/man/solveRRBLUP.Rd
|
no_license
|
cran/AlphaSimR
|
R
| false | true | 428 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{solveRRBLUP}
\alias{solveRRBLUP}
\title{Solve RR-BLUP}
\usage{
solveRRBLUP(y, X, M)
}
\arguments{
\item{y}{a matrix with n rows and 1 column}
\item{X}{a matrix with n rows and x columns}
\item{M}{a matrix with n rows and m columns}
}
\description{
Solves a univariate mixed model of form \eqn{y=X\beta+Mu+e}
}
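\examples{
# Illustrative sketch with simulated inputs sized to match the argument
# descriptions above; the dimensions and effect sizes are arbitrary choices,
# not values taken from the package documentation.
n <- 100                                         # records
m <- 20                                          # markers
M <- matrix(sample(0:2, n * m, replace = TRUE),  # marker dosage matrix
            nrow = n, ncol = m) * 1.0
y <- matrix(M %*% rnorm(m, sd = 0.2) + rnorm(n), ncol = 1)
X <- matrix(1, nrow = n, ncol = 1)               # intercept-only fixed effects
ans <- solveRRBLUP(y, X, M)
}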
|
predictors_enfa <- as(worldclim.crop,'SpatialPixelsDataFrame')
str(predictors_enfa)
#predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SST), ]
#predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$CHL), ]
pr <- slot(count.points(Species, predictors_enfa), "data")[,1]
pc <- dudi.pca(slot(predictors_enfa, "data"), scannf=FALSE)
enfa <- enfa(pc,pr, scannf = FALSE)
pred <- predict.enfa(enfa, predictors_enfa)
####
# enfa_data <- data2enfa(predictors_enfa, Species$geometry)
# pc <- dudi.pca(enfa_data$tab, scannf = FALSE)
# enfa <- enfa(pc, enfa_data$pr, scannf = FALSE)
# ##
# mod1 <- enfa(x = worldclim.crop, s.dat = DataENFA_F, field = "Faidherbia.albida")
###############"
Base_Faidherbia_Z<-Base_Espece_df[,c("xcoord","ycoord","Faidherbia_albida")]
names(Base_Faidherbia_Z)<-c("lon","lat","Faidherbia.albida")
FaidherbiaPr<- Base_Faidherbia_Z %>%
filter(Faidherbia.albida ==1)
#Transform data into a SpatialPointsDataFrame
sp::coordinates(FaidherbiaPr) <-~lon+lat
sp::proj4string(FaidherbiaPr) <-"+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
predictors_enfa <- as(SoilGrid.crop,'SpatialPixelsDataFrame')
str(predictors_enfa)
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$AETI), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SINT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SOS), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$NBWP), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$CLYPPT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$ORCDRC), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$PHIHOX), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SLTPPT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$NTO), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SNDPPT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$P), ]
#proj4string(predictors_enfa)<-proj4string(Base_Faidherbia_Z)
pr <- slot(count.points(FaidherbiaPr, predictors_enfa), "data")[,1]
tab<-slot(predictors_enfa, "data")
pc <- dudi.pca(tab, scannf=F)
scatterniche(tab, pr,pts=TRUE)
enfa <- adehabitatHS::enfa(pc,pr, scannf = TRUE)
(enfa1 <- adehabitatHS::enfa(pc, pr,
scannf = FALSE))
library(adehabitatHS)
adehabitatHS::scatter(enfa1)
(renfa <- randtest(enfa1))
plot(renfa)
extent(SoilGrid.crop)
# xmin : -16.53797
# xmax : -16.35464
# ymin : 14.45374
# ymax : 14.63499
extent(FaidherbiaPr)
# xmin : -16.53817
# xmax : -16.35454
# ymin : 14.45461
# ymax : 14.63513
extent(Species)
FaidherbiaPr@bbox <-as.matrix(extent(SoilGrid.crop))
glc <- GLcenfa(x = SoilGrid.crop)
FaidherbiaPr@data$Faidherbia.albida<-as.numeric(FaidherbiaPr@data$Faidherbia.albida)
mod.enfa <- enfa(x = SoilGrid.crop, s.dat = FaidherbiaPr, field = "Faidherbia.albida")
scatter(x = mod.enfa, y = glc)
mod.cnfa <- cnfa(x = SoilGrid.crop, s.dat = FaidherbiaPr, field = "Faidherbia.albida")
predictorsbio_enfa<-worldclim.crop
proj4string(predictorsbio_enfa)<-proj4string(Base_Faidherbia_Z)
enfa(x = predictors_enfa, s.dat = Base_Faidherbia_Z, field = "Faidherbia albida")
##########"
#(niche(pc,pr,scannf = FALSE))
image(predictors_enfa)
histniche()
#FANTER
gn<-gnesfa(pc,Focus = pr)
scatterniche(gn$li,pr)
s.arrow(gn$co)
s.arrow(gn$cor)
gn2<-gnesfa(pc,Reference = pr)
scatterniche(gn2$li,pr,side = "bottom")
s.arrow(gn2$co)
s.arrow(gn2$cor)
(mad<-madifa(pc,pr,scannf = F))
enfa(pc,pr,scannf=F)
#########
data(chamois)
cpi <- slot(count.points(chamois$locs, chamois$map),"data")[,1]
chamois$map
tab <- slot(chamois$map, "data")
## we focus on the distance to ecotone and on the slope,
## after centring and scaling (with the help of a PCA)
scatterniche(dudi.pca(tab[,2:3], scannf=FALSE)$tab, cpi)
scatterniche(dudi.pca(tab[,2:3], scannf=FALSE)$tab, cpi, pts=TRUE)
|
/R/enfa_essai.R
|
no_license
|
Abson-dev/cirad
|
R
| false | false | 3,865 |
r
|
predictors_enfa <- as(worldclim.crop,'SpatialPixelsDataFrame')
str(predictors_enfa)
#predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SST), ]
#predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$CHL), ]
pr <- slot(count.points(Species, predictors_enfa), "data")[,1]
pc <- dudi.pca(slot(predictors_enfa, "data"), scannf=FALSE)
enfa <- enfa(pc,pr, scannf = FALSE)
pred <- predict.enfa(enfa, predictors_enfa)
####
# enfa_data <- data2enfa(predictors_enfa, Species$geometry)
# pc <- dudi.pca(enfa_data$tab, scannf = FALSE)
# enfa <- enfa(pc, enfa_data$pr, scannf = FALSE)
# ##
# mod1 <- enfa(x = worldclim.crop, s.dat = DataENFA_F, field = "Faidherbia.albida")
###############"
Base_Faidherbia_Z<-Base_Espece_df[,c("xcoord","ycoord","Faidherbia_albida")]
names(Base_Faidherbia_Z)<-c("lon","lat","Faidherbia.albida")
FaidherbiaPr<- Base_Faidherbia_Z %>%
filter(Faidherbia.albida ==1)
#Transform data into a SpatialPointsDataFrame
sp::coordinates(FaidherbiaPr) <-~lon+lat
sp::proj4string(FaidherbiaPr) <-"+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
predictors_enfa <- as(SoilGrid.crop,'SpatialPixelsDataFrame')
str(predictors_enfa)
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$AETI), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SINT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SOS), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$NBWP), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$CLYPPT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$ORCDRC), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$PHIHOX), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SLTPPT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$NTO), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$SNDPPT), ]
predictors_enfa <- predictors_enfa[!is.na(predictors_enfa$P), ]
#proj4string(predictors_enfa)<-proj4string(Base_Faidherbia_Z)
pr <- slot(count.points(FaidherbiaPr, predictors_enfa), "data")[,1]
tab<-slot(predictors_enfa, "data")
pc <- dudi.pca(tab, scannf=F)
scatterniche(tab, pr,pts=TRUE)
enfa <- adehabitatHS::enfa(pc,pr, scannf = TRUE)
(enfa1 <- adehabitatHS::enfa(pc, pr,
scannf = FALSE))
library(adehabitatHS)
adehabitatHS::scatter(enfa1)
(renfa <- randtest(enfa1))
plot(renfa)
extent(SoilGrid.crop)
# xmin : -16.53797
# xmax : -16.35464
# ymin : 14.45374
# ymax : 14.63499
extent(FaidherbiaPr)
# xmin : -16.53817
# xmax : -16.35454
# ymin : 14.45461
# ymax : 14.63513
extent(Species)
FaidherbiaPr@bbox <-as.matrix(extent(SoilGrid.crop))
glc <- GLcenfa(x = SoilGrid.crop)
FaidherbiaPr@data$Faidherbia.albida<-as.numeric(FaidherbiaPr@data$Faidherbia.albida)
mod.enfa <- enfa(x = SoilGrid.crop, s.dat = FaidherbiaPr, field = "Faidherbia.albida")
scatter(x = mod.enfa, y = glc)
mod.cnfa <- cnfa(x = SoilGrid.crop, s.dat = FaidherbiaPr, field = "Faidherbia.albida")
predictorsbio_enfa<-worldclim.crop
proj4string(predictorsbio_enfa)<-proj4string(Base_Faidherbia_Z)
enfa(x = predictors_enfa, s.dat = Base_Faidherbia_Z, field = "Faidherbia albida")
##########"
#(niche(pc,pr,scannf = FALSE))
image(predictors_enfa)
histniche()
#FANTER
gn<-gnesfa(pc,Focus = pr)
scatterniche(gn$li,pr)
s.arrow(gn$co)
s.arrow(gn$cor)
gn2<-gnesfa(pc,Reference = pr)
scatterniche(gn2$li,pr,side = "bottom")
s.arrow(gn2$co)
s.arrow(gn2$cor)
(mad<-madifa(pc,pr,scannf = F))
enfa(pc,pr,scannf=F)
#########
data(chamois)
cpi <- slot(count.points(chamois$locs, chamois$map),"data")[,1]
chamois$map
tab <- slot(chamois$map, "data")
## we focus on the distance to ecotone and on the slope,
## after centring and scaling (with the help of a PCA)
scatterniche(dudi.pca(tab[,2:3], scannf=FALSE)$tab, cpi)
scatterniche(dudi.pca(tab[,2:3], scannf=FALSE)$tab, cpi, pts=TRUE)
|
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
set_inverse <- function(inv_input) inv <<- inv_input
get_inverse <- function() inv
list(set = set, get = get,set_inverse = set_inverse,get_inverse = get_inverse)
}
cacheSolve <- function(x, ...) {
inv <- x$get_inverse()
if(!is.null(inv)) {
message("chache inverse")
return(inv)
}
data <- x$get()
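# Illustrative usage (the matrix below is an arbitrary example, not assignment data):
# the second call should hit the cache instead of recomputing the inverse.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)   # computes the inverse and stores it
cacheSolve(cm)   # prints the cache message and returns the stored inverse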
|
/cachematrix.R
|
no_license
|
cagankaya/ProgrammingAssignment2
|
R
| false | false | 418 |
r
|
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
set_inverse <- function(inv_input) inv <<- inv_input
get_inverse <- function() inv
list(set = set, get = get,set_inverse = set_inverse,get_inverse = get_inverse)
}
cacheSolve <- function(x, ...) {
inv <- x$get_inverse()
if(!is.null(inv)) {
message("chache inverse")
return(inv)
}
data <- x$get()
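# Illustrative usage (the matrix below is an arbitrary example, not assignment data):
# the second call should hit the cache instead of recomputing the inverse.
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)   # computes the inverse and stores it
cacheSolve(cm)   # prints the cache message and returns the stored inverse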
|
#
#
dds <- dds.gene
# collapse replicates (none in this case!)
ddsc <- collapseReplicates(dds, groupby=paste(dds$cell_line,dds$fraction,sep="."), run=dds$sample)
rldc <- rlog(ddsc,blind=FALSE) # non-blinded for display actual results (not QC)
alpha <- 0.10
contrast_list <- list(
c("cell_line", "Ctrl", "shS25"),
c("fraction","Mono","Poly")
)
contrast <- contrast_list[[1]]
# get the results (fold change)
res <- results(ddsc, alpha=alpha, contrast)
#----------------------------------------------------------------------
# volcano plot
#----------------------------------------------------------------------
#save to PDF
pdf(paste(figure_title,".pdf",sep=""), height=8, width=8)
# plot expr vs fold-change, color by pvalue
(lfcLim=c(min(res$log2FoldChange),max(res$log2FoldChange)))
(logpLim=c( min(-log10(res$pvalue),na.rm=T),max(-log10(res$pvalue),na.rm=T)))
(logpadjLim=c( min(-log10(res$padj),na.rm=T),max(-log10(res$padj),na.rm=T)))
cex<-0.3
# Add colored points: red if padj<0.05, orange if log2FC>1, green if both
with(subset(res, padj>=.05 & abs(log2FoldChange)<=1),
plot(log2FoldChange, -log10(pvalue), pch=20, col="black",cex=cex,
main=paste("Volcano plot ",paste0(contrast,collapse="."), ", alpha=",alpha,sep=""),
xlim=lfcLim, ylim=logpLim
)
)
with(subset(res, padj<.05 ), points(log2FoldChange, -log10(pvalue), pch=20,cex=cex, col="red"))
with(subset(res, abs(log2FoldChange)>1), points(log2FoldChange, -log10(pvalue), pch=20,cex=cex, col="orange"))
with(subset(res, padj<.05 & abs(log2FoldChange)>1), points(log2FoldChange, -log10(pvalue), pch=20,cex=1.5*cex, col="green"))
dev.off()
#----------------------------------------------------------------------
# scatter plot
#----------------------------------------------------------------------
# get the expression levels
geneData <- data.frame(x=assay(rldc)[,contrast[2]], y=assay(rldc)[,contrast[3]])
|
/rnaseq/deseq2/r4.contrasts.R
|
no_license
|
rusalkaguy/uab_ngs
|
R
| false | false | 1,884 |
r
|
#
#
dds <- dds.gene
# collapse replicates (none in this case!)
ddsc <- collapseReplicates(dds, groupby=paste(dds$cell_line,dds$fraction,sep="."), run=dds$sample)
rldc <- rlog(ddsc,blind=FALSE) # non-blinded for display actual results (not QC)
alpha <- 0.10
contrast_list <- list(
c("cell_line", "Ctrl", "shS25"),
c("fraction","Mono","Poly")
)
contrast <- contrast_list[[1]]
# get the results (fold change)
res <- results(ddsc, alpha=alpha, contrast)
#----------------------------------------------------------------------
# volcano plot
#----------------------------------------------------------------------
#save to PDF
pdf(paste(figure_title,".pdf",sep=""), height=8, width=8)
# plot expr vs fold-change, color by pvalue
(lfcLim=c(min(res$log2FoldChange),max(res$log2FoldChange)))
(logpLim=c( min(-log10(res$pvalue),na.rm=T),max(-log10(res$pvalue),na.rm=T)))
(logpadjLim=c( min(-log10(res$padj),na.rm=T),max(-log10(res$padj),na.rm=T)))
cex<-0.3
# Add colored points: red if padj<0.05, orange if log2FC>1, green if both
with(subset(res, padj>=.05 & abs(log2FoldChange)<=1),
plot(log2FoldChange, -log10(pvalue), pch=20, col="black",cex=cex,
main=paste("Volcano plot ",paste0(contrast,collapse="."), ", alpha=",alpha,sep=""),
xlim=lfcLim, ylim=logpLim
)
)
with(subset(res, padj<.05 ), points(log2FoldChange, -log10(pvalue), pch=20,cex=cex, col="red"))
with(subset(res, abs(log2FoldChange)>1), points(log2FoldChange, -log10(pvalue), pch=20,cex=cex, col="orange"))
with(subset(res, padj<.05 & abs(log2FoldChange)>1), points(log2FoldChange, -log10(pvalue), pch=20,cex=1.5*cex, col="green"))
dev.off()
#----------------------------------------------------------------------
# scatter plot
#----------------------------------------------------------------------
# get the expression levels
geneData <- data.frame(x=assay(rldc)[,contrast[2]], y=assay(rldc)[,contrast[3]])
|
\name{pacmanNeptune}
\alias{pacmanNeptune}
\title{
Implement PacMan trimming (Lazarus et al. 2012)
}
\description{
Trim the top and the bottom of each species range in order to eliminate potential outliers.
}
\usage{
pacmanNeptune(dataset, top, bottom)
}
\arguments{
\item{dataset}{
A dataframe, typically the output of \code{\link{getNeptuneData}}.
}
\item{top}{
Percentage of occurrences to trim at the top of each species range.
}
\item{bottom}{
Percentage of occurrences to trim at the bottom of each species range.
}
}
\value{
Return a trimmed dataframe.
}
\references{
Lazarus et al. 2012. Pacman profiling: a simple procedure to identify stratigraphic outliers in high-density deep-sea microfossil data. Paleobiology, 38(1): 858-875.
}
\author{
Lazarus et al. 2012 for the algorithm, Johan Renaudie for the code.
}
\examples{
nsb <- nsbConnect("guest","arm_aber_sexy")
n <- getNeptuneData(nsb,fossil_group="R",age_range=c(4,6),ocean="IND")
nrow(n)
#[1] 1725
o <- pacmanNeptune(n, top=10, bottom=5)
nrow(o)
#[1] 1550
dbDisconnect(nsb)
}
|
/man/pacmanNeptune.Rd
|
permissive
|
plannapus/NSBcompanion
|
R
| false | false | 1,053 |
rd
|
\name{pacmanNeptune}
\alias{pacmanNeptune}
\title{
Implement PacMan trimming (Lazarus et al. 2012)
}
\description{
Trim the top and the bottom of each species range in order to eliminate potential outliers.
}
\usage{
pacmanNeptune(dataset, top, bottom)
}
\arguments{
\item{dataset}{
A dataframe, typically the output of \code{\link{getNeptuneData}}.
}
\item{top}{
Percentage of occurrences to trim at the top of each species range.
}
\item{bottom}{
Percentage of occurrences to trim at the bottom of each species range.
}
}
\value{
Return a trimmed dataframe.
}
\references{
Lazarus et al. 2012. Pacman profiling: a simple procedure to identify stratigraphic outliers in high-density deep-sea microfossil data. Paleobiology, 38(1): 858-875.
}
\author{
Lazarus et al. 2012 for the algorithm, Johan Renaudie for the code.
}
\examples{
nsb <- nsbConnect("guest","arm_aber_sexy")
n <- getNeptuneData(nsb,fossil_group="R",age_range=c(4,6),ocean="IND")
nrow(n)
#[1] 1725
o <- pacmanNeptune(n, top=10, bottom=5)
nrow(o)
#[1] 1550
dbDisconnect(nsb)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cropmea_vars.R
\name{get_crops}
\alias{get_crops}
\title{Get crops}
\usage{
get_crops(traitlist, singularity = "crop_measurement")
}
\arguments{
\item{traitlist}{data.frame trait list table}
\item{singularity}{chr Types of variables. Ex. \code{crop_measurement}, \code{crop_phenology}, \code{management_practices} and \code{weather}}
}
\description{
Get crops
}
|
/man/get_crops.Rd
|
permissive
|
AGROFIMS/ragrofims
|
R
| false | true | 441 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cropmea_vars.R
\name{get_crops}
\alias{get_crops}
\title{Get crops}
\usage{
get_crops(traitlist, singularity = "crop_measurement")
}
\arguments{
\item{traitlist}{data.frame trait list table}
\item{singularity}{chr Types of variables. Ex. \code{crop_measurement}, \code{crop_phenology}, \code{management_practices} and \code{weather}}
}
\description{
Get crops
}
|
rm(list=ls())
install.packages("writexl")
library(metafor)
library(readxl)
library(tidyverse)
library(writexl)
ds <- read_excel('final data.xlsx')
as.data.frame(table(ds$`Mitochondrial function and iron metabolism measurements`))->dsS
variables <- dsS$Var1
var <- droplevels(variables[-31])
#do something about DRP1 protein expression [var 31]
###### Meta-analysis###############
fits<-list()
for(v in c(1:length(var))){
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='REML',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==var[v],])->fit
fits[[v]]<-fit}
names(fits)<-var
#DRP-1 protein expression (variable no. 31) didn't fit with the REML model so we used DL.
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='DL',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==variables[31],])->DRP
###############################
lapply(fits,function(x){
c('estimate'=x$b,
'pval'=x$pval,
'Q.pval'=x$QEp,
'I^2'=x$I2,
'upper CI'=x$ci.ub,
'lower CI'=x$ci.lb)})->fits.pvals
do.call(rbind,fits.pvals)->ds.models
as.data.frame(ds.models)->ds.models
pbl_bias <- list()
for (f in fits){
pb <- regtest.rma(f)
pbl_bias <- c(append(pbl_bias, pb$pval))}
names(pbl_bias)<-var
pbl_bias_frame<-as.data.frame(pbl_bias)
pbl_bias_final<-t(pbl_bias_frame)
names <- row.names(ds.models)
Final_data<-cbind(names,pbl_bias_final, ds.models)
write_xlsx(Final_data, "estimates.xlsx")
#don't forget to add DRP1 manually to the data later
################################
##### Moderator analysis ######
#Species as a moderator
species_fits<-list()
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='REML',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==var[80],],
mods = ~ Species-1)->fit
species_fits<-c(append(species_fits,fit$QMp))
new_var<- droplevels(var[-c(1,6,9,18,22,44,49,51,75,77)])
moderator_test_S <- as.data.frame(species_fits)
moderator_test_S <- t(moderator_test_S)
names<-as.data.frame(new_var)
moderator_test_S<-cbind(names,moderator_test_S)
write_xlsx(moderator_test_S, "moderator species.xlsx")
#Type of frailty assessment as a moderator
frailty_fits<-list()
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='REML',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==var[80],],
mods = ~ Frailty.assesment.model-1)->fit
frailty_fits<-c(append(frailty_fits,fit$QMp))
other_new_var<- droplevels(var[-c(9,10,18,20,38,41,45,46,48,49,53,67,80)])
moderator_test_F <- as.data.frame(frailty_fits)
moderator_test_F <- t(moderator_test_F)
names<-as.data.frame(other_new_var)
moderator_test_F<-cbind(names,moderator_test_F)
write_xlsx(moderator_test_F, "moderator frailty assessment.xlsx")
###################################
######## Subgroup analysis ########
ds <- read_excel('final data.xlsx')
variable<-ds %>% filter(`Mitochondrial function and iron metabolism measurements`=='DRP1 gene expression')
species_list<-variable %>% select(Species) %>% distinct() %>% pull(Species)
fits<-list()
for(s in species_list){
fit<-rma(m1i=Mean.frail, m2i=Mean.ctrl, sd1i=SD.frail, sd2i=SD.ctrl, n1i=N.frail, n2i=N.ctrl,
slab=Study, method='REML', measure='SMD', data=filter(variable,Species==s))
fits[[s]]<-fit}
lapply(fits,function(x){
c('estimate'=x$b,
'pval'=x$pval,
'Q.pval'=x$QEp,
'I^2'=x$I2,
'upper CI'=x$ci.ub,
'lower CI'=x$ci.lb)})->fits.pvals
do.call(rbind,fits.pvals)->subgroup.models
as.data.frame(subgroup.models)->subgroup.models
names<-as.data.frame(species_list)
final_data<-cbind(names,subgroup.models)
rm(list=ls())
|
/new analysis.R
|
no_license
|
Evanneeray/tina
|
R
| false | false | 4,071 |
r
|
rm(list=ls())
install.packages("writexl")
library(metafor)
library(readxl)
library(tidyverse)
library(writexl)
ds <- read_excel('final data.xlsx')
as.data.frame(table(ds$`Mitochondrial function and iron metabolism measurements`))->dsS
variables <- dsS$Var1
var <- droplevels(variables[-31])
#do something about DRP1 protein expression [var 31]
###### Meta-analysis###############
fits<-list()
for(v in c(1:length(var))){
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='REML',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==var[v],])->fit
fits[[v]]<-fit}
names(fits)<-var
#DRP-1 protein expression (variable no. 31) didn't fit with the REML model so we used DL.
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='DL',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==variables[31],])->DRP
###############################
lapply(fits,function(x){
c('estimate'=x$b,
'pval'=x$pval,
'Q.pval'=x$QEp,
'I^2'=x$I2,
'upper CI'=x$ci.ub,
'lower CI'=x$ci.lb)})->fits.pvals
do.call(rbind,fits.pvals)->ds.models
as.data.frame(ds.models)->ds.models
pbl_bias <- list()
for (f in fits){
pb <- regtest.rma(f)
pbl_bias <- c(append(pbl_bias, pb$pval))}
names(pbl_bias)<-var
pbl_bias_frame<-as.data.frame(pbl_bias)
pbl_bias_final<-t(pbl_bias_frame)
names <- row.names(ds.models)
Final_data<-cbind(names,pbl_bias_final, ds.models)
write_xlsx(Final_data, "estimates.xlsx")
#don't forget to add DRP1 manually to the data later
################################
##### Moderator analysis ######
#Species as a moderator
species_fits<-list()
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='REML',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==var[80],],
mods = ~ Species-1)->fit
species_fits<-c(append(species_fits,fit$QMp))
new_var<- droplevels(var[-c(1,6,9,18,22,44,49,51,75,77)])
moderator_test_S <- as.data.frame(species_fits)
moderator_test_S <- t(moderator_test_S)
names<-as.data.frame(new_var)
moderator_test_S<-cbind(names,moderator_test_S)
write_xlsx(moderator_test_S, "moderator species.xlsx")
#Type of frailty assessment as a moderator
frailty_fits<-list()
rma(m1i=Mean.frail,
m2i=Mean.ctrl,
sd1i=SD.frail,
sd2i=SD.ctrl,
n1i=N.frail,
n2i=N.ctrl,
slab=Study,
method='REML',
measure='SMD',
data=ds[ds$`Mitochondrial function and iron metabolism measurements`==var[80],],
mods = ~ Frailty.assesment.model-1)->fit
frailty_fits<-c(append(frailty_fits,fit$QMp))
other_new_var<- droplevels(var[-c(9,10,18,20,38,41,45,46,48,49,53,67,80)])
moderator_test_F <- as.data.frame(frailty_fits)
moderator_test_F <- t(moderator_test_F)
names<-as.data.frame(other_new_var)
moderator_test_F<-cbind(names,moderator_test_F)
write_xlsx(moderator_test_F, "moderator frailty assessment.xlsx")
###################################
######## Subgroup analysis ########
ds <- read_excel('final data.xlsx')
variable<-ds %>% filter(`Mitochondrial function and iron metabolism measurements`=='DRP1 gene expression')
species_list<-variable %>% select(Species) %>% distinct() %>% pull(Species)
fits<-list()
for(s in species_list){
fit<-rma(m1i=Mean.frail, m2i=Mean.ctrl, sd1i=SD.frail, sd2i=SD.ctrl, n1i=N.frail, n2i=N.ctrl,
slab=Study, method='REML', measure='SMD', data=filter(variable,Species==s))
fits[[s]]<-fit}
lapply(fits,function(x){
c('estimate'=x$b,
'pval'=x$pval,
'Q.pval'=x$QEp,
'I^2'=x$I2,
'upper CI'=x$ci.ub,
'lower CI'=x$ci.lb)})->fits.pvals
do.call(rbind,fits.pvals)->subgroup.models
as.data.frame(subgroup.models)->subgroup.models
names<-as.data.frame(species_list)
final_data<-cbind(names,subgroup.models)
rm(list=ls())
|
test_that("ebnv results make sense",{
set.seed(1)
n = 1e5
eta = 2
w = rexp(n,rate=eta)
s2 = 9
b = rnorm(n,0,sd= sqrt(s2*w))
temp=ebnv.exp(b,s2)
expect_equal(temp$g$w,1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-2)
temp=ebnv.np(b,s2,g.init=list(w=seq(1e-5,2,length=20)), update.mixprop = "mixsqp", update.w = "none")
expect_equal(sum(temp$g$mixprop * temp$g$w),1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-1)
  # repeat the check with a fresh random draw (same eta and s2) as a sanity check
n = 1e5
eta = 2
w = rexp(n,rate=eta)
s2 = 9
b = rnorm(n,0,sd= sqrt(s2*w))
temp=ebnv.exp(b,s2)
expect_equal(temp$g$w,1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-2)
temp=ebnv.np(b,s2,g.init=list(w=seq(1e-5,2,length=20)), update.mixprop = "mixsqp", update.w = "none")
expect_equal(sum(temp$g$mixprop * temp$g$w),1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-1)
})
test_that("ebnv.exp and ebnv.mix_exp logliks match when the mix solution is trivial",{
set.seed(1)
b = rexp(100)
s2= 1
res.exp = ebnv.exp(b,s2)
w = c(0.01,res.exp$g$w, 1000) # set up grid so that only the middle one will be plausible
res.exp_mix = ebnv.exp_mix(b,s2,g.init=list(w=w), update.mixprop= "mixsqp", update.w = "none")
expect_equal(res.exp_mix$loglik,res.exp$loglik, tol=1e-2)
})
test_that("ebnv.np em updates are increasing log-likelihood",{
set.seed(1)
n = 1000
w = c(rep(1,n),rep(10,n))
b = rnorm(2*n,0,sd=sqrt(w))
g = list(mixprop=c(0.5,0.5), w=c(2,3))
niter = 100
loglik = rep(0,niter)
for(i in 1:niter){
res = ebnv.np(b,1,g,update.mixprop = "em",update.w="em")
loglik[i] = res$loglik
g = res$g
}
expect_true(all(diff(loglik)>=0))
})
|
/tests/testthat/test_ebnv.R
|
no_license
|
banskt/ebmr.alpha
|
R
| false | false | 1,794 |
r
|
test_that("ebnv results make sense",{
set.seed(1)
n = 1e5
eta = 2
w = rexp(n,rate=eta)
s2 = 9
b = rnorm(n,0,sd= sqrt(s2*w))
temp=ebnv.exp(b,s2)
expect_equal(temp$g$w,1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-2)
temp=ebnv.np(b,s2,g.init=list(w=seq(1e-5,2,length=20)), update.mixprop = "mixsqp", update.w = "none")
expect_equal(sum(temp$g$mixprop * temp$g$w),1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-1)
  # repeat the check with a fresh random draw (same eta and s2) as a sanity check
n = 1e5
eta = 2
w = rexp(n,rate=eta)
s2 = 9
b = rnorm(n,0,sd= sqrt(s2*w))
temp=ebnv.exp(b,s2)
expect_equal(temp$g$w,1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-2)
temp=ebnv.np(b,s2,g.init=list(w=seq(1e-5,2,length=20)), update.mixprop = "mixsqp", update.w = "none")
expect_equal(sum(temp$g$mixprop * temp$g$w),1/eta,tol=1e-2)
expect_equal(as.numeric(lm(w ~ temp$wbar)$coef[2]),1,tol=1e-1)
})
test_that("ebnv.exp and ebnv.mix_exp logliks match when the mix solution is trivial",{
set.seed(1)
b = rexp(100)
s2= 1
res.exp = ebnv.exp(b,s2)
w = c(0.01,res.exp$g$w, 1000) # set up grid so that only the middle one will be plausible
res.exp_mix = ebnv.exp_mix(b,s2,g.init=list(w=w), update.mixprop= "mixsqp", update.w = "none")
expect_equal(res.exp_mix$loglik,res.exp$loglik, tol=1e-2)
})
test_that("ebnv.np em updates are increasing log-likelihood",{
set.seed(1)
n = 1000
w = c(rep(1,n),rep(10,n))
b = rnorm(2*n,0,sd=sqrt(w))
g = list(mixprop=c(0.5,0.5), w=c(2,3))
niter = 100
loglik = rep(0,niter)
for(i in 1:niter){
res = ebnv.np(b,1,g,update.mixprop = "em",update.w="em")
loglik[i] = res$loglik
g = res$g
}
expect_true(all(diff(loglik)>=0))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/varinfo.R
\name{redist.varinfo.plot}
\alias{redist.varinfo.plot}
\title{Static Variation of Information Plot}
\usage{
redist.varinfo.plot(district_membership, grouppop, fullpop, shp)
}
\arguments{
\item{district_membership}{matrix of district assignments}
\item{grouppop}{Required. Population of subgroup being studied in each precinct.}
\item{fullpop}{Required. Population of each precinct.}
\item{shp}{sf dataframe}
}
\value{
patchworked ggplot
}
\description{
Static Variation of Information Plot
}
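\examples{
\dontrun{
# Hypothetical sketch: `plans` (a precinct-by-plan district assignment matrix)
# and `shp` (an sf data frame with the population columns used below) are
# placeholders, not objects supplied by this help page; substitute your own data.
redist.varinfo.plot(district_membership = plans,
                    grouppop = shp$group_pop,
                    fullpop = shp$total_pop,
                    shp = shp)
}
}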
|
/man/redist.varinfo.plot.Rd
|
no_license
|
christopherkenny/redist
|
R
| false | true | 583 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/varinfo.R
\name{redist.varinfo.plot}
\alias{redist.varinfo.plot}
\title{Static Variation of Information Plot}
\usage{
redist.varinfo.plot(district_membership, grouppop, fullpop, shp)
}
\arguments{
\item{district_membership}{matrix of district assignments}
\item{grouppop}{Required. Population of subgroup being studied in each precinct.}
\item{fullpop}{Required. Population of each precinct.}
\item{shp}{sf dataframe}
}
\value{
patchworked ggplot
}
\description{
Static Variation of Information Plot
}
|
e1 <- new.env()
assign("var1",1,envir = e1)
var1 = "x"
oea <- function(){
print(environment())
print(globalenv())
}
for(i in 1:5){
oea()
}
with(
mtcars,ls()
)
install.packages("lattice")
library(lattice)
state <- data.frame(state.x77,
region = state.region)
xyplot(Life.Exp ~ Income | region,
data = state,
layout = c(4, 1))
|
/escopo.R
|
no_license
|
ZecaRueda/FIAP4IA
|
R
| false | false | 414 |
r
|
e1 <- new.env()
assign("var1",1,envir = e1)
var1 = "x"
oea <- function(){
print(environment())
print(globalenv())
}
for(i in 1:5){
oea()
}
with(
mtcars,ls()
)
install.packages("lattice")
library(lattice)
state <- data.frame(state.x77,
region = state.region)
xyplot(Life.Exp ~ Income | region,
data = state,
layout = c(4, 1))
|
#HW1---Problem2
#Import Libraries
library("ggplot2")
#Load the Dataset
df <- read.csv("C:/Users/kfeng3.DPU/Desktop/Intel.csv")
head(df)
#Preprocessing: Rename the variables and Change Date format
colnames(df)
colnames(df)[8] <- "Adjclose"
date <- as.Date(df$Date, format="%m/%d/%y")
head(date)
#Graph the closing price vs. the date with an ordinary line graph
ggplot(df, aes(x=date, y=Adjclose))+
labs(x="Date", y="Adjusted Closing Price", title = "Adjusted Closing Price by Date")+
geom_line(size=2, col="Blue")+
scale_y_continuous(limits = c(15,30))+
theme(axis.text.x = element_text(face="bold"))
#Graph the Volume vs Date with a Bar Graph
volume2 <- df$Volume/1000000
max(volume2)
min(volume2)
ggplot(df, aes(x=date, y=volume2))+
labs(x="Date", y="Volume (Unit: Million)", title="Volume by Date")+
geom_bar(stat="identity", width=0.5, fill="#008000")+
scale_x_date(breaks = "7 days")+
scale_y_continuous(breaks=seq(0, 370,50))+
theme(axis.text.x = element_text(angle = 40, vjust = 1.0, hjust = 1.0))
#Create a histogram of daily stock Volume
ggplot(df, aes(volume2))+
labs(title="Distribution of Daily Stock Volume", x="Volume (Unit: Million)", y="Count")+
scale_y_continuous(breaks=seq(0, 30,5))+
scale_x_continuous(breaks=seq(0, 400,50))+
geom_histogram(stat = "bin", fill="#B22222", bins=70)
#Create a scatterplot with volume on x-axis and range on y-axis
df$range <- df$High-df$Low
head(df$range)
ggplot(df, aes(x=volume2, y=range))+
labs(x="Volume (Unit: Million)", y="Daily Price Range", title= "Daily Volume Vs. Price Range")+
scale_x_continuous(breaks = seq(0, 370, 20))+
scale_y_continuous(breaks= seq(0, 9.31,1))+
geom_point(color="orange", size=1, stroke=1)
|
/KF_DSC465_HW1_P2_20190126.R
|
no_license
|
fengke54/data-visualization-code-in-R
|
R
| false | false | 1,764 |
r
|
#HW1---Problem2
#Import Libraries
library("ggplot2")
#Load the Dataset
df <- read.csv("C:/Users/kfeng3.DPU/Desktop/Intel.csv")
head(df)
#Preprocessing: Rename the variables and Change Date format
colnames(df)
colnames(df)[8] <- "Adjclose"
date <- as.Date(df$Date, format="%m/%d/%y")
head(date)
#Graph the closing price vs. the date with an ordinary line graph
ggplot(df, aes(x=date, y=Adjclose))+
labs(x="Date", y="Adjusted Closing Price", title = "Adjusted Closing Price by Date")+
geom_line(size=2, col="Blue")+
scale_y_continuous(limits = c(15,30))+
theme(axis.text.x = element_text(face="bold"))
#Graph the Volume vs Date with a Bar Graph
volume2 <- df$Volume/1000000
max(volume2)
min(volume2)
ggplot(df, aes(x=date, y=volume2))+
labs(x="Date", y="Volume (Unit: Million)", title="Volume by Date")+
geom_bar(stat="identity", width=0.5, fill="#008000")+
scale_x_date(breaks = "7 days")+
scale_y_continuous(breaks=seq(0, 370,50))+
theme(axis.text.x = element_text(angle = 40, vjust = 1.0, hjust = 1.0))
#Create a histogram of daily stock Volume
ggplot(df, aes(volume2))+
labs(title="Distribution of Daily Stock Volume", x="Volume (Unit: Million)", y="Count")+
scale_y_continuous(breaks=seq(0, 30,5))+
scale_x_continuous(breaks=seq(0, 400,50))+
geom_histogram(stat = "bin", fill="#B22222", bins=70)
#Create a scatterplot with volume on x-axis and range on y-axis
df$range <- df$High-df$Low
head(df$range)
ggplot(df, aes(x=volume2, y=range))+
labs(x="Volume (Unit: Million)", y="Daily Price Range", title= "Daily Volume Vs. Price Range")+
scale_x_continuous(breaks = seq(0, 370, 20))+
scale_y_continuous(breaks= seq(0, 9.31,1))+
geom_point(color="orange", size=1, stroke=1)
|
##### Performance for the hypothesis testing of annotation enrichment #####
# Vary alpha (0.2, 0.4, 0.6) and beta (−0.4, −0.3, −0.2, −0.1, 0.1, 0.2, 0.3, 0.4) to get Supplementary Figure S28
# Vary A.perc (0.001, 0.005, 0.01, 0.05, 0.1, 0.2) and beta (-0.3, -0.2, -0.1, 0.1, 0.2, 0.3) to get Supplementary Figures S29 and S30
library(MASS)
library(LPM)
library(pbivnorm)
library(mvtnorm)
# function to generate data
generate_data <- function(M, K, D, A, beta, alpha, R){
Z <- cbind(rep(1, M), A) %*% t(beta) + mvrnorm(M, rep(0, K), R)
indexeta <- (Z > 0)
eta <- matrix(as.numeric(indexeta), M, K)
Pvalue <- NULL
for (k in 1:K){
Pvalue_tmp <- runif(M)
Pvalue_tmp[indexeta[, k]] <- rbeta(sum(indexeta[, k]), alpha[k], 1)
Pvalue <- c(Pvalue, list(data.frame(SNP = seq(1, M), p = Pvalue_tmp)))
}
names(Pvalue) <- paste("P", seq(1, K), sep = "")
A <- data.frame(SNP=seq(1,M), A)
return( list(Pvalue = Pvalue, A = A, beta = beta, eta = eta))
}
K <- 2 # No. of traits
M <- 100000 # No. of SNPs
D <- 5 # No. of annotations
beta0 <- -1 # intercept of the probit model
beta0 <- rep(beta0, K)
set.seed(1)
beta <- cbind(beta0, matrix(0, K, D)) # K x (D+1) coefficient matrix: intercept plus zero annotation effects (null model)
A.perc <- 0.2 # the proportion the entries in X is 1
A <- rep(0, M*D) # the design matrix of annotation
indexA <- sample(M*D, M*D*A.perc)
A[indexA] <- 1
A <- matrix(A, M, D)
alpha <- rep(0.2, K) # parameter in the Beta distribution, one value per trait (generate_data indexes alpha[k])
rho <- 0 # correlation between the two traits
R <- matrix(c(1, rho, rho, 1), K, K) # correlation matrix for the traits
rep <- 500 # repeat times
pvalue_beta <- numeric(rep)
for (i in 1:rep){
data <- generate_data(M, K, D, A, beta, alpha, R)
Pvalue <- data$Pvalue
X <- data$A
fit <- bLPM(Pvalue, X = X)
LPMfit <- LPM(fit)
pvalue_beta[i] <- test_beta(Pvalue, X, 1, LPMfit)$p_value[2]
}
result <- sum(pvalue_beta < 0.05)/rep
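# Sketch of the sweep described in the header comments: repeat the rejection-rate
# loop above for each beta value (type I error when beta = 0, power otherwise).
# A reduced number of repetitions is used here to keep the sketch light; the
# full setting above uses rep = 500 repetitions.
beta_grid <- c(-0.4, -0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 0.4)
reject_rate <- sapply(beta_grid, function(b){
  bmat <- cbind(beta0, matrix(b, K, D))   # intercept plus annotation effects
  pvals <- replicate(50, {
    dat <- generate_data(M, K, D, A, bmat, alpha, R)
    fit <- bLPM(dat$Pvalue, X = dat$A)
    test_beta(dat$Pvalue, dat$A, 1, LPM(fit))$p_value[2]
  })
  mean(pvals < 0.05)
})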
|
/annotation_enrichment.R
|
no_license
|
YangLabHKUST/LPM-sim
|
R
| false | false | 2,012 |
r
|
##### Performance for the hypothesis testing of annotation enrichment #####
# Vary alpha (0.2, 0.4, 0.6) and beta (−0.4, −0.3, −0.2, −0.1, 0.1, 0.2, 0.3, 0.4) to get Supplementary Figure S28
# Vary A.perc (0.001, 0.005, 0.01, 0.05, 0.1, 0.2) and beta (-0.3, -0.2, -0.1, 0.1, 0.2, 0.3) to get Supplementary Figures S29 and S30
library(MASS)
library(LPM)
library(pbivnorm)
library(mvtnorm)
# function to generate data
generate_data <- function(M, K, D, A, beta, alpha, R){
Z <- cbind(rep(1, M), A) %*% t(beta) + mvrnorm(M, rep(0, K), R)
indexeta <- (Z > 0)
eta <- matrix(as.numeric(indexeta), M, K)
Pvalue <- NULL
for (k in 1:K){
Pvalue_tmp <- runif(M)
Pvalue_tmp[indexeta[, k]] <- rbeta(sum(indexeta[, k]), alpha[k], 1)
Pvalue <- c(Pvalue, list(data.frame(SNP = seq(1, M), p = Pvalue_tmp)))
}
names(Pvalue) <- paste("P", seq(1, K), sep = "")
A <- data.frame(SNP=seq(1,M), A)
return( list(Pvalue = Pvalue, A = A, beta = beta, eta = eta))
}
K <- 2 # No. of traits
M <- 100000 # No. of SNPs
D <- 5 # No. of annotations
beta0 <- -1 # intercept of the probit model
beta0 <- rep(beta0, K)
set.seed(1)
beta <- cbind(beta0, matrix(0, K, D)) # K x (D+1) coefficient matrix: intercept plus zero annotation effects (null model)
A.perc <- 0.2 # the proportion the entries in X is 1
A <- rep(0, M*D) # the design matrix of annotation
indexA <- sample(M*D, M*D*A.perc)
A[indexA] <- 1
A <- matrix(A, M, D)
alpha <- rep(0.2, K) # parameter in the Beta distribution, one value per trait (generate_data indexes alpha[k])
rho <- 0 # correlation between the two traits
R <- matrix(c(1, rho, rho, 1), K, K) # correlation matrix for the traits
rep <- 500 # repeat times
pvalue_beta <- numeric(rep)
for (i in 1:rep){
data <- generate_data(M, K, D, A, beta, alpha, R)
Pvalue <- data$Pvalue
X <- data$A
fit <- bLPM(Pvalue, X = X)
LPMfit <- LPM(fit)
pvalue_beta[i] <- test_beta(Pvalue, X, 1, LPMfit)$p_value[2]
}
result <- sum(pvalue_beta < 0.05)/rep
|
search()
ls()
gis.data<-read.table("Landscape_Phenology_Analysis/Data/BBS.CBC.UKBMS.complete.landcover.data.all.scales.soil.dem.configuration.txt",header=T)
gis.data<-gis.data[gis.data$Surv=="UKBMS",] #only ukbms rows
gis.data<-gis.data[gis.data$buffer=="500",] #only 500 buffer rows
gis.data$LCper<-gis.data$LC/(pi*500^2)*100 #Add row - convert LC to percentage of LC
#head(gis.data)
#summary(gis.data$LC)
#summary(gis.data$LCper)
phenology<-read.csv("Landscape_Phenology_Analysis/Outputs/meadow_brown_phenology.csv",header=T) # limits to univoltine species, records where flight period is greater than zero i.e. the species was recorded more than once in the year at that site, and species occupy 10 or more 10km squares within the UKBMS (see email from Marc Botham Wed 23/10/2013 14:33)
phenology.gis<-merge(phenology,gis.data,by.x="site",by.y="siteno.gref") #merge datasets
#nrow(phenology.gis)
############################ allocating values ####################################
par(mfrow = c(1, 1))
summary(phenology.gis$POINT_X)
phenology.gis$POINT_X.label[phenology.gis$POINT_X<(448100)]<-"southern"
phenology.gis$POINT_X.label[phenology.gis$POINT_X>(448100)]<-"northern"
table(phenology.gis$POINT_X.label)
summary(phenology.gis$POINT_Y)
phenology.gis$POINT_Y.label[phenology.gis$POINT_Y<(252700)]<-"western" #using mean
phenology.gis$POINT_Y.label[phenology.gis$POINT_Y>(252700)]<-"eastern"
table(phenology.gis$POINT_Y.label)
#head(phenology.gis)
############################ assigning labels ######################################
for (i in 1:nrow(phenology.gis)){
phenology.gis$POINT_X.POINT_Y[i]<-paste(phenology.gis[i,"POINT_X.label"],phenology.gis[i,"POINT_Y.label"],sep=".")
}
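# Equivalent vectorised alternative to the loop above: paste() is vectorised,
# so the labels can be combined without looping over rows.
phenology.gis$POINT_X.POINT_Y <- paste(phenology.gis$POINT_X.label,
                                       phenology.gis$POINT_Y.label, sep = ".")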
table(phenology.gis$POINT_X.POINT_Y)
names(phenology.gis)
library(lme4)
model<-lmer(daynum.nintieth~POINT_X.POINT_Y+year+(1|site),phenology.gis)
# (1|site) gives site as random effect- not a factor of interest
summary(model)
names(phenology.gis)
par(mfrow=c(1,1))
with(phenology.gis, boxplot(daynum.nintieth~POINT_X.POINT_Y))
############################## Testing models #############################
modelnull<-lmer(daynum.nintieth~year+(1|site),phenology.gis)
anova(model,modelnull)
#Testing whether POINT_X.POINT_Y has an effect on daynum.nintieth by comparing with a model that just looks at daynum.nintieth vs year with site as random effect
#result shows sig dif between models.
#AIC lower on original model therefore more of results explained by model.
library(plyr)
bargraph<-ddply(phenology.gis, c("POINT_X.POINT_Y"),summarise,
N=length(daynum.nintieth),
mean=mean(daynum.nintieth),
sd=sd(daynum.nintieth),
se=sd/ sqrt(N))
head(bargraph)
library(ggplot2)
#??ggplot2
ggplot(bargraph, aes(x = POINT_X.POINT_Y, y = mean)) +
geom_bar(position = position_dodge(), stat="identity",width=0.4,col="black",fill="grey") +
geom_errorbar(aes(ymin=mean-se, ymax=mean+se,width=4)) +
ggtitle("daynum.nintieth vs sight conditions") +
theme_grey() +
theme(panel.grid.major = element_blank()) +
labs(x="Site category",y="daynum.nintieth") +
expand_limits(y=c(0,130)) + scale_y_continuous(breaks=seq(0, 130, 10))
phenology.gis$POINT_X.POINT_Y2<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y2<-as.factor(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)<-c("All others","All others","All others","Southern.Western")
levels(phenology.gis$POINT_X.POINT_Y2)
model2<-lmer(daynum.nintieth~POINT_X.POINT_Y2+year+(1|site),phenology.gis)
anova(model,model2)
#sig dif
phenology.gis$POINT_X.POINT_Y2<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y2<-as.factor(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)<-c("A","B","C","A")
levels(phenology.gis$POINT_X.POINT_Y2)
model2<-lmer(daynum.nintieth~POINT_X.POINT_Y2+year+(1|site),phenology.gis)
anova(model,model2)
######
phenology.gis$POINT_X.POINT_Y3<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y3<-as.factor(phenology.gis$POINT_X.POINT_Y3)
levels(phenology.gis$POINT_X.POINT_Y3)
levels(phenology.gis$POINT_X.POINT_Y3)<-c("A","B","A","C")
levels(phenology.gis$POINT_X.POINT_Y3)
model3<-lmer(daynum.nintieth~POINT_X.POINT_Y3+year+(1|site),phenology.gis)
anova(model,model3)
#######
phenology.gis$POINT_X.POINT_Y4<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y4<-as.factor(phenology.gis$POINT_X.POINT_Y4)
levels(phenology.gis$POINT_X.POINT_Y4)
levels(phenology.gis$POINT_X.POINT_Y4)<-c("A","A","B","C")
levels(phenology.gis$POINT_X.POINT_Y4)
model4<-lmer(daynum.nintieth~POINT_X.POINT_Y4+year+(1|site),phenology.gis)
anova(model3,model4)
#######
phenology.gis$POINT_X.POINT_Y5<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y5<-as.factor(phenology.gis$POINT_X.POINT_Y5)
levels(phenology.gis$POINT_X.POINT_Y5)
levels(phenology.gis$POINT_X.POINT_Y5)<-c("B","C","A","A")
levels(phenology.gis$POINT_X.POINT_Y5)
model5<-lmer(daynum.nintieth~POINT_X.POINT_Y5+year+(1|site),phenology.gis)
anova(model3,model5)
#######
phenology.gis$POINT_X.POINT_Y6<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y6<-as.factor(phenology.gis$POINT_X.POINT_Y6)
levels(phenology.gis$POINT_X.POINT_Y6)
levels(phenology.gis$POINT_X.POINT_Y6)<-c("B","A","C","A")
levels(phenology.gis$POINT_X.POINT_Y6)
model6<-lmer(daynum.nintieth~POINT_X.POINT_Y6+year+(1|site),phenology.gis)
anova(model3,model6)
#######
phenology.gis$POINT_X.POINT_Y7<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y7<-as.factor(phenology.gis$POINT_X.POINT_Y7)
levels(phenology.gis$POINT_X.POINT_Y7)
levels(phenology.gis$POINT_X.POINT_Y7)<-c("B","A","A","C")
levels(phenology.gis$POINT_X.POINT_Y7)
model7<-lmer(daynum.nintieth~POINT_X.POINT_Y7+year+(1|site),phenology.gis)
anova(model3,model7)
#######
phenology.gis$POINT_X.POINT_Y8<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y8<-as.factor(phenology.gis$POINT_X.POINT_Y8)
levels(phenology.gis$POINT_X.POINT_Y8)
levels(phenology.gis$POINT_X.POINT_Y8)<-c("A","A","B","B")
levels(phenology.gis$POINT_X.POINT_Y8)
model8<-lmer(daynum.nintieth~POINT_X.POINT_Y8+year+(1|site),phenology.gis)
anova(model3,model8)
#######
phenology.gis$POINT_X.POINT_Y9<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y9<-as.factor(phenology.gis$POINT_X.POINT_Y9)
levels(phenology.gis$POINT_X.POINT_Y9)
levels(phenology.gis$POINT_X.POINT_Y9)<-c("A","B","A","B")
levels(phenology.gis$POINT_X.POINT_Y9)
model9<-lmer(daynum.nintieth~POINT_X.POINT_Y9+year+(1|site),phenology.gis)
anova(model3,model9)
#######
phenology.gis$POINT_X.POINT_Y10<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y10<-as.factor(phenology.gis$POINT_X.POINT_Y10)
levels(phenology.gis$POINT_X.POINT_Y10)
levels(phenology.gis$POINT_X.POINT_Y10)<-c("A","B","B","A")
levels(phenology.gis$POINT_X.POINT_Y10)
model10<-lmer(daynum.nintieth~POINT_X.POINT_Y10+year+(1|site),phenology.gis)
anova(model3,model10)
#######
phenology.gis$POINT_X.POINT_Y11<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y11<-as.factor(phenology.gis$POINT_X.POINT_Y11)
levels(phenology.gis$POINT_X.POINT_Y11)
levels(phenology.gis$POINT_X.POINT_Y11)<-c("A","A","A","B")
levels(phenology.gis$POINT_X.POINT_Y11)
model11<-lmer(daynum.nintieth~POINT_X.POINT_Y11+year+(1|site),phenology.gis)
anova(model3,model11)
#######
phenology.gis$POINT_X.POINT_Y12<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y12<-as.factor(phenology.gis$POINT_X.POINT_Y12)
levels(phenology.gis$POINT_X.POINT_Y12)
levels(phenology.gis$POINT_X.POINT_Y12)<-c("A","A","B","A")
levels(phenology.gis$POINT_X.POINT_Y12)
model12<-lmer(daynum.nintieth~POINT_X.POINT_Y12+year+(1|site),phenology.gis)
anova(model11,model12)
#######
phenology.gis$POINT_X.POINT_Y13<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y13<-as.factor(phenology.gis$POINT_X.POINT_Y13)
levels(phenology.gis$POINT_X.POINT_Y13)
levels(phenology.gis$POINT_X.POINT_Y13)<-c("A","B","A","A")
levels(phenology.gis$POINT_X.POINT_Y13)
model13<-lmer(daynum.nintieth~POINT_X.POINT_Y13+year+(1|site),phenology.gis)
anova(model11,model13)
#######
phenology.gis$POINT_X.POINT_Y14<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y14<-as.factor(phenology.gis$POINT_X.POINT_Y14)
levels(phenology.gis$POINT_X.POINT_Y14)
levels(phenology.gis$POINT_X.POINT_Y14)<-c("B","A","A","A")
levels(phenology.gis$POINT_X.POINT_Y14)
model14<-lmer(daynum.nintieth~POINT_X.POINT_Y14+year+(1|site),phenology.gis)
anova(model11,model14)
############################### plotting model11 or 3 ###################################
with(phenology.gis, boxplot(daynum.nintieth~POINT_X.POINT_Y,xlab="Site type",ylab="daynum.nintieth",ylim = c(0, 200)))
#plot boxplot of model
library(plyr)
bargraph<-ddply(phenology.gis, c("POINT_X.POINT_Y"),summarise,
N=length(daynum.nintieth),
mean=mean(daynum.nintieth),
sd=sd(daynum.nintieth),
se=sd/ sqrt(N))
head(bargraph)
library(ggplot2)
#??ggplot2
ggplot(bargraph, aes(x = POINT_X.POINT_Y, y = mean)) +
geom_bar(position = position_dodge(), stat="identity",width=0.4,col="black",fill="grey") +
geom_errorbar(aes(ymin=mean-se, ymax=mean+se,width=0.1)) +
ggtitle("daynum.nintieth vs sight conditions") +
theme_grey() +
theme(panel.grid.major = element_blank()) +
labs(x="Site category",y="daynum.nintieth") +
expand_limits(y=c(0,130)) + scale_y_continuous(breaks=seq(0, 130, 10))
p<-ggplot(phenology.gis,aes(x=POINT_X.POINT_Y,y=daynum.nintieth))+geom_violin(fill="grey",col="black")+theme_minimal()
p + geom_boxplot(width=0.1) + expand_limits(y=c(0,200)) + scale_y_continuous(breaks=seq(0, 200, 20))+ggtitle("daynum.nintieth vs site Type")+xlab("Site Category")+ylab("daynum.nintieth")
test<-ddply(phenology.gis, c("POINT_X.POINT_Y"),summarise,
N=length(daynum.nintieth),
mean=mean(daynum.nintieth),
sd=sd(daynum.nintieth),
se=sd/ sqrt(N),
min = min(daynum.nintieth),
max=max(daynum.nintieth))
head(test)
|
/Landscape_Phenology_Analysis/Scripts/Variables vs phenology/Variable vs 90th percentile/2x variables/northing and easting vs 90th.R
|
no_license
|
mgreenwell/Phenology_Analysis
|
R
| false | false | 10,190 |
r
|
search()
ls()
gis.data<-read.table("Landscape_Phenology_Analysis/Data/BBS.CBC.UKBMS.complete.landcover.data.all.scales.soil.dem.configuration.txt",header=T)
gis.data<-gis.data[gis.data$Surv=="UKBMS",] #only ukbms rows
gis.data<-gis.data[gis.data$buffer=="500",] #only 500 buffer rows
gis.data$LCper<-gis.data$LC/(pi*500^2)*100 #Add row - convert LC to percentage of LC
#head(gis.data)
#summary(gis.data$LC)
#summary(gis.data$LCper)
phenology<-read.csv("Landscape_Phenology_Analysis/Outputs/meadow_brown_phenology.csv",header=T) # limits to univoltine species, records where flight period is greater than zero i.e. the species was recorded more than once in the year at that site, and species occupy 10 or more 10km squares within the UKBMS (see email from Marc Botham Wed 23/10/2013 14:33)
phenology.gis<-merge(phenology,gis.data,by.x="site",by.y="siteno.gref") #merge datasets
#nrow(phenology.gis)
############################ allocating values ####################################
par(mfrow = c(1, 1))
summary(phenology.gis$POINT_X)
phenology.gis$POINT_X.label[phenology.gis$POINT_X<(448100)]<-"southern"
phenology.gis$POINT_X.label[phenology.gis$POINT_X>(448100)]<-"northern"
table(phenology.gis$POINT_X.label)
summary(phenology.gis$POINT_Y)
phenology.gis$POINT_Y.label[phenology.gis$POINT_Y<(252700)]<-"western" #using mean
phenology.gis$POINT_Y.label[phenology.gis$POINT_Y>(252700)]<-"eastern"
table(phenology.gis$POINT_Y.label)
#head(phenology.gis)
############################ assigning labels ######################################
for (i in 1:nrow(phenology.gis)){
phenology.gis$POINT_X.POINT_Y[i]<-paste(phenology.gis[i,"POINT_X.label"],phenology.gis[i,"POINT_Y.label"],sep=".")
}
table(phenology.gis$POINT_X.POINT_Y)
names(phenology.gis)
library(lme4)
model<-lmer(daynum.nintieth~POINT_X.POINT_Y+year+(1|site),phenology.gis)
# (1|site) gives site as random effect- not a factor of interest
summary(model)
names(phenology.gis)
par(mfrow=c(1,1))
with(phenology.gis, boxplot(daynum.nintieth~POINT_X.POINT_Y))
############################## Testing models #############################
modelnull<-lmer(daynum.nintieth~year+(1|site),phenology.gis)
anova(model,modelnull)
#Testing whether POINT_X.POINT_Y has an effect on daynum.nintieth by comparing with a model that just looks at daynum.nintieth vs year with site as random effect
#result shows sig dif between models.
#AIC lower on original model therefore more of results explained by model.
library(plyr)
bargraph<-ddply(phenology.gis, c("POINT_X.POINT_Y"),summarise,
N=length(daynum.nintieth),
mean=mean(daynum.nintieth),
sd=sd(daynum.nintieth),
se=sd/ sqrt(N))
head(bargraph)
library(ggplot2)
#??ggplot2
ggplot(bargraph, aes(x = POINT_X.POINT_Y, y = mean)) +
geom_bar(position = position_dodge(), stat="identity",width=0.4,col="black",fill="grey") +
geom_errorbar(aes(ymin=mean-se, ymax=mean+se,width=4)) +
ggtitle("daynum.nintieth vs sight conditions") +
theme_grey() +
theme(panel.grid.major = element_blank()) +
labs(x="Site category",y="daynum.nintieth") +
expand_limits(y=c(0,130)) + scale_y_continuous(breaks=seq(0, 130, 10))
phenology.gis$POINT_X.POINT_Y2<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y2<-as.factor(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)<-c("All others","All others","All others","Southern.Western")
levels(phenology.gis$POINT_X.POINT_Y2)
model2<-lmer(daynum.nintieth~POINT_X.POINT_Y2+year+(1|site),phenology.gis)
anova(model,model2)
#sig dif
phenology.gis$POINT_X.POINT_Y2<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y2<-as.factor(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)
levels(phenology.gis$POINT_X.POINT_Y2)<-c("A","B","C","A")
levels(phenology.gis$POINT_X.POINT_Y2)
model2<-lmer(daynum.nintieth~POINT_X.POINT_Y2+year+(1|site),phenology.gis)
anova(model,model2)
######
phenology.gis$POINT_X.POINT_Y3<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y3<-as.factor(phenology.gis$POINT_X.POINT_Y3)
levels(phenology.gis$POINT_X.POINT_Y3)
levels(phenology.gis$POINT_X.POINT_Y3)<-c("A","B","A","C")
levels(phenology.gis$POINT_X.POINT_Y3)
model3<-lmer(daynum.nintieth~POINT_X.POINT_Y3+year+(1|site),phenology.gis)
anova(model,model3)
#######
phenology.gis$POINT_X.POINT_Y4<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y4<-as.factor(phenology.gis$POINT_X.POINT_Y4)
levels(phenology.gis$POINT_X.POINT_Y4)
levels(phenology.gis$POINT_X.POINT_Y4)<-c("A","A","B","C")
levels(phenology.gis$POINT_X.POINT_Y4)
model4<-lmer(daynum.nintieth~POINT_X.POINT_Y4+year+(1|site),phenology.gis)
anova(model3,model4)
#######
phenology.gis$POINT_X.POINT_Y5<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y5<-as.factor(phenology.gis$POINT_X.POINT_Y5)
levels(phenology.gis$POINT_X.POINT_Y5)
levels(phenology.gis$POINT_X.POINT_Y5)<-c("B","C","A","A")
levels(phenology.gis$POINT_X.POINT_Y5)
model5<-lmer(daynum.nintieth~POINT_X.POINT_Y5+year+(1|site),phenology.gis)
anova(model3,model5)
#######
phenology.gis$POINT_X.POINT_Y6<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y6<-as.factor(phenology.gis$POINT_X.POINT_Y6)
levels(phenology.gis$POINT_X.POINT_Y6)
levels(phenology.gis$POINT_X.POINT_Y6)<-c("B","A","C","A")
levels(phenology.gis$POINT_X.POINT_Y6)
model6<-lmer(daynum.nintieth~POINT_X.POINT_Y6+year+(1|site),phenology.gis)
anova(model3,model6)
#######
phenology.gis$POINT_X.POINT_Y7<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y7<-as.factor(phenology.gis$POINT_X.POINT_Y7)
levels(phenology.gis$POINT_X.POINT_Y7)
levels(phenology.gis$POINT_X.POINT_Y7)<-c("B","A","A","C")
levels(phenology.gis$POINT_X.POINT_Y7)
model7<-lmer(daynum.nintieth~POINT_X.POINT_Y7+year+(1|site),phenology.gis)
anova(model3,model7)
#######
phenology.gis$POINT_X.POINT_Y8<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y8<-as.factor(phenology.gis$POINT_X.POINT_Y8)
levels(phenology.gis$POINT_X.POINT_Y8)
levels(phenology.gis$POINT_X.POINT_Y8)<-c("A","A","B","B")
levels(phenology.gis$POINT_X.POINT_Y8)
model8<-lmer(daynum.nintieth~POINT_X.POINT_Y8+year+(1|site),phenology.gis)
anova(model3,model8)
#######
phenology.gis$POINT_X.POINT_Y9<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y9<-as.factor(phenology.gis$POINT_X.POINT_Y9)
levels(phenology.gis$POINT_X.POINT_Y9)
levels(phenology.gis$POINT_X.POINT_Y9)<-c("A","B","A","B")
levels(phenology.gis$POINT_X.POINT_Y9)
model9<-lmer(daynum.nintieth~POINT_X.POINT_Y9+year+(1|site),phenology.gis)
anova(model3,model9)
#######
phenology.gis$POINT_X.POINT_Y10<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y10<-as.factor(phenology.gis$POINT_X.POINT_Y10)
levels(phenology.gis$POINT_X.POINT_Y10)
levels(phenology.gis$POINT_X.POINT_Y10)<-c("A","B","B","A")
levels(phenology.gis$POINT_X.POINT_Y10)
model10<-lmer(daynum.nintieth~POINT_X.POINT_Y10+year+(1|site),phenology.gis)
anova(model3,model10)
#######
phenology.gis$POINT_X.POINT_Y11<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y11<-as.factor(phenology.gis$POINT_X.POINT_Y11)
levels(phenology.gis$POINT_X.POINT_Y11)
levels(phenology.gis$POINT_X.POINT_Y11)<-c("A","A","A","B")
levels(phenology.gis$POINT_X.POINT_Y11)
model11<-lmer(daynum.nintieth~POINT_X.POINT_Y11+year+(1|site),phenology.gis)
anova(model3,model11)
#######
phenology.gis$POINT_X.POINT_Y12<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y12<-as.factor(phenology.gis$POINT_X.POINT_Y12)
levels(phenology.gis$POINT_X.POINT_Y12)
levels(phenology.gis$POINT_X.POINT_Y12)<-c("A","A","B","A")
levels(phenology.gis$POINT_X.POINT_Y12)
model12<-lmer(daynum.nintieth~POINT_X.POINT_Y12+year+(1|site),phenology.gis)
anova(model11,model12)
#######
phenology.gis$POINT_X.POINT_Y13<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y13<-as.factor(phenology.gis$POINT_X.POINT_Y13)
levels(phenology.gis$POINT_X.POINT_Y13)
levels(phenology.gis$POINT_X.POINT_Y13)<-c("A","B","A","A")
levels(phenology.gis$POINT_X.POINT_Y13)
model13<-lmer(daynum.nintieth~POINT_X.POINT_Y13+year+(1|site),phenology.gis)
anova(model11,model13)
#######
phenology.gis$POINT_X.POINT_Y14<-phenology.gis$POINT_X.POINT_Y
phenology.gis$POINT_X.POINT_Y14<-as.factor(phenology.gis$POINT_X.POINT_Y14)
levels(phenology.gis$POINT_X.POINT_Y14)
levels(phenology.gis$POINT_X.POINT_Y14)<-c("B","A","A","A")
levels(phenology.gis$POINT_X.POINT_Y14)
model14<-lmer(daynum.nintieth~POINT_X.POINT_Y14+year+(1|site),phenology.gis)
anova(model11,model14)
############################### plotting model11 or 3 ###################################
with(phenology.gis, boxplot(daynum.nintieth~POINT_X.POINT_Y,xlab="Site type",ylab="daynum.nintieth",ylim = c(0, 200)))
#plot boxplot of model
library(plyr)
bargraph<-ddply(phenology.gis, c("POINT_X.POINT_Y"),summarise,
N=length(daynum.nintieth),
mean=mean(daynum.nintieth),
sd=sd(daynum.nintieth),
se=sd/ sqrt(N))
head(bargraph)
library(ggplot2)
#??ggplot2
ggplot(bargraph, aes(x = POINT_X.POINT_Y, y = mean)) +
geom_bar(position = position_dodge(), stat="identity",width=0.4,col="black",fill="grey") +
geom_errorbar(aes(ymin=mean-se, ymax=mean+se,width=0.1)) +
ggtitle("daynum.nintieth vs sight conditions") +
theme_grey() +
theme(panel.grid.major = element_blank()) +
labs(x="Site category",y="daynum.nintieth") +
expand_limits(y=c(0,130)) + scale_y_continuous(breaks=seq(0, 130, 10))
p<-ggplot(phenology.gis,aes(x=POINT_X.POINT_Y,y=daynum.nintieth))+geom_violin(fill="grey",col="black")+theme_minimal()
p + geom_boxplot(width=0.1) + expand_limits(y=c(0,200)) + scale_y_continuous(breaks=seq(0, 200, 20))+ggtitle("daynum.nintieth vs site Type")+xlab("Site Category")+ylab("daynum.nintieth")
test<-ddply(phenology.gis, c("POINT_X.POINT_Y"),summarise,
N=length(daynum.nintieth),
mean=mean(daynum.nintieth),
sd=sd(daynum.nintieth),
se=sd/ sqrt(N),
min = min(daynum.nintieth),
max=max(daynum.nintieth))
head(test)
|
# RQ: Scrape the data from https://www.eliteprospects.com/
# The idea is user must provide the `draft name` and `year` as input.
# The url will be constructed basis of this input. Then scrape the data
# The drafts are {NHL Entry Draft,NHL Expansion Draft,NHL Supplemental Draft,WHA Amateur Draft,KHL Draft,LNAH Draft,NWHL Draft,CWHL Draft }
# More drafts can be found at the bottom of this page: https://www.eliteprospects.com/draft/nhl-entry-draft
# draft_year are Seasons for which the user wants to scrape data. Must be of the form 2018, 1996, etc -- only a single 4-digit number.
# clean the workspace
rm(list = ls())
# load the required libraries
library(rvest)
# LOGIC: First create test example to check if the idea works. Then put it all in a function
# manual assignment
# draft_type<- "nhl entry draft"
# draft_types<- draft_type %>%
# # coerce to tibble format
# tibble::as.tibble() %>%
# purrr::set_names("draft_type") %>%
# # replace the space between words in draft type with a '-'.
# # The space is replaced with hyphen because if you browse to the webpage, you'll see the page is constructed the same. https://www.eliteprospects.com/draft/nhl-entry-draft
# dplyr::mutate(draft_type = stringr::str_replace_all(draft_type, " ", "-"))
#
# draft_year<- 2018
# # create page url
# page <- stringr::str_c("https://www.eliteprospects.com/draft/", draft_types, "/", draft_year)%>%
# xml2::read_html()
# # Now scrape the team data from the page
# # Extract the team data
# draft_team<- page %>%
# # use selector gadget to determine the relevant css
# rvest::html_nodes(".team") %>%
# rvest::html_text()%>%
# stringr::str_squish() %>%
# tibble::as_tibble()
#
# # Extract the player data
# draft_player<- page %>%
# # use selector gadget to determine the relevant css
# rvest::html_nodes("#drafted-players .player") %>%
# rvest::html_text()%>%
# stringr::str_squish() %>%
# tibble::as_tibble()
#
# # Join both the dataframe's together. Note both dataframes have uneven rows. So using merge
# all_data<- cbind(draft_team, draft_player)
# The above logic works. Wrapping it in a function
# For CSS, use the selector gadget from here: ftp://cran.r-project.org/pub/R/web/packages/rvest/vignettes/selectorgadget.html
get_draft_data<- function(draft_type, draft_year){
# replace the space between words in draft type with a '-'
draft_types<- draft_type %>%
# coerce to tibble format
tibble::as.tibble() %>%
purrr::set_names("draft_type") %>%
# replace the space between words in draft type with a '-'
dplyr::mutate(draft_type = stringr::str_replace_all(draft_type, " ", "-"))
# create page url
page <- stringr::str_c("https://www.eliteprospects.com/draft/", draft_types, "/", draft_year)%>%
xml2::read_html()
# Now scrape the team data from the page
# Extract the team data
draft_team<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes(".team") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the player data
draft_player<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .player") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the seasons data
draft_season<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes(".seasons") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the gp data
draft_gp<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .gp") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the g data
draft_g<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .g") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the a data
draft_a<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .a") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the tp data
draft_tp<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .tp") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the PIM data
draft_pim<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes(".pim") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Join the data frames together.
all_data<- cbind(draft_team, draft_player,draft_season, draft_gp,draft_g,
draft_a,draft_tp,draft_pim)
return(all_data)
} # end function
# Testing the function
draft_data<-get_draft_data("nhl entry draft", 2011)
View(draft_data)
draft_data<- get_draft_data("CWHL Draft","2012")
View(draft_data)
# Further improvement idea: Rather than user input, iterate through a list of draft teams and years to scrape the data.
# draft_year<- c(1963:2018)
# draft_year
draft_year<- c(2014:2018)
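# Sketch of the improvement idea above: loop over several seasons with the
# get_draft_data() helper defined in this script and stack the results,
# assuming every season returns the same set of columns.
all_drafts <- lapply(draft_year, function(y) get_draft_data("nhl entry draft", y))
all_drafts_df <- do.call(rbind, all_drafts)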
|
/scripts/R/basic_scraper_06.R
|
permissive
|
duttashi/scrapers
|
R
| false | false | 5,253 |
r
|
# RQ: Scrape the data from https://www.eliteprospects.com/
# The idea is user must provide the `draft name` and `year` as input.
# The url will be constructed basis of this input. Then scrape the data
# The drafts are {NHL Entry Draft,NHL Expansion Draft,NHL Supplemental Draft,WHA Amateur Draft,KHL Draft,LNAH Draft,NWHL Draft,CWHL Draft }
# More drafts can be found at the bottom of this page: https://www.eliteprospects.com/draft/nhl-entry-draft
# draft_year are Seasons for which the user wants to scrape data. Must be of the form 2018, 1996, etc -- only a single 4-digit number.
# clean the workspace
rm(list = ls())
# load the required libraries
library(rvest)
# LOGIC: First create test example to check if the idea works. Then put it all in a function
# manual assignment
# draft_type<- "nhl entry draft"
# draft_types<- draft_type %>%
# # coerce to tibble format
# tibble::as.tibble() %>%
# purrr::set_names("draft_type") %>%
# # replace the space between words in draft type with a '-'.
# # The space is replaced with hyphen because if you browse to the webpage, you'll see the page is constructed the same. https://www.eliteprospects.com/draft/nhl-entry-draft
# dplyr::mutate(draft_type = stringr::str_replace_all(draft_type, " ", "-"))
#
# draft_year<- 2018
# # create page url
# page <- stringr::str_c("https://www.eliteprospects.com/draft/", draft_types, "/", draft_year)%>%
# xml2::read_html()
# # Now scrape the team data from the page
# # Extract the team data
# draft_team<- page %>%
# # use selector gadget to determine the relevant css
# rvest::html_nodes(".team") %>%
# rvest::html_text()%>%
# stringr::str_squish() %>%
# tibble::as_tibble()
#
# # Extract the player data
# draft_player<- page %>%
# # use selector gadget to determine the relevant css
# rvest::html_nodes("#drafted-players .player") %>%
# rvest::html_text()%>%
# stringr::str_squish() %>%
# tibble::as_tibble()
#
# # Join both the dataframe's together. Note both dataframes have uneven rows. So using merge
# all_data<- cbind(draft_team, draft_player)
# The above logic works. Wrapping it in a function
# For CSS, use the selector gadget from here: ftp://cran.r-project.org/pub/R/web/packages/rvest/vignettes/selectorgadget.html
get_draft_data<- function(draft_type, draft_year){
# replace the space between words in draft type with a '-'
draft_types<- draft_type %>%
# coerce to tibble format
tibble::as.tibble() %>%
purrr::set_names("draft_type") %>%
# replace the space between words in draft type with a '-'
dplyr::mutate(draft_type = stringr::str_replace_all(draft_type, " ", "-"))
# create page url
page <- stringr::str_c("https://www.eliteprospects.com/draft/", draft_types, "/", draft_year)%>%
xml2::read_html()
# Now scrape the team data from the page
# Extract the team data
draft_team<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes(".team") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the player data
draft_player<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .player") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the seasons data
draft_season<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes(".seasons") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the gp data
draft_gp<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .gp") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the g data
draft_g<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .g") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the a data
draft_a<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .a") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the tp data
draft_tp<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes("#drafted-players .tp") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Extract the PIM data
draft_pim<- page %>%
# use selector gadget to determine the relevant css
rvest::html_nodes(".pim") %>%
rvest::html_text()%>%
stringr::str_squish() %>%
tibble::as_tibble()
# Join the data frames together.
all_data<- cbind(draft_team, draft_player,draft_season, draft_gp,draft_g,
draft_a,draft_tp,draft_pim)
return(all_data)
} # end function
# Testing the function
draft_data<-get_draft_data("nhl entry draft", 2011)
View(draft_data)
draft_data<- get_draft_data("CWHL Draft","2012")
View(draft_data)
# Further improvement idea: Rather than user input, iterate through a list of draft teams and years to scrape the data.
# draft_year<- c(1963:2018)
# draft_year
draft_year<- c(2014:2018)
|
%%
%% WARNING! DO NOT EDIT!
%% This file is automatically generated from meshbuilder.R
%%
\name{meshbuilder}
\alias{meshbuilder}
\title{Interactive mesh building and diagnostics}
\description{Interactively design and build a triangle mesh
for use with SPDE models, and assess the finite element
approximation errors. The R code needed to recreate the mesh
outside the interactive Shiny app is also generated. Spatial
objects can be imported from the global workspace. }
\usage{
meshbuilder()
}
\author{Finn Lindgren \email{finn.lindgren@gmail.com}}
\seealso{inla.mesh.2d, inla.mesh.create}
\examples{
\dontrun{
meshbuilder()
}
}
|
/man/meshbuilder.Rd
|
no_license
|
inbo/INLA
|
R
| false | false | 636 |
rd
|
%%
%% WARNING! DO NOT EDIT!
%% This file is automatically generated from meshbuilder.R
%%
\name{meshbuilder}
\alias{meshbuilder}
\title{Interactive mesh building and diagnostics}
\description{Interactively design and build a triangle mesh
for use with SPDE models, and assess the finite element
approximation errors. The R code needed to recreate the mesh
outside the interactive Shiny app is also generated. Spatial
objects can be imported from the global workspace. }
\usage{
meshbuilder()
}
\author{Finn Lindgren \email{finn.lindgren@gmail.com}}
\seealso{inla.mesh.2d, inla.mesh.create}
\examples{
\dontrun{
meshbuilder()
}
}
|
bs_accordion_sidebar(id = "documentation",
spec_side = c(width = 3, offset = 0),
spec_main = c(width = 9, offset = 0)) %>%
bs_append(
title_side = "App Documentation",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/app_documentation.Rmd"))
) %>%
bs_append(
title_side = "Setup Page",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/setup_page.Rmd"))
) %>%
bs_append(
title_side = "History Runs",
content_side = NULL,
content_main = HTML("
<p>This page is for searching history runs.</p>
<p>If you don\'t know the workflow Id to select in the first panel, use this page to explore all the runs at all sites.
<br>Select the one you wish to explore using the explore button.</p>
")
) %>%
bs_append(
title_side = "Exploratory Plots",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/exploratory_plot.Rmd"))
) %>%
bs_append(
title_side = "Benchmarking",
content_side = NULL,
content_main =
bs_accordion_sidebar(id = "benchmarking") %>%
bs_append(
title_side = "Settings",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/benchmarking_setting.Rmd"))
) %>%
bs_append(
title_side = "Scores",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/benchmarking_scores.Rmd"))
) %>%
bs_append(
title_side = "Plots",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/benchmarking_plots.Rmd"))
)
)
|
/shiny/workflowPlot/ui_files/documentation_UI.R
|
permissive
|
PecanProject/pecan
|
R
| false | false | 1,769 |
r
|
bs_accordion_sidebar(id = "documentation",
spec_side = c(width = 3, offset = 0),
spec_main = c(width = 9, offset = 0)) %>%
bs_append(
title_side = "App Documentation",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/app_documentation.Rmd"))
) %>%
bs_append(
title_side = "Setup Page",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/setup_page.Rmd"))
) %>%
bs_append(
title_side = "History Runs",
content_side = NULL,
content_main = HTML("
<p>This page is for searching history runs.</p>
<p>If you don\'t know the workflow Id to select in the first panel, use this page to explore all the runs at all sites.
<br>Select the one you wish to explore using the explore button.</p>
")
) %>%
bs_append(
title_side = "Exploratory Plots",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/exploratory_plot.Rmd"))
) %>%
bs_append(
title_side = "Benchmarking",
content_side = NULL,
content_main =
bs_accordion_sidebar(id = "benchmarking") %>%
bs_append(
title_side = "Settings",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/benchmarking_setting.Rmd"))
) %>%
bs_append(
title_side = "Scores",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/benchmarking_scores.Rmd"))
) %>%
bs_append(
title_side = "Plots",
content_side = NULL,
content_main = withMathJax(includeMarkdown("markdown/benchmarking_plots.Rmd"))
)
)
|
plot.gambin <-
function(x, barcol = "grey", barwidth = 1, cex.dots = 1, dotpch = 16, dotcol = par("fg"), line = FALSE, lwd = 1, linecol = par("fg"), ...)
{
ylim <- max(predict(x), x$Data$species)
midpoints <- barplot(x$Data$species, names.arg = x$Data$octave, ylim = c(0, 0.5 + ylim), col = barcol, width = barwidth, xlab = "Octaves", ylab = "Number of species", ...)
points(midpoints, predict(x), pch = dotpch, cex = cex.dots, col = dotcol)
if(line)
lines(midpoints, predict(x), lwd = lwd, col = linecol)
}
|
/gambin/R/plot.gambin.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 523 |
r
|
plot.gambin <-
function(x, barcol = "grey", barwidth = 1, cex.dots = 1, dotpch = 16, dotcol = par("fg"), line = FALSE, lwd = 1, linecol = par("fg"), ...)
{
ylim <- max(predict(x), x$Data$species)
midpoints <- barplot(x$Data$species, names.arg = x$Data$octave, ylim = c(0, 0.5 + ylim), col = barcol, width = barwidth, xlab = "Octaves", ylab = "Number of species", ...)
points(midpoints, predict(x), pch = dotpch, cex = cex.dots, col = dotcol)
if(line)
lines(midpoints, predict(x), lwd = lwd, col = linecol)
}
|
library(testthat)
library(SensMap)
test_check("SensMap")
|
/tests/testthat.R
|
no_license
|
cran/SensMap
|
R
| false | false | 62 |
r
|
library(testthat)
library(SensMap)
test_check("SensMap")
|
install.packages('rJava')
install.packages("xlsxjars")
install.packages("xlsx")
install.packages("xlsx", INSTALL_opts=c("--no-multiarch"))
library("xlsx")
data <- read.xlsx("example.xlsx", sheetIndex = 1) # "example.xlsx" is a placeholder; point this at a real workbook
# linear regression
x <- c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
y <- c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
relation <- lm(y~x)
print(summary(relation))
a <- data.frame(x = 170)
result <- predict(relation,a)
b <- data.frame(x = c(150,170))
c <- data.frame(x = 1:10)
result2 <- predict(relation,c)
print(result2)
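# A small extension sketch: predict.lm can also return interval estimates.
# The heights below are illustrative values, not part of the original exercise.
predict(relation, data.frame(x = c(160, 170, 180)), interval = "confidence")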
png(file = "linearregression.png")
plot(y,x,col = "blue",main = "Height & Weight Regression",
abline(lm(x~y)),cex = 1.3,pch = 16,xlab = "Weight in Kg",ylab = "Height in cm")
dev.off()
|
/StatisticsAndR/exercise.R
|
no_license
|
lchi91/r-training
|
R
| false | false | 700 |
r
|
install.packages('rJava')
install.packages("xlsxjars")
install.packages("xlsx")
install.packages("xlsx", INSTALL_opts=c("--no-multiarch"))
library("xlsx")
data <- read.xlsx("example.xlsx", sheetIndex = 1) # "example.xlsx" is a placeholder; point this at a real workbook
# linear regression
x <- c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
y <- c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
relation <- lm(y~x)
print(summary(relation))
a <- data.frame(x = 170)
result <- predict(relation,a)
b <- data.frame(x = c(150,170))
c <- data.frame(x = 1:10)
result2 <- predict(relation,c)
print(result2)
png(file = "linearregression.png")
plot(y,x,col = "blue",main = "Height & Weight Regression",
abline(lm(x~y)),cex = 1.3,pch = 16,xlab = "Weight in Kg",ylab = "Height in cm")
dev.off()
|
##########################################################################################################
## Coursera Getting and Cleaning Data Course Project
## Rafael Godinho
## 2016-04-03
# runAnalysis.r File Description:
# This script will perform the following steps on the UCI HAR Dataset downloaded from
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# 1. Merge the training and the test sets to create one data set.
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
# 3. Use descriptive activity names to name the activities in the data set
# 4. Appropriately label the data set with descriptive activity names.
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
##########################################################################################################
#1.Download the file and put the file in the data folder
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="auto")
#2 unzip the file
unzip(zipfile="./data/Dataset.zip",exdir="./data")
#3 unzipped files are in the folderUCI HAR Dataset. Get the list of the files
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
#4 read data
dataActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
#5 merge the training and the test sets to create one data set
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
# set names to variables
names(dataSubject)<-c("subject")
names(dataActivity)<- c("activity")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"),head=FALSE)
names(dataFeatures)<- dataFeaturesNames$V2
# merge columns to get the data frame Data for all data
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
#6 Extracts only the measurements on the mean and standard deviation for each measurement
subdataFeaturesNames<-dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames<-c(as.character(subdataFeaturesNames), "subject", "activity" )
Data<-subset(Data,select=selectedNames)
#7 Uses descriptive activity names to name the activities in the data set
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
Data$activity <- factor(Data$activity, levels = activityLabels$V1, labels = activityLabels$V2)
#8 adjust variable names to be descriptive
names(Data)<-gsub("^t", "time", names(Data))
names(Data)<-gsub("^f", "frequency", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "Magnitude", names(Data))
names(Data)<-gsub("BodyBody", "Body", names(Data))
#9 Creates a second,independent tidy data set and ouput it
library(plyr);
Data2<-aggregate(. ~subject + activity, Data, mean)
Data2<-Data2[order(Data2$subject,Data2$activity),]
write.table(Data2, file = "tidydata.txt",row.name=FALSE)
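# Quick check (sketch): the tidy data set written above can be read back in with
tidy_check <- read.table("tidydata.txt", header = TRUE)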
|
/run_analysis.R
|
no_license
|
jedirafa/ProgrammingAssignment4
|
R
| false | false | 3,719 |
r
|
##########################################################################################################
## Coursera Getting and Cleaning Data Course Project
## Rafael Godinho
## 2016-04-03
# runAnalysis.r File Description:
# This script will perform the following steps on the UCI HAR Dataset downloaded from
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# 1. Merge the training and the test sets to create one data set.
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
# 3. Use descriptive activity names to name the activities in the data set
# 4. Appropriately label the data set with descriptive activity names.
# 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
##########################################################################################################
#1.Download the file and put the file in the data folder
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="auto")
#2 unzip the file
unzip(zipfile="./data/Dataset.zip",exdir="./data")
#3 unzipped files are in the folderUCI HAR Dataset. Get the list of the files
path_rf <- file.path("./data" , "UCI HAR Dataset")
files<-list.files(path_rf, recursive=TRUE)
#4 read data
dataActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
#5 merge the training and the test sets to create one data set
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
# set names to variables
names(dataSubject)<-c("subject")
names(dataActivity)<- c("activity")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"),head=FALSE)
names(dataFeatures)<- dataFeaturesNames$V2
# merge columns to get the data frame Data for all data
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
#6 Extracts only the measurements on the mean and standard deviation for each measurement
subdataFeaturesNames<-dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames<-c(as.character(subdataFeaturesNames), "subject", "activity" )
Data<-subset(Data,select=selectedNames)
#7 Uses descriptive activity names to name the activities in the data set
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"),header = FALSE)
Data$activity <- factor(Data$activity, levels = activityLabels$V1, labels = activityLabels$V2)
#8 adjust variable names to be descriptive
names(Data)<-gsub("^t", "time", names(Data))
names(Data)<-gsub("^f", "frequency", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "Magnitude", names(Data))
names(Data)<-gsub("BodyBody", "Body", names(Data))
#9 Creates a second,independent tidy data set and ouput it
library(plyr);
Data2<-aggregate(. ~subject + activity, Data, mean)
Data2<-Data2[order(Data2$subject,Data2$activity),]
write.table(Data2, file = "tidydata.txt",row.name=FALSE)
|
#########################################################################
##### GOA Dynamics Working Group #####
##### Benthic Nearshore Group - Data Assembly script #####
##### Created by Rachael Blake on Sept. 21, 2015 #####
#########################################################################
## load packages (order matters)
library(httr)
library(plyr)
library(dplyr)
library(XML)
library(curl)
library(rvest)
library(tidyr)
library(stringr)
# create empty data frame
BenNear <- data.frame('Year'=c(1975:2015))
# merge in data columns generated by data cleaning scripts
BenNear <- merge(BenNear,ENSO_annual,all.x=T) # ENSO annual
BenNear <- merge(BenNear,pdo_annual,all.x=T) # PDO annual
BenNear <- merge(BenNear,npgo_annual,all.x=T) # NPGO annual
BenNear <- merge(BenNear,upanom,all.x=T) # Upwelling anomalies annual
BenNear <- merge(BenNear,Phy,all.x=T) # Phytoplankton - Seward Line, spring
BenNear <- merge(BenNear,SatChl_df,all.x=T) # Chla - Satellite annual
BenNear <- merge(BenNear,SST,all.x=T) # SST - Seward Line
###############################################################################################
### Multivariate ENSO Index (MEI):
URL_enso <- "http://www.esrl.noaa.gov/psd/enso/mei/table.html"
enso_pre <- xpathSApply(content(GET(URL_enso)),"/html/body/pre", xmlValue)
enso_cols <- scan(textConnection(enso_pre), skip=10, nlines=1, what=character()) # get header row
enso <- read.csv(file=textConnection(enso_pre), skip=11, stringsAsFactors=F, sep="\t",
header=FALSE, col.names=enso_cols)
enso_df <- enso[1:66,] # removes the text at bottom of file
#
ENSO_annual <- enso_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
gather(Months, ENSO, -Year) %>% # reshapes data to be column-wise
filter(!is.na(ENSO)) %>% # remove NA values
group_by(Year) %>%
summarise(ENSO_anul_mn=mean(ENSO)) %>% # get annual means
ungroup() #
##############################################################################################
### Pacific Decadal Oscillation Index (PDO):
URL_pdo <- "http://jisao.washington.edu/pdo/PDO.latest"
pdo_raw <- html(URL_pdo)
pdo_pre <- pdo_raw %>%
html_node("p") %>%
html_text()
pdo_cols <- scan(textConnection(pdo_pre), skip=29, nlines=1, what=character())# Get header row
pdo_df <- read.table(file=textConnection(pdo_pre), skip=30, nrows=116, stringsAsFactors=F, sep="",
header=FALSE, col.names=pdo_cols, strip.white=TRUE, fill=TRUE)
pdo_df$YEAR <- substr(pdo_df$YEAR, 1, 4) # removes asterisks from years 2002-2015
pdo_annual <- pdo_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
gather(Month, PDO, -Year) %>% # reshapes data to be column-wise
group_by(Year) %>%
summarise(PDO_anul_mn=mean(as.numeric(as.character(PDO)), na.rm = TRUE)) %>% # get annual means
ungroup()
###############################################################################################
### North Pacific Gyre Oscillation Index (NPGO):
URL_npgo <- "http://www.o3d.org/npgo/npgo.php"
npgo_pre <- xpathSApply(content(GET(URL_npgo)),"/html/body/pre", xmlValue)
npgo_cols <- scan(textConnection(npgo_pre), skip=25, nlines=1, what=character())# Get header row
npgo_cols <- npgo_cols[2:4] # select column names
npgo_df <- read.csv(file=textConnection(npgo_pre), skip=26, stringsAsFactors=F, sep="",
header=FALSE, col.names=npgo_cols, strip.white=TRUE)
npgo_annual <- npgo_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
group_by(Year) %>%
summarise(NPGO_anul_mn=mean(NPGO)) %>% # get annual means
ungroup() #
##############################################################################################
### Upwelling Anomalies:
URL_upanom <- "http://www.pfeg.noaa.gov/products/PFELData/upwell/monthly/upanoms.mon"
upanom_raw <- html(URL_upanom)
upanom_pre <- upanom_raw %>%
html_node("p") %>%
html_text()
upanom_cols <- scan(textConnection(upanom_pre), skip=2, nlines=1, what=character())# Get header row
upanom_cols <- c("Lat", "Long", upanom_cols[-1])# split position into lat and long
upanom_df <- read.csv(file=textConnection(upanom_pre), skip=4, stringsAsFactors=F, sep="",
header=FALSE, col.names=upanom_cols, strip.white=TRUE)
#
upanom <- upanom_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
gather(Month, UpwelAnom,-Year,-Lat,-Long) %>% # reshapes data to be column-wise
group_by(Year) %>%
summarise(UpWelAnom_anul_mn=mean(UpwelAnom, na.rm = TRUE)) %>% # get annual means
ungroup()
###############################################################################################
### Phytoplankton (annual spring mean): (from Seward Line dataset)
# Get 1998-2010 data
URL_Chl <- "http://gulfwatch.nceas.ucsb.edu/goa/d1/mn/v1/object/df35b.41.3"
ChlGet <- GET(URL_Chl)
Chl1 <- content(ChlGet, as='text')
Chl_df <- read.csv(file=textConnection(Chl1),stringsAsFactors=FALSE)
head(Chl_df)
#################
### NOTE: Have Jessica correct the dates for 2007 (swapped Month and Day)
### in the data sheet on the portal.
#################
#
Phy <- Chl_df %>%
arrange(dateTime) %>%
mutate(Year=substring(dateTime,1,4),
Month=substring(dateTime,6,7)) %>%
filter(Month %in% c("05")) %>% # selects just the May samples for all years
group_by(Year) %>%
summarise(ChlA_micgL_AnnSpMn=mean(chloropyllA),
TotChl_micgL_AnnSpMn=mean(totalChl)) %>% # get annual means
ungroup() %>%
mutate(TotChlA_micgL_AnnSpMn=rowSums(.[2:3],na.rm=T)) %>%
select(Year,TotChlA_micgL_AnnSpMn)
###############################################################################################
# Mean annual Chl a anomalies (mg/m3) for Gulf of Alaska
# From Waite & Mueter 2013, Fig 11 Annual
# Waite, J.N. and Mueter, F.J. 2013. Spatial and temporal variability of chlorophyll-a concentrations
# in the coastal Gulf of Alaska, 1998-2011, using cloud-free reconstructions of SeaWiFS and MODIS-Aqua data.
# Prog. Oceanogr. 116, 179-192.
#
URL_SatChl <- "https://drive.google.com/uc?export=download&id=0B1XbkXxdfD7uRHdOTGQtSVBQOE0"
SatChlGet <- GET(URL_SatChl)
SatChl1 <- content(SatChlGet, as='text')
SatChl_df <- read.csv(file=textConnection(SatChl1),stringsAsFactors=FALSE)
head(SatChl_df)
################################################################################################
### Water Temperature (SST):
URL_T <- "http://gulfwatch.nceas.ucsb.edu/goa/d1/mn/v1/object/df35b.31.1"
TGet <- GET(URL_T)
T1 <- content(TGet, as='text')
Tmps <- read.csv(file=textConnection(T1),stringsAsFactors=FALSE,strip.white=TRUE)
head(Tmps)
URL_Ts <- "http://gulfwatch.nceas.ucsb.edu/goa/d1/mn/v1/object/df35b.32.2"
TsGet <- GET(URL_Ts)
Ts1 <- content(TsGet, as='text')
TmpSams <- read.csv(file=textConnection(Ts1),stringsAsFactors=FALSE,strip.white=TRUE)
head(TmpSams)
#
Temps <- merge(Tmps, TmpSams, all.x=TRUE) # merge sample information with data values
Temps$Date <- sapply((strsplit(as.character(Temps$dateTime), split=" ")), function(x) x[1]) # split date out
head(Temps)
############################################
### NOTE : Need to deal with missing sample info for cruiseID TXS09, consecStationNum 3
############################################
# missing_date <- filter(Temps, is.na(dateTime)) # selects data with missing dates
# miss_cID <- unique(missing_date$cruiseID) # selects the cruise IDs for which sample info is missing
SST <- Temps %>%
mutate(Year=sapply((strsplit(as.character(Date), split="/")),
function(x) x[3])) %>% # creates Year column
arrange(dateTime) %>%
rename(WTemp_C=temp) %>%
group_by(Year) %>%
summarise(WTemp_C_AnnMn=mean(WTemp_C)) %>% # get annual means
ungroup() %>%
select(Year, WTemp_C_AnnMn) # selects columns wanted
##############################################################################################
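### Editor's sketch (not part of the original pipeline): the climate-index blocks above
### (ENSO, PDO, NPGO, upwelling) all use the same wide-to-long, annual-mean pattern.
### The toy data below are made up; it only relies on dplyr/tidyr already loaded above.
toy <- data.frame(Year = c(2000, 2001),
                  Jan  = c(0.5, -0.2),
                  Feb  = c(0.7,  0.1),
                  Mar  = c(NA,   0.3))
toy_annual <- toy %>%
  gather(Month, Index, -Year) %>%            # reshape months into rows
  filter(!is.na(Index)) %>%                  # drop missing months
  group_by(Year) %>%
  summarise(Index_anul_mn = mean(Index)) %>% # one mean per Year
  ungroup()
# toy_annual mirrors the structure of the *_annual objects merged into BenNear above.
##############################################################################################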
|
/Benthic_Nearshore_Data_.r
|
no_license
|
NCEAS/dmx-multistress
|
R
| false | false | 8,709 |
r
|
#########################################################################
##### GOA Dynamics Working Group #####
##### Benthic Nearshore Group - Data Assembly script #####
##### Created by Rachael Blake on Sept. 21, 2015 #####
#########################################################################
## load packages (order matters)
library(httr)
library(plyr)
library(dplyr)
library(XML)
library(curl)
library(rvest)
library(tidyr)
library(stringr)
# create empty data frame
BenNear <- data.frame('Year'=c(1975:2015))
# merge in data columns generated by data cleaning scripts
BenNear <- merge(BenNear,ENSO_annual,all.x=T) # ENSO annual
BenNear <- merge(BenNear,pdo_annual,all.x=T) # PDO annual
BenNear <- merge(BenNear,npgo_annual,all.x=T) # NPGO annual
BenNear <- merge(BenNear,upanom,all.x=T) # Upwelling anomalies annual
BenNear <- merge(BenNear,Phy,all.x=T) # Phytoplankton - Seward Line, spring
BenNear <- merge(BenNear,SatChl_df,all.x=T) # Chla - Satellite annual
BenNear <- merge(BenNear,SST,all.x=T) # SST - Seward Line
###############################################################################################
### Multivariate ENSO Index (MEI):
URL_enso <- "http://www.esrl.noaa.gov/psd/enso/mei/table.html"
enso_pre <- xpathSApply(content(GET(URL_enso)),"/html/body/pre", xmlValue)
enso_cols <- scan(textConnection(enso_pre), skip=10, nlines=1, what=character()) # get header row
enso <- read.csv(file=textConnection(enso_pre), skip=11, stringsAsFactors=F, sep="\t",
header=FALSE, col.names=enso_cols)
enso_df <- enso[1:66,] # removes the text at bottom of file
#
ENSO_annual <- enso_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
gather(Months, ENSO, -Year) %>% # reshapes data to be column-wise
filter(!is.na(ENSO)) %>% # remove NA values
group_by(Year) %>%
summarise(ENSO_anul_mn=mean(ENSO)) %>% # get annual means
ungroup() #
##############################################################################################
### Pacific Decadal Oscillation Index (PDO):
URL_pdo <- "http://jisao.washington.edu/pdo/PDO.latest"
pdo_raw <- html(URL_pdo)
pdo_pre <- pdo_raw %>%
html_node("p") %>%
html_text()
pdo_cols <- scan(textConnection(pdo_pre), skip=29, nlines=1, what=character())# Get header row
pdo_df <- read.table(file=textConnection(pdo_pre), skip=30, nrows=116, stringsAsFactors=F, sep="",
header=FALSE, col.names=pdo_cols, strip.white=TRUE, fill=TRUE)
pdo_df$YEAR <- substr(pdo_df$YEAR, 1, 4) # removes asterisks from years 2002-2015
pdo_annual <- pdo_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
gather(Month, PDO, -Year) %>% # reshapes data to be column-wise
group_by(Year) %>%
summarise(PDO_anul_mn=mean(as.numeric(as.character(PDO)), na.rm = TRUE)) %>% # get annual means
ungroup()
###############################################################################################
### North Pacific Gyre Oscillation Index (NPGO):
URL_npgo <- "http://www.o3d.org/npgo/npgo.php"
npgo_pre <- xpathSApply(content(GET(URL_npgo)),"/html/body/pre", xmlValue)
npgo_cols <- scan(textConnection(npgo_pre), skip=25, nlines=1, what=character())# Get header row
npgo_cols <- npgo_cols[2:4] # select column names
npgo_df <- read.csv(file=textConnection(npgo_pre), skip=26, stringsAsFactors=F, sep="",
header=FALSE, col.names=npgo_cols, strip.white=TRUE)
npgo_annual <- npgo_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
group_by(Year) %>%
summarise(NPGO_anul_mn=mean(NPGO)) %>% # get annual means
ungroup() #
##############################################################################################
### Upwelling Anomalies:
URL_upanom <- "http://www.pfeg.noaa.gov/products/PFELData/upwell/monthly/upanoms.mon"
upanom_raw <- html(URL_upanom)
upanom_pre <- upanom_raw %>%
html_node("p") %>%
html_text()
upanom_cols <- scan(textConnection(upanom_pre), skip=2, nlines=1, what=character())# Get header row
upanom_cols <- c("Lat", "Long", upanom_cols[-1])# split position into lat and long
upanom_df <- read.csv(file=textConnection(upanom_pre), skip=4, stringsAsFactors=F, sep="",
header=FALSE, col.names=upanom_cols, strip.white=TRUE)
#
upanom <- upanom_df %>%
rename(Year=YEAR) %>% # rename data columns
filter(Year %in% c(1975:2015)) %>% # selects years 1975 - 2015
gather(Month, UpwelAnom,-Year,-Lat,-Long) %>% # reshapes data to be column-wise
group_by(Year) %>%
summarise(UpWelAnom_anul_mn=mean(UpwelAnom, na.rm = TRUE)) %>% # get annual means
ungroup()
###############################################################################################
### Phytoplankton (annual spring mean): (from Seward Line dataset)
# Get 1998-2010 data
URL_Chl <- "http://gulfwatch.nceas.ucsb.edu/goa/d1/mn/v1/object/df35b.41.3"
ChlGet <- GET(URL_Chl)
Chl1 <- content(ChlGet, as='text')
Chl_df <- read.csv(file=textConnection(Chl1),stringsAsFactors=FALSE)
head(Chl_df)
#################
### NOTE: Have Jessica correct the dates for 2007 (swapped Month and Day)
### in the data sheet on the portal.
#################
#
Phy <- Chl_df %>%
arrange(dateTime) %>%
mutate(Year=substring(dateTime,1,4),
Month=substring(dateTime,6,7)) %>%
filter(Month %in% c("05")) %>% # selects just the May samples for all years
group_by(Year) %>%
summarise(ChlA_micgL_AnnSpMn=mean(chloropyllA),
TotChl_micgL_AnnSpMn=mean(totalChl)) %>% # get annual means
ungroup() %>%
mutate(TotChlA_micgL_AnnSpMn=rowSums(.[2:3],na.rm=T)) %>%
select(Year,TotChlA_micgL_AnnSpMn)
###############################################################################################
# Mean annual Chl a anomalies (mg/m3) for Gulf of Alaska
# From Waite & Mueter 2013, Fig 11 Annual
# Waite, J.N. and Mueter, F.J. 2013. Spatial and temporal variability of chlorophyll-a concentrations
# in the coastal Gulf of Alaska, 1998-2011, using cloud-free reconstructions of SeaWiFS and MODIS-Aqua data.
# Prog. Oceanogr. 116, 179-192.
#
URL_SatChl <- "https://drive.google.com/uc?export=download&id=0B1XbkXxdfD7uRHdOTGQtSVBQOE0"
SatChlGet <- GET(URL_SatChl)
SatChl1 <- content(SatChlGet, as='text')
SatChl_df <- read.csv(file=textConnection(SatChl1),stringsAsFactors=FALSE)
head(SatChl_df)
################################################################################################
### Water Temperature (SST):
URL_T <- "http://gulfwatch.nceas.ucsb.edu/goa/d1/mn/v1/object/df35b.31.1"
TGet <- GET(URL_T)
T1 <- content(TGet, as='text')
Tmps <- read.csv(file=textConnection(T1),stringsAsFactors=FALSE,strip.white=TRUE)
head(Tmps)
URL_Ts <- "http://gulfwatch.nceas.ucsb.edu/goa/d1/mn/v1/object/df35b.32.2"
TsGet <- GET(URL_Ts)
Ts1 <- content(TsGet, as='text')
TmpSams <- read.csv(file=textConnection(Ts1),stringsAsFactors=FALSE,strip.white=TRUE)
head(TmpSams)
#
Temps <- merge(Tmps, TmpSams, all.x=TRUE) # merge sample information with data values
Temps$Date <- sapply((strsplit(as.character(Temps$dateTime), split=" ")), function(x) x[1]) # split date out
head(Temps)
############################################
### NOTE : Need to deal with missing sample info for cruiseID TXS09, consecStationNum 3
############################################
# missing_date <- filter(Temps, is.na(dateTime)) # selects data with missing dates
# miss_cID <- unique(missing_date$cruiseID) # selects the cruise IDs for which sample info is missing
SST <- Temps %>%
mutate(Year=sapply((strsplit(as.character(Date), split="/")),
function(x) x[3])) %>% # creates Year column
arrange(dateTime) %>%
rename(WTemp_C=temp) %>%
group_by(Year) %>%
summarise(WTemp_C_AnnMn=mean(WTemp_C)) %>% # get annual means
ungroup() %>%
select(Year, WTemp_C_AnnMn) # selects columns wanted
##############################################################################################
|
mediacontaminante <- function(directorio, contaminante, id = 1:332) {
ni <- 0
resultado <- vector(mode="numeric",1)
c <- vector(mode="numeric",1)
s <- vector(mode="numeric",1)
for (i in id){
ni = ni+1
if (i<10){
id=paste("00",i,sep = "")
} else {
if (i<100){
id=paste("0",i,sep = "")
} else {
id=paste(i,sep="")
}
}
Archivo <- c("C:/Users/Pablo Rendon/Documents/GitHub/Programacion_Actuarial_III_OT2016")
data <- read.csv(paste(Archivo,"/",directorio,"/",id,".csv",sep = ""),header = TRUE)
if (contaminante=="nitrate"){
columna <- 3
} else if (contaminante=="sulfate") {
columna <- 2
}
c <- sum(complete.cases(data[,columna]))+c
s <- sum(data[,columna], na.rm = TRUE)+s
}
resultado <- (s/c)
resultado
}
mediacontaminante("specdata","nitrate",2:4)
|
/MediaContaminante.R
|
no_license
|
RicardoRendon/Programacion_Actuarial_III_OT2016
|
R
| false | false | 842 |
r
|
mediacontaminante <- function(directorio, contaminante, id = 1:332) {
ni <- 0
resultado <- vector(mode="numeric",1)
c <- vector(mode="numeric",1)
s <- vector(mode="numeric",1)
for (i in id){
ni = ni+1
if (i<10){
id=paste("00",i,sep = "")
} else {
if (i<100){
id=paste("0",i,sep = "")
} else {
id=paste(i,sep="")
}
}
Archivo <- c("C:/Users/Pablo Rendon/Documents/GitHub/Programacion_Actuarial_III_OT2016")
data <- read.csv(paste(Archivo,"/",directorio,"/",id,".csv",sep = ""),header = TRUE)
if (contaminante=="nitrate"){
columna <- 3
} else if (contaminante=="sulfate") {
columna <- 2
}
c <- sum(complete.cases(data[,columna]))+c
s <- sum(data[,columna], na.rm = TRUE)+s
}
resultado <- (s/c)
resultado
}
mediacontaminante("specdata","nitrate",2:4)
|
#' Compute the Shannon entropy index from regions - industries matrices
#'
#' This function computes the Shannon entropy index from (incidence) regions - industries matrices
#' @param mat An incidence matrix with regions in rows and industries in columns
#' @keywords diversity
#' @export
#' @examples
#' ## generate a region - industry matrix
#' set.seed(31)
#' mat <- matrix(sample(0:100,20,replace=T), ncol = 4)
#' rownames(mat) <- c ("R1", "R2", "R3", "R4", "R5")
#' colnames(mat) <- c ("I1", "I2", "I3", "I4")
#'
#' ## run the function
#' entropy (mat)
#' @author Pierre-Alexandre Balland \email{p.balland@uu.nl}
#' @seealso \code{\link{diversity}}
#' @references Shannon, C.E., Weaver, W. (1949) \emph{The Mathematical Theory of Communication}. Univ of Illinois Press. \cr
#' \cr
#' Frenken, K., Van Oort, F. and Verburg, T. (2007) Related variety, unrelated variety and regional economic growth, \emph{Regional studies} \strong{41} (5): 685-697.
entropy <- function(mat) {
freqs <- mat/rowSums (mat)
entropy <- - rowSums (freqs * log2(freqs+0.000000001))
entropy <- round (entropy, digits = 3)
return (entropy)
}
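# Editor's sketch (not part of the original package code): a quick numeric check of
# entropy() on a toy 2 x 4 region-industry matrix. A region spread evenly over four
# industries should score log2(4) = 2, while a fully specialised region should score ~0.
toy <- rbind(even = c(25, 25, 25, 25), spec = c(100, 0, 0, 0))
colnames(toy) <- c("I1", "I2", "I3", "I4")
entropy(toy)  # expected: even = 2, spec = 0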
|
/R/entropy.r
|
no_license
|
PABalland/EconGeo
|
R
| false | false | 1,166 |
r
|
#' Compute the Shannon entropy index from regions - industries matrices
#'
#' This function computes the Shannon entropy index from (incidence) regions - industries matrices
#' @param mat An incidence matrix with regions in rows and industries in columns
#' @keywords diversity
#' @export
#' @examples
#' ## generate a region - industry matrix
#' set.seed(31)
#' mat <- matrix(sample(0:100,20,replace=T), ncol = 4)
#' rownames(mat) <- c ("R1", "R2", "R3", "R4", "R5")
#' colnames(mat) <- c ("I1", "I2", "I3", "I4")
#'
#' ## run the function
#' entropy (mat)
#' @author Pierre-Alexandre Balland \email{p.balland@uu.nl}
#' @seealso \code{\link{diversity}}
#' @references Shannon, C.E., Weaver, W. (1949) \emph{The Mathematical Theory of Communication}. Univ of Illinois Press. \cr
#' \cr
#' Frenken, K., Van Oort, F. and Verburg, T. (2007) Related variety, unrelated variety and regional economic growth, \emph{Regional studies} \strong{41} (5): 685-697.
entropy <- function(mat) {
freqs <- mat/rowSums (mat)
entropy <- - rowSums (freqs * log2(freqs+0.000000001))
entropy <- round (entropy, digits = 3)
return (entropy)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PupilSmoother.R
\name{pupilSmoother_Hampel}
\alias{pupilSmoother_Hampel}
\title{pupilSmoother_Hampel}
\usage{
pupilSmoother_Hampel(data, Hampel_k = 15, Hampel_t0 = 1)
}
\arguments{
\item{data}{a prepared data.table}
\item{Hampel_k}{window length}
\item{Hampel_t0}{threshold}
}
\value{
processes the data.table in place
}
\description{
Hampel Filter
}
\details{
see ?pracma::hampel
}
\examples{
pupilSmoother_Hampel(data)
}
|
/man/pupilSmoother_Hampel.Rd
|
permissive
|
thohag/pupilParse
|
R
| false | true | 534 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PupilSmoother.R
\name{pupilSmoother_Hampel}
\alias{pupilSmoother_Hampel}
\title{pupilSmoother_Hampel}
\usage{
pupilSmoother_Hampel(data, Hampel_k = 15, Hampel_t0 = 1)
}
\arguments{
\item{data}{a prepared data.table}
\item{Hampel_k}{window length}
\item{Hampel_t0}{threshold}
}
\value{
processes the data.table in place
}
\description{
Hampel Filter
}
\details{
see ?pracma::hampel
}
\examples{
pupilSmoother_Hampel(data)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/atoms.R
\name{atoms}
\alias{atoms}
\alias{atoms.default}
\alias{is.atoms}
\title{Create \sQuote{atoms} Object}
\usage{
atoms(...)
\method{atoms}{default}(recname, eleid, elename, alt, resname, chainid, resid,
insert, x1, x2, x3, occ, temp, segid, basis = "xyz", ...)
is.atoms(x)
}
\arguments{
\item{recname}{a character vector containing the record name for each element.}
\item{eleid}{a integer vector containing the element ID for each element.}
\item{elename}{a character vector containing the element name for each element.}
\item{alt}{a character vector containing the alternate location indicator for each element.}
\item{resname}{a character vector containing the residue name for each element.}
\item{chainid}{a character vector containing the chain ID for each element.}
\item{resid}{a integer vector containing the residue ID for each element.}
\item{insert}{a character vector containing the codes for insertion of residue of each element.}
\item{x1, x2, x3}{a numeric vector containing the first, second and third coordinate for each element.}
\item{occ}{a numeric vector containing the occupancy for each element.}
\item{temp}{a numeric vector containing the temperature factor for each element.}
\item{segid}{a character vector containing the segment ID for each element.}
\item{basis}{a single element character vector indicating the type of basis vector used to express the atomic coordinates.}
\item{x}{an R object to be tested.}
\item{\dots}{arguments passed to methods.}
}
\value{
\code{atoms} returns a data.frame of class \sQuote{atoms} with the following components:
\describe{
\item{recname}{a character vector containing the record name for each element.}
\item{eleid}{a integer vector containing the element ID for each element.}
\item{elename}{a character vector containing the element name for each element.}
\item{alt}{a character vector containing the alternate location indicator for each element.}
\item{resname}{a character vector containing the residue name for each element.}
\item{chainid}{a character vector containing the chain ID for each element.}
\item{resid}{a integer vector containing the residue ID for each element.}
\item{insert}{a character vector containing the codes for insertion of residue for each element.}
\item{x1, x2, x3}{a numeric vector containing the first, second and third coordinate for each element.}
\item{occ}{a numeric vector containing the occupancy for each element.}
\item{temp}{a numeric vector containing the temperature factor for each element.}
\item{segid}{a character vector containing the segment ID for each element.}
\item{basis}{a single element character vector indicating the type of basis vector used to express the atomic coordinates.}
}
\code{is.atoms} returns TRUE if \code{x} is an object of class \sQuote{atoms} and FALSE otherwise.
}
\description{
Creates an object of class \sQuote{atoms} containing the data related to ATOM
and HETATM records of a PDB file.
}
\details{
\code{atoms} is a generic function to create objects of class \sQuote{atoms}.
The purpose of this class is to store ATOM and HETATM records from PDB files.
The default method creates a \code{atoms} object from its different
components, i.e.: \code{recname}, \code{eleid}, \code{elename}, \code{alt},
\code{resname}, \code{chainid}, \code{resid}, \code{insert}, \code{x1},
\code{x2}, \code{x3}, \code{occ}, \code{temp}, \code{segid} and \code{basis}.
All the arguments have to be specified except \code{basis} which by default
is set to "xyz" (Cartesian coordinates).\cr \code{is.atoms} tests if an object
of class \sQuote{atoms}, i.e. if it has a \dQuote{class} attribute equal to
\code{atoms}.
}
\examples{
x <- atoms(recname = c("ATOM","ATOM"), eleid = 1:2, elename = c("H","H"), alt = "",
resname = c("H2","H2"), chainid = "", resid = c(1,1), insert = "",
x1 = c(0,0), x2 = c(0,0), x3 = c(0,1), occ = c(0.0,0.0), temp = c(1.0,1.0),
segid = c("H2","H2"))
print(x)
is.atoms(x)
}
\seealso{
\code{\link{basis}}, \code{\link{coords}}, \code{\link{pdb}}
}
\keyword{classes}
|
/man/atoms.Rd
|
no_license
|
cran/Rpdb
|
R
| false | true | 4,131 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/atoms.R
\name{atoms}
\alias{atoms}
\alias{atoms.default}
\alias{is.atoms}
\title{Create \sQuote{atoms} Object}
\usage{
atoms(...)
\method{atoms}{default}(recname, eleid, elename, alt, resname, chainid, resid,
insert, x1, x2, x3, occ, temp, segid, basis = "xyz", ...)
is.atoms(x)
}
\arguments{
\item{recname}{a character vector containing the record name for each element.}
\item{eleid}{a integer vector containing the element ID for each element.}
\item{elename}{a character vector containing the element name for each element.}
\item{alt}{a character vector containing the alternate location indicator for each element.}
\item{resname}{a character vector containing the residue name for each element.}
\item{chainid}{a character vector containing the chain ID for each element.}
\item{resid}{a integer vector containing the residue ID for each element.}
\item{insert}{a character vector containing the codes for insertion of residue of each element.}
\item{x1, x2, x3}{a numeric vector containing the first, second and third coordinate for each element.}
\item{occ}{a numeric vector containing the occupancy for each element.}
\item{temp}{a numeric vector containing the temperature factor for each element.}
\item{segid}{a character vector containing the segment ID for each element.}
\item{basis}{a single element character vector indicating the type of basis vector used to express the atomic coordinates.}
\item{x}{an R object to be tested.}
\item{\dots}{arguments passed to methods.}
}
\value{
\code{atoms} returns a data.frame of class \sQuote{atoms} with the following components:
\describe{
\item{recname}{a character vector containing the record name for each element.}
\item{eleid}{a integer vector containing the element ID for each element.}
\item{elename}{a character vector containing the element name for each element.}
\item{alt}{a character vector containing the alternate location indicator for each element.}
\item{resname}{a character vector containing the residue name for each element.}
\item{chainid}{a character vector containing the chain ID for each element.}
\item{resid}{a integer vector containing the residue ID for each element.}
\item{insert}{a character vector containing the codes for insertion of residue for each element.}
\item{x1, x2, x3}{a numeric vector containing the first, second and third coordinate for each element.}
\item{occ}{a numeric vector containing the occupancy for each element.}
\item{temp}{a numeric vector containing the temperature factor for each element.}
\item{segid}{a character vector containing the segment ID for each element.}
\item{basis}{a single element character vector indicating the type of basis vector used to express the atomic coordinates.}
}
\code{is.atoms} returns TRUE if \code{x} is an object of class \sQuote{atoms} and FALSE otherwise.
}
\description{
Creates an object of class \sQuote{atoms} containing the data related to ATOM
and HETATM records of a PDB file.
}
\details{
\code{atoms} is a generic function to create objects of class \sQuote{atoms}.
The purpose of this class is to store ATOM and HETATM records from PDB files.
The default method creates a \code{atoms} object from its different
components, i.e.: \code{recname}, \code{eleid}, \code{elename}, \code{alt},
\code{resname}, \code{chainid}, \code{resid}, \code{insert}, \code{x1},
\code{x2}, \code{x3}, \code{occ}, \code{temp}, \code{segid} and \code{basis}.
All the arguments have to be specified except \code{basis} which by default
is set to "xyz" (Cartesian coordinates).\cr \code{is.atoms} tests if an object
of class \sQuote{atoms}, i.e. if it has a \dQuote{class} attribute equal to
\code{atoms}.
}
\examples{
x <- atoms(recname = c("ATOM","ATOM"), eleid = 1:2, elename = c("H","H"), alt = "",
resname = c("H2","H2"), chainid = "", resid = c(1,1), insert = "",
x1 = c(0,0), x2 = c(0,0), x3 = c(0,1), occ = c(0.0,0.0), temp = c(1.0,1.0),
segid = c("H2","H2"))
print(x)
is.atoms(x)
}
\seealso{
\code{\link{basis}}, \code{\link{coords}}, \code{\link{pdb}}
}
\keyword{classes}
|
# assumption: the sourced script creates the STEM data frame in the workspace
source("http://grimshawville.byu.edu/STEMgetdata.R")
# EDA:
# Side-by-side boxplots
boxplot(nST~y, data = STEM)
boxplot(new.teach~y, data = STEM)
boxplot(new.stu~y, data = STEM)
# Contingency tables
table.prevCalc <- table(STEM$prevCalc, STEM$y)
table.newMJ <- table(STEM$newMJ, STEM$y)
table.gender <- table(STEM$gender, STEM$y)
# Analysis
# The response variable is whether or not the student dropped the class. The explanatory variables are
# prevCalc (calculus experience), nST (percentile of standardized test), newMJ (intended major),
# new.teach (instructor quality), new.stu(student-centered practices), and gender.
# declare categorical variables as R type factor
STEM$gender <- factor(STEM$gender)
# reference levels assumed to be "1", matching the prediction data frames further down
STEM$gender <- relevel(STEM$gender, ref = "1")
STEM$prevCalc <- factor(STEM$prevCalc)
STEM$prevCalc <- relevel(STEM$prevCalc, ref = "1")
STEM$newMJ <- factor(STEM$newMJ)
STEM$newMJ <- relevel(STEM$newMJ, ref = "1")
#MODEL
# logit(y=l)=logit(Switchers)=beta0 + beta1nsT +beta2new.teach + beta3new.Stu + gender_i + prevCalc_+newMJ_k
out.stem <- glm(y ~ nST + new.teach + new.stu + gender + prevCalc + newMJ, data = STEM, family = "binomial")
summary(out.stem)
# To interpret gender difference, compute transformed coeff
exp(coef(out.stem)) [-1]
# create a graphic showing the difference between women & men
# men
x.star<-data.frame(gender="1", nST=seq(2,99, length=100),
new.teach=6, new.stu=6, prevCalc="1", newMJ="1")
plot(x.star$nST, predict*out.STEM, newdata=x.star, type = "response"),
type="l", ylim=(0, 0.25),
ylab = "P(Swith from Calc Seq)",xlab="Percentile of Standardized Test"
# women
x.star<-data.frame(gender="2", nST=seq(2,99, length=100),
new.teach=6, new.stu=6, prevCalc="1", newMJ="1")
lines(x.star$nST, predict*out.STEM, newdata=x.star, type = "response"),
col="red"
ylab = "P(Switch from Calc Seq)",xlab="Percentile of Standardized Test"
# difference between men and women, holding all else constant
# 95% CI
exp(confint(out.stem)[-1,])
# z-test
summary(out.stem)
# X^2 test
# reduced model without gender (assumed target of this likelihood ratio test)
red1.STEM <- glm(y ~ nST + new.teach + new.stu + prevCalc + newMJ, data = STEM, family = "binomial")
anova(red1.STEM, out.stem, test = "Chisq")
summary(out.stem)
# Is there an effect due to calculus preparation?
# X^2 test
red2.STEM <- glm(y ~ nST + new.teach + new.stu + gender + newMJ, data = STEM, family = "binomial")
anova(red2.STEM, out.stem, test = "Chisq")
# If we focus on using the model to identify students at risk of switching
# ROC Curve
library(ROCR)
STEM.pred <- prediction(predict(out.stem, type = "response"), STEM$y)
STEM.perf <- performance(STEM.pred, measure = "tpr", x.measure = "fpr")
plot(STEM.perf,xlab="1-specificity", ylab = "sensitivity", main = "ROC Curve")
abline(0, 1, col="gray")
# AUC
performance(STEM.pred, measure = "auc")
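# Editor's sketch (simulated data, not the STEM data; assumes only that ROCR is installed).
# Once an ROC curve is available, a working cutoff for flagging at-risk students can be
# chosen by maximising Youden's J = sensitivity + specificity - 1 over candidate cutoffs.
library(ROCR)
set.seed(42)
p.hat <- runif(200)                    # simulated predicted probabilities
y.sim <- rbinom(200, 1, p.hat)         # simulated outcomes consistent with them
pred.sim <- prediction(p.hat, y.sim)
perf.sim <- performance(pred.sim, measure = "tpr", x.measure = "fpr")
J <- perf.sim@y.values[[1]] - perf.sim@x.values[[1]]   # sensitivity - (1 - specificity)
best.cut <- perf.sim@alpha.values[[1]][which.max(J)]
best.cut  # probability threshold maximising Youden's J in this simulation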
|
/Stats 330 - storage/STEM Majors - Logistical regression.R
|
no_license
|
ayoung68/SchoolStory2
|
R
| false | false | 2,820 |
r
|
# assumption: the sourced script creates the STEM data frame in the workspace
source("http://grimshawville.byu.edu/STEMgetdata.R")
# EDA:
# Side-by-side boxplots
boxplot(nST~y, data = STEM)
boxplot(new.teach~y, data = STEM)
boxplot(new.stu~y, data = STEM)
# Contingency tables
table.prevCalc <- table(STEM$prevCalc, STEM$y)
table.newMJ <- table(STEM$newMJ, STEM$y)
table.gender <- table(STEM$gender, STEM$y)
# Analysis
# The response variable is whether or not the student dropped the class. The explanatory variables are
# prevCalc (calculus experience), nST (percentile of standardized test), newMJ (intended major),
# new.teach (instructor quality), new.stu(student-centered practices), and gender.
# declare categorical variables as R type factor
STEM$gender <- factor(STEM$gender)
# reference levels assumed to be "1", matching the prediction data frames further down
STEM$gender <- relevel(STEM$gender, ref = "1")
STEM$prevCalc <- factor(STEM$prevCalc)
STEM$prevCalc <- relevel(STEM$prevCalc, ref = "1")
STEM$newMJ <- factor(STEM$newMJ)
STEM$newMJ <- relevel(STEM$newMJ, ref = "1")
#MODEL
# logit(y=l)=logit(Switchers)=beta0 + beta1nsT +beta2new.teach + beta3new.Stu + gender_i + prevCalc_+newMJ_k
out.stem <- glm(y ~ nST + new.teach + new.stu + gender + prevCalc + newMJ, data = STEM, family = "binomial")
summary(out.stem)
# To interpret gender difference, compute transformed coeff
exp(coef(out.stem)) [-1]
# create a graphic showing the difference between women & men
# men
x.star<-data.frame(gender="1", nST=seq(2,99, length=100),
new.teach=6, new.stu=6, prevCalc="1", newMJ="1")
plot(x.star$nST, predict*out.STEM, newdata=x.star, type = "response"),
type="l", ylim=(0, 0.25),
ylab = "P(Swith from Calc Seq)",xlab="Percentile of Standardized Test"
# women
x.star<-data.frame(gender="2", nST=seq(2,99, length=100),
new.teach=6, new.stu=6, prevCalc="1", newMJ="1")
lines(x.star$nST, predict*out.STEM, newdata=x.star, type = "response"),
col="red"
ylab = "P(Switch from Calc Seq)",xlab="Percentile of Standardized Test"
# difference between men and women, holding all else constant
# 95% CI
exp(confint(out.stem)[-1,])
# z-test
summary(out.stem)
# X^2 test
# reduced model without gender (assumed target of this likelihood ratio test)
red1.STEM <- glm(y ~ nST + new.teach + new.stu + prevCalc + newMJ, data = STEM, family = "binomial")
anova(red1.STEM, out.stem, test = "Chisq")
summary(out.stem)
# Is there an effect due to calculus preparation?
# X^2 test
red2.STEM <- glm(y ~ nST + new.teach + new.stu + gender + newMJ, data = STEM, family = "binomial")
anova(red2.STEM, out.stem, test = "Chisq")
# If we focus on using the model to identify students at risk of switching
# ROC Curve
library(ROCR)
STEM.pred <- prediction(predict(out.stem, type = "response"), STEM$y)
STEM.perf <- performance(STEM.pred, measure = "tpr", x.measure = "fpr")
plot(STEM.perf,xlab="1-specificity", ylab = "sensitivity", main = "ROC Curve")
abline(0, 1, col="gray")
# AUC
performance(STEM.pred, measure = "auc")
|
\name{census}
\alias{census}
\title{Cross tabulate a consensus vector}
\description{
Cross tabulate the population composition
}
\usage{
census(v, pop.def)
}
\arguments{
\item{v}{a consensus vector of population classifications.}
\item{pop.def}{A population (rows) definition dataframe with parameters (columns) for gating and clustering.}
}
\value{
a one row, cross-tabulated dataframe of counts with one column for each population specified by the rows in the pop.def dataframe. Zeros are filled in for absent populations.
}
\examples{
opp.file.path <- system.file("extdata","seaflow_cruise","2011_001", "1.evt.opp",
package="flowPhyto")
pop.file.path <- system.file("extdata","seaflow_cruise","pop.def.tab",
package="flowPhyto")
opp <- readSeaflow(opp.file.path)
def <- readPopDef(pop.file.path)
pop <- classify(x=opp, pop.def= def)
census(v=pop$pop, pop.def=def)
}
|
/man/census.Rd
|
permissive
|
armbrustlab/flowPhyto
|
R
| false | false | 884 |
rd
|
\name{census}
\alias{census}
\title{Cross tabulate a consensus vector}
\description{
Cross tabulate the population composition
}
\usage{
census(v, pop.def)
}
\arguments{
\item{v}{a consensus vector of population classifications.}
\item{pop.def}{A population (rows) definition dataframe with parameters (columns) for gating and clustering.}
}
\value{
a one row, cross-tabulated dataframe of counts with one column for each population specified by the rows in the pop.def dataframe. Zeros are filled in for absent populations.
}
\examples{
opp.file.path <- system.file("extdata","seaflow_cruise","2011_001", "1.evt.opp",
package="flowPhyto")
pop.file.path <- system.file("extdata","seaflow_cruise","pop.def.tab",
package="flowPhyto")
opp <- readSeaflow(opp.file.path)
def <- readPopDef(pop.file.path)
pop <- classify(x=opp, pop.def= def)
census(v=pop$pop, pop.def=def)
}
|
\name{createcsdata}
\alias{createcsdata}
\title{Writes to camcon script for cross-sectional data}
\usage{
createcsdata(data, propdrop, lhs, rhs, ngrps, cc_dir,
cc_rfile)
}
\arguments{
\item{data}{a single line generated by getdata() that
contains the data set and specified parameters}
\item{propdrop}{the proportion of rows to drop}
\item{lhs}{the name of the original data frame}
\item{rhs}{the input for the original data frame}
\item{ngrps}{number of unique solution sets to produce}
\item{cc_dir}{location of output directory to be written
to during replication process}
\item{cc_rfile}{location of output file to be written to
during replication process}
}
\description{
Generates a new data frame for every individual by
dropping random rows from the data frame
}
|
/man/createcsdata.Rd
|
no_license
|
mfrmn/camcon
|
R
| false | false | 808 |
rd
|
\name{createcsdata}
\alias{createcsdata}
\title{Writes to camcon script for cross-sectional data}
\usage{
createcsdata(data, propdrop, lhs, rhs, ngrps, cc_dir,
cc_rfile)
}
\arguments{
\item{data}{a single line generated by getdata() that
contains the data set and specified parameters}
\item{propdrop}{the proportion of rows to drop}
\item{lhs}{the name of the original data frame}
\item{rhs}{the input for the original data frame}
\item{ngrps}{number of unique solution sets to produce}
\item{cc_dir}{location of output directory to be written
to during replication process}
\item{cc_rfile}{location of output file to be written to
during replication process}
}
\description{
Generates a new data frame for every individual by
dropping random rows from the data frame
}
|
library(Biobase)
library(ALL)
library(genefilter)
library(DMwR)
library(class)
library(lattice)
library(Hmisc)
library(randomForest)
#data(ALL)
#ALL
#pD <- phenoData(ALL)
#varMetadata(pD)
#table(ALL$BT)
#table(ALL$mol.biol)
#table(ALL$BT,ALL$mol.bio)
#featureNames(ALL)[1:10]
#sampleNames(ALL)[1:5]
#tgt.cases <- which(ALL$BT %in% levels(ALL$BT)[1:5] &
# ALL$mol.bio %in% levels(ALL$mol.bio)[1:4])
#ALLb <- ALL[,tgt.cases]
#ALLb
#ALLb$BT <- factor(ALLb$BT)
#ALLb$mol.bio <- factor(ALLb$mol.bio)
###################################################
### Exploring the data set
###################################################
#es <- exprs(ALLb)
#ALLb <- nsFilter(ALLb,
# var.func=IQR,var.cutoff=IQR(as.vector(es))/5,
# feature.exclude="^AFFX")
#ALLb <- ALLb$eset
# the data set
#featureNames(ALLb) <- make.names(featureNames(ALLb))
#dt <- data.frame(t(exprs(ALLb)),Mut=ALLb$mol.bio)
#dt<-dt[,c(3800:3943)]
#######################
a<-read.delim("/mnt/ilustre/users/sanger-dev/sg-users/zhangpeng/fighting/gogo/cal_data_all_merge_1.txt")
rownames(a)<-a[,1]
a<-a[,-1]
alength<-length(a[1,])
print(alength)
print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
arowlength<-length(a[,1])
a$para<-c(1:arowlength)
#a$para<-sapply(a[,alength],a$para,function(x,y) if(x=="c18"|x=="c19"|x=="c20"){y<-"colorectal"}else{y<-"other"})
#dt<-a
blength<-length(a[1,])
print(blength)
print("BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB")
a[which(a[,alength]=='c16'),blength]<-"stomach"
a[which(a[,alength]!='c16'),blength]<-"other"
a<-a[,-alength]
a<-as.data.frame(a)
#print(head(a[,length(a[1,])]))
a1<-a[which(a[,alength]=="stomach"),]
a2<-a[which(a[,alength]=="other"),]
a1_length<-length(a1[,1])
a2_length<-length(a2[,1])
a1_length<-a1_length*2
a3_list<-round(runif(a1_length,0,a2_length))
a2<-a2[a3_list,]
a2<-na.omit(a2)
a<-rbind(a1,a2)
dt <- data.frame(a[,-length(a[1,])],Mut=a[,length(a[1,])])
#print(a[,length(a[1,])])
#print("a")
######################
############data_in_put
getVarsSet <- function(cluster,nvars=30,seed=NULL,verb=F)
{
if (!is.null(seed)) set.seed(seed)
cls <- cutree(cluster,nvars)
tots <- table(cls)
vars <- c()
vars <- sapply(1:nvars,function(clID)
{
if (!length(tots[clID])) stop('Empty cluster! (',clID,')')
x <- sample(1:tots[clID],1)
names(cls[cls==clID])[x]
})
if (verb) structure(vars,clusMemb=cls,clusTots=tots)
else vars
}
rpart.loocv <- function(form,train,test,...) {
require(rpart,quietly=T)
m <- rpart(form,train,...)
p <- predict(m,test,type='class')
c(accuracy=ifelse(p == resp(form,test),100,0))
}
rowIQRs <- function(em){rowQ(em,ceiling(0.75*ncol(em))) - rowQ(em,floor(0.25*ncol(em)))}
###plot(rowMedians(es),rowIQRs(es), xlab='Median expression level', ylab='IQR expression level', main='Main Characteristics of Genes Expression Levels')
kNN <- function(form,train,test,norm=T,norm.stats=NULL,...) {
require(class,quietly=TRUE)
tgtCol <- which(colnames(train)==as.character(form[[2]]))
if (norm) {
if (is.null(norm.stats)) tmp <- scale(train[,-tgtCol],center=T,scale=T)
else tmp <- scale(train[,-tgtCol],center=norm.stats[[1]],scale=norm.stats[[2]])
train[,-tgtCol] <- tmp
ms <- attr(tmp,"scaled:center")
ss <- attr(tmp,"scaled:scale")
test[,-tgtCol] <- scale(test[,-tgtCol],center=ms,scale=ss)
}
knn(train[,-tgtCol],test[,-tgtCol],train[,tgtCol],...)
}
vars <- list()
vars$randomForest <- list(ntree=c(500,750,100),
mtry=c(5,15,30),
fs.meth=list(list('all'),
list('rf',30),
list('varclus',30,50)))
vars$svm <- list(cost=c(1,100,500),
gamma=c(0.01,0.001,0.0001),
fs.meth=list(list('all'),
list('rf',30),
list('varclus',30,50)))
vars$knn <- list(k=c(3,5,7,11),
norm=c(T,F),
fs.meth=list(list('all'),
list('rf',30),
list('varclus',30,50)))
print("b")
vars <- list()
vars$randomForest <- list(ntree=c(500,750,100),
mtry=c(5,15,30),
fs.meth=list(
list('ALL')))
vars$svm <- list(cost=c(1,100,500),
gamma=c(0.01,0.001,0.0001),
fs.meth=list(
list('ALL')))
vars$knn <- list(k=c(3,5,7,11),
norm=c(T,F),
fs.meth=list(
list('ALL')))
varsEnsembles <- function(tgt,train,test,
varsSets,
baseLearner,blPars,
verb=F)
{
preds <- matrix(NA,ncol=length(varsSets),nrow=NROW(test))
for(v in seq(along=varsSets)) {
if (baseLearner=='knn')
preds[,v] <- knn(train[,varsSets[[v]]],
test[,varsSets[[v]]],
train[,tgt],blPars)
else {
m <- do.call(baseLearner,
c(list(as.formula(paste(tgt,
paste(varsSets[[v]],
collapse='+'),
sep='~')),
train[,c(tgt,varsSets[[v]])]),
blPars)
)
if (baseLearner == 'randomForest')
preds[,v] <- do.call('predict',
list(m,test[,c(tgt,varsSets[[v]])],
type='response'))
else
preds[,v] <- do.call('predict',
list(m,test[,c(tgt,varsSets[[v]])]))
}}
ps <- apply(preds,1,function(x)
{levels(factor(x))[which.max(table(factor(x)))]})
ps <- factor(ps,
levels=1:nlevels(train[,tgt]),
labels=levels(train[,tgt]))
if (verb) structure(ps,ensemblePreds=preds) else ps
}
genericModel <- function(form,train,test,
learner,
fs.meth,
...)
{
cat('=')
tgt <- as.character(form[[2]])
tgtCol <- which(colnames(train)==tgt)
# Anova filtering
f <- Anova(train[,tgt],p=0.01)
ff <- filterfun(f)
genes <- genefilter(t(train[,-tgtCol]),ff)
genes <- names(genes)[genes]
train <- train[,c(tgt,genes)]
test <- test[,c(tgt,genes)]
tgtCol <- 1
# Specific filtering
if (fs.meth[[1]]=='varclus') {
require(Hmisc,quietly=T)
print("a")
v <- varclus(as.matrix(train[,-tgtCol]))
print(v)
#v <- varclus(as.matrix(train))
VSs <- lapply(1:fs.meth[[3]],function(x)
getVarsSet(v$hclust,nvars=fs.meth[[2]]))
print(VSs)
pred <- varsEnsembles(tgt,train,test,VSs,learner,list(...))
} else {
if (fs.meth[[1]]=='rf') {
require(randomForest,quietly=T)
rf <- randomForest(form,train,importance=T)
imp <- importance(rf)
imp <- imp[,ncol(imp)-1]
rf.genes <- names(imp)[order(imp,decreasing=T)[1:fs.meth[[2]]]]
train <- train[,c(tgt,rf.genes)]
test <- test[,c(tgt,rf.genes)]
}
if (learner == 'knn')
pred <- kNN(form,
train,
test,
norm.stats=list(rowMedians(t(as.matrix(train[,-tgtCol]))),
rowIQRs(t(as.matrix(train[,-tgtCol])))),
...)
else {
model <- do.call(learner,c(list(form,train),list(...)))
pred <- if (learner != 'randomForest') predict(model,test)
else predict(model,test,type='response')
}
}
c(accuracy=ifelse(pred == resp(form,test),100,0))
}
require(class,quietly=TRUE)
require(randomForest,quietly=TRUE)
require(e1071,quietly=TRUE)
print("c")
DSs <- list(dataset(Mut ~ .,dt,'ALL'))
#DSs <- list(dataset(primarg_diagnosis ~ .,dt,'ALL'))
# The learners to evaluate
TODO <- c('knn','svm','randomForest')
for(td in TODO) {
assign(td,
experimentalComparison(
DSs,
c(
do.call('variants',
c(list('genericModel',learner=td),
vars[[td]],
varsRootName=td))
),
#cvSettings(3,10,1234)
loocvSettings(seed=1234,verbose=F)
)
)
file_path = '/mnt/ilustre/users/sanger-dev/sg-users/zhangpeng/fighting/model6/stomach/'
file_path = paste(file_path,td,sep='')
file_path = paste(file_path,'Rdata', sep = '.')
#save(list=td,file=paste(td,'Rdata',sep='.'))
save(list=td,file=file_path)
}
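# Editor's sketch (toy data only): the core of varsEnsembles() above is a plain majority
# vote across the per-variable-set predictions. The same idea in isolation, with base R:
toy.preds <- matrix(c("a","a","b",
                      "b","b","b",
                      "a","b","a"), nrow = 3, byrow = TRUE)  # 3 cases x 3 base models
apply(toy.preds, 1, function(x) names(which.max(table(x))))
# returns the most frequent label per case: "a", "b", "a"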
|
/code5/stomach.R
|
no_license
|
lizhengbio/methylantion_ML_sumamry
|
R
| false | false | 8,900 |
r
|
library(Biobase)
library(ALL)
library(genefilter)
library(DMwR)
library(class)
library(lattice)
library(Hmisc)
library(randomForest)
#data(ALL)
#ALL
#pD <- phenoData(ALL)
#varMetadata(pD)
#table(ALL$BT)
#table(ALL$mol.biol)
#table(ALL$BT,ALL$mol.bio)
#featureNames(ALL)[1:10]
#sampleNames(ALL)[1:5]
#tgt.cases <- which(ALL$BT %in% levels(ALL$BT)[1:5] &
# ALL$mol.bio %in% levels(ALL$mol.bio)[1:4])
#ALLb <- ALL[,tgt.cases]
#ALLb
#ALLb$BT <- factor(ALLb$BT)
#ALLb$mol.bio <- factor(ALLb$mol.bio)
###################################################
### Exploring the data set
###################################################
#es <- exprs(ALLb)
#ALLb <- nsFilter(ALLb,
# var.func=IQR,var.cutoff=IQR(as.vector(es))/5,
# feature.exclude="^AFFX")
#ALLb <- ALLb$eset
# the data set
#featureNames(ALLb) <- make.names(featureNames(ALLb))
#dt <- data.frame(t(exprs(ALLb)),Mut=ALLb$mol.bio)
#dt<-dt[,c(3800:3943)]
#######################
a<-read.delim("/mnt/ilustre/users/sanger-dev/sg-users/zhangpeng/fighting/gogo/cal_data_all_merge_1.txt")
rownames(a)<-a[,1]
a<-a[,-1]
alength<-length(a[1,])
print(alength)
print("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
arowlength<-length(a[,1])
a$para<-c(1:arowlength)
#a$para<-sapply(a[,alength],a$para,function(x,y) if(x=="c18"|x=="c19"|x=="c20"){y<-"colorectal"}else{y<-"other"})
#dt<-a
blength<-length(a[1,])
print(blength)
print("BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB")
a[which(a[,alength]=='c16'),blength]<-"stomach"
a[which(a[,alength]!='c16'),blength]<-"other"
a<-a[,-alength]
a<-as.data.frame(a)
#print(head(a[,length(a[1,])]))
a1<-a[which(a[,alength]=="stomach"),]
a2<-a[which(a[,alength]=="other"),]
a1_length<-length(a1[,1])
a2_length<-length(a2[,1])
a1_length<-a1_length*2
a3_list<-round(runif(a1_length,0,a2_length))
a2<-a2[a3_list,]
a2<-na.omit(a2)
a<-rbind(a1,a2)
dt <- data.frame(a[,-length(a[1,])],Mut=a[,length(a[1,])])
#print(a[,length(a[1,])])
#print("a")
######################
############data_in_put
getVarsSet <- function(cluster,nvars=30,seed=NULL,verb=F)
{
if (!is.null(seed)) set.seed(seed)
cls <- cutree(cluster,nvars)
tots <- table(cls)
vars <- c()
vars <- sapply(1:nvars,function(clID)
{
if (!length(tots[clID])) stop('Empty cluster! (',clID,')')
x <- sample(1:tots[clID],1)
names(cls[cls==clID])[x]
})
if (verb) structure(vars,clusMemb=cls,clusTots=tots)
else vars
}
rpart.loocv <- function(form,train,test,...) {
require(rpart,quietly=T)
m <- rpart(form,train,...)
p <- predict(m,test,type='class')
c(accuracy=ifelse(p == resp(form,test),100,0))
}
rowIQRs <- function(em){rowQ(em,ceiling(0.75*ncol(em))) - rowQ(em,floor(0.25*ncol(em)))}
###plot(rowMedians(es),rowIQRs(es), xlab='Median expression level', ylab='IQR expression level', main='Main Characteristics of Genes Expression Levels')
kNN <- function(form,train,test,norm=T,norm.stats=NULL,...) {
require(class,quietly=TRUE)
tgtCol <- which(colnames(train)==as.character(form[[2]]))
if (norm) {
if (is.null(norm.stats)) tmp <- scale(train[,-tgtCol],center=T,scale=T)
else tmp <- scale(train[,-tgtCol],center=norm.stats[[1]],scale=norm.stats[[2]])
train[,-tgtCol] <- tmp
ms <- attr(tmp,"scaled:center")
ss <- attr(tmp,"scaled:scale")
test[,-tgtCol] <- scale(test[,-tgtCol],center=ms,scale=ss)
}
knn(train[,-tgtCol],test[,-tgtCol],train[,tgtCol],...)
}
vars <- list()
vars$randomForest <- list(ntree=c(500,750,100),
mtry=c(5,15,30),
fs.meth=list(list('all'),
list('rf',30),
list('varclus',30,50)))
vars$svm <- list(cost=c(1,100,500),
gamma=c(0.01,0.001,0.0001),
fs.meth=list(list('all'),
list('rf',30),
list('varclus',30,50)))
vars$knn <- list(k=c(3,5,7,11),
norm=c(T,F),
fs.meth=list(list('all'),
list('rf',30),
list('varclus',30,50)))
print("b")
vars <- list()
vars$randomForest <- list(ntree=c(500,750,100),
mtry=c(5,15,30),
fs.meth=list(
list('ALL')))
vars$svm <- list(cost=c(1,100,500),
gamma=c(0.01,0.001,0.0001),
fs.meth=list(
list('ALL')))
vars$knn <- list(k=c(3,5,7,11),
norm=c(T,F),
fs.meth=list(
list('ALL')))
varsEnsembles <- function(tgt,train,test,
varsSets,
baseLearner,blPars,
verb=F)
{
preds <- matrix(NA,ncol=length(varsSets),nrow=NROW(test))
for(v in seq(along=varsSets)) {
if (baseLearner=='knn')
preds[,v] <- knn(train[,varsSets[[v]]],
test[,varsSets[[v]]],
train[,tgt],blPars)
else {
m <- do.call(baseLearner,
c(list(as.formula(paste(tgt,
paste(varsSets[[v]],
collapse='+'),
sep='~')),
train[,c(tgt,varsSets[[v]])]),
blPars)
)
if (baseLearner == 'randomForest')
preds[,v] <- do.call('predict',
list(m,test[,c(tgt,varsSets[[v]])],
type='response'))
else
preds[,v] <- do.call('predict',
list(m,test[,c(tgt,varsSets[[v]])]))
}}
ps <- apply(preds,1,function(x)
{levels(factor(x))[which.max(table(factor(x)))]})
ps <- factor(ps,
levels=1:nlevels(train[,tgt]),
labels=levels(train[,tgt]))
if (verb) structure(ps,ensemblePreds=preds) else ps
}
genericModel <- function(form,train,test,
learner,
fs.meth,
...)
{
cat('=')
tgt <- as.character(form[[2]])
tgtCol <- which(colnames(train)==tgt)
# Anova filtering
f <- Anova(train[,tgt],p=0.01)
ff <- filterfun(f)
genes <- genefilter(t(train[,-tgtCol]),ff)
genes <- names(genes)[genes]
train <- train[,c(tgt,genes)]
test <- test[,c(tgt,genes)]
tgtCol <- 1
# Specific filtering
if (fs.meth[[1]]=='varclus') {
require(Hmisc,quietly=T)
print("a")
v <- varclus(as.matrix(train[,-tgtCol]))
print(v)
#v <- varclus(as.matrix(train))
VSs <- lapply(1:fs.meth[[3]],function(x)
getVarsSet(v$hclust,nvars=fs.meth[[2]]))
print(VSs)
pred <- varsEnsembles(tgt,train,test,VSs,learner,list(...))
} else {
if (fs.meth[[1]]=='rf') {
require(randomForest,quietly=T)
rf <- randomForest(form,train,importance=T)
imp <- importance(rf)
imp <- imp[,ncol(imp)-1]
rf.genes <- names(imp)[order(imp,decreasing=T)[1:fs.meth[[2]]]]
train <- train[,c(tgt,rf.genes)]
test <- test[,c(tgt,rf.genes)]
}
if (learner == 'knn')
pred <- kNN(form,
train,
test,
norm.stats=list(rowMedians(t(as.matrix(train[,-tgtCol]))),
rowIQRs(t(as.matrix(train[,-tgtCol])))),
...)
else {
model <- do.call(learner,c(list(form,train),list(...)))
pred <- if (learner != 'randomForest') predict(model,test)
else predict(model,test,type='response')
}
}
c(accuracy=ifelse(pred == resp(form,test),100,0))
}
require(class,quietly=TRUE)
require(randomForest,quietly=TRUE)
require(e1071,quietly=TRUE)
print("c")
DSs <- list(dataset(Mut ~ .,dt,'ALL'))
#DSs <- list(dataset(primarg_diagnosis ~ .,dt,'ALL'))
# The learners to evaluate
TODO <- c('knn','svm','randomForest')
for(td in TODO) {
assign(td,
experimentalComparison(
DSs,
c(
do.call('variants',
c(list('genericModel',learner=td),
vars[[td]],
varsRootName=td))
),
#cvSettings(3,10,1234)
loocvSettings(seed=1234,verbose=F)
)
)
file_path = '/mnt/ilustre/users/sanger-dev/sg-users/zhangpeng/fighting/model6/stomach/'
file_path = paste(file_path,td,sep='')
file_path = paste(file_path,'Rdata', sep = '.')
#save(list=td,file=paste(td,'Rdata',sep='.'))
save(list=td,file=file_path)
}
|
########################
# Topic Modelling EDA
#######################
setwd("~/Dropbox/PNLP")
Sys.setlocale(locale="es_ES.UTF-8") # To display special characters
options(scipen = 99999)
source("keyFunctions.R")
# Reading the data
df <- fread("data/train.csv")
stop_words <- prep_fun(tm::stopwords("en"))
# iterizing
stackDF <- rbind(data.table(id = df$qid1, pregunta = df$question1),
data.table(id = df$qid2, pregunta = df$question2))
stackDF <- stackDF[!duplicated(stackDF$id)] # drop duplicated questions
it <- itoken(stackDF$pregunta, preprocessor = prep_fun, tokenizer = word_tokenizer)
# Vocabulary, vectorizer and DTM
vocab <- create_vocabulary(it, stopwords = stop_words)
vocab <- prune_vocabulary(vocab, term_count_min = 5L)
vectorizer <- vocab_vectorizer(vocab)
dtm <- create_dtm(it, vectorizer)
# Finding Topic Number
tn <- topicNumber(docTermMatrix = dtm, maxTopics = 50, interval = 2, n_cores = 7)
#
lda_model <- text2vec::LDA$new(n_topics = 10, vocabulary = vocab, doc_topic_prior = 0.1, topic_word_prior = 0.01)
doc_topic_distr <- lda_model$fit_transform(dtm, n_iter = 1000, convergence_tol = 0.01, check_convergence_every_n = 10)
lda_model$plot()
lda_model
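# Editor's sketch (assumes the objects above were created successfully):
# doc_topic_distr is a documents x topics matrix of topic proportions, so the dominant
# topic of each question and the rough topic sizes come from base R alone.
dominant_topic <- apply(doc_topic_distr, 1, which.max)
table(dominant_topic)  # approximate size of each of the 10 topics across the corpus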
|
/EDA/EDA_topicModelling.R
|
no_license
|
falrrema/PNLP
|
R
| false | false | 1,192 |
r
|
########################
# Topic Modelling EDA
#######################
setwd("~/Dropbox/PNLP")
Sys.setlocale(locale="es_ES.UTF-8") # To display special characters
options(scipen = 99999)
source("keyFunctions.R")
# Reading the data
df <- fread("data/train.csv")
stop_words <- prep_fun(tm::stopwords("en"))
# iterizing
stackDF <- rbind(data.table(id = df$qid1, pregunta = df$question1),
data.table(id = df$qid2, pregunta = df$question2))
stackDF <- stackDF[!duplicated(stackDF$id)] # drop duplicated questions
it <- itoken(stackDF$pregunta, preprocessor = prep_fun, tokenizer = word_tokenizer)
# Vocabulary, vectorizer and DTM
vocab <- create_vocabulary(it, stopwords = stop_words)
vocab <- prune_vocabulary(vocab, term_count_min = 5L)
vectorizer <- vocab_vectorizer(vocab)
dtm <- create_dtm(it, vectorizer)
# Finding Topic Number
tn <- topicNumber(docTermMatrix = dtm, maxTopics = 50, interval = 2, n_cores = 7)
#
lda_model <- text2vec::LDA$new(n_topics = 10, vocabulary = vocab, doc_topic_prior = 0.1, topic_word_prior = 0.01)
doc_topic_distr <- lda_model$fit_transform(dtm, n_iter = 1000, convergence_tol = 0.01, check_convergence_every_n = 10)
lda_model$plot()
lda_model
|
# misclassification rate in synthetic dataset for centralized
load("syndata.Rdata")
Nexpr = 20
misclsfctn = rep(0,Nexpr) #misclassification rate
load("../../centr20000_100.Rdata")
for(expr_no in 1:Nexpr){
betahat = betas[[expr_no]][[5]]
betahat = as.numeric(betahat)
# if using 40 parties, should be <=t[2], 60 parties for t[3], 80 for t[4] ...
#betahat[abs(betahat) <= t[1]/2 ] = 0
# if using 40 parties, should be muhat[[expr_no]][, 2], 60 for muhat[[expr_no]][, 3]...
difference = apply(data[[1]], 2, '-', muhat[[expr_no]][, 5])
predict = (crossprod(betahat, difference) > 0)
e1 = sum(predict == F)
# if using 40 parties, should be muhat[[expr_no]][, 2], 60 for muhat[[expr_no]][, 3]...
difference = apply(data[[2]], 2, '-', muhat[[expr_no]][, 5])
predict = (crossprod(betahat, difference) > 0)
e2 = sum(predict == T)
misclsfctn[expr_no] = (e1 + e2) / (10000)
}
cat( mean(misclsfctn) )
cat('\n')
cat( sd(misclsfctn) )
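# Editor's sketch (toy numbers only): the rule above classifies an observation x into
# class 1 when t(betahat) %*% (x - muhat) > 0. A two-dimensional check of that rule:
betahat.toy <- c(1, -1)
muhat.toy <- c(0, 0)
x.toy <- c(2, 1)                                # 1*2 + (-1)*1 = 1 > 0, so class 1
crossprod(betahat.toy, x.toy - muhat.toy) > 0   # TRUE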
|
/synthetic_expr/synthetic_mscls_2.R
|
no_license
|
bargavjayaraman/secure_model_aggregation
|
R
| false | false | 968 |
r
|
# misclassification rate in synthetic dataset for centralized
load("syndata.Rdata")
Nexpr = 20
misclsfctn = rep(0,Nexpr) #misclassification rate
load("../../centr20000_100.Rdata")
for(expr_no in 1:Nexpr){
betahat = betas[[expr_no]][[5]]
betahat = as.numeric(betahat)
# if using 40 parties, should be <=t[2], 60 parties for t[3], 80 for t[4] ...
#betahat[abs(betahat) <= t[1]/2 ] = 0
# if using 40 parties, should be muhat[[expr_no]][, 2], 60 for muhat[[expr_no]][, 3]...
difference = apply(data[[1]], 2, '-', muhat[[expr_no]][, 5])
predict = (crossprod(betahat, difference) > 0)
e1 = sum(predict == F)
# if using 40 parties, should be muhat[[expr_no]][, 2], 60 for muhat[[expr_no]][, 3]...
difference = apply(data[[2]], 2, '-', muhat[[expr_no]][, 5])
predict = (crossprod(betahat, difference) > 0)
e2 = sum(predict == T)
misclsfctn[expr_no] = (e1 + e2) / (10000)
}
cat( mean(misclsfctn) )
cat('\n')
cat( sd(misclsfctn) )
|
resource_set_of_consumer <- function(consumer, resource, inter_type) {
  # The arguments must represent binary interactions identified by 1 (interaction) or 0 (non-interaction)
resource_list <- as.character(unique(resource))
res_cons <- matrix(nrow = length(resource_list), ncol = 3, dimnames = list(c(), c('resource', 'consumer', 'non-consumer')))
res_cons[, 'resource'] <- resource_list
for(i in 1:length(resource_list)){
yes.consumer <- consumer[names(which(inter_type[which(resource == resource_list[i])] == "1"))]
non.consumer <- consumer[names(which(inter_type[which(resource == resource_list[i])] == "0"))]
res_cons[i , 'consumer'] <- paste(unique(yes.consumer), collapse = ' | ')
res_cons[i , 'non-consumer'] <- paste(unique(non.consumer), collapse = ' | ')
} #i
return(res_cons)
} #function
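# Editor's sketch (hypothetical species names): the three arguments are parallel, *named*
# vectors describing tested pairwise interactions ("1" = consumption observed, "0" = not).
consumer   <- c(i1 = "wolf", i2 = "fox",  i3 = "wolf")
resource   <- c(i1 = "hare", i2 = "hare", i3 = "deer")
inter_type <- c(i1 = "1",    i2 = "0",    i3 = "1")
resource_set_of_consumer(consumer, resource, inter_type)
# one row per resource, listing its observed consumers and tested non-consumers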
|
/Script/resource_set_of_consumer.r
|
permissive
|
david-beauchesne/Predict_interactions
|
R
| false | false | 828 |
r
|
resource_set_of_consumer <- function(consumer, resource, inter_type) {
  # The arguments must represent binary interactions identified by 1 (interaction) or 0 (non-interaction)
resource_list <- as.character(unique(resource))
res_cons <- matrix(nrow = length(resource_list), ncol = 3, dimnames = list(c(), c('resource', 'consumer', 'non-consumer')))
res_cons[, 'resource'] <- resource_list
for(i in 1:length(resource_list)){
yes.consumer <- consumer[names(which(inter_type[which(resource == resource_list[i])] == "1"))]
non.consumer <- consumer[names(which(inter_type[which(resource == resource_list[i])] == "0"))]
res_cons[i , 'consumer'] <- paste(unique(yes.consumer), collapse = ' | ')
res_cons[i , 'non-consumer'] <- paste(unique(non.consumer), collapse = ' | ')
} #i
return(res_cons)
} #function
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storage.R
\name{store_sqlite}
\alias{store_sqlite}
\title{Use SQLite database as storage mode}
\usage{
store_sqlite(path)
}
\arguments{
\item{path}{Path to the SQLite file or a directory where to create one.}
}
\description{
All logs will be written in the same file.
}
\examples{
if (interactive()) {
# temp directory for writing logs
tmp <- tempdir()
# when app stop,
# navigate to the directory containing logs
onStop(function() {
browseURL(url = tmp)
})
  # Classic Iris clustering with Shiny
ui <- fluidPage(
headerPanel("Iris k-means clustering"),
sidebarLayout(
sidebarPanel(
selectInput(
inputId = "xcol",
label = "X Variable",
choices = names(iris)
),
selectInput(
inputId = "ycol",
label = "Y Variable",
choices = names(iris),
selected = names(iris)[[2]]
),
numericInput(
inputId = "clusters",
label = "Cluster count",
value = 3,
min = 1,
max = 9
)
),
mainPanel(
plotOutput("plot1")
)
)
)
server <- function(input, output, session) {
# Store RDS with logs in the temp dir
track_usage(
storage_mode = store_sqlite(path = tmp)
)
# classic server logic
selectedData <- reactive({
iris[, c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
shinyApp(ui, server)
}
}
|
/man/store_sqlite.Rd
|
no_license
|
gridl/shinylogs
|
R
| false | true | 1,958 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storage.R
\name{store_sqlite}
\alias{store_sqlite}
\title{Use SQLite database as storage mode}
\usage{
store_sqlite(path)
}
\arguments{
\item{path}{Path to the SQLite file or a directory where to create one.}
}
\description{
All logs will be written in the same file.
}
\examples{
if (interactive()) {
# temp directory for writing logs
tmp <- tempdir()
# when app stop,
# navigate to the directory containing logs
onStop(function() {
browseURL(url = tmp)
})
  # Classic Iris clustering with Shiny
ui <- fluidPage(
headerPanel("Iris k-means clustering"),
sidebarLayout(
sidebarPanel(
selectInput(
inputId = "xcol",
label = "X Variable",
choices = names(iris)
),
selectInput(
inputId = "ycol",
label = "Y Variable",
choices = names(iris),
selected = names(iris)[[2]]
),
numericInput(
inputId = "clusters",
label = "Cluster count",
value = 3,
min = 1,
max = 9
)
),
mainPanel(
plotOutput("plot1")
)
)
)
server <- function(input, output, session) {
# Store RDS with logs in the temp dir
track_usage(
storage_mode = store_sqlite(path = tmp)
)
# classic server logic
selectedData <- reactive({
iris[, c(input$xcol, input$ycol)]
})
clusters <- reactive({
kmeans(selectedData(), input$clusters)
})
output$plot1 <- renderPlot({
palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
"#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
par(mar = c(5.1, 4.1, 0, 1))
plot(selectedData(),
col = clusters()$cluster,
pch = 20, cex = 3)
points(clusters()$centers, pch = 4, cex = 4, lwd = 4)
})
}
shinyApp(ui, server)
}
}
|
library(dplyr)
if(!file.exists("./data")){dir.create("./data")}
fileUrl<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile="./data/Coursera_Final.zip") #backup
download.file(fileUrl, destfile="./Coursera_Final.zip") #working directory
unzip("Coursera_Final.zip")
#Assigning all data frames
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
#merge data
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
Merged_Data <- cbind(Subject, Y, X)
# selects from merge data only columns subject code and anything with mean and std in name
Q2<- select(Merged_Data,subject, code, contains("mean"), contains("std"))
# Uses descriptive activity names in the data set from activities file.
Q2$code<- activities[Q2$code, 2]
#fix label names
names(Q2)[2] = "activity"
names(Q2)<-gsub("Acc", "Accelerometer", names(Q2))
names(Q2)<-gsub("Gyro", "Gyroscope", names(Q2))
names(Q2)<-gsub("BodyBody", "Body", names(Q2))
names(Q2)<-gsub("Mag", "Magnitude", names(Q2))
names(Q2)<-gsub("^t", "Time", names(Q2))
names(Q2)<-gsub("^f", "Frequency", names(Q2))
names(Q2)<-gsub("tBody", "TimeBody", names(Q2))
names(Q2)<-gsub("-mean()", "Mean", names(Q2), ignore.case = TRUE)
names(Q2)<-gsub("-std()", "STD", names(Q2), ignore.case = TRUE)
names(Q2)<-gsub("-freq()", "Frequency", names(Q2), ignore.case = TRUE)
names(Q2)<-gsub("angle", "Angle", names(Q2))
names(Q2)<-gsub("gravity", "Gravity", names(Q2))
View(Q2)
# Create a second, independent tidy data set with the average of each variable for each activity and each subject.
Q5 <- Q2 %>%
group_by(subject, activity) %>%
summarise_all(funs(mean))
View(Q5)
write.table(Q5, "Q5.txt", row.name=FALSE)
|
/run_analysis.R
|
no_license
|
rharnanan/getting-and-cleaning-data-
|
R
| false | false | 2,389 |
r
|
# quick borrowing from dplyr
dots <- dplyr:::dots
commas <- dplyr:::commas
named_dots <- dplyr:::named_dots
deparse_all <- dplyr:::deparse_all
# note: fforder(), chunk() and intrle() come from the ff / ffbase / bit packages
# right now do it naively
build_index <- function(group){
idx <- fforder(group)
rngs <- lapply(chunk(idx), function(i){
r <- intrle(group[idx[i]])
cbind(lengths=r$lengths,values=r$values)
})
rngs <- do.call(rbind, rngs)
rngs <- structure(list(lengths=rngs[,1], values=rngs[,2]), class="rle")
list(idx, ranges=rngs)
}
# quick testing....
# set.seed(42)
# group <- ff(sample(2, size=10, replace=TRUE))
# group[]
# build_index(group)
|
/dplyr_utils.r
|
no_license
|
edwindj/dplyr_ffbase
|
R
| false | false | 599 |
r
|
library(tidyverse)
library(broom)
library(here)
library(glue)
source(here("writeups/paper/paper_helpers.R"))
ALL_GENDER <- here("data/processed/books/tidy_full_corpus_all.csv")
CONTENT_GENDER <- here("data/processed/books/tidy_full_corpus_no_chars.csv")
CHARACTER_GENDER <- here("data/processed/books/tidy_full_corpus_chars_only.csv")
WORD_SCORES <- here("data/processed/words/all_word_measures_tidy.csv")
BOOK_GENDER <- here("data/processed/books/gender_token_type_by_book.csv")
all_score <- read_csv(ALL_GENDER) %>%
filter(!(book_id %in% c("L105", "L112")))
content_score <- read_csv(CONTENT_GENDER) %>%
filter(!(book_id %in% c("L105", "L112")))
character_score <- read_csv(CHARACTER_GENDER) %>%
filter(!(book_id %in% c("L105", "L112")))
word_scores <- read_csv(WORD_SCORES) %>%
select(word, gender)
book_gender <- read_csv(BOOK_GENDER) %>%
filter(corpus_type == "all") %>%
select(book_id, title, token_gender_mean)
tidy_df <- bind_rows(content_score, character_score) %>%
left_join(word_scores) %>%
mutate(corpus_type2 = case_when((corpus_type == "char_only" & gender < 3) ~ "male_char",
(corpus_type == "char_only" & gender >=3 ) ~ "female_char",
TRUE ~ "content"))
book_gender_content <- read_csv(BOOK_GENDER) %>%
filter(corpus_type == "no_char") %>%
select(book_id, title, token_gender_mean)
# tile plot
tile_gender <- tidy_df %>%
mutate(line_tile = ntile(line_number,50)) %>%
group_by(book_id, title, line_tile, corpus_type) %>%
summarize(gender_mean = mean(gender, na.rm = T)) %>%
group_by(corpus_type) %>%
mutate(gender_mean_scaled = scale(gender_mean, center = 3)) %>%
ungroup()
tile_gender %>%
left_join(book_gender) %>%
filter(token_gender_mean > 3.25 | token_gender_mean < 2.7) %>%
ggplot(aes(x = line_tile,
y = gender_mean_scaled,
group = corpus_type, color = corpus_type)) +
#geom_line() +
geom_hline(aes(yintercept = 0)) +
geom_smooth(span = .5, se = F) +
facet_wrap(~title) +
theme_classic()
# line plot
rug_data <- tidy_df %>%
filter(corpus_type == "char_only") %>%
group_by(book_id, line_number) %>%
summarize(mean_gender = mean(gender, na.rm = T)) %>%
mutate(gender_binary = case_when(mean_gender >3 ~ "F",
mean_gender <= 3 ~ "M",
TRUE ~ NA_character_))
line_gender <- tidy_df %>%
group_by(book_id, title, line_number, corpus_type) %>%
summarize(gender_mean = mean(gender, na.rm = T)) %>%
group_by(corpus_type) %>%
# mutate(gender_mean_scaled = scale(gender_mean, center = 3)) %>%
ungroup()
# This is the best one:
line_gender %>%
left_join(book_gender) %>%
left_join(rug_data) %>%
filter(corpus_type == "no_char") %>%
#filter(token_gender_mean > 3.25 | token_gender_mean < 2.7) %>%
filter(token_gender_mean < 2.9) %>%
ggplot(aes(x = line_number)) +
geom_hline(aes(yintercept = 3)) +
geom_smooth(aes(y = gender_mean), span = .3, color = "red", se = F) +
  geom_rug(aes(x = line_number, color = gender_binary), sides = "b",
length = unit(0.08, "npc")) +
scale_color_manual(values = c("pink", "lightblue", "white")) +
  facet_wrap(~title, scales = "free_x") +
theme_classic()
### try heatmap
### misc
find_target_books <- tile_gender %>%
count(book_id, title, line_tile) %>%
group_by(book_id, title) %>%
summarize(prop_both = sum(n == 2)/n()) %>%
arrange(-prop_both) %>%
left_join(book_gender) %>%
#filter(prop_both == 1) %>%
filter(token_gender_mean > 3.25 | token_gender_mean < 2.7)
#ggplot(find_target_books, aes(x = prop_both, y = token_gender_mean)) +
# geom_text(aes(label = title))
|
/analysis/books/19_book_examples1_old.R
|
no_license
|
mllewis/WCBC_GENDER
|
R
| false | false | 3,728 |
r
|
################## SUMMARIZE MAPS FROM LANDIS-II RUNS #####################
########################## Summarizes across multiple LANDIS-II scenarios ################################
#################################Created by M. Lucash, adapted from M. Creutzburg's file
library(ggplot2)
# library(gridExtra)
# library(gridGraphics)
library(png)
# library(plyr)
# Set working directory
dir <- "I:/Research/Shares/lucash_lab/Lucash/DoD_Model_Comparison/Output/PnET_BiomassMaps/"
setwd(dir)
#Set output directory
output_dir<-("I:/Research/Shares/lucash_lab/Lucash/DoD_Model_Comparison/Output/PnET_BiomassMaps/Outputs/")
# Useful if you have multiple replicates
no_reps <- 1 # input number of replicates here
# Load two lookup tables for joining to data to make the graphs look nicer.
Scenario_LUT <- read.csv ("Scenarios_071919.csv")
Year_LUT <- read.csv (paste(dir, "LUT_Year_1y.csv", sep=""))
Year_LUT_50y <- Year_LUT[1:51,]
#Read in the data. This comes from the output file in MultipleScenarios_SppMaps.R
original_data <- read.csv(paste(output_dir, "SppBiomass-Summary.csv", sep=""))
head(original_data)
#Merging with tables to make the graphs look nicer.
original_data_scenario <- merge(original_data, Scenario_LUT, by.x="ScenarioName", by.y="Folder")
head(original_data_scenario)
original_data_scenario_Time <- merge(original_data_scenario, Year_LUT_50y, by.x="Time", by.y="Time", all=TRUE)
graphing_Totalbiomass<-subset (original_data_scenario_Time, original_data_scenario_Time$Species %in% "TotalBiomass")
head(graphing_Totalbiomass)
#Number of colors corresponds to scenarios
# plt.cols.short <- c("#000000", "#E69F00", "#D55E00")
plt.cols.short <- c("#E69F00", "#009E73", "#F0E442", "#56B4E9", "#D55E00","#0072B2", "#E69F00", "#009E73", "#F0E442", "#56B4E9", "#D55E00","#0072B2")
#Graph for total biomass of all scenarios together on one graph
# dev.off()
png_name<-paste(output_dir, "Biomass_vs_Time_071619.png", sep="")
output.plot.v1<-ggplot(graphing_Totalbiomass, aes(x=Year, y= Avg_Biomass_gm2, group = as.factor(Scenarios)))+ theme_classic()+ geom_line(aes(linetype = Scenarios, color = Scenarios), size=1.3) + scale_linetype_manual(values=c("solid", "solid", "solid", "solid", "solid", "solid", "dashed", "dashed", "dashed", "dashed", "dashed", "dashed"))+scale_color_manual(values=plt.cols.short) +scale_y_continuous(limits=c(0,30000))+scale_x_continuous(limits=c(2010,2060))+
ggtitle("Overall Trends in Biomass") +theme(plot.title = element_text(size=30, margin = margin(t = 10, b = -20), hjust=0.1)) +xlab("Time")+ylab("Aboveground biomass (g/m2)") + theme(legend.key.width=unit(1.5,"cm"))
ggsave(output.plot.v1, filename = png_name, width=11, height=8.5, units="in")
# dev.off()
######################################################
#Graph of ANPP. This is not working right now:
# 'all_data2' is never created in this script, so the subset below fails until it is defined.
final_data<- subset(all_data2, all_data2$Time >0)
png_name<-paste(output_dir, "ANPP_vs_Time_071619.png", sep="")
output.plot.v2<-ggplot(final_data, aes(x=Year, y= AG_NPPC, group = as.factor(Scenarios)))+ theme_classic()+ geom_line(aes(linetype = Scenarios, color = Scenarios), size=1.3) + scale_linetype_manual(values=c("solid", "solid", "solid", "solid", "solid", "solid", "dashed", "dashed", "dashed", "dashed", "dashed", "dashed"))+scale_color_manual(values=plt.cols.short) +scale_y_continuous(limits=c(200,1200))+scale_x_continuous(limits=c(2010,2060))+
ggtitle("Overall Trends in ANPP") +theme(plot.title = element_text(size=30, margin = margin(t = 10, b = -20), hjust=0.1)) +xlab("Time")+ylab("Aboveground biomass (g/m2)") + theme(legend.key.width=unit(1.5,"cm"))
ggsave(output.plot.v2, filename = png_name, width=11, height=8.5, units="in")
# dev.off()
list.files(output_dir)
|
/scripts/R_Scripts/Graphing_totalBiomass_time_AllScenarios_DOD.R
|
permissive
|
LANDIS-II-Foundation/Project-Fort-Bragg-NC
|
R
| false | false | 3,748 |
r
|
\name{getColumnView}
\alias{getColumnView}
\title{Method "getColumnView"}
\docType{methods}
\description{
Method \code{getColumnView} returns RR or FE framework results in a column format
}
\section{Methods}{
\describe{
\item{\code{signature(obj = "htestPhenStat")}}{
%% Returns result in a column format
}
}}
\keyword{methods}
\value{
This function returns RR or FE framework results in a column format
}
|
/Early adults stats pipeline/PhenStat/PhenStatPackage/PhenStat/man/getColumnView.Rd
|
permissive
|
mpi2/impc_stats_pipeline
|
R
| false | false | 454 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network.smoothing.R
\docType{package}
\name{network.smoothing}
\alias{network.smoothing}
\alias{network.smoothing-package}
\title{A package to propagate data over an interaction network}
\description{
This uses a method similar to the network-based stratification (NBS) method
published in Nature Methods (2013).
}
\details{
Pilot version of function
}
|
/man/network.smoothing.Rd
|
no_license
|
tylmoss/network_smoothing
|
R
| false | true | 428 |
rd
|
#' @details The \code{shinyURL.server} method contains server logic for encoding
#' and restoring the widgets' values. It is called from inside the app's
#' server script, and can take the \code{session} object as an argument.
#' @param session Typically the same as the optional parameter passed into the
#' Shiny server function as an argument; if missing defaults to
#' \code{getDefaultReactiveDomain()}
#' @return \code{shinyURL.server} returns a reactive expression evaluating to the app's URL.
#' @rdname shinyURL
#' @export
shinyURL.server = function(session) {
if (missing(session))
session = getDefaultReactiveDomain()
## initialize from query string
init = .initFromURL(session, init)
## encode current app's state
url = .encodeURL(session, inputId)
## use TinyURL for shortening the URL
.queryTinyURL(session)
  ## Initial invalidation (needed to execute scheduled input updates when the
  ## browser is refreshed) is switched off because it interferes with dynamic UIs
## invalidate = .invalidateOnInit(session, invalidate)
url
}
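## Minimal usage sketch (illustrative comment only, not exported code): per the
## roxygen details above, the function is called once inside the app's server
## function, e.g.
##   server <- function(input, output, session) {
##     url <- shinyURL.server(session)  # reactive expression holding the app's URL
##     ...
##   }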
.initFromURL = function(session, self) {
queryValues <- isolate(parseQueryString(session$clientData$url_search, nested=TRUE))
observe({
queryValuesCopy = queryValues
## iterate through available inputs as long as there are any uninitialized
    ## values in queryValues; the expression below depends on inputs, which is
    ## necessary to restore dynamic UIs
inputValues = reactiveValuesToList(session$input, all.names=FALSE)
updateValues = intersect(names(inputValues), names(queryValues))
queryIds = match(updateValues, names(queryValues))
inputIds = match(updateValues, names(inputValues))
if ( length(queryIds) > 0 ) queryValues <<- queryValues[-queryIds]
## schedule the update only after all input messages have been sent out (see
## the 'flushOutput' function in shiny.R). This is to avoid potential
## overwriting by some update events from user code
session$onFlushed(function() {
.initInputs(session, queryValuesCopy[queryIds], inputValues[inputIds])
})
## suspend if nothing to do
if ( length(queryValues) == 0L )
self$suspend()
}, priority = -99)
}
.initInputs = function(session, queryValues, inputValues) {
for (i in seq_along(queryValues)) {
q = queryValues[[i]]
q = if (is.list(q)) {
## checkbox group or multiple select
unname(q)
}
else {
## decode range vectors (sliders and dates)
if (length(inputValues[[i]])>1L)
q = unlist(strsplit(q, ","))
## use information about the class of the inputs when initializing them
cl = class(inputValues[[i]])
## promote integer to numeric because numericInputs can contain either
if (cl=="integer")
cl = "numeric"
switch(cl,
## Dates need to be handled separately
Date = format(as.Date(as.numeric(q), "1970-01-01"), "%Y-%m-%d"),
## default case; should allow to correctly decode TRUE/FALSE
as(q, cl)
)
}
session$sendInputMessage(names(queryValues)[i], list(value=q))
}
}
.encodeURL = function(session, inputId) {
clientData = isolate(reactiveValuesToList(session$clientData))
## base URL which is not supposed to change
baseURL = paste0(clientData$url_protocol, "//",
clientData$url_hostname,
## add port number if present
if( (port=clientData$url_port)!="" ) paste0(":", port),
clientData$url_pathname)
queryString = reactive({
## all.names = FALSE excludes objects with a leading dot, in this case the
## ".url" field to avoid self-dependency
inputValues = reactiveValuesToList(session$input, all.names=FALSE)
    ## quit if there are no inputs to encode
if (length(inputValues)==0) return()
## remove actionButtons
isActionButton = unlist(lapply(inputValues, function(x) inherits(x, "shinyActionButtonValue")), use.names=FALSE)
inputValues = inputValues[!isActionButton]
## remove ggvis specific inputs
idx = grep("_mouse_(over|out)$", names(inputValues))
if ( length(idx) > 0 ) inputValues = inputValues[-idx]
inputValues = mapply(function(name, value) {
## this is important to be able to have all checkboxes unchecked
if (is.null(value))
""
else {
if (length(value) == 1L) {
## encode TRUE/FALSE as T/F
if (is.logical(value)) {
if (isTRUE(value)) "T" else "F"
}
else value
}
else {
cl = class(value)
## expand checkbox group and multiple select vectors
if (cl=="character") {
setNames(as.list(value), sprintf("%s[%s]", name, seq_along(value)))
}
## encode range vectors as comma separated string
else {
if (cl=="Date") value = as.integer(value)
paste(value, collapse=",")
}
}
}
}, names(inputValues), inputValues, SIMPLIFY=FALSE)
## remove names of sublists before flattening
names(inputValues)[sapply(inputValues, is.list)] = ""
inputValues = unlist(inputValues)
paste(names(inputValues), inputValues, sep = "=", collapse = "&")
})
observe({
updateTextInput(session, inputId, value = url())
}, priority = -999)
url = reactive({
URLencode(paste(c(baseURL, queryString()), collapse = "?"))
})
url
}
.queryTinyURL = function(session) {
input = session$input
.busyMsg = "Please wait..."
## construct a query string from the current URL
tinyURLquery = eventReactive(input$.getTinyURL, {
sprintf("http://tinyurl.com/api-create.php?url=%s", input[[inputId]])
})
## set busy message
observeEvent(tinyURLquery(), {
updateTextInput(session, inputId, value=.busyMsg)
## resume the observer only after .busyMsg is set
session$onFlushed(function() {
runTinyURLquery$resume()
})
})
## query TinyURL
runTinyURLquery = observe({
tinyurl = tryCatch(getURL(tinyURLquery()), error = function(e) "Error fetching tinyURL!")
updateTextInput(session, inputId, value=tinyurl)
runTinyURLquery$suspend()
}, suspended=TRUE)
invisible()
}
|
/R/server.R
|
no_license
|
githubfun/shinyURL
|
R
| false | false | 6,354 |
r
|
seed <- 234
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225163.08800240816
df.resid <- 35402
df <- 165
coefs <- c(6.910650579520636, 5.795405608770849, 5.696709446664359, 5.496487588872446, 5.132506288149086, 4.90026246785606, 4.793366513472435, 4.638648436110686, 4.39047400615578, 4.266734965404141, 4.251665669542032, 4.200603183235513, 4.017947755950382, 3.990377787412173, 3.7737649702184104, 3.5342265198087923, 3.2840082602706446, 2.9553675777345596, 2.497200809072442, 2.07912805697625, 1.4759735954659732, 0.8809910775185913, 0.9912850135598219, 0.16105668226630518, 0.6123194622234674, -1.2166055986308388, -0.22647561014639472, 1.1089426391928892, 1.1332882969162328, -1.3559363608678505, -2.0239214905704506, -1.7786024253639268, -0.15707270453938887, 0.7536537551152228, 1.211782468954473, -0.9533425832662132, -0.9934858810549159, -0.8018761175748772, 0.2636531346966451, -1.2629462206255322, 1.0184579911984337, 0.794253595131663, -0.810710685706854, -2.365807813224686, -0.7417692003033888, -0.638511611448669, -0.7130126229684832, 0.47381497643194076, 0.1816854828409978, -1.0342280729547488, 6.743921804241138e-2, 0.7913470940985868, -2.725166624304884, 1.5487767245050335, 0.604828457168989, 1.0993969792139318, -2.1808270446589417, -0.49295065520033027, -0.5744324203028734, 1.4131813961934314, 1.4724443541414347, 0.7789250411941921, -2.50099616114131, -1.0552427453511872, -0.909957554552408, 0.4007786940296291, 0.6624871197418974, -0.626122860424564, -1.8206508250972075, -0.46384030954432665, -2.2702925904040634, -0.8452756163882014, 0.8405140045509907, 1.0121074684994016, 0.7138974527234521, -0.5899591977998198, -1.5012736020518496, -1.1136632221282232, -4.8567002619448235e-4, 0.7997100112833269, 1.2903600676335376, 8.737938326049344e-2, 7.400946845863667e-2, -1.4623522668410416, -0.8556264422014935, 0.46754182075739076, 1.2991893052115548, 0.44469817614209683, 1.007027903692391, -1.5558188441061793, 0.5028504929926478, 0.6780827305280618, 0.9021034868623077, 0.3170980186454909, 0.5292099681034655, 1.423615696654118, -0.5849040178145569, 0.6179730974417038, -0.24196772872310937, -0.5432748124956905, 0.3601720902226752, -0.5759184895012384, 0.7396765135433562, -0.12742251601461516, 0.7025778256008528, 0.837022100461125, 1.0950391309903071, -0.9246380795194626, -0.8832543921360637, -1.0779376810875128, 0.5993766227998877, 0.6870126230360077, 1.6671613566910257, -0.7105475792776652, -0.18395578506601276, -1.123787385728118, 0.8196982287149114, -0.325746882239315, 0.4623543296302764, 0.5629179203097275, -0.5142792982201584, -0.46062483224685724, -1.065418075240979, -0.590466934284884, 0.3596450646200749, 0.7051201774113526, -2.867754063601307e-2, 0.9782432188113599, -0.5274469473623014, -0.44614809228131713, 0.3023919638287712, 0.7865626829384831, 1.117612895598401, 0.3816970591025265, -4.369949044902499e-2, 1.2380500330284363, -0.3282861239447551, 0.9990977941731655, 0.72799649486212, 1.0376053458461771, 0.7103582506462767, -0.5144841352601549, -1.005546042448106, 0.6700647055574428, 0.4454353711691228, 0.4271409089153782, -0.10819377837814274, -0.3226770492795543, -1.6907116758702658, 1.1786635680027862, 6.346712277757076e-2, 1.274512427929166, -0.20540429098333104, 5.537350973131178e-3, -0.10278866740231841, -1.3056055143365415, -0.8384615390972593, 0.905886906584275, 1.2431951106049497, 9.451639433867372e-2, 1.6096428773835543, -0.3148060573725927, -0.2535550290810278, 0.15419228714396463, 1.2080731534865814)
|
/analysis/boot/boot234.R
|
no_license
|
patperry/interaction-proc
|
R
| false | false | 3,731 |
r
|
##Load Libraries
library(ggplot2)
library(tidyverse)
library(dplyr)
library(readr)
library(cowplot)
library(caret)
library(pROC)
library(ROCR)
## NOTE: This is a proof of concept. Further validation work needs to take place.
## need to create train/test datasets
data = read.csv('C:\\Datasets\\Regression\\data1.csv', stringsAsFactors = F)
head(data) #allows you to check the data, first few entries
summary(data) #produces summary statistics for each column
dim(data) #the dimension (e.g. the number of columns and rows) of a matrix, array or data frame.
str(data)
# this shows that we need to tell R which columns contain factors
# it will also show us if there are some missing values.
hist(data$out_admitted)
#Baseline Accuracy
table(data$out_admitted)
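# (Illustrative) baseline accuracy is the majority-class share of out_admitted:
max(prop.table(table(data$out_admitted)))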
#CATOOLS
library(caTools)
set.seed(123)
split = sample.split(data$out_admitted, SplitRatio = 0.80)
split
datatrain = subset(data, split==TRUE)
datatest = subset(data, split==FALSE)
head(datatrain)
head(datatest)
nrow(datatrain) # 42946 Training Samples
nrow(datatest)#10736 Test Samples
table(datatrain$out_admitted)
table(datatest$out_admitted)
data$out_admitted
library(rpart)
library(rpart.plot)
t= out_admitted ~
ed_arrival_mode +
ed_stream +
ed_blue_stream +
flag_prev_last_positive_ordered_dt +
flag_shielded_pat +
demo_age65 +
ed_arrival_hour +
flag_care_home_id +
path_creatinine +
path_prothrombin +
path_aki +
como_Peripheral_Vascular_Disease +
como_Renal_Disease +
como_Diabetes +
como_Connective_Tissue_Disorder +
como_Dementia +
como_Severe_Liver_Disease +
como_Metastatic_Cancer +
como_HIV +
como_Peptic_Ulcer +
como_Liver_Disease +
como_Congestive_Heart_Failure +
como_Acute_Myocardial_Infarction +
como_Pulmonary_Disease +
como_Paraplegia +
como_Cerebral_Vascular_Accident +
como_Smoker +
como_Hypertension+demo_age+demo_gender
library(jcart)
jcart(t)
prp(jcart(t))
predTrain = predict(jcart(t))[,2]
summary(predTrain)
table(datatrain$out_admitted, predTrain >= 0.5)
predTest = predict(jcart(t), newdata=datatest)[,2]
table(datatest$out_admitted, predTest >= 0.5)
pred = prediction(predTest, datatest$out_admitted)
as.numeric(performance(pred, "auc")@y.values)
|
/jcart New Package Example.R
|
no_license
|
UKVeteran/New-Packages-in-R
|
R
| false | false | 2,324 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrapper_scPipeCPP.R
\name{sc_trim_barcode}
\alias{sc_trim_barcode}
\title{sc_trim_barcode}
\usage{
sc_trim_barcode(
outfq,
r1,
r2 = NULL,
read_structure = list(bs1 = -1, bl1 = 0, bs2 = 6, bl2 = 8, us = 0, ul = 6),
filter_settings = list(rmlow = TRUE, rmN = TRUE, minq = 20, numbq = 2)
)
}
\arguments{
\item{outfq}{the output fastq file, with the barcode and UMI reformatted into
the read name. Files ending in \code{.gz} will be automatically compressed.}
\item{r1}{read one for paired-end reads. This read should contain
the transcript.}
\item{r2}{read two for paired-end reads, NULL if single-end.
(default: NULL)}
\item{read_structure}{a list containing the read structure configuration:
\itemize{
\item{bs1}: starting position of barcode in read one. -1 if no barcode in
read one.
\item{bl1}: length of barcode in read one, if there is no
barcode in read one this number is used for trimming beginning of read
one.
\item{bs2}: starting position of barcode in read two
\item{bl2}: length of barcode in read two
\item{us}: starting position of UMI
\item{ul}: length of UMI
}}
\item{filter_settings}{A list containing read filter settings:\itemize{
\item{rmlow} whether to remove the low quality reads.
\item{rmN} whether to remove reads that contains N in UMI or cell barcode.
\item{minq} the minimum base-pair quality allowed
\item{numbq} the maximum number of base pairs allowed to have quality
below \code{minq}
}}
}
\value{
generates a trimmed fastq file named \code{outfq}
}
\description{
Reformat fastq files so barcode and UMI sequences are moved from
the sequence into the read name.
}
\details{
Positions used in this function are 0-indexed, so they start from 0
rather than 1. The default read structure in this function represents
CEL-seq paired-end reads. This contains a transcript in the first read, a
UMI in the first 6bp of the second read followed by an 8bp barcode. So the
read structure will be: \code{list(bs1=-1, bl1=0, bs2=6, bl2=8, us=0,
ul=6)}. \code{bs1=-1, bl1=0} indicates negative start position and zero
length for the barcode on read one, this is used to denote "no barcode" on
read one. \code{bs2=6, bl2=8} indicates there is a barcode in read two that
starts at the 7th base with length 8bp. \code{us=0, ul=6} indicates a UMI
starting from the first base of read two with a length of 6bp.
For a typical Drop-seq experiment the read structure will be
\code{list(bs1=-1, bl1=0, bs2=0, bl2=12, us=12, ul=8)}, which means the read
one only contains transcript, the first 12bp in read two are cell barcode, followed
by a 8bp UMI.
}
\examples{
data_dir="celseq2_demo"
\dontrun{
# for the complete workflow, refer to the vignettes
...
sc_trim_barcode(file.path(data_dir, "combined.fastq"),
file.path(data_dir, "simu_R1.fastq"),
file.path(data_dir, "simu_R2.fastq"))
...
}
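# Illustrative sketch only: for a typical Drop-seq run (see Details), the same call
# would pass the Drop-seq read structure; file names here simply reuse the demo paths above.
# sc_trim_barcode(file.path(data_dir, "combined.fastq"),
#     file.path(data_dir, "simu_R1.fastq"),
#     file.path(data_dir, "simu_R2.fastq"),
#     read_structure = list(bs1 = -1, bl1 = 0, bs2 = 0, bl2 = 12, us = 12, ul = 8))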
}
|
/man/sc_trim_barcode.Rd
|
no_license
|
LuyiTian/scPipe
|
R
| false | true | 2,924 |
rd
|
species_table <- function(x){
list(BARBAR = 'Barbastella barbastellus',
EPTSER = 'Eptesicus serotinus',
MYOALC = 'Myotis alcathoe',
MYOBEC = 'Myotis bechsteinii',
MYOBRA = 'Myotis brandtii',
MYODAS = 'Myotis dasycneme',
MYODAU = 'Myotis daubentonii',
MYOMYS = 'Myotis mystacinus',
MYONAT = 'Myotis nattereri',
NYCLEI = 'Nyctalus leisleri',
NYCNOC = 'Nyctalus noctula',
PIPKUH = 'Pipistrellus kuhlii',
PIPNAT = 'Pipistrellus nathusii',
PIPPIP = 'Pipistrellus pipistrellus',
PIPPYG = 'Pipistrellus pygmaeus',
PLEAUR = 'Plecotus auritus',
PLEAUS = 'Plecotus austriacus',
RHIFER = 'Rhinolophus ferrumequinum',
RHIHIP = 'Rhinolophus hipposideros')
}
|
/functions/species_table.R
|
permissive
|
YTHsieh/bat2inat
|
R
| false | false | 775 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cleanhtml.R
\name{cleanhtml}
\alias{cleanhtml}
\title{Extracts the location data from the html string}
\usage{
cleanhtml(htmlstring)
}
\arguments{
\item{htmlstring}{character vector produced by the gethtml function. The html will be scraped from the LabCorp website.}
}
\value{
dataframe
}
\description{
This function takes a raw html string from the gethtml function and extracts the location data. Each patient lab will include the address, city, state, zip code and lat-long coordinates for their location.
}
|
/man/cleanhtml.Rd
|
permissive
|
kuhnrl30/LabCorp
|
R
| false | true | 592 |
rd
|
# Prepare model inputs for tier "misc_pepra"
#*******************************************************************************
# Notes ####
#*******************************************************************************
#' Inputs:
#' - inputs/data_proc/Data_BART_decrements_ES2017_imputed.RData
#' - inputs/data_proc/Data_BART_demographics_20190630_fillin.RData
#' What this file does
#' - produce the following model inputs for tier "misc_pepra"
#' - decrement table
#' - salary scale
#' - initial demographics
#' - tier specific parameters
#*******************************************************************************
# Tier specification ####
#*******************************************************************************
##' Members included
#' - misc pepra
##' Service retirement
#'
#' - Benefit rules
#' - Use benefit rules based on Misc classic members with 2%@62 rules.
#' (The 2%@62 rule is applied to all classic members who joined before 1/15/2011,
#'    so this rule should cover the majority of classic members)
#'
#' - Final compensation
#' - The plan policy:
#'        - 12 months for members who joined before 1/15/2011
#'        - 36 months for members who joined on or after 1/15/2011
#' - Model:
#' - 12 month (1 year in the model) for classic members.
#' - 36 month (3 years in the model) for pepra members.
#' - Do not model salary cap and SS offset for now, do calibration instead.
#'
#' Eligibility:
#' - classic: age>=50 & yos>=5
#' - pepra: age>=52 & yos>=5
#'
#' Vesting: yos >=5
#'
#' Benefit factor: 2% at age 62
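#'
#' Illustrative sketch only (hypothetical numbers, not model code): under the rules
#' above, the annual service retirement benefit is roughly
#'   bfactor * yos * final_average_salary,  e.g.  0.02 * 25 * 80000 = 40000
#' for 25 years of service and an $80k final average salary.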
# Deferred retirement
#' - Plan policy: eligible to receive benefit at age 50 (classic) or 52 (PEPRA).
# - Model: start receiving benefit at 59
#' - Simplification: do not model refund upon separation but take into account its separation rates
# TODO: check if the official valuation report also uses the age 59 assumption.
# Disability retirement
#
# - Based on standard non-industrial disability retirement benefit
#
# - Eligibility: yos > 5
#
# - Benefit:
# - formula: 1.8% x service x Final compensation (may want to use higher benefit factor)
# - service:
# - YOS if YOS <10 or YOS > 18.518, else
#      - YOS + the years the member would have worked until age 60,
# - with max benefit 1/3 of final compensation
#
# - Simplification:
# - does not compare with service retirement benefit
# - use YOS > 18
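#
# Illustrative sketch of the disability rule above (hypothetical helper, not used by
# the model code below):
#   disb_benefit_sketch <- function(yos, age, fc) {
#     svc <- if (yos < 10 || yos > 18) yos else yos + max(60 - age, 0)
#     min(0.018 * svc * fc, fc / 3)  # 1.8% factor, capped at 1/3 of final compensation
#   }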
# Death benefit: pre-retirement
# - sum of
# - Member's accumulated contributions, with interest max(6%, prevailing discount rate)
# - 6 months' salary if eligible for service retirement or Alternate Death Benefit
#
# - BART model for all members
# - factor * final salary
# - the value of factor TBD
## Assumptions, needs to be revisited
# Notes on gender ratio:
# 40% male and 60% female for all calculations for misc members.
## Assumptions on demographics
#
# Classic and PEPRA members:
# Active members:
# - Members who joined the plan on or after Jan 1, 2013 are PEPRA members
# - In the model, we assume that a member attains 1 yos after a full year of employment.
#
# - The demographic data from AV2019 are as of 6/30/2019
# - Therefore, members who joined on 6/30/2013 had just attained 6 YOS, and
# members who joined between 7/1/2012 and 6/30/2013 have 6 YOS in the membership data.
# - Assuming the entry of new members is uniformly distributed over the year, we can assume that
# all members with YOS <= 5 and half of new members with YOS = 6 are PEPRA members.
# Service retirees
# - According to CalPERS CAFR2018-19 ep159, more than 99.9% of service retirees and
# beneficiaries are classic members. In the model we assume all service retirees
# and beneficiaries in the AV data are classic members
#
# Disability retirees.
# - We have not found data that show the proportion of PEPRA members in disability retirees.
# - Because the PEPRA tier is still new and members are generally younger, we assume
# all disability retirees in the AV data are classic members.
#
# Initial terminated members
# - For now, we assume that for each tier the liability of initial terminated members (in or not in pay status)
# is a fixed percentage of the AL of retirees.
# - As we assume the PEPRA tier has no retirees in the model, there is no AL for initial terminated members
# under the current simplification method. This should not be an issue because the actual AL for terminated members should be
# very small, as the PEPRA tier is still new.
#*******************************************************************************
# ## Global settings ####
#*******************************************************************************
dir_data <- "inputs/data_proc/"
dir_outputs <- "model/tiers/tierData/"
# Model settings
range_age <- 20:100
range_ea <- 20:69 # max retirement age is assumed to be 75 (qxr = 1 at age 75 in AV tables)
# Tier specific parameters
tier_name <- "misc_pepra"
age_vben <- 59 # assumed age of starting receiving deferred retirement benefits
v.year <- 5
fasyears <- 3 # based on policy before PEPRA
bfactor <- 0.02
cola_assumed <- 0.02 # assumed cola rates for valuation
EEC_rate <- 0.0625 # TODO:check/estimate EEC rate for classic and PEPRA members
infl_salary <- 0.0275
share_female <- 0.6
share_male <- 1 - share_female
# Other tier params to add
#*******************************************************************************
# ## Loading data ####
#*******************************************************************************
load(paste0(dir_data, "Data_BART_decrements_ES2017_imputed.RData"))
load(paste0(dir_data, "Data_BART_demographics_20190630_fillin.RData"))
load(paste0(dir_data, "Data_BART_planInfo_AV2019.RData"))
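# Added note: this script assumes the tidyverse (dplyr, tidyr, stringr),
# magrittr (for %<>%) and a small helper na2zero() -- which replaces NA with 0 --
# are attached/defined by a master script in the repo. The lines below are a
# hedged fallback so the file can also be sourced on its own; the repo's own
# na2zero() may differ in detail.
suppressPackageStartupMessages({ library(tidyverse); library(magrittr) })
if (!exists("na2zero")) na2zero <- function(x) { x[is.na(x)] <- 0; x }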
#*******************************************************************************
# ## Decrements 1: combining groups ####
#*******************************************************************************
## Service retirement rates
# groups included
grp_include <- df_qxr_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include , "misc_pepra")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "misc_pepra","wgt"] <- 1
#wgts[wgts$grp == "inds_classic","wgt"] <- 0.046
## calculate weighted average
df_qxr_tier <-
df_qxr_imputed %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(age, yos) %>%
summarise(qxr = weighted.mean(qxr, wgt), .groups = "drop") %>%
mutate(grp = tier_name) %>%
relocate(grp) %>%
ungroup()
## Disability retirement rates
# groups included
df_qxd_misc_imputed %<>%
gather(grp, qxd.nonocc, -age) %>%
mutate(grp = str_extract(grp, "male|female"))
grp_include <- df_qxd_misc_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "male|female")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "female","wgt"] <- share_female
wgts[wgts$grp == "male", "wgt"] <- share_male
## calculate weighted average
# Need to combine two types of disability rates: adding the two rates
#
df_qxd_tier <-
df_qxd_misc_imputed %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(age) %>%
summarise(qxd.nonocc = weighted.mean(qxd.nonocc, wgt),
qxd.occ = 0,
#qxd = qxd.nonocc + qxd.occ,
.groups = "drop") %>%
mutate(grp = tier_name,
qxd = qxd.nonocc + qxd.occ) %>%
relocate(grp) %>%
ungroup()
## Termination with refund
# groups included
grp_include <- df_qxt.refund_misc_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "misc")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "misc", "wgt"] <- 1
## calculate weighted average
df_qxt.refund_tier <-
df_qxt.refund_misc_imputed %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(yos, ea) %>%
summarise(qxt.refund = weighted.mean(qxt.refund, wgt),
.groups = "drop") %>%
mutate(grp = tier_name) %>%
relocate(grp) %>%
arrange(ea, yos) %>%
ungroup()
## Termination with vested benefits
# groups included
grp_include <- df_qxt.vest_misc_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "misc")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "misc", "wgt"] <- 1
## calculate weighted average
df_qxt.vest_tier <-
df_qxt.vest_misc_imputed %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(yos, ea) %>%
summarise(qxt.vest = weighted.mean(qxt.vest, wgt),
.groups = "drop") %>%
mutate(grp = tier_name) %>%
relocate(grp) %>%
arrange(ea, yos) %>%
ungroup()
## combine two types of termination rates
df_qxt_tier <-
left_join(df_qxt.vest_tier,
df_qxt.refund_tier,
by = c("grp", "yos", "ea")
) %>%
mutate(qxt = qxt.refund + qxt.vest,
age = ea + yos) %>%
relocate(grp, ea, age, yos, qxt)
## Pre-retirement mortality
# Misc plans usually set industrial death to zero. See BART Misc AV2019 ep45
df_qxm.pre_tier <-
df_qxm.pre_imputed %>%
mutate(qxm.pre.nonocc = share_female * qxm.pre.nonocc_female + share_male * qxm.pre.nonocc_male,
qxm.pre.occ = 0, # 0.1 * qxm.pre.occ_female + 0.9 * qxm.pre.occ_male,
qxm.pre = qxm.pre.nonocc + qxm.pre.occ, # see note above
grp = tier_name
) %>%
select(grp, age, qxm.pre, qxm.pre.nonocc, qxm.pre.occ)
## Post-retirement mortality, without projection
# Need to combine two types of disability mortality rates: using weighted average
# - assume 0% of disability retirement is job-related for BART Misc
df_qxm.post_tier <-
df_qxm.post_imputed %>%
mutate(qxm.post = share_female * qxm.post_female + share_male * qxm.post_male,
qxmd.post.nonocc = share_female * qxmd.post.nonocc_female + share_male * qxmd.post.nonocc_male,
qxmd.post.occ = 0.1 * qxmd.post.occ_female + 0.9 * qxmd.post.occ_male,
qxmd.post = 1 * qxmd.post.nonocc + 0 * qxmd.post.occ,
grp = tier_name
) %>%
select(grp, age,
qxm.post, qxmd.post
#qxmd.post.nonocc, qxmd.post.occ,
#qxm.post_female, qxm.post_male,
#qxmd.post.nonocc_female, qxmd.post.nonocc_male,
#qxmd.post.occ_female, qxmd.post.occ_male
)
## Post-retirement mortality, with projection
df_qxm.post_proj_tier <-
df_qxm.post_proj_imputed %>%
mutate(qxm.post_proj = share_female * qxm.post_female_proj + share_male * qxm.post_male_proj,
qxmd.post.nonocc_proj = share_female * qxmd.post.nonocc_female_proj + share_male * qxmd.post.nonocc_male_proj,
qxmd.post.occ_proj = 0.1 * qxmd.post.occ_female_proj + 0.9 * qxmd.post.occ_male_proj,
qxmd.post_proj = 1 * qxmd.post.nonocc_proj + 0 * qxmd.post.occ_proj,
grp = tier_name
) %>%
select(grp, age,
qxm.post_proj, qxmd.post_proj
#qxmd.post.nonocc_proj, qxmd.post.occ_proj,
#qxm.post_female_proj, qxm.post_male_proj,
#qxmd.post.nonocc_female_proj, qxmd.post.nonocc_male_proj,
#qxmd.post.occ_female_proj, qxmd.post.occ_male_proj
)
# df_qxr_tier
# df_qxd_tier
# df_qxt.refund_tier
# df_qxt.vest_tier
# df_qxm.pre_tier
# df_qxm.post_tier
# df_qxm.post_proj_tier
#*******************************************************************************
# ## Decrements 2: Single decrement table ####
#*******************************************************************************
# df_qxr_tier
# df_qxd_tier
# df_qxt.refund_tier
# df_qxt.vest_tier
# df_qxm.pre_tier
# df_qxm.post_tier
# df_qxm.post_proj_tier
decrements_tier <- expand.grid(age = range_age,
ea = range_ea) %>%
mutate(yos = age - ea,
grp = tier_name) %>%
filter(age >= ea) %>%
left_join(df_qxm.pre_tier, by = c("grp", "age")) %>% # pre-retirement mortality
left_join(df_qxm.post_tier, by = c("grp", "age")) %>% # post-retirement mortality with no projection
# left_join(df_qxm.post_proj_tier, by = c("grp", "age")) %>% # post-retirement mortality with 15-year projection
left_join(df_qxt_tier, by = c("grp", "ea", "age", "yos")) %>% # termination with vested benefit
left_join(df_qxr_tier, by = c("grp", "age", "yos")) %>% # service retirement
left_join(df_qxd_tier, by = c("grp", "age")) %>% # disability
select(grp, ea, age, yos, qxm.pre, qxm.post, qxmd.post, qxt, qxt.vest, qxt.refund, qxr, qxd, everything())%>%
arrange(ea, age) %>%
mutate(across(everything(), na2zero))  # replace NAs with 0 (avoids the plyr::colwise dependency)
# decrement_tier
#*******************************************************************************
# ## Decrements 3: adding eligibility information ####
#*******************************************************************************
# Create 2 columns for each tier
# elig_servRet_full: number of years of being eligible for full or greater retirement benefits
# elig_servRet_early: number of years of being eligible for early retirement benefits;
# 0 after becoming eligible for full retirement benefits
# Notes for CalPERS (including BART)
# - elig_servRet_full is set to YY in the "X%@YY" formula name
# - elig_servRet_early is set to the earliest retirement age
decrements_tier %<>%
group_by(ea) %>%
mutate(
# Eligibility for full (or greater) retirement benefit
elig_servRet_full = ifelse( (age >= 62 & yos >= 5), 1, 0) %>% cumsum,
# Eligibility for early retirement benefit
elig_servRet_early = ifelse( (age >= 52 & yos >= 5), 1, 0) %>% cumsum,
elig_servRet_early = ifelse( elig_servRet_full, 0, elig_servRet_early)
) %>%
## Adjustments to decrement rates based on eligibility
# 1. Only keep retirement rates when a member is eligible
# 2. Coerce termination rates to 0 when eligible for early retirement or full retirement, or age >= age_vben
mutate(
qxr = ifelse(elig_servRet_early | elig_servRet_full, qxr, 0),
qxt.refund = ifelse((elig_servRet_early == 0 & elig_servRet_full == 0) & age < age_vben, qxt.refund, 0),
qxt.vest = ifelse((elig_servRet_early == 0 & elig_servRet_full == 0) & age < age_vben, qxt.vest, 0),
qxt = ifelse((elig_servRet_early == 0 & elig_servRet_full == 0) & age < age_vben, qxt, 0)
) %>%
ungroup
## setting mortality to 1 at the max age
decrements_tier %<>%
mutate(qxm.pre = ifelse(age == max(age), 1, qxm.pre),
qxm.post = ifelse(age == max(age), 1, qxm.post),
qxmd.post = ifelse(age == max(age), 1, qxmd.post)
)
#*******************************************************************************
# ## Decrements 4: Improvement table ####
#*******************************************************************************
# improvement for post retirement mortality
# May want to use MP2016 directly
decrements_improvement <-
expand_grid(year = 2017:(2017+14),
age = range_age) %>%
left_join(
bind_rows(
# df_qxm.post_imputed %>% mutate(year = 2017),
# df_qxm.post_proj_imputed %>%
# rename_with( ~str_remove(.x, "_proj" )) %>%
# mutate(year = 2017+14)
df_qxm.post_tier %>% mutate(year = 2017),
df_qxm.post_proj_tier %>%
rename_with( ~str_remove(.x, "_proj" )) %>%
mutate(year = 2017+14)
),
by = c("year", "age")
)
decrements_improvement %<>%
group_by(age) %>%
arrange(age, year) %>%
# filter(age == 90) %>%
mutate(across(!c(year, grp), ~ seq(first(.x), last(.x), length.out = n()))) %>%
mutate(across(!c(year, grp), ~ .x / .x[year == min(year)])) %>%
rename_with(~ paste0("impr_", .x), !c(year, age, grp)) %>%
mutate(grp = tier_name )
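# Added illustration (not part of the original script): the block above linearly
# interpolates each post-retirement mortality rate between its 2017 value and its
# projected 2031 value, then divides by the first year, producing multiplicative
# improvement factors (impr_*) that equal 1 in 2017. Toy example for one age,
# with an assumed base rate of 0.020 falling to 0.015 over the 15 years:
q_path_example    <- seq(0.020, 0.015, length.out = 15)
impr_path_example <- q_path_example / q_path_example[1]   # 1.000, 0.982, ..., 0.750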
#*******************************************************************************
# ## Salary Scale ####
#*******************************************************************************
# df_salScale.merit_imputed
# groups included
grp_include <- df_salScale.merit_imputed$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "misc")]
# weight for each group
wgts <- tibble(grp = grp_include, wgt = 0)
wgts[wgts$grp == "misc", "wgt"] <- 1
wgts
## calculate weighted average
df_salScale_tier <-
df_salScale.merit_imputed %>%
filter(grp %in% grp_include) %>%
left_join(wgts, by = "grp") %>%
group_by(yos, ea) %>%
summarise(salScale.merit = weighted.mean(salScale.merit, wgt),
.groups = "drop") %>%
mutate(grp = tier_name,
salScale.infl = infl_salary,
salScale = salScale.merit + salScale.infl) %>%
relocate(grp) %>%
arrange(ea, yos) %>%
ungroup()
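# Added illustration (hypothetical numbers, not part of the original script):
# salScale is the merit scale plus the assumed wage inflation and is applied
# multiplicatively when projecting a salary one year forward.
merit_example  <- 0.03
salary_example <- 80000 * (1 + merit_example + infl_salary)   # 80000 * 1.0575 = 84600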
#*******************************************************************************
# ## Initial demographics ####
#*******************************************************************************
## View the inputs
# df_nactives_fillin
# df_n_servRet_fillin
# df_n_disbRet_occ_fillin
# df_n_disbRet_nonocc_fillin
# df_n_beneficiaries_fillin
## groups included
grp_include <- df_nactives_fillin$grp %>% unique
grp_include <- grp_include[str_detect(grp_include, "misc")]
## Active members
# all active members
df_n_actives_tier <-
df_nactives_fillin %>%
filter(grp %in% grp_include) %>%
# left_join(wgts, by = "grp") %>%
group_by(yos, ea) %>%
summarise(salary = weighted.mean(salary, nactives, na.rm = TRUE) %>% na2zero(),
nactives = sum(nactives, na.rm= TRUE) %>% na2zero,
.groups = "drop") %>%
mutate(grp = tier_name,
age = ea + yos) %>%
relocate(grp) %>%
arrange(ea, age) %>%
ungroup()
# BART: check the average salary implied by the data against the AV value (payroll)
# sum(df_n_actives_tier$nactives * df_n_actives_tier$salary) / sum(df_n_actives_tier$nactives)
# model/target: 94310.94/94311 = 0.9999994
# Keep PEPRA members only (this is the PEPRA tier; fct below appears to be an additional calibration adjustment)
# assume
# - members with yos <= 5 are all pepra members
# - 50% of members with yos == 6 are pepra members
# - the rest are classic members
fct <- 0.11758
df_n_actives_tier %<>%
mutate(nactives = case_when(
yos > 6 ~ 0,
yos == 6 ~ nactives * (0.5 - fct ),
yos <= 5 ~ nactives * (1 - fct),
TRUE ~ nactives
))
# df_n_actives_tier$nactives %>% sum
# Potential issue (PERF A):
# - Currently we have not found data about the proportion of PEPRA members in
# state PERF A members.
# - The closest is the number of classic and PEPRA members in
# the entire PERF A, including both state and non-state members. (CAFR2019-19 ep159)
# It shows that 72.5% of misc (and inds?) members are classic members.
# - Under the assumptions described above, classic members account for only
# 61% of total state PERF A members.
# - This can be because
# 1. We have overestimated the number of PEPRA members under our assumption.
# 2. The proportion of PEPRA members differs for state and non-state PERF A members.
# 3. A combination of the two.
#
# TODO: For now, we will just stick with the simple assumptions and see whether the
# simulation results are too far off the target.
#
# (df_n_actives_tier$nactives %>% sum)*0.725
# (df_n_actives_tier$nactives %>% sum)*0.275
#
# df_n_actives_tier %>%
# mutate(nactives_c = case_when(
# yos <= 4 ~ 0,
# yos == 5 ~ nactives * 0.5,
# TRUE ~ nactives
# )) %>%
# summarise(nactives_c = sum(nactives_c),
# nactives = sum(nactives)) %>%
# mutate(c_share = nactives_c / nactives)
## Service retirees
# For now, combine service retirees and beneficiaries
# BART: death beneficiaries are also included
# assume all service retirees are classic members
df_n_servRet_tier <-
full_join(df_n_servRet_fillin,
df_n_beneficiaries_fillin,
by = c("AV_date", "grp", "age", "age.cell")
) %>%
full_join(df_n_deathBen_occ_fillin,
by = c("AV_date", "grp", "age", "age.cell")
) %>%
full_join(df_n_deathBen_nonocc_fillin,
by = c("AV_date", "grp", "age", "age.cell")
) %>%
filter(grp %in% grp_include) %>%
group_by(age) %>%
summarise(benefit_servRet = weighted.mean(benefit_servRet, n_servRet, na.rm= TRUE),
n_servRet = sum(n_servRet, na.rm = TRUE),
benefit_beneficiaries = weighted.mean(benefit_beneficiaries, n_beneficiaries, na.rm= TRUE),
n_beneficiaries = sum(n_beneficiaries, na.rm = TRUE),
benefit_death_nonocc = weighted.mean(benefit_death_nonocc, n_death_nonocc, na.rm= TRUE),
n_death_nonocc = sum(n_death_nonocc, na.rm = TRUE),
benefit_death_occ = weighted.mean(benefit_death_occ, n_death_occ, na.rm= TRUE),
n_death_occ = sum(n_death_occ, na.rm = TRUE),
.groups = "drop") %>%
mutate(across(everything(), na2zero)) %>%   # replace NAs with 0
mutate(grp = tier_name,
benefit_servRet = na2zero((benefit_servRet * n_servRet +
benefit_beneficiaries * n_beneficiaries +
benefit_death_occ * n_death_occ +
benefit_death_nonocc * n_death_nonocc
) / (n_servRet + n_beneficiaries + n_death_nonocc + n_death_occ)),
n_servRet = n_servRet + n_beneficiaries + n_death_nonocc + n_death_occ
) %>%
select(grp, age, n_servRet, benefit_servRet) %>%
arrange(age) %>%
ungroup()
# BART: all service retirees are assumed to be classic members, so the PEPRA tier starts with none
df_n_servRet_tier %<>%
mutate(n_servRet = 0)
## Disability retirees
# For now, combine industrial and non-industrial disability retirees
# Assume all disability retirees are classic members
df_n_disbRet_tier <-
left_join(df_n_disbRet_nonocc_fillin,
df_n_disbRet_occ_fillin,
by = c("AV_date", "grp", "age", "age.cell")
) %>%
filter(grp %in% grp_include) %>%
group_by(age) %>%
summarise(benefit_disbRet_nonocc = weighted.mean(benefit_disbRet_nonocc, n_disbRet_nonocc, na.rm= TRUE),
n_disbRet_nonocc = sum(n_disbRet_nonocc, na.rm = TRUE),
benefit_disbRet_occ = weighted.mean(benefit_disbRet_occ, n_disbRet_occ, na.rm= TRUE),
n_disbRet_occ = sum(n_disbRet_occ, na.rm = TRUE),
.groups = "drop") %>%
mutate(across(everything() , na2zero)) %>%
mutate(grp = tier_name,
benefit_disbRet = na2zero((benefit_disbRet_nonocc * n_disbRet_nonocc + benefit_disbRet_occ * n_disbRet_occ) / (n_disbRet_occ + n_disbRet_nonocc)),
n_disbRet = n_disbRet_occ + n_disbRet_nonocc
) %>%
select(grp, age, n_disbRet, benefit_disbRet) %>%
arrange(age) %>%
ungroup()
# BART: all disability retirees are assumed to be classic members, so the PEPRA tier starts with none
df_n_disbRet_tier %<>%
mutate(n_disbRet = 0)
## View the results
# df_n_actives_tier
# df_n_servRet_tier
# df_n_disbRet_tier
#*******************************************************************************
# ## Benefit factor and benefit reduction ####
#*******************************************************************************
df_benFactor <-
benFactor_misc$df %>%
select(age = age_ret,
bfactor_reduced = bfactor_reduced_pepra,
benReduction = benReduction_pepra)
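# Added usage sketch (hypothetical, not part of the original script): df_benFactor
# maps each retirement age to the (possibly reduced) PEPRA benefit factor, so a
# benefit at a given retirement age could be looked up roughly as:
# b <- df_benFactor$bfactor_reduced[df_benFactor$age == 55] * yos * fas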
#*******************************************************************************
# ## Saving tier information in a list ####
#*******************************************************************************
# collect tier-specific parameters in a list
tier_params <-
list(
tier_name = tier_name,
age_vben = age_vben,
v.year = v.year,
fasyears = fasyears, # based on policy before PEPRA
cola_assumed = cola_assumed,
bfactor = bfactor,
EEC_rate = EEC_rate
)
# Store all tier data in a list
assign(paste0("tierData_", tier_name),
list(
tier_name = tier_name,
decrements = decrements_tier,
decrements_improvement = decrements_improvement,
df_n_actives = df_n_actives_tier,
df_n_servRet = df_n_servRet_tier,
df_n_disbRet = df_n_disbRet_tier,
df_salScale = df_salScale_tier,
df_benFactor = df_benFactor,
tier_params = tier_params
)
)
# Save the list of tier data in a .rds (single object) file
saveRDS(get(paste0("tierData_", tier_name)),
file = paste0(dir_outputs, "tierData_", tier_name, ".rds"))
# tierData <- readRDS(paste0(dir_outputs, "tierData_", tier_name, ".rds"))
|
/model/tiers/Tier_misc_pepra.R
|
no_license
|
yimengyin16/model_BART
|
R
| false | false | 25,252 |
r
|
# mvrnorm() below comes from the MASS package, so it needs to be attached
library(MASS)
iter <- 1000000
pb<-txtProgressBar(min = 1, max = iter, style = 3)
t<-0
for(i in 1:iter){
t<-t+i
setTxtProgressBar(pb,value = i)
}
close(pb)   # close the progress bar once the loop finishes
# Draw 10 samples from a 5-dimensional standard normal and inspect the sample covariance
draws <- mvrnorm(n = 10, mu = rep(0,5), Sigma = diag(5))
cov(draws)
|
/AdapativeMCMCLogistic/progressbar.R
|
no_license
|
sunilpaul29/BayesianTutorials
|
R
| false | false | 208 |
r
|