Dataset columns: content (large_string, lengths 0–6.46M) | path (large_string, lengths 3–331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5–125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4–6.46M) | extension (large_string, 75 classes) | text (string, lengths 0–6.46M)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{signatures.exome.cosmic.v3.may2019}
\alias{signatures.exome.cosmic.v3.may2019}
\title{Updated SBS Exome Signatures from Sanger COSMIC (May 2019)}
\format{A data frame of 65 rows and 96 columns}
\source{
\url{https://www.synapse.org/#!Synapse:syn12009743}
}
\description{
A dataset containing the additional signatures identified, read into R as a
data frame. Can be used as the 'signatures.ref' parameter in whichSignatures().
}
\keyword{datasets}
/man/signatures.exome.cosmic.v3.may2019.Rd | no_license | luolingqi/deconstructSigs.mm10 | R | false | true | 555 | rd
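A minimal sketch of how a signature matrix like this is typically handed to deconstructSigs; the mutation table `mutations_df` and the surrounding calls are assumptions for illustration, not part of the package documentation above.

library(deconstructSigs)
# Build the 96-context input from a hypothetical mutation table (sample/chr/pos/ref/alt columns assumed)
sigs.input <- mut.to.sigs.input(mut.ref = mutations_df, sample.id = "Sample",
                                chr = "chr", pos = "pos", ref = "ref", alt = "alt")
# Fit the May 2019 exome signatures to one sample
fit <- whichSignatures(tumor.ref = sigs.input,
                       signatures.ref = signatures.exome.cosmic.v3.may2019,
                       sample.id = "Sample_1",
                       contexts.needed = TRUE,
                       tri.counts.method = "exome")
fit$weights   # estimated contribution of each SBS signature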
print ("hello R world")
#multiply 5 times 4
5*4
#name my number
a <- 20
#divide my number by 5
a/5
#make a vector of numbers
b <- c(12,8,24,100)
#multiply all numbers by 3
b*3
/activity 1/activity1_script.r | no_license | cbarb21/datasci | R | false | false | 176 | r
# loadABCSFromMaster.0.1.R
#
# Purpose: Provides functions to load A-B-Cell-Synergy lists
# from a master source.
# This file can be loaded as an ABCSLOADER asset in
# the DREAM_Combi_main.R workflow and is
# therefore expected to provide a
# loadABCSmaster() function that returns an
# ABCS_Master dataframe.
#
# This file should contain only assets of functions
# and constants. Sourcing this file must not have
# any side-effects. Functions should not have side-
# effects either.
#
# Version: 0.1
#
# Date: Jan 22 2016
# Author: Boris and DREAM team UofT
#
# V 0.1 First code
#
# TODO:
#
# ==========================================================
loadABCSmaster <- function(IN = "../Challenge Data/Drug Synergy Data/ch1_train_combination_and_monoTherapy.csv") {
# Return a dataframe with four columns:
# Drug A name
# Drug B name
# Cell line name
# Synergy score
master <- read.csv(IN, stringsAsFactors=FALSE)
master <- master[master[,"QA"] == 1, ] # Exclude all combinations with poor QA scores
ABCS <- data.frame("A" = master$COMPOUND_A,
"B" = master$COMPOUND_B,
"C" = master$CELL_LINE,
"S" = as.numeric(master$SYNERGY_SCORE),
stringsAsFactors = FALSE)
return(ABCS)
}
# [END]
/loadABCSFromMaster.0.1.R | no_license | DREAM-Toronto/Drug-Combination-Prediction-2015 | R | false | false | 1,473 | r
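A short usage sketch for the loader above; the default CSV path comes from the function itself, and the inspection calls are illustrative only.

source("loadABCSFromMaster.0.1.R")
ABCS <- loadABCSmaster()                         # reads the challenge CSV, drops rows with QA != 1
str(ABCS)                                        # four columns: A, B, C, S
head(ABCS[order(ABCS$S, decreasing = TRUE), ])   # most synergistic drug/cell-line combinations first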
#' Rank sites by EAR
#'
#' The \code{rank_sites_DT} (DT option) and \code{rank_sites} (data frame option) functions
#' create tables with one row per site. Columns represent the maximum or mean EAR
#' (depending on the mean_logic argument) for each category ("Chemical Class",
#' "Chemical", or "Biological") and the frequency of the maximum or mean EAR
#' exceeding a user-specified hit_threshold.
#'
#' The tables show slightly different results for a single site. Rather than multiple
#' columns for categories, there is now 1 row per category (since the site is known).
#'
#' @param chemical_summary Data frame from \code{\link{get_chemical_summary}}.
#' @param mean_logic Logical. \code{TRUE} displays the mean sample from each site,
#' \code{FALSE} displays the maximum sample from each site.
#' @param sum_logic Logical. \code{TRUE} sums the EARs in a specified grouping,
#' \code{FALSE} does not. \code{FALSE} may be better for traditional benchmarks as
#' opposed to ToxCast benchmarks.
#' @param category Character. Either "Biological", "Chemical Class", or "Chemical".
#' @param hit_threshold Numeric threshold defining a "hit".
#' @export
#'
#' @return data frame with one row per site, and the max or mean EAR and frequency of
#' hits based on the category.
#'
#' @rdname rank_sites_DT
#' @importFrom stats median
#' @examples
#' # This is the example workflow:
#' path_to_tox <- system.file("extdata", package="toxEval")
#' file_name <- "OWC_data_fromSup.xlsx"
#'
#' full_path <- file.path(path_to_tox, file_name)
#'
#' tox_list <- create_toxEval(full_path)
#'
#' ACC <- get_ACC(tox_list$chem_info$CAS)
#' ACC <- remove_flags(ACC)
#'
#' cleaned_ep <- clean_endPoint_info(end_point_info)
#' filtered_ep <- filter_groups(cleaned_ep)
#' chemical_summary <- get_chemical_summary(tox_list, ACC, filtered_ep)
#'
#' stats_df <- rank_sites(chemical_summary, "Biological")
#'
#' rank_sites_DT(chemical_summary, category = "Biological")
#' rank_sites_DT(chemical_summary, category = "Chemical Class")
#' rank_sites_DT(chemical_summary, category = "Chemical")
#'
rank_sites_DT <- function(chemical_summary,
category = "Biological",
mean_logic = FALSE,
sum_logic = TRUE,
hit_threshold = 0.1){
Bio_category <- Class <- EAR <- maxEAR <- sumEAR <- value <- calc <- chnm <- choice_calc <- n <- nHits <- site <- ".dplyr"
match.arg(category, c("Biological","Chemical Class","Chemical"))
statsOfColumn <- rank_sites(chemical_summary=chemical_summary,
category = category,
hit_threshold = hit_threshold,
mean_logic = mean_logic,
sum_logic = sum_logic)
colToSort <- 1
if(mean_logic){
maxEARS <- grep("meanEAR",names(statsOfColumn))
} else {
maxEARS <- grep("maxEAR",names(statsOfColumn))
}
freqCol <- grep("freq",names(statsOfColumn))
n <- length(maxEARS)
ignoreIndex <- which(names(statsOfColumn) %in% c("site","category"))
if(n > 20 & n<30){
colors <- c(RColorBrewer::brewer.pal(n = 12, name = "Set3"),
RColorBrewer::brewer.pal(n = 8, name = "Set2"),
RColorBrewer::brewer.pal(n = max(c(3,n-20)), name = "Set1"))
} else if (n <= 20){
colors <- c(RColorBrewer::brewer.pal(n = 12, name = "Set3"),
RColorBrewer::brewer.pal(n = max(c(3,n-12)), name = "Set2"))
} else {
colors <- colorRampPalette(RColorBrewer::brewer.pal(11,"Spectral"))(n)
}
tableSumm <- DT::datatable(statsOfColumn, extensions = 'Buttons',
rownames = FALSE,
options = list(#dom = 'ft',
dom = 'Bfrtip',
buttons =
list('colvis'),
scrollX = TRUE,
# pageLength = nrow(statsOfColumn),
order=list(list(colToSort,'desc'))))
tableSumm <- DT::formatSignif(tableSumm, names(statsOfColumn)[-ignoreIndex], 2)
for(i in 1:length(maxEARS)){
tableSumm <- DT::formatStyle(tableSumm,
names(statsOfColumn)[maxEARS[i]],
backgroundColor = colors[i])
tableSumm <- DT::formatStyle(tableSumm,
names(statsOfColumn)[freqCol[i]],
backgroundColor = colors[i])
tableSumm <- DT::formatStyle(tableSumm, names(statsOfColumn)[maxEARS[i]],
background = DT::styleColorBar(range(statsOfColumn[,names(statsOfColumn)[maxEARS[i]]],na.rm = TRUE), 'goldenrod'),
backgroundSize = '100% 90%',
backgroundRepeat = 'no-repeat',
backgroundPosition = 'center' )
tableSumm <- DT::formatStyle(tableSumm, names(statsOfColumn)[freqCol[i]],
background = DT::styleColorBar(range(statsOfColumn[,names(statsOfColumn)[freqCol[i]]],na.rm = TRUE), 'wheat'),
backgroundSize = '100% 90%',
backgroundRepeat = 'no-repeat',
backgroundPosition = 'center')
}
return(tableSumm)
}
#' @export
#' @rdname rank_sites_DT
rank_sites <- function(chemical_summary,
category,
hit_threshold = 0.1,
mean_logic = FALSE,
sum_logic = TRUE){
sumEAR <- nHits <- n <- calc <- value <- choice_calc <- ".dplyr"
chnm <- Class <- Bio_category <- site <- EAR <- maxEAR <- ".dplyr"
siteToFind <- unique(chemical_summary$shortName)
if(category == "Chemical"){
chemical_summary <- dplyr::mutate(chemical_summary, category = chnm)
} else if (category == "Chemical Class"){
chemical_summary <- dplyr::mutate(chemical_summary, category = Class)
} else {
chemical_summary <- dplyr::mutate(chemical_summary, category = Bio_category)
}
chemical_summary <- dplyr::select(chemical_summary, -Class, -Bio_category, -chnm)
if(length(siteToFind) == 1){
chemical_summary$site <- chemical_summary$category
} else {
chemical_summary$site <- chemical_summary$shortName
}
if(!sum_logic){
statsOfColumn <- chemical_summary %>%
dplyr::group_by(site, date, category) %>%
dplyr::summarize(sumEAR = max(EAR),
nHits = sum(sumEAR > hit_threshold)) %>%
dplyr::group_by(site, category) %>%
dplyr::summarise(maxEAR = ifelse(mean_logic, mean(sumEAR), max(sumEAR)),
freq = sum(nHits > 0)/dplyr::n()) %>%
data.frame()
} else {
statsOfColumn <- chemical_summary %>%
dplyr::group_by(site, date, category) %>%
dplyr::summarise(sumEAR = sum(EAR),
nHits = sum(sumEAR > hit_threshold)) %>%
dplyr::group_by(site, category) %>%
dplyr::summarise(maxEAR = ifelse(mean_logic, mean(sumEAR), max(sumEAR)),
freq = sum(nHits > 0)/dplyr::n()) %>%
data.frame()
}
if(!(length(siteToFind) == 1)){
statsOfColumn <- statsOfColumn %>%
tidyr::gather(calc, value, -site, -category) %>%
tidyr::unite(choice_calc, category, calc, sep=" ") %>%
tidyr::spread(choice_calc, value)
}
colToSort <- 2
if("nSamples" %in% names(statsOfColumn)){
colToSort <- 3
}
freqCol <- grep("freq",names(statsOfColumn))
maxEARS <- grep("maxEAR",names(statsOfColumn))
ignoreIndex <- which(names(statsOfColumn) %in% c("site","nSamples"))
statsOfColumn <- statsOfColumn[,c(ignoreIndex,c(maxEARS,freqCol)[order(c(maxEARS,freqCol))])]
maxEARS <- grep("maxEAR",names(statsOfColumn))
MaxEARSordered <- order(apply(statsOfColumn[,maxEARS, drop = FALSE], 2, max),decreasing = TRUE)
if(length(maxEARS) != 1){
statsOfColumn <- statsOfColumn[,c(ignoreIndex,interl(maxEARS[MaxEARSordered],(maxEARS[MaxEARSordered]-1)))]
}
freqCol <- grep("freq",names(statsOfColumn))
maxEARS <- grep("maxEAR",names(statsOfColumn))
if(isTRUE(mean_logic)){
names(statsOfColumn)[maxEARS] <- gsub("max","mean",names(statsOfColumn)[maxEARS])
}
statsOfColumn <- statsOfColumn[order(statsOfColumn[[colToSort]], decreasing = TRUE),]
if(length(siteToFind) == 1){
names(statsOfColumn)[which(names(statsOfColumn) == "site")] <- "category"
}
return(statsOfColumn)
}
interl <- function (a,b) {
n <- min(length(a),length(b))
p1 <- as.vector(rbind(a[1:n],b[1:n]))
p2 <- c(a[-(1:n)],b[-(1:n)])
c(p1,p2)
}
/R/rank_sites.R | no_license | waternk/toxEval | R | false | false | 8,743 | r
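One detail in rank_sites() worth spelling out is the interl() helper defined at the end of the file: after the maxEAR columns are sorted by their largest value, interl() interleaves each maxEAR column index with the index of its matching freq column (the column immediately before it), so every EAR column ends up followed by its frequency column. A standalone check, assuming the file has been sourced:

interl(c(3, 5, 7), c(2, 4, 6))
# [1] 3 2 5 4 7 6
# i.e. maxEAR columns 3, 5, 7 each followed by their freq columns 2, 4, 6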
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make.R
\name{make_percent}
\alias{make_percent}
\title{Clean up a character vector to make it a percent}
\usage{
make_percent(x)
}
\arguments{
\item{x}{character vector to process}
}
\value{
numeric vector
}
\description{
Removes "%" characters, converts the result to numeric, and divides by 100.
}
/man/make_percent.Rd | no_license | tpopenfoose/hrbrmisc | R | false | true | 360 | rd
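The .Rd above only documents the interface; a minimal implementation consistent with that description (not necessarily the body in R/make.R) could look like this.

# Sketch of a make_percent()-style helper: strip "%", coerce to numeric, divide by 100
make_percent_sketch <- function(x) {
  as.numeric(gsub("%", "", x, fixed = TRUE)) / 100
}
make_percent_sketch(c("12%", "3.5 %", "100%"))   # 0.120 0.035 1.000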
# PhenoFunctions
# Packages used by the functions below
library(lubridate)   # ymd(), yday()
library(dplyr)
library(tidyr)
library(ggplot2)
# (the plotting and model helpers also rely on plyr::mapvalues and on lme4 for VarCorr/fixef)
#### READ IN PHENOLOGY DATA 2016 ####
ReadInBodyPhenology2016 <- function(datasheet, site, year){
# import body of data
dat <- read.csv(datasheet, header=FALSE, sep=";", skip=3, stringsAsFactors=FALSE)
dat <- dat[dat$V2!="",] # get rid of empty lines, where no species
dat <- dat[,-3] # get rid of chinese names
dat$V2<-gsub(" ", "", dat$V2,fixed = TRUE) # get rid of space
# loop to get turfID in all cells
for (i in 2:nrow(dat)){
if(nchar(dat$V1[i])==0){
dat$V1[i] <- dat$V1[i-1]
}
}
# import head of data set
dat.h <- read.csv(datasheet, sep=";", header=FALSE, nrow=3, stringsAsFactors=FALSE)
# merge data into long data table
long.table <- lapply(seq(3,ncol(dat)-15,16),function(i){
x <- dat[ ,c(1:2,i:(i+15))]
names(x) <- c("turfID", "species", paste(rep(c("b", "f", "s", "r"), 4 ), rep(1:4, each=4), sep="."))
x$date <- strsplit(dat.h[1,i+1], "_")[[1]][1]
x$doy <- yday(ymd(x$date))
x
})
dat.long <- do.call(rbind, c(long.table, stringsAsFactors=FALSE))
# Extract site
dat.long$origSite <- substr(dat.long$turfID, 1,1)
dat.long$destSite <- site
dat.long$block <- substr(dat.long$turfID, 2,2)
dat.long$treatment <- substr(dat.long$turfID, 4,nchar(dat.long$turfID))
# if treatment 1 or 2, keep only species marked with *sp* (not from original data set)
dat.long <- dat.long[
(dat.long$treatment %in% c("1", "2") & grepl("\\*.*\\*", as.character(dat.long$species), perl = TRUE)) | #if treatment 1 or 2 only *sp*
!(dat.long$treatment %in% c("1", "2")), ] # if other treatment
dat.long$species <- gsub("*", "", dat.long$species,fixed = TRUE) # get rid of *
# convert to factor and numeric
dat.long <- cbind(dat.long[,c(1:2,19:24)],sapply(dat.long[,c(3:18)],as.numeric))
#sapply(dat.long[,c(4:7,9:12,14:17,19:22)],function(x)print(grep("\\D", x = x, value = TRUE))) # Check error messages
dat.long <- dat.long[-1,]
dat.long$year <- year
dat.long <- dat.long[-nrow(dat.long),] # remove strange last row
dat.long
return(dat.long)
}
#### READ IN 2017 data
ReadInBodyPhenology2017 <- function(datasheet, site, year){
# import body of data
dat <- read.csv(datasheet, header=FALSE, sep=";", skip=3, stringsAsFactors=FALSE)
dat <- dat[dat$V2!="",] # get rid of empty lines, where no species
dat <- dat[,-3] # get rid of chinese names
dat$V2<-gsub(" ", "", dat$V2,fixed = TRUE) # get rid of space
# loop to get turfID in all cells
for (i in 2:nrow(dat)){
if(nchar(dat$V1[i])==0){
dat$V1[i] <- dat$V1[i-1]
}
}
# import head of data set
dat.h <- read.csv(datasheet, sep=";", header=FALSE, nrow=3, stringsAsFactors=FALSE)
# merge data into long data table
long.table <- lapply(seq(3,ncol(dat)-15,16),function(i){
x <- dat[ ,c(1:2,i:(i+15))]
names(x) <- c("turfID", "species", paste(rep(c("b", "f", "s", "r"), 4 ), rep(1:4, each=4), sep="."))
x$date <- dat.h[1,i+1]
x$doy <- yday(ymd(x$date))
x
})
dat.long <- do.call(rbind, c(long.table, stringsAsFactors=FALSE))
# Extract site
dat.long$origSite <- substr(dat.long$turfID, 1,1)
dat.long$destSite <- site
dat.long$block <- substr(dat.long$turfID, 2,2)
dat.long$treatment <- substr(dat.long$turfID, 4,nchar(dat.long$turfID))
# convert to factor and numeric
dat.long <- cbind(dat.long[,c(1:2,19:24)],sapply(dat.long[,c(3:18)],as.numeric))
#sapply(dat.long[,c(4:7,9:12,14:17,19:22)],function(x)print(grep("\\D", x = x, value = TRUE))) # Check error messages
dat.long <- dat.long[-1,]
dat.long$year <- year
dat.long <- dat.long[-nrow(dat.long),] # remove strange last row
dat.long
return(dat.long)
}
#### READ IN EXTRA CONTROLS 2017
ReadInBodyPhenologyExtra <- function(datasheet, site, year){
# import body of data
dat <- read.csv(datasheet, header=FALSE, sep=";", skip=3, stringsAsFactors=FALSE)
dat <- dat[dat$V2!="",] # get rid of empty lines, where no species
dat <- dat[,-3] # get rid of chinese names
dat$V2<-gsub(" ", "", dat$V2,fixed = TRUE) # get rid of space
# loop to get turfID in all cells
for (i in 2:nrow(dat)){
if(nchar(dat$V1[i])==0){
dat$V1[i] <- dat$V1[i-1]
}
}
# import head of data set
dat.h <- read.csv(datasheet, sep=";", header=FALSE, nrow=3, stringsAsFactors=FALSE)
#browser()
# merge data into long data table
long.table <- lapply(seq(3,ncol(dat)-35,36),function(i){
x <- dat[ ,c(1:2,i:(i+35))]
names(x) <- c("turfID", "species", paste(rep(c("b", "f", "s", "r"), 9), rep(1:9, each=4), sep="."))
x$date <- dat.h[1,i+1]
x$doy <- yday(ymd(x$date))
x
})
dat.long <- do.call(rbind, c(long.table, stringsAsFactors=FALSE))
# Extract site
dat.long$origSite <- site
dat.long$destSite <- site
dat.long$block <- sub(".*\\-", "", dat.long$turfID) # keep only what follows the "-" (the block id)
dat.long$treatment <- "EC"
# convert to factor and numeric
dat.long <- cbind(dat.long[,c("turfID", "species", "date", "doy", "origSite", "destSite", "block", "treatment")],sapply(dat.long[,c(3:38)],as.numeric))
#sapply(dat.long[,c(4:7,9:12,14:17,19:22)],function(x)print(grep("\\D", x = x, value = TRUE))) # Check error messages
dat.long <- dat.long[-1,]
dat.long$year <- year
dat.long <- dat.long[-nrow(dat.long),] # remove strange last row
dat.long
return(dat.long)
}
# Calculate the sum of buds, flowers and seeds per turf and species
CalcSums <- function(dat){
dat <- dat %>%
mutate(bud = rowSums(.[grep("b\\.", names(.))], na.rm = TRUE)) %>%
mutate(flower = rowSums(.[grep("f\\.", names(.))], na.rm = TRUE)) %>%
mutate(seed = rowSums(.[grep("s\\.", names(.))], na.rm = TRUE)) %>%
mutate(ripe = rowSums(.[grep("r\\.", names(.))], na.rm = TRUE))
return(dat)
}
#### FUNCTIONS FOR FIGURES ####
### GET MEAN AND SE BY SPECIES ###
SpeciesMeanSE <- function(dat){
# Calculate mean and se by species, pheno.stage, origSite, newTT
MeanSE <- dat %>%
group_by(year, newTT, origSite, pheno.stage, pheno.var, species) %>%
summarise(N = sum(!is.na(value)), mean = mean(value, na.rm = TRUE), se = sd(value, na.rm = TRUE)/sqrt(N))
# Calculate mean for difference between Control and Treatment
#SPOnlyInOneTreatment
SpeciesDifference <- MeanSE %>%
ungroup() %>%
select(-N) %>% # remove site, because it causes problems
unite(united, mean, se, sep = "_") %>% # unite mean and se
spread(key = newTT, value = united) %>% # spread Treatments
separate(col = Control, into = c("Control_mean", "Control_se"), sep = "_", convert = TRUE) %>%
separate(col = OTC, into = c("OTC_mean", "OTC_se"), sep = "_", convert = TRUE) %>%
separate(col = Warm, into = c("Warm_mean", "Warm_se"), sep = "_", convert = TRUE) %>%
separate(col = Cold, into = c("Cold_mean", "Cold_se"), sep = "_", convert = TRUE) %>%
mutate(OTC_mean = OTC_mean - Control_mean, Warm_mean = Warm_mean - Control_mean, Cold_mean = Cold_mean - Control_mean) %>%
mutate(OTC_se = sqrt(Control_se^2 + OTC_se^2), Warm_se = sqrt(Control_se^2 + Warm_se^2), Cold_se = sqrt(Control_se^2 + Cold_se^2)) %>%
select(-Control_mean, -Control_se) %>%
unite(OTC, OTC_mean, OTC_se, sep = "_") %>%
unite(Warm, Warm_mean, Warm_se, sep = "_") %>%
unite(Cold, Cold_mean, Cold_se, sep = "_") %>%
gather(key = Treatment, value = united, -year, -origSite, -pheno.stage, -pheno.var, -species) %>%
separate(col = united, into = c("mean", "se"), sep = "_", convert = TRUE) %>%
filter(!is.na(mean))
return(SpeciesDifference)
}
### COMMUNITY DATA ###
PlotCommunityData <- function(dat, phenovar){
CommunityDifference <- dat %>%
mutate(newname = paste(origSite, Treatment, sep = "_")) %>% # paste Treatment and site, they are unique and can be renamed
mutate(newname = plyr::mapvalues(newname, c("H_OTC", "A_OTC", "H_Transplant", "A_Transplant"), c("High alpine OTC", "Alpine OTC", "High alpine Transplant", "Alpine Transplant"))) %>%
mutate(newname = factor(newname, levels = c("High alpine OTC", "Alpine OTC", "High alpine Transplant", "Alpine Transplant"))) %>%
group_by(pheno.stage, newname) %>%
summarise(Difference = mean(mean, na.rm = TRUE), SE = mean(se, na.rm = TRUE)) %>%
mutate(Treatment = sub(".* ", "", newname))
ggplot(CommunityDifference, aes(x = newname, y = Difference, color = Treatment, shape = Treatment, ymax = Difference + SE, ymin = Difference - SE)) +
geom_hline(yintercept=0, color = "gray") +
geom_point(size = 1.8) +
labs(x = "", y = "Treatment - control in days") +
scale_colour_manual(name = "", values = c("red", "purple")) +
scale_shape_manual(name = "", values = c(16, 17)) +
facet_grid(~ pheno.stage) +
geom_errorbar(width=0.2) +
scale_x_discrete(labels = c("High alpine OTC" = "High alpine", "Alpine OTC" = "Alpine", "High alpine Transplant" = "High alpine", "Alpine Transplant" = "Alpine", "High alpine OTC" = "High alpine", "Alpine OTC" = "Alpine", "High alpine Transplant" = "High alpine", "Alpine Transplant" = "Alpine", "High alpine OTC" = "High alpine", "Alpine OTC" = "Alpine", "High alpine Transplant" = "High alpine", "Alpine Transplant" = "Alpine", "High alpine OTC" = "High alpine", "Alpine OTC" = "Alpine", "High alpine Transplant" = "High alpine", "Alpine Transplant" = "Alpine")) +
ggtitle(phenovar) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
### SPECIES DATA ###
PlotSpeciesData <- function(dat, phenovar, phenostage, Year){
dat2 <- with(dat, expand.grid(year = unique(year), Treatment=unique(Treatment), species=unique(species), origSite = unique(origSite), pheno.stage = unique(pheno.stage))) %>%
left_join(dat, by = c("year", "Treatment", "species", "origSite", "pheno.stage")) %>%
group_by(year, species, origSite) %>%
filter(sum(!is.na(mean)) > 0) %>%
ungroup()
# Draw plot
dat2 %>%
filter(year == Year, pheno.var == phenovar, pheno.stage == phenostage) %>%
mutate(origSite = plyr::mapvalues(origSite, c("H", "A", "M"), c("High Alpine", "Alpine", "Middle"))) %>%
mutate(Treatment = plyr::mapvalues(Treatment, c("Cold", "OTC", "Warm"), c("Transplant Cold", "OTC", "Transplant Warm"))) %>%
mutate(Treatment = factor(Treatment, levels = c("OTC", "Transplant Warm", "Transplant Cold"))) %>%
ggplot(aes(y = mean, x = species, fill = Treatment, ymin = mean - se, ymax = mean + se)) +
geom_col(position="dodge", width = 0.7) +
geom_errorbar(position = position_dodge(0.7), width = 0.2) +
geom_hline(yintercept = 0, colour = "grey", linetype = 2) +
#scale_fill_manual(name = "", values = c("purple", "orange", "lightblue")) +
labs(y = "Difference between treatment and control in days", x = "", title = phenovar) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
facet_grid(pheno.stage ~ Treatment * origSite, scales = "free_x", space = "free_x")
}
PlotSpeciesData2 <- function(dat, phenovar, Year){
dat %>%
filter(year == Year, pheno.var == phenovar, pheno.stage != "Ripe") %>%
#mutate(origSite = plyr::mapvalues(origSite, c("H", "A", "M"), c("High Alpine", "Alpine", "Middle"))) %>%
mutate(Treatment = plyr::mapvalues(Treatment, c("Cold", "OTC", "Warm"), c("Transplant Cold", "OTC", "Transplant Warm"))) %>%
mutate(Treatment = factor(Treatment, levels = c("OTC", "Transplant Warm", "Transplant Cold"))) %>%
mutate(origSite = plyr::mapvalues(origSite, c("A", "H", "M"), c("Alpine", "High alpine", "Mid"))) %>%
mutate(origSite = factor(origSite, levels = c("High alpine", "Alpine", "Mid"))) %>%
mutate(Order = paste(Treatment, origSite, species, sep = "_")) %>%
ggplot(aes(y = mean, x = species, fill = Treatment, ymin = mean - se, ymax = mean + se)) +
geom_col(position="dodge", width = 0.7) +
geom_errorbar(position = position_dodge(0.7), width = 0.2) +
geom_hline(yintercept = 0, colour = "grey", linetype = 2) +
scale_fill_manual(name = "", values = c(rep("purple", 1), rep("orange", 1), rep("lightblue", 1))) +
#scale_x_discrete(labels = SP) +
labs(y = "Difference between treatment and control in days", x = "", title = "peak phenophase") +
theme(axis.text.x = element_text(angle = 45, hjust = 1), legend.position="top") +
facet_grid(pheno.stage ~ Treatment * origSite, scales = "free_x", space = "free_x")
}
#### FUNCTIONS FOR ANALYSIS ####
#### Function to produce model-checking plots for the fixed effects of an lmer model
ModelCheck <- function(mod){
par(mfrow = c(1,2))
# Residual plot: checking homogeneity of the variance and linearity
plot(fitted(mod), resid(mod)) #should have no pattern
abline(h=0)
# QQnorm plot: normal distribution of the residuals
qqnorm(resid(mod), ylab="Residuals") #should be approximately straight line
qqline(resid(mod))
}
### Test overdispersion
# compare the residual deviance to the residual degrees of freedom
# these are assumed to be the same.
overdisp_fun <- function(model) {
## number of variance parameters in
## an n-by-n variance-covariance matrix
vpars <- function(m) {
nrow(m)*(nrow(m)+1)/2
}
model.df <- sum(sapply(VarCorr(model),vpars))+length(fixef(model))
rdf <- nrow(model.frame(model))-model.df
rp <- residuals(model,type="pearson")
Pearson.chisq <- sum(rp^2)
prat <- Pearson.chisq/rdf
pval <- pchisq(Pearson.chisq, df=rdf, lower.tail=FALSE)
c(chisq=Pearson.chisq,ratio=prat,rdf=rdf,p=pval)
}
# Function to calculate QAICc
# NB, phi is the scaling parameter from the quasi-family model. If using e.g. a poisson family, phi=1 and QAICc returns AICc, or AIC if QAICc=FALSE.
QAICc <- function(mod, scale, QAICc = TRUE) {
ll <- as.numeric(logLik(mod))
df <- attr(logLik(mod), "df")
n <- length(resid(mod))
if (QAICc)
qaic = as.numeric(-2 * ll/scale + 2 * df + 2 * df * (df + 1)/(n - df - 1))
else qaic = as.numeric(-2 * ll/scale + 2 * df)
qaic
}
# Model selection
modsel <- function(mods,x){
phi=1
dd <- data.frame(Model=1:length(mods), K=1, QAIC=1)
for(j in 1:length(mods)){
dd$K[j] = attr(logLik(mods[[j]]),"df")
dd$QAIC[j] = QAICc(mods[[j]],phi)
}
dd$delta.i <- dd$QAIC - min(dd$QAIC)
dd <- subset(dd,dd$delta.i<x)
dd$re.lik <- round(exp(-0.5*dd$delta.i),3)
sum.aic <- sum(exp(-0.5*dd$delta.i))
wi <- numeric(0)
for (i in 1:length(dd$Model)){wi[i] <- round(exp(-0.5*dd$delta.i[i])/sum.aic,3)}; dd$wi<-wi
print(dds <- dd[order(dd$QAIC), ])
assign("mstable",dd,envir=.GlobalEnv)
}
/PhenoFunctions.R | no_license | fanyanghenu/PhenologyLi | R | false | false | 14,494 | r
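To make the rowSums() pattern in CalcSums() concrete, here is a toy check; the column values are invented but follow the b./f./s./r. naming the reader functions create, and it assumes PhenoFunctions.R has been sourced.

toy <- data.frame(turfID = "H1-OTC", species = "Gentiana",
                  b.1 = 1, b.2 = 2, f.1 = 0, f.2 = 3,
                  s.1 = 1, s.2 = NA, r.1 = NA, r.2 = 0)
CalcSums(toy)[, c("bud", "flower", "seed", "ripe")]
#   bud flower seed ripe
#     3      3    1    0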
# ISSUE DATA
# Packages used below
library(here)
library(magrittr)   # %<>%
library(dplyr)
library(tidyr)      # gather()
library(stringr)    # str_replace()
data <- read.csv(here("data/FSCissues.csv"))
data$Change <- gsub("Bacame", "Became", data$Change)
data$Issue %<>% gsub(" $", "", .)
data$Change %<>% gsub(" * .$", "", .)
data$Strictist %<>% as.factor()
# PROGRAM NAMES
data %<>% mutate(Program = ifelse(Program == "FSC-US", "Activist-backed FSC-US", Program))
data %<>% mutate(Program = ifelse(Program == "SFI", "Industry-backed SFI", Program))
data %<>% mutate(Program = ifelse(Program == "PEFC", "Industry-backed PEFC", Program))
# corrections
data %<>% mutate(Change = ifelse(Issue == "Plantations" & Year == "2015" & Program == "Industry-backed SFI", "Became more prescriptive", Change))
data %<>% mutate(Change = ifelse(Issue == "Riparian" & Year == "2010" & Program == "Activist-backed FSC-US", "Became more prescriptive", Change))
data %<>% mutate(Prescriptiveness = ifelse(Issue == "Plantations" & Year %in% c("2015", "2016") & Program == "Industry-backed SFI", 2, Prescriptiveness))
# RELEVEL
data$Prescriptiveness %<>% str_replace("At least as or m", "M")
data$Prescriptiveness %<>% as.factor()
levels(data$Prescriptiveness) <- c("No prescriptive requirements", "Some prescriptive requirements", "Most prescriptive")
SFIvFSC <- data
SFIvFSC$Issue %<>% as.factor()
# make table
issues <- filter(data) %>%
group_by(Issue, Year) %>% mutate(Change = paste(Strictist, Change)) %>%
group_by(Issue, Year) %>% mutate(pattern = paste(Change, collapse = ": ")) %>%
mutate(pattern = ifelse(pattern == "1 No change: 0 No change", "Equilibrium", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 No change: 1 No change", "Equilibrium", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 No change: 0 No change", "Equilibrium", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 Became less prescriptive: 1 No change", "Downward divergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "1 Became less prescriptive: 0 No change", "Downward convergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "1 Became more prescriptive: 0 No change", "Upward divergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 Became more prescriptive: 1 No change", "Upward convergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "1 No change: 0 Became more prescriptive", "Upward convergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 No change: 1 Became more prescriptive", "Upward divergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "1 Became less prescriptive: 0 Became less prescriptive", "Downward parallell", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 Became less prescriptive: 1 Became less prescriptive", "Downward parallell", pattern)) %>%
mutate(pattern = ifelse(pattern == "1 Became more prescriptive: 0 Became more prescriptive", "Upward parallell", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 Became more prescriptive: 0 Became more prescriptive", "Upward parallell", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 Became more prescriptive: 1 Became more prescriptive", "Upward parallell", pattern)) %>%
mutate(pattern = ifelse(pattern == "1 Became less prescriptive: 0 Became more prescriptive", "Opposing divergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "1 Became more prescriptive: 0 Became less prescriptive", "Opposing divergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 Became less prescriptive: 1 Became more prescriptive", "Opposing divergence", pattern)) %>%
mutate(pattern = ifelse(pattern == "0 Became less prescriptive: 0 Became more prescriptive", "Opposing divergence", pattern)) # aesthetic feels wrong
# correction to above method
issues %<>% mutate(pattern = ifelse(pattern == "Upward parallell" & Issue == "Riparian", "Upward divergence", pattern))
# Get data
d <- read.csv(here("data/FSC.csv"))
d %<>% mutate(Increased = ifelse(Year > 2009 & Program == "FSC", Net.Change+4, Net.Change))
d %<>% mutate(Increased = ifelse(Year > 2009 & Program == "SFI", Net.Change+1, Increased))
d %<>% gather("Measure","Change", 6:7)
d %<>% group_by(Program)
d %<>% gather("Prescriptiveness","Issues", 3:5)
d$Prescriptiveness %<>% as.factor()
#d$Prescriptiveness %<>% relevel("Some.Requirements")
#d$Prescriptiveness %<>% relevel("Most.Prescriptive")
d %<>% ungroup()
d %<>% mutate(Program = ifelse(Program == "FSC", "Activist-backed FSC-US", Program))
d %<>% mutate(Program = ifelse(Program == "SFI", "Industry-backed SFI", Program))
d$Program %<>% as.factor()
d %<>% mutate(Measure = ifelse(Measure == "Net.Change", "Net Change", Measure))
# corrections
d %<>% mutate(Change = if_else(Year > 2009 & Program == "Activist-backed FSC-US" & Measure == "Increased", 21, Change))
d %<>% mutate(Change = if_else(Year > 2009 & Program == "Activist-backed FSC-US" & Measure == "Net Change", 17, Change))
d %<>% mutate(Change = if_else(Year > 2009 & Program == "Industry-backed SFI" & Measure == "Increased", 8, Change))
d %<>% mutate(Change = if_else(Year > 2012 & Program == "Industry-backed SFI" & Measure == "Increased", 9, Change))
d %<>% mutate(Change = if_else(Year > 2014 & Program == "Industry-backed SFI" & Measure == "Increased", 12, Change))
d %<>% mutate(Change = if_else(Year > 2009 & Program == "Industry-backed SFI" & Measure == "Net Change", 7, Change))
d %<>% mutate(Change = if_else(Year > 2012 & Program == "Industry-backed SFI" & Measure == "Net Change", 8, Change))
d %<>% mutate(Change = if_else(Year > 2014 & Program == "Industry-backed SFI" & Measure == "Net Change", 11, Change))
#data <- filter(d, Measure == "Increased")
SFIvFSCnetChange <- d
# PEFC FSC
d <- read.csv(here("data/PEFCvFSC.csv"))
# d %<>% mutate(Increased = ifelse(Year > 2009 & Program == "FSC", Net.Change+4, Increased))
# d %<>% mutate(Increased = ifelse(Year > 2009 & Program == "Industry-backed SFI", Net.Change+1, Increased))
# also not run for PEFC?
d %<>% gather("Measure","Change08", 7:8)
d %<>% mutate(Measure = ifelse(Measure == "Net.Change", "Net Change", Measure))
# d.fsc <- filter(d, Program == "Activist-backed FSC-US")
# d.sfi <- filter(d, Program == "Industry-backed SFI")
d$Program %<>% as.character()
d %<>% mutate(Program = ifelse(Program == "FSC-P&C", "Activist-backed FSC P&C", Program))
d %<>% mutate(Program = ifelse(Program == "PEFC", "Industry-backed PEFC", Program))
d$Program %<>% as.factor()
d$Strictist %<>% as.factor()
d$Year %<>% as.numeric()
d$Prescriptiveness %<>% as.factor()
levels(d$Prescriptiveness) <- c("No prescriptive requirements", "Some prescriptive requirements", "Most prescriptive")
d$Change %<>% as.factor()
levels(d$Change) <- c("Became less prescriptive", "No change", "Became more prescriptive")
d$Change %<>% relevel("Became more prescriptive")
d$Change %<>% relevel("Became less prescriptive")
PEFCvFSC <- d
PEFCvFSC$Issues %<>% as.factor()
#year as factor data
data <- filter(d, Measure == "Increased")
# data$Year %<>% as.factor()
d %<>% mutate(Change08 = ifelse(Year > 2011 & Program == "Activist-backed FSC P&C" & Measure == "Increased", 13, Change08))
d %<>% mutate(Change08 = ifelse(Year > 2011 & Program == "Activist-backed FSC P&C" & Measure == "No change", 9, Change08))
d %<>% mutate(Change08 = ifelse(Year > 2009 & Program == "Industry-backed PEFC", 18, Change08))
PEFCvFSCnetChange <- d
save.image(here("data/SFIvFSC.Rdata"))
/data/cleandata.R | no_license | judgelord/FSC-SFI | R | false | false | 7,362 | r
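The cleaning script above leans heavily on magrittr's compound assignment pipe; a two-line illustration of the idiom with a throwaway vector, in case it is unfamiliar.

library(magrittr)
x <- c(" FSC-US ", "SFI ")
x %<>% trimws()   # equivalent to x <- trimws(x)
x                 # "FSC-US" "SFI"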
answer_data='AAAAA.csv'
google_token='goolge_token.rds'
shiny_path=''
shiny_name=''
shiny_token=''
shiny_secret=''
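# placeholders, presumably for rsconnect deployment: fill in the Shiny account
# path/name/token/secret before publishing. Note the token filename is spelled
# 'goolge_token.rds'; keep it matching the actual file on disk.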
|
/config.R
|
no_license
|
samex4x4/Shiny_AUC_Score_Update
|
R
| false | false | 117 |
r
|
rm(list=ls())
library(car); library(stargazer)
setwd("D:/AP LARSON/HOLC")
dat <- read.csv("HOLCbyTractFinal.csv")
str(dat$cbsaname)
# Set up for panel model: create one 0/1 indicator column per CBSA
namevector <- as.character(unique(dat$cbsaname))
for (i in namevector){
  dat[, i] <- NA
}
for (i in 1:length(namevector)){
dat[i + 60] <- ifelse(dat$cbsaname == namevector[[i]], 1, 0)
}
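# Note: the i + 60 offset above assumes the CBSA indicator columns created in the
# previous loop start at column 61 of dat; adding or dropping columns upstream
# would silently break it. An offset-free equivalent (sketch, not run):
# for (nm in namevector) {
#   dat[[nm]] <- ifelse(dat$cbsaname == nm, 1, 0)
# }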
# Control by region
# See https://www2.census.gov/programs-surveys/popest/geographies/2015/state-geocodes-v2015.xls
xwalk <- read.csv("state-geocodes-v2015.csv")
xwalk <- xwalk[c(2:3)]
dat <- merge(dat, xwalk, by.x = "st", by.y = "statefp")
dat$regSouth <- ifelse(dat$name == "South Region", 1, 0)
dat$regMidwest <- ifelse(dat$name == "Midwest Region", 1, 0)
dat$regNortheast <- ifelse(dat$name == "Northeast Region", 1, 0)
dat$regWest <- ifelse(dat$name == "West Region", 1, 0)
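# four Census-region indicators; regMidwest is the omitted reference category
# in the regional model at the end of this script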
housingValue <- lm(thouHousVal ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
bed0 +
bed1 +
bed2 +
bed3 +
bed4 +
medAge +
completePlumb +
completeKitch, data = dat)
# print(summary(housingValue), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/housingValue.tex"
writeLines(capture.output(stargazer(housingValue,
style = "qje",
omit = 2:64,
dep.var.labels = "Home Value, $1000s",
covariate.labels = c("HOLC Rating",
"1 Bedroom",
"2 Bedrooms",
"3 Bedrooms",
"4 Bedrooms",
"Median Home Age",
"Complete Plumbing Facilities",
"Complete Kitchen Facilities"),
title = "Home Value, $1000s")), texFileName)
# Tests for multicollinearity. Must re-run
myCols <- c("quantScore", "thouHousVal", "bed0", "bed1",
"bed2", "bed3", "bed4", "medAge", "completePlumb", "completeKitch")
testCase <- dat[myCols]
round(cor(testCase, method = "pearson", use = "complete.obs"), digits = 3)
vif(housingValue)
tenure <- lm(pctOwn ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(tenure), digits = 3)
vif(tenure)
texFileName <- "D:/AP LARSON/HOLC/tenure.tex"
writeLines(capture.output(stargazer(tenure,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Own Home",
covariate.labels = "HOLC Rating",
title = "Percentage Home Ownership")), texFileName)
# May need to add additional ed variables
income <- lm(thouInc ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
edHighSchool, data = dat)
# print(summary(income), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/income.tex"
writeLines(capture.output(stargazer(income,
style = "qje",
omit = 2:64,
dep.var.labels = "Median Income, $1000s",
covariate.labels = c("HOLC Rating", "Pct. HS Grad or Equivalent"),
title = "Median Annual Household Income, $1000s")), texFileName)
myCols <- c("quantScore", "thouInc", "edHighSchool",
"edSomeColl", "edBach", "edGrad")
testCase <- dat[myCols]
round(cor(testCase, method = "pearson", use = "complete.obs"), digits = 3)
unemp <- lm(pctUnemp ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
edHighSchool, data = dat)
# print(summary(unemp), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/unemp.tex"
writeLines(capture.output(stargazer(unemp,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Unemployed",
covariate.labels = c("HOLC Rating", "Pct. HS Grad or Equivalent"),
title = "Unemployment Rate")), texFileName)
myCols <- c("quantScore", "pctUnemp", "edHighSchool",
"edSomeColl", "edBach", "edGrad")
testCase <- dat[myCols]
round(cor(testCase, method = "pearson", use = "complete.obs"), digits = 3)
zeroCar <- lm(zeroCar ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(zeroCar), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/zeroCar.tex"
writeLines(capture.output(stargazer(zeroCar,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Zero-Car Households",
covariate.labels = "HOLC Rating",
title = "Percentage Zero-Car Households")), texFileName)
myCols <- c("zeroCar", "incomeData")
testCase <- dat[myCols]
cor(testCase, method = "pearson", use = "complete.obs")
singParent <- lm(singParentHH ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(singParent), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/singParent.tex"
writeLines(capture.output(stargazer(singParent,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Single-Parent HHs",
covariate.labels = "HOLC Rating",
title = "Percentage Single-Parent Households")), texFileName)
comBl10 <- lm(comBl10 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(comBl10), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/comBl10.tex"
writeLines(capture.output(stargazer(comBl10,
style = "qje",
omit = 2:64,
title = "Percentage Commutes Below 10 Minutes")), texFileName)
com10 <- lm(com10 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com10), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com10.tex"
writeLines(capture.output(stargazer(com10,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 10-19 Minutes")), texFileName)
com20 <- lm(com20 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com20), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com20.tex"
writeLines(capture.output(stargazer(com20,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 20-29 Minutes")), texFileName)
com30 <- lm(com30 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com30), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com30.tex"
writeLines(capture.output(stargazer(com30,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 30-39 Minutes")), texFileName)
com40 <- lm(com40 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com40), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com40.tex"
writeLines(capture.output(stargazer(com40,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 40-59 Minutes")), texFileName)
com60 <- lm(com60 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com60), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com60.tex"
writeLines(capture.output(stargazer(com60,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 60 or More Minutes")), texFileName)
texFileName <- "D:/AP LARSON/HOLC/allCom.tex"
writeLines(capture.output(stargazer(comBl10, com10, com20, com30, com40, com60,
style = "qje",
omit = 2:64,
title = "Percentage Commutes by Duration (Minutes)")), texFileName)
dat$allBl149 <- dat$pct100 + dat$pct149
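# allBl149: share of residents below 149% of the federal poverty level
# (below-100% share plus 100-149% share)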
poverty <- lm(allBl149 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(poverty), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/poverty.tex"
writeLines(capture.output(stargazer(poverty,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Below 149% FPL",
covariate.labels = "HOLC Rating",
title = "Percentage Residents Below 149% FPL")), texFileName)
deepPoverty <- lm(pct100 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(deepPoverty), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/deepPoverty.tex"
writeLines(capture.output(stargazer(deepPoverty,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Below 100% FPL",
covariate.labels = "HOLC Rating",
title = "Percentage Residents Below 100% FPL")), texFileName)
nWht <- lm(pctWht ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(nWht), digits = 4)
texFileName <- "D:/AP LARSON/HOLC/nWht.tex"
writeLines(capture.output(stargazer(nWht,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. White Residents",
covariate.labels = "HOLC Rating",
title = "Percentage White Residents")), texFileName)
nBlk <- lm(pctBlk ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(nBlk), digits = 4)
texFileName <- "D:/AP LARSON/HOLC/nBlk.tex"
writeLines(capture.output(stargazer(nBlk,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Black Residents",
covariate.labels = "HOLC Rating",
title = "Percentage Black Residents")), texFileName)
nHisp <- lm(pctHisp ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(nHisp), digits = 4)
texFileName <- "D:/AP LARSON/HOLC/nHisp.tex"
writeLines(capture.output(stargazer(nHisp,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Hispanic/Latino Residents",
covariate.labels = "HOLC Rating",
title = "Percentage Hispanic/Latino Residents")), texFileName)
rentCost <- lm(hunMedRent ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(rentCost), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/rentCost.tex"
writeLines(capture.output(stargazer(rentCost,
style = "qje",
omit = 2:64,
dep.var.labels = "Median Rent, $100s",
covariate.labels = "HOLC Rating",
title = "Median Rent, $100s")), texFileName)
grapi <- lm(grapi ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
hunMedRent,
data = dat)
# print(summary(grapi), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/grapi.tex"
writeLines(capture.output(stargazer(grapi,
style = "qje",
omit = 2:64,
dep.var.labels = "GRAPI",
covariate.labels = c("HOLC Rating", "Median Rent, $100s"),
title = "Gross Rent as a Percentage of Annual Income")), texFileName)
myCols <- c("grapi", "hunMedRent")
testCase <- dat[myCols]
cor(testCase, method = "pearson", use = "complete.obs")
# For fun, by region. Doesn't control for within-city corr, though
housingValueR <- lm(thouHousVal ~ quantScore +
regWest +
regNortheast +
regSouth +
bed0 +
bed1 +
bed2 +
bed3 +
bed4 +
medAge +
completePlumb +
completeKitch, data = dat)
# print(summary(housingValueR), digits = 3)
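# One way to address the within-city correlation noted above is to keep the
# point estimates but report cluster-robust standard errors by CBSA (sketch;
# assumes the sandwich and lmtest packages are installed, not run here):
# library(sandwich); library(lmtest)
# coeftest(housingValueR, vcov = vcovCL, cluster = ~cbsaname)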
|
/Regressions.R
|
no_license
|
addisonlarson/HOLCModeling
|
R
| false | false | 64,351 |
r
|
rm(list=ls())
library(car); library(stargazer)
setwd("D:/AP LARSON/HOLC")
dat <- read.csv("HOLCbyTractFinal.csv")
str(dat$cbsaname)
# Set up for panel model
namevector <- as.character(unique(dat$cbsaname))
for (i in namevector){
dat[,namevector] <- NA
}
for (i in 1:length(namevector)){
dat[i + 60] <- ifelse(dat$cbsaname == namevector[[i]], 1, 0)
}
# Control by region
# See https://www2.census.gov/programs-surveys/popest/geographies/2015/state-geocodes-v2015.xls
xwalk <- read.csv("state-geocodes-v2015.csv")
xwalk <- xwalk[c(2:3)]
dat <- merge(dat, xwalk, by.x = "st", by.y = "statefp")
dat$regSouth <- ifelse(dat$name == "South Region", 1, 0)
dat$regMidwest <- ifelse(dat$name == "Midwest Region", 1, 0)
dat$regNortheast <- ifelse(dat$name == "Northeast Region", 1, 0)
dat$regWest <- ifelse(dat$name == "West Region", 1, 0)
housingValue <- lm(thouHousVal ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
bed0 +
bed1 +
bed2 +
bed3 +
bed4 +
medAge +
completePlumb +
completeKitch, data = dat)
# print(summary(housingValue), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/housingValue.tex"
writeLines(capture.output(stargazer(housingValue,
style = "qje",
omit = 2:64,
dep.var.labels = "Home Value, $1000s",
covariate.labels = c("HOLC Rating",
"1 Bedroom",
"2 Bedrooms",
"3 Bedrooms",
"4 Bedrooms",
"Median Home Age",
"Complete Plumbing Facilities",
"Complete Kitchen Facilities"),
title = "Home Value, $1000s")), texFileName)
# Tests for multicollinearity. Must re-run
myCols <- c("quantScore", "thouHousVal", "bed0", "bed1",
"bed2", "bed3", "bed4", "medAge", "completePlumb", "completeKitch")
testCase <- dat[myCols]
round(cor(testCase, method = "pearson", use = "complete.obs"), digits = 3)
vif(housingValue)
tenure <- lm(pctOwn ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(tenure), digits = 3)
vif(tenure)
texFileName <- "D:/AP LARSON/HOLC/tenure.tex"
writeLines(capture.output(stargazer(tenure,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Own Home",
covariate.labels = "HOLC Rating",
title = "Percentage Home Ownership")), texFileName)
# May need to add additional ed variables
income <- lm(thouInc ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
edHighSchool, data = dat)
# print(summary(income), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/income.tex"
writeLines(capture.output(stargazer(income,
style = "qje",
omit = 2:64,
dep.var.labels = "Median Income, $1000s",
covariate.labels = c("HOLC Rating", "Pct. HS Grad or Equivalent"),
title = "Median Annual Household Income, $1000s")), texFileName)
myCols <- c("quantScore", "thouInc", "edHighSchool",
"edSomeColl", "edBach", "edGrad")
testCase <- dat[myCols]
round(cor(testCase, method = "pearson", use = "complete.obs"), digits = 3)
unemp <- lm(pctUnemp ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
edHighSchool, data = dat)
# print(summary(unemp), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/unemp.tex"
writeLines(capture.output(stargazer(unemp,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Unemployed",
covariate.labels = c("HOLC Rating", "Pct. HS Grad or Equivalent"),
title = "Unemployment Rate")), texFileName)
myCols <- c("quantScore", "pctUnemp", "edHighSchool",
"edSomeColl", "edBach", "edGrad")
testCase <- dat[myCols]
round(cor(testCase, method = "pearson", use = "complete.obs"), digits = 3)
zeroCar <- lm(zeroCar ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(zeroCar), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/zeroCar.tex"
writeLines(capture.output(stargazer(zeroCar,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Zero-Car Households",
covariate.labels = "HOLC Rating",
title = "Percentage Zero-Car Households")), texFileName)
myCols <- c("zeroCar", "incomeData")
testCase <- dat[myCols]
cor(testCase, method = "pearson", use = "complete.obs")
singParent <- lm(singParentHH ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(singParent), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/singParent.tex"
writeLines(capture.output(stargazer(singParent,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Single-Parent HHs",
covariate.labels = "HOLC Rating",
title = "Percentage Single-Parent Households")), texFileName)
comBl10 <- lm(comBl10 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(comBl10), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/comBl10.tex"
writeLines(capture.output(stargazer(comBl10,
style = "qje",
omit = 2:64,
title = "Percentage Commutes Below 10 Minutes")), texFileName)
com10 <- lm(com10 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com10), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com10.tex"
writeLines(capture.output(stargazer(com10,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 10-19 Minutes")), texFileName)
com20 <- lm(com20 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com20), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com20.tex"
writeLines(capture.output(stargazer(com20,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 20-29 Minutes")), texFileName)
com30 <- lm(com30 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com30), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com30.tex"
writeLines(capture.output(stargazer(com30,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 30-39 Minutes")), texFileName)
com40 <- lm(com40 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com40), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com40.tex"
writeLines(capture.output(stargazer(com40,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 40-59 Minutes")), texFileName)
com60 <- lm(com60 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(com60), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/com60.tex"
writeLines(capture.output(stargazer(com60,
style = "qje",
omit = 2:64,
title = "Percentage Commutes 60 or More Minutes")), texFileName)
texFileName <- "D:/AP LARSON/HOLC/allCom.tex"
writeLines(capture.output(stargazer(comBl10, com10, com20, com30, com40, com60,
style = "qje",
omit = 2:64,
title = "Percentage Commutes by Duration (Minutes)")), texFileName)
dat$allBl149 <- dat$pct100 + dat$pct149
poverty <- lm(allBl149 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(poverty), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/poverty.tex"
writeLines(capture.output(stargazer(poverty,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Below 149% FPL",
covariate.labels = "HOLC Rating",
title = "Percentage Residents Below 149% FPL")), texFileName)
deepPoverty <- lm(pct100 ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(deepPoverty), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/deepPoverty.tex"
writeLines(capture.output(stargazer(deepPoverty,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Below 100% FPL",
covariate.labels = "HOLC Rating",
title = "Percentage Residents Below 100% FPL")), texFileName)
nWht <- lm(pctWht ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(nWht), digits = 4)
texFileName <- "D:/AP LARSON/HOLC/nWht.tex"
writeLines(capture.output(stargazer(nWht,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. White Residents",
covariate.labels = "HOLC Rating",
title = "Percentage White Residents")), texFileName)
nBlk <- lm(pctBlk ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(nBlk), digits = 4)
texFileName <- "D:/AP LARSON/HOLC/nBlk.tex"
writeLines(capture.output(stargazer(nBlk,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Black Residents",
covariate.labels = "HOLC Rating",
title = "Percentage Black Residents")), texFileName)
nHisp <- lm(pctHisp ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(nHisp), digits = 4)
texFileName <- "D:/AP LARSON/HOLC/nHisp.tex"
writeLines(capture.output(stargazer(nHisp,
style = "qje",
omit = 2:64,
dep.var.labels = "Pct. Hispanic/Latino Residents",
covariate.labels = "HOLC Rating",
title = "Percentage Hispanic/Latino Residents")), texFileName)
rentCost <- lm(hunMedRent ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC`,
# `Youngstown-Warren-Boardman, OH-PA` +
data = dat)
# print(summary(rentCost), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/rentCost.tex"
writeLines(capture.output(stargazer(rentCost,
style = "qje",
omit = 2:64,
dep.var.labels = "Median Rent, $100s",
covariate.labels = "HOLC Rating",
title = "Median Rent, $100s")), texFileName)
grapi <- lm(grapi ~ quantScore +
`Akron, OH` +
`Albany-Schenectady-Troy, NY` +
`Atlanta-Sandy Springs-Roswell, GA` +
`Atlantic City-Hammonton, NJ` +
`Baltimore-Columbia-Towson, MD` +
`Binghamton, NY` +
`Boston, MA` +
`Buffalo-Cheektowaga-Niagara Falls, NY` +
`Cambridge-Newton-Framingham, MA` +
`Camden, NJ` +
`Charlotte-Concord-Gastonia, NC-SC` +
`Chattanooga, TN-GA` +
`Chicago-Naperville-Arlington Heights, IL` +
`Cleveland-Elyria, OH` +
`Columbus, OH` +
`Dallas-Plano-Irving, TX` +
`Dayton, OH` +
`Detroit-Dearborn-Livonia, MI` +
`Duluth, MN-WI` +
`Erie, PA` +
`Evansville, IN-KY` +
`Flint, MI` +
`Fort Wayne, IN` +
`Gary, IN` +
`Grand Rapids-Wyoming, MI` +
`Houston-The Woodlands-Sugar Land, TX` +
`Indianapolis-Carmel-Anderson, IN` +
`Jacksonville, FL` +
`Kansas City, MO-KS` +
`Knoxville, TN` +
`Lake County-Kenosha County, IL-WI` +
`Louisville/Jefferson County, KY-IN` +
`Madison, WI` +
`Manchester-Nashua, NH` +
`Miami-Miami Beach-Kendall, FL` +
`Milwaukee-Waukesha-West Allis, WI` +
`Minneapolis-St. Paul-Bloomington, MN-WI` +
`Montgomery County-Bucks County-Chester County, PA` +
`Nashville-Davidson--Murfreesboro--Franklin, TN` +
`New Orleans-Metairie, LA` +
`New York-Jersey City-White Plains, NY-NJ` +
`Newark, NJ-PA` +
`Philadelphia, PA` +
`Pittsburgh, PA` +
`Portland-Vancouver-Hillsboro, OR-WA` +
`Richmond, VA` +
`Roanoke, VA` +
`Rochester, NY` +
`Rockford, IL` +
`Seattle-Bellevue-Everett, WA` +
`South Bend-Mishawaka, IN-MI` +
`Spokane-Spokane Valley, WA` +
`St. Louis, MO-IL` +
`Syracuse, NY` +
`Tacoma-Lakewood, WA` +
`Tampa-St. Petersburg-Clearwater, FL` +
`Toledo, OH` +
`Trenton, NJ` +
`Utica-Rome, NY` +
`Virginia Beach-Norfolk-Newport News, VA-NC` +
`Warren-Troy-Farmington Hills, MI` +
`Winston-Salem, NC` +
# `Youngstown-Warren-Boardman, OH-PA` +
hunMedRent,
data = dat)
# print(summary(grapi), digits = 3)
texFileName <- "D:/AP LARSON/HOLC/grapi.tex"
writeLines(capture.output(stargazer(grapi,
style = "qje",
omit = 2:64,
dep.var.labels = "GRAPI",
covariate.labels = c("HOLC Rating", "Median Rent, $100s"),
title = "Gross Rent as a Percentage of Annual Income")), texFileName)
myCols <- c("grapi", "hunMedRent")
testCase <- dat[myCols]
cor(testCase, method = "pearson", use = "complete.obs")
# For fun, by region. Doesn't control for within-city corr, though
housingValueR <- lm(thouHousVal ~ quantScore +
regWest +
regNortheast +
regSouth +
bed0 +
bed1 +
bed2 +
bed3 +
bed4 +
medAge +
completePlumb +
completeKitch, data = dat)
# print(summary(housingValueR), digits = 3)
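# --- Hedged sketch (not part of the original script) ------------------------
# The models above repeat the same ~60 backticked MSA dummy columns for every
# outcome. A formula built with reformulate() avoids retyping them. The grep()
# pattern below is a heuristic for picking out the MSA indicator columns of
# `dat` (names ending in a comma plus state code), and the outcome names in the
# example calls are taken from the models above; adjust both if they do not
# match the actual data frame.
msa_dummies <- grep(", [A-Z]{2}(-[A-Z]{2})?$", names(dat), value = TRUE)
fit_holc_model <- function(outcome, extra_covariates = character(0)) {
  rhs <- c("quantScore", extra_covariates, sprintf("`%s`", msa_dummies))
  lm(reformulate(rhs, response = outcome), data = dat)
}
# Example usage mirroring two of the models fitted above:
# unemp_fe <- fit_holc_model("pctUnemp", "edHighSchool")
# zeroCar_fe <- fit_holc_model("zeroCar")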
|
FDRhistBaySeq <- function(results_BaySeq, the.file, Project){
db.cache <- InitDb(db.name='fdrhistbays_db', db.path=file.path("RNASeqGUI_Projects",Project,"Logs","cache"))
SaveInCache(db.cache, the.file, "thefile_key")
SaveInCache(db.cache, Project, "project_key")
SaveInCache(db.cache, results_BaySeq, "res_key")
#results_BaySeq <- read.table("results_BaySeq.txt", header=TRUE, row.names=1)
hist(results_BaySeq$FDR, breaks=100, col="gold", border="red", main="FDR Histogram", xlab="FDR", ylab="Frequency")
if(Sys.info()[[1]]=="Windows"){
a=paste(getwd(),"\\RNASeqGUI_Projects\\",Project,"\\Plots\\",sep="")
the.file2 = strsplit(the.file,"\\\\")
the.file2 = the.file2[[1]][length(the.file2[[1]])] # extract the file name
the.file2 = substring(the.file2,1,nchar(the.file2)-4) # eliminates ".txt"
outputName=paste(the.file2,"_FDR_hist.pdf", sep="")
b=paste(a,outputName,sep="\\")
dev.print(device = pdf, file=b)
#write into the project report
report=paste("RNASeqGUI_Projects\\",Project,"\\Logs\\report.Rmd",sep="")
message5 <- paste(" ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message2 <- paste(" * In the *Result Inspection Interface*, you clicked the **FDR Hist** button for BaySeq at `", Sys.time(),"` and the ",outputName," file has been saved in the `", Project,"\\Plots` folder.", sep="")
write(message2, file = report,ncolumns = if(is.character(message2)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message2 <- paste("You chose the following count file: `",the.file,"`, Project: `",Project,"`, ",sep="\n")
write(message2, file = report,ncolumns = if(is.character(message2)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" ```{r} ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("db.cache <- InitDb(db.name='fdrhistbays_db', db.path='cache')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" results_BaySeq <- LoadCachedObject(db.cache, 'res_key')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file <- LoadCachedObject(db.cache, 'thefile_key')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" Project <- LoadCachedObject(db.cache, 'project_key')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
# message5 <- paste("res = read.table(", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
#
# message5 <- paste("'",the.file,"',header=TRUE,row.names=1)", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
#
# message5 <- paste("the.file ='",the.file,"'", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
#
# message5 <- paste("Project ='", Project,"'", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("print('This file has been loaded: ')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("print(head(results_BaySeq))",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file2 = strsplit(the.file,'\\')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file2 = the.file2[[1]][length(the.file2[[1]])] #estract the namefile",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file2 = substring(the.file2,1,nchar(the.file2)-4) # eliminates '.txt'",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("hist(results_BaySeq$FDR, breaks=100, col='gold', border='red', ",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("main='FDR Histogram', xlab='FDR', ylab='Frequency') ",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("outputName=paste(the.file2,'_FDR_hist.pdf', sep='')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("a=paste(getwd(),'\\RNASeqGUI_Projects\\',Project,'\\Plots\\',sep='')",sep="")
message5 <- paste("b=paste(a,outputName,sep='\\')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("# dev.print(device = pdf, file=b)",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message8 <- paste(" ``` ",sep="\n")
write(message8, file = report,ncolumns = if(is.character(message8)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" _______________________________________________________________________ ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
}else{ #Linux
a=paste(getwd(),"/RNASeqGUI_Projects/",Project,"/Plots/",sep="")
the.file2 = strsplit(the.file,"/")
the.file2 = the.file2[[1]][length(the.file2[[1]])] # extract the file name
the.file2 = substring(the.file2,1,nchar(the.file2)-4) # eliminates ".txt"
outputName=paste(the.file2,"_FDR_hist.pdf", sep="")
b=paste(a,outputName,sep="/")
dev.print(device = pdf, file=b)
#write into the project report
report=paste("RNASeqGUI_Projects/",Project,"/Logs/report.Rmd",sep="")
message5 <- paste(" ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message2 <- paste(" * In the *Result Inspection Interface*, you clicked the **FDR Hist** button for BaySeq at `", Sys.time(),"` and the ",outputName," file has been saved in the `", Project,"/Plots` folder.", sep="")
write(message2, file = report,ncolumns = if(is.character(message2)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message2 <- paste("You chose the following count file: `",the.file,"`, Project: `",Project,"`, ",sep="\n")
write(message2, file = report,ncolumns = if(is.character(message2)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" ```{r} ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("db.cache <- InitDb(db.name='fdrhistbays_db', db.path='cache')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" results_BaySeq <- LoadCachedObject(db.cache, 'res_key')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file <- LoadCachedObject(db.cache, 'thefile_key')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" Project <- LoadCachedObject(db.cache, 'project_key')", sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
# message5 <- paste("res = read.table(", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
#
# message5 <- paste("'",the.file,"',header=TRUE,row.names=1)", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
#
# message5 <- paste("the.file ='",the.file,"'", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
#
# message5 <- paste("Project ='", Project,"'", sep="")
# write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("print('This file has been loaded: ')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("print(head(results_BaySeq))",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file2 = strsplit(the.file,'/')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file2 = the.file2[[1]][length(the.file2[[1]])] #estract the namefile",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("the.file2 = substring(the.file2,1,nchar(the.file2)-4) # eliminates '.txt'",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("hist(results_BaySeq$FDR, breaks=100, col='gold', border='red', ",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("main='FDR Histogram', xlab='FDR', ylab='Frequency') ",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("outputName=paste(the.file2,'_prob_hist.pdf', sep='')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("a=paste(getwd(),'/RNASeqGUI_Projects/',Project,'/Plots/',sep='')",sep="")
message5 <- paste("b=paste(a,outputName,sep='/')",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste("# dev.print(device = pdf, file=b)",sep="")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
message8 <- paste(" ``` ",sep="\n")
write(message8, file = report,ncolumns = if(is.character(message8)) 1 else 5,append = TRUE, sep = "\n")
message5 <- paste(" _______________________________________________________________________ ",sep="\n")
write(message5, file = report,ncolumns = if(is.character(message5)) 1 else 5,append = TRUE, sep = "\n")
}
}
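# --- Hedged sketch (not part of the original package) -----------------------
# The function body above appends each report line with an identical write()
# call. A small helper like the one below (report_line is a hypothetical name)
# would do the same thing with far less repetition; since every message is a
# character string, ncolumns is always 1.
report_line <- function(report, ...) {
  write(paste(..., sep = ""), file = report, ncolumns = 1, append = TRUE, sep = "\n")
}
# Example usage inside FDRhistBaySeq():
# report_line(report, " ```{r} ")
# report_line(report, "print('This file has been loaded: ')")
# report_line(report, "print(head(results_BaySeq))")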
|
/R/FDRhistBaySeq.R
|
no_license
|
drighelli/RNASeqGUI
|
R
| false | false | 11036 |
r
|
install.packages("XLConnect")
library(XLConnect)
library(httr)
library(devtools)
library(twitteR)      # the Twitter client package is called 'twitteR'
library(base64enc)
# Twitter app credentials (as supplied in the original script):
# Consumer Key (API Key): y0p7r2CZ5NJKoDYlz7NSphMLz
# Consumer Secret (API Secret): vaCgJCpKzSQAYhTPoURKaEAEE8AbuNEGSsplmd4hZES3lk0ScO
# Access Token: 868377331898826752-wBX6bXzsC9WUVSchpzPk4HwNKtapViQ
# Access Token Secret: SLVTXCWNW2MAO77JItPzGSvUFiapQW1I9pRBKS5QNrpJ3
consumerkey="y0p7r2CZ5NJKoDYlz7NSphMLz"
consumersecret="vaCgJCpKzSQAYhTPoURKaEAEE8AbuNEGSsplmd4hZES3lk0ScO"
accesstoken="868377331898826752-wBX6bXzsC9WUVSchpzPk4HwNKtapViQ"
accesssecret="SLVTXCWNW2MAO77JItPzGSvUFiapQW1I9pRBKS5QNrpJ3"
setup_twitter_oauth(consumerkey, consumersecret, accesstoken, accesssecret)
some_tweets = searchTwitter("sachinabilliondreams", n=100, lang="en")
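# Hedged follow-up (not in the original script): twitteR returns a list of
# status objects, so converting them to a data frame is the usual next step;
# the output file name here is illustrative.
some_tweets_df = twListToDF(some_tweets)
write.csv(some_tweets_df, "sachinabilliondreams_tweets.csv", row.names = FALSE)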
|
/Email/twitter.R
|
no_license
|
adityakumbhavdekar/iris
|
R
| false | false | 671 |
r
|
install.packages('Seurat')
library(Seurat)
install.packages("dplyr")
library(dplyr)
install.packages('Rtsne')
library(Rtsne)
install.packages("reticulate")
library(reticulate)
reticulate::py_install(packages = 'umap-learn')
use_condaenv(condaenv="Renv", conda="/home/druss/anaconda3/bin/conda")
library(umap)
library(cowplot)
# Read expression files
# In joseph's case these are FPKM normalized results from Tophat - Cufflinks pipeline
# control <- read.table(file = "/Users/pbanerjee/Documents/CBU/CNMC_Projects/Neuroscience/Joseph/joseph_control.csv", sep = ",")
# str(control [,1:20])
# treatment <- read.table(file ="/Users/pbanerjee/Documents/CBU/CNMC_Projects/Neuroscience/Joseph/joseph_treatment.csv", sep = ",")
#
# control_rpkm_log <- log(control_rpkm + 1,2) # put on the log scale
#
# hist(as.vector(as.matrix(control_rpkm)))
# dim(control_rpkm)
# head(rownames(control_rpkm))
# head(colnames(control_rpkm))
# extract top 1000 variable genes
# gene.var <- apply(control_rpkm, 1, function(x) var(x[x>0]))
# control_rpkm.top1000 <- control_rpkm[which(rank(-gene.var)<=1000),]
#
# rpkm.pca <- prcomp(control_rpkm.top1000,
# center = TRUE,
# scale. = TRUE)
# summary(rpkm.pca)$importance[,1:5]
#
# plot(rpkm.pca, type="l", main="Top 10 PCs")
#
# #it's often advantageous to run a quick association of the top components of variation with your known variables
# #Association with PC1
# PC1 = rpkm.pca$rotation[,1]
# model.pc1 <- anova(lm(PC1 ~. , df[,-c(2)]))
# #Association with PC2
# PC2 = rpkm.pca$rotation[,2]
# model.pc2 <- anova(lm(PC2 ~. , df[,-c(1)]))
# summary(model.pc2)
####### Seurat ######
#####################
####### Control ######
#####################
control_rpkm <- read.csv("/Users/pbanerjee/Documents/CBU/CNMC_Projects/Neuroscience/Joseph/joseph_control.csv", stringsAsFactors = FALSE, header=TRUE, row.names = 1)
str(control_rpkm [,1:20]) # restrict to the first 20 columns (cells)
# Set up control
# ctrl_assay <- CreateAssayObject(counts = control, min.cells = 3)
ctrl <- CreateSeuratObject(counts = control_rpkm, project = "Joseph_CTRL", min.cells = 3)
# ctrl$treat <- "CTRL"
# ctrl <- subset(ctrl, subset = nFeature_RNA > 500)
# ctrl <- NormalizeData(ctrl, verbose = FALSE) # since this is already normaliozed
ctrl_variable_features <- FindVariableFeatures(ctrl, selection.method = "vst", nfeatures = 2000)
# Identify the 10 most highly variable genes
ctrl_variable_feature_id <- VariableFeatures(ctrl_variable_features)
write.csv(ctrl_variable_feature_id, "ctrl_variable_features")
top10_ctrl_variable_feature_id <- head(VariableFeatures(ctrl_variable_features), 10)
write.csv(top10_ctrl_variable_feature_id, "top10_ctrl_variable_features")
# plot variable features with and without labels
plot_ctrl_variable_features <- VariableFeaturePlot(ctrl_variable_features)
plot_ctrl_variable_features_top10 <- LabelPoints(plot = plot_ctrl_variable_features, points = top10_ctrl_variable_feature_id, repel = TRUE)
#CombinePlots(plots = list(plot_ctrl_variable_features, plot_ctrl_variable_features_top10))
#Scale
all.genes <- rownames(ctrl_variable_features)
scaled_ctrl_variable_features <- ScaleData(ctrl_variable_features, features = all.genes)
#PCA
pca_ctrl <- RunPCA(scaled_ctrl_variable_features, features = VariableFeatures(object = scaled_ctrl_variable_features))
# Examine and visualize PCA results a few different ways
print(pca_ctrl[["pca"]], dims = 1:5, nfeatures = 5)
#Plots and Visualization
vizdimload_ctrl_plot <- VizDimLoadings(pca_ctrl, dims = 1:2, reduction = "pca")
dimplot_ctrl_plot <- DimPlot(pca_ctrl, reduction = "pca")
#FindNeighbors
ctrl_neighbors <- FindNeighbors(pca_ctrl, dims = 1:10)
ctrl_clusters <- FindClusters(ctrl_neighbors, resolution = 0.5)
ctrl_clusters <- FindClusters(object = ctrl_neighbors, reduction.type = "pca", dims.use = 1:10,
resolution = 0.6, print.output = 0, save.SNN = TRUE)
# Look at cluster IDs of the first 25 cells
head(Idents(ctrl_clusters), 25)
ctrl_tsne <- RunTSNE(object = ctrl_clusters, dims.use = 1:5, do.fast = TRUE, perplexity = 1)
# note that you can set do.label=T to help label individual clusters
TSNEPlot(object = ctrl_tsne)
# find all markers of cluster 1
cluster1.markers <- FindMarkers(object = ctrl_tsne, ident.1 = 0, min.pct = 0.25)
# find markers for every cluster compared to all remaining cells, report only the positive ones
ctrl_markers <- FindAllMarkers(ctrl_tsne, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
ctrl_cluster1.markers <- FindMarkers(ctrl_clusters, ident.1 = 0, logfc.threshold = 0.25, test.use = "roc", only.pos = TRUE)
####### Treatment ######
#####################
# treatment <- read.table(file ="/Users/pbanerjee/Documents/CBU/CNMC_Projects/Neuroscience/Joseph/joseph_treatment.csv", sep = ",")
treatment_rpkm <- read.csv("/Users/pbanerjee/Documents/CBU/CNMC_Projects/Neuroscience/Joseph/joseph_treatment.csv", stringsAsFactors = FALSE, header=TRUE, row.names = 1)
str(treatment_rpkm [,1:20]) # restrict to the first 20 columns (cells)
# Set up control
trt <- CreateSeuratObject(counts = treatment_rpkm, project = "Joseph_TRT", min.cells = 3)
# ctrl$treat <- "CTRL"
# ctrl <- subset(ctrl, subset = nFeature_RNA > 500)
# ctrl <- NormalizeData(ctrl, verbose = FALSE) # since this is already normaliozed
trt_variable_features <- FindVariableFeatures(trt, selection.method = "vst", nfeatures = 2000)
trt_variable_feature_id <- VariableFeatures(trt_variable_features)
write.csv(trt_variable_feature_id, "trt_variable_features")
top10_trt_variable_feature_id <- head(VariableFeatures(trt_variable_features), 10)
write.csv(top10_trt_variable_feature_id, "top10_trt_variable_features")
# plot variable features with and without labels
plot_trt_variable_features <- VariableFeaturePlot(trt_variable_features)
plot_trt_variable_features_top10 <- LabelPoints(plot = plot_trt_variable_features, points = top10_trt_variable_feature_id, repel = TRUE)
#CombinePlots(plots = list(plot_ctrl_variable_features, plot_ctrl_variable_features_top10))
#Scale
all.genes.trt <- rownames(trt_variable_features)
scaled_trt_variable_features <- ScaleData(trt_variable_features, features = all.genes.trt)
#PCA
pca_trt <- RunPCA(scaled_trt_variable_features, features = VariableFeatures(object = scaled_trt_variable_features))
# Examine and visualize PCA results a few different ways
print(pca_trt[["pca"]], dims = 1:5, nfeatures = 5)
#Plots and Visualization
vizdimload_trt_plot <- VizDimLoadings(pca_trt, dims = 1:2, reduction = "pca")
dimplot_trt_plot <- DimPlot(pca_trt, reduction = "pca")
#FindNeighbors
trt_neighbors <- FindNeighbors(pca_trt, dims = 1:10)
trt_clusters <- FindClusters(trt_neighbors, resolution = 0.5)
trt_clusters <- FindClusters(object = trt_neighbors, reduction.type = "pca", dims.use = 1:10,
resolution = 0.6, print.output = 0, save.SNN = TRUE)
# Look at cluster IDs of the first 25 cells
head(Idents(trt_clusters), 25)
trt_tsne <- RunTSNE(object = trt_clusters, dims.use = 1:5, do.fast = TRUE)
# note that you can set do.label=T to help label individual clusters
TSNEPlot(object = trt_tsne)
# find all markers of cluster 1
trt.cluster1.markers <- FindMarkers(object = trt_tsne, ident.1 = 1, min.pct = 0.25)
write.csv(trt.cluster1.markers, "treatment_cluster1_markers")
# find markers for every cluster compared to all remaining cells, report only the positive ones
trt_markers <- FindAllMarkers(trt_tsne, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.csv(trt_markers, "treatment_markers")
################ Combined ###########################
#####################################################
combined_rpkm <- read.csv("/Users/pbanerjee/Documents/CBU/CNMC_Projects/Neuroscience/Joseph/joseph_combined.csv", stringsAsFactors = FALSE, header=TRUE, row.names = 1)
str(combined_rpkm [,1:20]) # restrict to the first 20 columns (cells)
# Set up control
comb <- CreateSeuratObject(counts = combined_rpkm, project = "Joseph_Comb", min.cells = 3)
# ctrl$treat <- "CTRL"
# ctrl <- subset(ctrl, subset = nFeature_RNA > 500)
# ctrl <- NormalizeData(ctrl, verbose = FALSE) # since this is already normaliozed
comb_variable_features <- FindVariableFeatures(comb, selection.method = "vst", nfeatures = 2000)
comb_variable_feature_id <- VariableFeatures(comb_variable_features)
write.csv(comb_variable_feature_id, "comb_variable_features")
top10_comb_variable_feature_id <- head(VariableFeatures(comb_variable_features), 10)
write.csv(top10_comb_variable_feature_id, "top10_comb_variable_features")
# plot variable features with and without labels
plot_comb_variable_features <- VariableFeaturePlot(comb_variable_features)
plot_comb_variable_features_top10 <- LabelPoints(plot = plot_comb_variable_features, points = top10_comb_variable_feature_id, repel = TRUE)
#Scale
all.genes.comb <- rownames(comb_variable_features)
scaled_comb_variable_features <- ScaleData(comb_variable_features, features = all.genes.comb)
#PCA
pca_comb <- RunPCA(scaled_comb_variable_features, features = VariableFeatures(object = scaled_comb_variable_features))
# Examine and visualize PCA results a few different ways
print(pca_comb[["pca"]], dims = 1:5, nfeatures = 5)
#Plots and Visualization
vizdimload_comb_plot <- VizDimLoadings(pca_comb, dims = 1:2, reduction = "pca")
dimplot_comb_plot <- DimPlot(pca_comb, reduction = "pca")
DimHeatmap(pca_comb, nfeatures = 20, dims = 3, cells = 500, balanced = TRUE)
DimHeatmap(pca_comb, dims = 1:15, cells = 500, balanced = TRUE)
#FindNeighbors
comb_neighbors <- FindNeighbors(pca_comb, dims = 1:10)
#comb_clusters <- FindClusters(comb_neighbors, resolution = 0.5)
comb_clusters <- FindClusters(object = comb_neighbors, reduction.type = "pca", dims.use = 1:10,
resolution = 0.6, print.output = 0, save.SNN = TRUE)
# Look at cluster IDs of the first 25 cells
head(Idents(comb_clusters), 25)
comb_tsne <- RunTSNE(object = comb_clusters, dims.use = 1:5, do.fast = TRUE)
# note that you can set do.label=T to help label individual clusters
TSNEPlot(object = comb_tsne)
# find all markers of cluster 1
comb.cluster1.markers <- FindMarkers(object = comb_tsne, ident.1 = 1, min.pct = 0.25)
write.csv(comb.cluster1.markers, "comb_cluster1_markers")
# find markers for every cluster compared to all remaining cells, report only the positive ones
comb_markers <- FindAllMarkers(comb_tsne, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
write.csv(comb_markers, "comb_markers")
library("biomaRt")
# Convert final results .csv file into .txt file
comb_markers_csv <- "comb_markers.csv"
write.table(read.csv(comb_markers_csv), gsub(".csv",".txt",comb_markers_csv))
comb_markers_txt <- "comb_markers.txt"
ensembl=useMart("ensembl", dataset="mmusculus_gene_ensembl")
a2 <- read.table(comb_markers_txt, head=TRUE)
b2 <- getBM(filters= "ensembl_gene_id", attributes= c("mgi_symbol","ensembl_gene_id", "description"), values=a2$ensembl_gene_id, mart= ensembl)
colnames(a2)[colnames(a2)=="X"] <- "ensembl_gene_id"
m2 <- merge(a2, b2, by="ensembl_gene_id")
write.csv(as.data.frame(m2),file = "comb_annotated.csv")
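# Optional addition (not in the original workflow): the plot objects built above
# are never written to disk; a minimal sketch for saving two of them.
# File names are placeholders.
ggplot2::ggsave("comb_pca_dimplot.pdf", plot = dimplot_comb_plot, width = 6, height = 5)
ggplot2::ggsave("comb_top10_variable_features.pdf", plot = plot_comb_variable_features_top10, width = 7, height = 5)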
|
/R_scripts/seurat_joseph_single_cell.R
|
no_license
|
pbpayal/Bioinformatics-Documents
|
R
| false | false | 11,127 |
r
|
rm(list = ls())
library(data.table)
library(zoo)
# NOTE (assumption): the original script never loads the data; a country-level
# vaccinations CSV is assumed to be available locally (file name is a placeholder).
VaccinationData <- fread("country_vaccinations.csv")
str(VaccinationData)
IndiaVaccinations <- VaccinationData[country == "India"]
USA <- VaccinationData[country == "United States"]
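# Illustrative next step (not in the original script): zoo is loaded above but
# never used; a 7-day rolling mean of daily vaccinations could be added like
# this, assuming the data has a `daily_vaccinations` column.
IndiaVaccinations[, roll7 := zoo::rollmean(daily_vaccinations, k = 7, fill = NA)]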
|
/CoronaVaccination.R
|
no_license
|
midhunt/CoronaVirus
|
R
| false | false | 181 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find-overlaps.R
\name{find_overlaps}
\alias{find_overlaps}
\title{Find overlapping indices of two gtf/gff/bed/bam objects}
\usage{
find_overlaps(x, y, ignore_redundant = FALSE, ignore_strand = FALSE, ...)
}
\arguments{
\item{x, y}{An object of class \code{gtf}, \code{gff}, \code{bed} or
\code{bam}.}
\item{ignore_redundant}{Should redundant overlaps be ignored?}
\item{ignore_strand}{Logical argument to pass to \code{GRanges} function.
Indicates whether \code{strand} should be ignored when constructing
\code{GRanges} object or not. Default is \code{FALSE}.}
\item{...}{Additional arguments passed to
\code{GenomicRanges::findOverlaps}.}
}
\value{
A \code{data.table} containing overlapping indices.
}
\description{
For internal use only. Function for finding overlaps between
two objects of class \code{gtf/gff/bed/bam} using
\code{GenomicRanges::findOverlaps}.
}
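% Hypothetical usage sketch (added for illustration; not from the package docs):
% `gtf_a` and `bed_b` stand for objects already read in with the package's own
% readers, and the argument names follow the signature documented above.
\examples{
\dontrun{
hits <- find_overlaps(gtf_a, bed_b, ignore_redundant = TRUE)
head(hits)
}
}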
|
/man/find_overlaps.Rd
|
no_license
|
openanalytics/gcount
|
R
| false | true | 956 |
rd
|
context("api")
test_that("neon_dir()", {
x <- neon_dir()
expect_equal(x, Sys.getenv("NEONSTORE_HOME"))
})
test_that("neon_sites()", {
skip_on_cran()
skip_if_offline()
x <- neon_sites()
expect_is(x, "data.frame")
expect_match(colnames(x), "siteCode", all=FALSE)
expect_gt(nrow(x), 10)
})
test_that("neon_products()", {
skip_on_cran()
skip_if_offline()
x <- neon_products()
expect_is(x, "data.frame")
expect_match(colnames(x), "productCode", all=FALSE)
expect_gt(nrow(x), 10)
})
test_that("neon_data()", {
skip_on_cran()
skip_if_offline()
x <- neon_data(product = "DP1.10003.001",
site = "YELL",
start_date = "2019-06-01",
end_date = "2019-08-01")
expect_is(x, "data.frame")
expect_gt(nrow(x), 1)
})
test_that("take_first_match()", {
df <- data.frame(A = c(1,1,2,2),
B = c("a", "b", "c", "d"),
row.names = NULL)
out <- take_first_match(df, "A")
expect_equal(dim(out), c(2,2))
})
test_that("neon_download()", {
skip_on_cran()
skip_if_offline()
x <- neon_download("DP1.10003.001",
site = "BART",
start_date = "2018-01-01",
end_date = "2019-01-01")
expect_is(x, "data.frame")
expect_gt(nrow(x), 0)
})
test_that("download_filters", {
x <- download_filters(NULL, "", "", FALSE, tempdir())
expect_null(x)
x <- download_filters(data.frame(), "", "", FALSE, tempdir())
expect_null(x)
})
|
/tests/testthat/test-api.R
|
permissive
|
cboettig/neonstore
|
R
| false | false | 1,581 |
r
|
# Introduction to R
# DataCLubUy
# 11/Sep/2020
# Getting started ---------------------------------------------------------
# "#" marks the start of a comment (lines that will not be executed)
# IMPORTANT: set the working directory, since files are read from and saved there by default
# this can be done through the menu Session > Set Working Directory
# or with the command
# setwd("writeYourDirectoryHere")
getwd() #get working directory: tells us which directory we are working in
setwd("~/Dropbox/DataClubUy/R")
# R as a calculator ------------------------------------------------------
5 + 5 # Addition
5 - 5 # Subtraction
3 * 5 # Multiplication
5 / 2 # Division
2^3 # Power
2**3 # Power
4 %/% 2 # Integer quotient of a division
4 %% 2 # Remainder (or modulo) of a division
(4^2) - (3*2) # Use parentheses to group
exp(1) # exponential
sqrt(2) # square root
log(1) # logarithm
sin(1); cos(1); tan(1) # trigonometric functions
asin(1) #arc sine
# Rounding a number
x <- pi
x # 3.141593
round(x, 3) # round(a, d) Rounds a to d decimal places
ceiling(x) # ceiling(a) Smallest integer greater than a
floor(x) # floor(a) Largest integer smaller than a
trunc(x) # trunc(a) Drops the decimals of (a), towards 0
# Creating objects -----------------------------------------------------
x <- 5
x
x + 10
class(x) # says what type of object x is
y <- c(TRUE, FALSE)
y
class(y)
z <- c("ID_1", "ID_2")
z
class(z)
matriz <- matrix(data = 1:100, nrow = 10, ncol = 10, byrow = TRUE)
matriz
head(matriz) # shows the first 6 rows
View(matriz)
class(matriz)
dim(matriz) # returns the number of rows and columns
#Data Frame: can hold different types of variables; usually the most commonly used structure.
df <- data.frame(ID = z, # column named ID with the values in z
esHembra = y,
edad = x, stringsAsFactors = FALSE)
df
View(df)
cbind(df,df)
rbind(df,df)
lista <- list(x, y, z)
lista
# Indexing ----------------------------------------------------------------
# Indexing vectors
vector <- c(1:20)
vector
vector[2]
vector[18:20]
vector[c(4,10)]
vector[-2]
# indexing can be used to replace elements
vector[2] <- 300
vector
vector[2]
# Indexing matrices
matriz[5,7]
matriz[9:10,]
matriz[1,c(2,4)]
matriz[,-c(1:8)]
# Indexing data frames
df$ # place the cursor after the $ and press Tab
df$ID
df[,"ID"]
df[,1]
df[,-1]
df[,2:3]
df[,c(2,3)]
df[,c("esHembra", "edad")]
# Indexing lists
lista[[2]] #returns the element
lista[2] #returns a list containing the element
# Logical operators ------------------------------------------------------
3 > 2 # TRUE
3 < 2 # FALSE
3 == 2 # FALSE
5 == 5 # TRUE
5 != 5 # FALSE
# When a logical operation uses a vector, it returns one value per element
a <- 0:5
a
a < 2 # Here each element of 'a' is compared with 2
a[a < 2]
a == 2
b <- 5:0
b
a == b # Compares each element of vector 'a' with the corresponding element of 'b'.
a >= b[1] # Compares each element of 'a' with the first element of vector 'b'.
a %in% c(1,2)
z %in% c("ID_1")
a < 2 | a > 4
a[a < 2 | a > 4]
#Special cases: NA, NULL and NaN
#NA : Not Available
#NULL : empty object
#NaN : Not a Number (e.g. sqrt(-1))
#To identify them, use the is.XX functions. For example:
is.na(NA) # TRUE
is.na(6) # FALSE
9 == NA # NA
is.null(NULL) # TRUE
is.nan(NaN) # TRUE
# Operations with vectors ------------------------------------------------
vector
vector*2 + 3
vector + vector
max(vector)
min(vector)
range(vector)
mean(vector)
sum(vector)
cumsum(vector)
sd(vector)
quantile(vector)
?mean
??mean
# Reading in data ------------------------------------------------------
read.table(file = "datos.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
read.csv()
read.csv2()
kin <- read.table(file = "datos.txt", header = TRUE)
head(kin)
str(kin)
kin$id <- as.character(kin$id)
kin$id2 <- as.character(kin$id2)
# Loops -------------------------------------------------------------------
nombres <- unique(kin$id)
nombres
length(nombres)
A <- matrix(NA, nrow = length(nombres), ncol = length(nombres))
A
for (i in 1:length(nombres)) {
for (j in 1:length(nombres)) {
A[i,j] <- kin[kin$id %in% nombres[i] & kin$id2 %in% nombres[j], "K"]
}
}
#examples of using next
for (i in 1:5) {
if (i == 3) {
next
}
print(i)
}
#example of using break
for (i in 1:5) {
if (i == 3) {
break
}
print(i)
}
#ifelse
kin$sonParientes <- ifelse(kin$K == 0, "No son parientes", "Son parientes")
kin$sonParientes <- ifelse(kin$K == 0, "No son parientes",
ifelse(kin$id == kin$id2, "Es el mismo individuo", "Son parientes"))
table(kin$sonParientes) #useful command to count how many records take each value
# apply functions ---------------------------------------------------------
m <- data.frame(matriz[1:3,1:3]); m #the ";" lets both commands run on one line
colnames(m) <- c("uno", "dos", "tres"); m
rownames(m) <- c("uno", "dos", "tres"); m
m$media <- apply(m, 1, mean); m
m[4,] <- apply(m, 2, mean); m
lapply(lista, length)
sapply(lista, length)
?tapply
n <- 17
fac <- factor(rep_len(1:3, n), levels = 1:5)
tapply(1:n, fac, sum)
?mapply
?vapply
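# Quick examples for the two help pages above (added illustration):
mapply(rep, 1:3, 3:1)              # rep(1,3), rep(2,2), rep(3,1) -> returned as a list
vapply(lista, length, integer(1))  # like sapply(), but the output type is declared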
# Functions ---------------------------------------------------------------
#define the function
cuales <- function(columna, valor) {
cat("Los individuos que tienen en la columna", columna, "el valor", valor, "son:\n")
cat(df[df[,columna] %in% valor, c("ID")])
}
#use it
cuales("edad", 5)
cuales("esHembra", TRUE)
cuales2 <- function(columna = "edad", valor = 5) {
cumplen <- df[df[,columna] %in% valor, c("ID")]
if(length(cumplen) == 1) {
cat("El individuo que tiene en la columna", columna, "el valor", valor, "es:\n", cumplen)
} else {
cat("Los individuos que tienen en la columna", columna, "el valor", valor, "son:\n", cumplen)
}
}
cuales2() #uses all the default arguments
cuales2("esHembra", TRUE)
# Installing and loading packages ----------------------------------------------
# Install packages
install.packages("tidyverse")
select()
# Load packages
library(tidyverse)
|
/E5T1_IntroducciónR/EjemplosR.R
|
no_license
|
dclubuy/DataClubUy
|
R
| false | false | 6,108 |
r
|
library(ape)
testtree <- read.tree("12551_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="12551_0_unrooted.txt")
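# Optional sanity check (not in the original script): confirm the unrooting.
is.rooted(testtree)    # TRUE if the input tree was rooted
is.rooted(unrooted_tr) # FALSE after unroot()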
|
/codeml_files/newick_trees_processed/12551_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false | false | 137 |
r
|
context('tidy_corpus')
test_that('empty corpus', {
t1 <- data.frame(id = c(1,2,3), text = c(NA, '', ':)'))
func <- LinguisticMarkers:::tidy_corpus
t2 <- func(t1, 'id', 'text')
expect_equal(nrow(t2), 0)
})
test_that('basic corpus', {
t1 <- data.frame(id = c(1,2), text = c('a', 'a b'))
func <- LinguisticMarkers:::tidy_corpus
t2 <- func(t1, 'id', 'text')
expect_equal(nrow(t2), 3)
expect_true(all(t2$id == c(1, 2, 2)))
expect_true(all(t2$word == c('a', 'a', 'b')))
})
test_that('emote corpus', {
t1 <- data.frame(id = c(1), text = c('\U0001f44b'))
func <- LinguisticMarkers:::tidy_corpus
t2 <- func(t1, 'id', 'text')
expect_equal(nrow(t2), 0)
})
|
/tests/testthat/test-tidy_corpus.R
|
permissive
|
markanewman/LinguisticMarkers
|
R
| false | false | 692 |
r
|
#' @noRd
# build search url
get_url <- function(mindate_taken,
maxdate_taken,
mindate_uploaded = NULL,
maxdate_uploaded = NULL,
user_id = NULL,
api_key,
page,
text = NULL,
tags = NULL,
bbox = NULL,
woe_id = NULL,
has_geo = TRUE) {
base_url <- paste("https://api.flickr.com/services/rest/",
"?method=flickr.photos.search&api_key=", api_key,
"&text=", text,
"&tags=", tags,
"&min_taken_date=", as.character(mindate_taken),
"&max_taken_date=", as.character(maxdate_taken),
ifelse(!(is.null(mindate_uploaded)), paste0(
"&min_upload_date=", mindate_uploaded), ""),
ifelse(!(is.null(maxdate_uploaded)), paste0(
"&max_upload_date=", maxdate_uploaded), ""),
ifelse(!(is.null(user_id)), paste0(
"&user_id=", user_id), ""),
ifelse(!(is.null(bbox)), paste0("&bbox=", bbox), ""),
ifelse(!(is.null(woe_id)), paste0("&woe_id=", woe_id), ""),
ifelse(has_geo, paste0("&has_geo=", has_geo), ""),
"&extras=", "description,date_taken,geo,tags,license,",
"url_sq,url_t,url_s,url_q,url_m,url_n,url_z,url_c,",
"url_l,url_o,count_views,count_comments,count_faves",
"&page=", page,
"&format=", "rest",
sep = ""
)
return(base_url)
}
# search url
search_url <- function(base_url) {
# get total number of results
r <- httr::GET(base_url, encoding = "ISO-8859")
# put first error catch here
count_stat <- 0
while (r$status_code != 200 & count_stat < 3) {
Sys.sleep(0.5)
r <- httr::GET(base_url, encoding = "ISO-8859")
count_stat <- count_stat + 1
}
if (r$status_code != 200) {
warning("Status code:", r$status, " for ", base_url,
" - message: ", httr::content(r, "text", encoding = "ISO-8859"))
}
error <- tryCatch({
photo_xml <- xml2::read_xml(r)
error <- "success"
}, error = function(err) {
    warning(base_url, " skipped because: ", err)
error <- "error"
photo_xml <- NULL
})
return(photo_xml)
}
#error warnings
find_errors <- function(error_xml = NULL){
if (xml2::xml_attrs(error_xml) == "fail"){
warn_data <- data.frame(xml2::xml_attrs(xml2::xml_children(error_xml)))
warn <- as.character(unlist(warn_data))
stop(paste(warn[2]))
}
}
# for the todo bullet points
ui_todo <- function(x, .envir = parent.frame()) {
x <- glue::glue_collapse(x, "\n")
x <- glue::glue(x, .envir = .envir)
x <- gsub("\n", paste0("\n", " "), x)
x <- paste0(crayon::red(clisymbols::symbol$bullet), " ", x)
lines <- paste0(x, "\n")
cat(lines, sep = "")
}
# for the info bullet points
ui_info <- function(x, .envir = parent.frame()) {
x <- glue::glue_collapse(x, "\n")
x <- glue::glue(x, .envir = .envir)
x <- paste0(crayon::yellow(clisymbols::symbol$info), " ", x)
lines <- paste0(x, "\n")
cat(lines, sep = "")
}
# this checks for the presence of a key, if no key it prompts the user to create
# one, it then checks the validity of the key
create_and_check_key <- function() {
if (!file.exists("api_key.txt")) {
ui_todo(
"Create a Flickr API key at https://www.flickr.com/services/apps/create/")
utils::browseURL("https://www.flickr.com/services/apps/create/")
ui_todo("Enter your Flickr API key:")
utils::write.table(readline(),
file = "api_key.txt",
col.names = FALSE,
row.names = FALSE)
}
api_key <- utils::read.table("api_key.txt", stringsAsFactors = FALSE)
base_url <- paste("https://api.flickr.com/services/rest/",
"?method=flickr.photos.search&api_key=",
api_key,
sep = "")
photo_xml <- search_url(base_url = base_url)
pages_data <- data.frame(xml2::xml_attrs(xml2::xml_children(photo_xml)))
warn <- as.character(unlist(pages_data))
if ((warn[2]) == ("Invalid API Key (Key has invalid format)")) {
stop("Invalid API Key: correct this in api_key.txt")
}
return(api_key)
}
# check that flickr location services are working and woe_id is valid
check_location <- function(api_key = NULL) {
known_location <- paste("https://api.flickr.com/services/rest/",
"?method=flickr.photos.search&api_key=",
api_key,
"&woe_id=35356",
sep = "")
r <- httr::GET(known_location, encoding = "ISO-8859")
photo_xml <- xml2::read_xml(r)
known_warn <- data.frame(xml2::xml_attrs(xml2::xml_children(photo_xml)))
if ((known_warn[2, 1]) == ("Not a valid place type")) {
stop("Flickr location services are down")
}
}
create_bbox <- function(sf_layer = NULL){
# find crs
layer_epsg <- unlist(sf::st_crs(sf_layer)[1])
# transform if needed
if ((is.na(layer_epsg)) | (layer_epsg != 4326)) {
sf_layer <- sf::st_transform(
sf_layer, crs = "+proj=longlat +datum=WGS84 +no_defs")
}
# generate bbox
bbox <- sf::st_bbox(sf_layer)
xmin <- bbox[1]
ymin <- bbox[2]
xmax <- bbox[3]
ymax <- bbox[4]
# bbox for url search
bbox <- as.character(paste(
xmin, ",", ymin, ",", xmax, ",", ymax, sep = ""))
}
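# Illustrative flow (not part of the package source): how the helpers above
# might be chained by an exported search function; dates and search text are
# placeholders.
if (FALSE) {
  key <- create_and_check_key()
  url <- get_url(mindate_taken = "2019-01-01", maxdate_taken = "2019-12-31",
                 api_key = key, page = 1, text = "kingfisher")
  photo_xml <- search_url(base_url = url)
}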
|
/R/utils.R
|
no_license
|
monicagerber/photosearcher
|
R
| false | false | 5,649 |
r
|
rm(list=ls())
source(file = "./package_load.R", chdir = T)
# Number of bases: 5, 10, 15, 20
process <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
margin <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
L <- 20 # number of knots to use for the basis functions
cv <- 4 # which cross-validation set to use
results.file <- paste("./cv-results/", process, "-", margin, "-", L, "-", cv,
".RData", sep = "")
table.file <- paste("./cv-tables/", process, "-", margin, "-", L, "-", cv,
".txt", sep = "")
# fit the model and get predictions
source(file = "./fitmodel.R")
rm(list=ls())
source(file = "./package_load.R", chdir = T)
# Number of bases: 5, 10, 15, 20
process <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
margin <- "gsk" # ebf: empirical basis functions, gsk: gaussian kernels
L <- 20 # number of knots to use for the basis functions
cv <- 9 # which cross-validation set to use
results.file <- paste("./cv-results/", process, "-", margin, "-", L, "-", cv,
".RData", sep = "")
table.file <- paste("./cv-tables/", process, "-", margin, "-", L, "-", cv,
".txt", sep = "")
# fit the model and get predictions
source(file = "./fitmodel.R")
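# Possible refactor sketch (not in the original): the two blocks above differ
# only in `cv`, so the cross-validation sets could be looped over instead of
# duplicating the block.
for (cv in c(4, 9)) {
  results.file <- paste0("./cv-results/", process, "-", margin, "-", L, "-", cv, ".RData")
  table.file   <- paste0("./cv-tables/", process, "-", margin, "-", L, "-", cv, ".txt")
  source(file = "./fitmodel.R")
}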
|
/markdown/fire-analysis/fit-gsk-20-4.R
|
permissive
|
sammorris81/extreme-decomp
|
R
| false | false | 1,324 |
r
|
#' Retry a request until it succeeds.
#'
#' Safely retry a request until it succeeds, as defined by the `terminate_on`
#' parameter, which by default means a response for which [http_error()]
#' is `FALSE`. Will also retry on error conditions raised by the underlying curl code,
#' but if the last retry still raises one, `RETRY` will raise it again with
#' [stop()].
#' It is designed to be kind to the server: after each failure
#' randomly waits up to twice as long. (Technically it uses exponential
#' backoff with jitter, using the approach outlined in
#' <https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/>.)
#' If the server returns status code 429 and specifies a `retry-after` value, that
#' value will be used instead, unless it's smaller than `pause_min`.
#'
#' @inheritParams VERB
#' @inherit GET params return
#' @inheritParams POST
#' @param times Maximum number of requests to attempt.
#' @param pause_base,pause_cap This method uses exponential back-off with
#' full jitter - this means that each request will randomly wait between 0
#' and `pause_base * 2 ^ attempt` seconds, up to a maximum of
#' `pause_cap` seconds.
#' @param pause_min Minimum time to wait in the backoff; generally
#' only necessary if you need pauses less than one second (which may
#' not be kind to the server, use with caution!).
#' @param quiet If `FALSE`, will print a message displaying how long
#' until the next request.
#' @param terminate_on Optional vector of numeric HTTP status codes that if found
#' on the response will terminate the retry process. If `NULL`, will keep
#' retrying while [http_error()] is `TRUE` for the response.
#' @param terminate_on_success If `TRUE`, the default, this will
#' automatically terminate when the request is successful, regardless of the
#' value of `terminate_on`.
#' @return The last response. Note that if the request doesn't succeed after
#' `times` times this will be a failed request, i.e. you still need
#' to use [stop_for_status()].
#' @export
#' @examples
#' # Succeeds straight away
#' RETRY("GET", "http://httpbin.org/status/200")
#' # Never succeeds
#' RETRY("GET", "http://httpbin.org/status/500")
#' \dontrun{
#' # Invalid hostname generates curl error condition and is retried but eventually
#' # raises an error condition.
#' RETRY("GET", "http://invalidhostname/")
#' }
RETRY <- function(verb, url = NULL, config = list(), ...,
body = NULL, encode = c("multipart", "form", "json", "raw"),
times = 3, pause_base = 1, pause_cap = 60, pause_min = 1,
handle = NULL, quiet = FALSE,
terminate_on = NULL,
terminate_on_success = TRUE) {
stopifnot(is.numeric(times), length(times) == 1L)
stopifnot(is.numeric(pause_base), length(pause_base) == 1L)
stopifnot(is.numeric(pause_cap), length(pause_cap) == 1L)
stopifnot(is.numeric(terminate_on) || is.null(terminate_on))
stopifnot(is.logical(terminate_on_success), length(terminate_on_success) == 1)
hu <- handle_url(handle, url, ...)
req <- request_build(verb, hu$url, body_config(body, match.arg(encode)), config, ...)
resp <- tryCatch(request_perform(req, hu$handle$handle), error = function(e) e)
i <- 1
while (!retry_should_terminate(i, times, resp, terminate_on, terminate_on_success)) {
backoff_full_jitter(i, resp, pause_base, pause_cap, pause_min, quiet = quiet)
i <- i + 1
resp <- tryCatch(request_perform(req, hu$handle$handle), error = function(e) e)
}
if (inherits(resp, "error")) {
stop(resp)
}
resp
}
retry_should_terminate <- function(i, times, resp, terminate_on, terminate_on_success) {
if (i >= times) {
TRUE
} else if (inherits(resp, "error")) {
FALSE
} else if (terminate_on_success && !http_error(resp)) {
TRUE
} else if (!is.null(terminate_on)) {
status_code(resp) %in% terminate_on
} else {
!http_error(resp)
}
}
backoff_full_jitter <- function(i, resp, pause_base = 1, pause_cap = 60,
pause_min = 1, quiet = FALSE) {
length <- max(pause_min, stats::runif(1, max = min(pause_cap, pause_base * (2^i))))
if (!quiet) {
if (inherits(resp, "error")) {
error_description <- gsub("[\n\r]*$", "\n", as.character(resp))
status <- "ERROR"
} else {
error_description <- ""
status <- status_code(resp)
}
if (status == 429) {
retry_after <- resp$headers[["retry-after"]]
if (!is.null(retry_after)) {
length <- max(pause_min, as.numeric(retry_after))
}
}
message(error_description, "Request failed [", status, "]. Retrying in ", round(length, 1), " seconds...")
}
Sys.sleep(length)
}
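# Illustrative sketch (not part of httr): how the full-jitter pause above grows
# over successive attempts with the default settings.
demo_backoff <- function(attempts = 5, pause_base = 1, pause_cap = 60, pause_min = 1) {
  vapply(seq_len(attempts), function(i) {
    max(pause_min, stats::runif(1, max = min(pause_cap, pause_base * (2^i))))
  }, numeric(1))
}
# demo_backoff() might return e.g. 1.0 1.8 4.6 1.2 17.9 (random each run)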
|
/R/retry.R
|
permissive
|
womeimingzi11/httr
|
R
| false | false | 4,723 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_coef}
\alias{plot_coef}
\title{plot estimated coefficients}
\usage{
plot_coef(x)
}
\arguments{
\item{x}{model object}
}
\description{
plot estimated coefficients
}
|
/man/plot_coef.Rd
|
permissive
|
nanxstats/logreg
|
R
| false | true | 260 |
rd
|
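# Auto-generated RcppDeepState/valgrind test wrapper (comment added for clarity):
# it logs the input matrix into the shared `data.env` environment and then
# forwards it to the compiled textmineR routine via .Call().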
function (A)
{
e <- get("data.env", .GlobalEnv)
e[["JSDmat"]][[length(e[["JSDmat"]]) + 1]] <- list(A = A)
.Call("_textmineR_JSDmat", A)
}
|
/valgrind_test_dir/JSDmat-test.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false | false | 151 |
r
|
testlist <- list(A = structure(c(1.01184644268287e-319, 8.11762876570616e-310, 2.10747668061271e+101, 5.78517196954163e+98, 2.02410200510026e-79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 10L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613103427-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 469 |
r
|
library(tidyverse)
library(lubridate)
library(zoo)
hotel_data <- read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-02-11/hotels.csv")
hotel_data %>% View()
hotel_data %>%
filter(!is_canceled) %>%
count(reservation_status_date) %>%
ggplot(aes(reservation_status_date, n)) +
geom_line()
hotel_data %>%
filter(!is_canceled) %>%
mutate(year_month = as.yearmon(paste(year(reservation_status_date), month(reservation_status_date), sep = '-'),
'%Y-%m')) %>%
ggplot(aes(year_month, stays_in_week_nights, group = year_month)) +
geom_boxplot()
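# Alternative sketch (added; reuses the hotel_data loaded above): lubridate's
# floor_date() keeps the month as a Date, so ggplot gets an ordinary date axis
# instead of the zoo yearmon class.
hotel_data %>%
  filter(!is_canceled) %>%
  mutate(month = floor_date(reservation_status_date, "month")) %>%
  ggplot(aes(month, stays_in_week_nights, group = month)) +
  geom_boxplot()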
|
/2020/week_07/hotels.R
|
no_license
|
shoninouye/tidytuesday-analysis
|
R
| false | false | 632 |
r
|
## Plot 4
## Task 1: Set working directory
setwd("~/R/Exploratory_DataAnalysis")
## Task 2: Read in the data
mydata<-read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
mydata$Date <- as.Date(mydata$Date, format="%d/%m/%Y")
## Task 3: Subset
sub_data <- subset(mydata, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(mydata)
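## Alternative loading sketch (added; assumes the data.table package is installed):
## fread() plus an immediate filter avoids holding the full ~2M-row table twice.
## library(data.table)
## dt <- fread("./household_power_consumption.txt", sep = ";", na.strings = "?")
## dt[, Date := as.Date(Date, format = "%d/%m/%Y")]
## sub_data <- as.data.frame(dt[Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02")])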
## Task 4: Fix dates
datetime <- paste(as.Date(sub_data$Date), sub_data$Time)
sub_data$Datetime <- as.POSIXct(datetime)
## Task 5: Plot and Save
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(sub_data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage", xlab="datetime")
plot(Sub_metering_1~Datetime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global_reactive_power",xlab="datetime")
})
dev.off()
|
/plot4.R
|
no_license
|
dianajflora/Coursera_ExploratoryDataAnalysis
|
R
| false | false | 1,410 |
r
|
library(tidyverse)
library(scales)
library(cowplot)
demogr <- read_csv("demographics.csv")
# demogr %>% group_by(płeć) %>% summarise(n = n())
plec_levels <- c(Kobieta = "female",
Kobieta = "Female",
Kobieta = "Woman",
Mężczyzna = "male",
Mężczyzna = "Male",
Mężczyzna = "Man",
Inna = "A different identity",
Inna = "Non-binary, genderqueer, or gender non-conforming",
Inna = "Woman;Man",
`Brak/odmowa odp.` = "Prefer not to say")
demogr <- demogr %>% mutate(Płeć = fct_recode(.f = płeć,
!!!plec_levels),
Płeć = fct_explicit_na(f = Płeć,
na_level = "Brak/odmowa odp."),
Płeć = fct_relevel(Płeć, "Brak/odmowa odp.", after = Inf),
Płeć = fct_relevel(Płeć, "Inna", after = 2),
Źródło = fct_relevel(Źródło, "PyData 2018 [n = 284]", after = Inf))
table(demogr$płeć, demogr$Płeć, useNA = "always")
prop.table(table(demogr$Źródło, demogr$Płeć), 1)
demogr %>% group_by(Źródło) %>% summarise(n = n(),
n_K = sum(Płeć == "Kobieta"),
n_M = sum(Płeć == "Mężczyzna"),
M_per_K = n_M / n_K) %>%
arrange(M_per_K)
plec <- ggplot(demogr) +
geom_bar(aes(Źródło, fill = Płeć), position = "fill", colour = "black", width = 0.7) +
theme_minimal(base_family = "serif", base_size = 10) +
scale_fill_brewer(palette = "Set3") +
scale_y_continuous(labels = scales::percent) +
coord_flip() +
labs(title = "Na jedną kobietę przypada od 2 (WhyR? 2017)\ndo 13 mężczyzn (Stack Overflow 2018)",
caption = 'Połączono dane z ankiet wymienionych na osi "Źródło"',
x = "Źródło",
y = NULL)
# png("plec.png", width = 160, height = 70, units = "mm", res = 300)
# plot(plec) # Rys. 7. in chapter 5.1.
# dev.off()
# demogr %>% group_by(wiek) %>% summarise(n = n()) %>% View()
wiek_levels <- c(`mniej niż 18 lat` = "17",
`mniej niż 18 lat` = "Under 18 years old",
`18 - 24` = "18 - 24 years old",
`18 - 24` = "18-21",
`18 - 24` = "22-24",
`18 - 24` = "19",
`18 - 24` = "20",
`18 - 24` = "21",
`18 - 24` = "22",
`18 - 24` = "23",
`18 - 24` = "24",
`25 - 34` = "25 - 34 years old",
`25 - 34` = "25",
`25 - 34` = "25-29",
`25 - 34` = "26",
`25 - 34` = "27",
`25 - 34` = "28",
`25 - 34` = "29",
`25 - 34` = "30",
`25 - 34` = "30-34",
`25 - 34` = "31",
`25 - 34` = "32",
`25 - 34` = "33",
`25 - 34` = "34",
`35 - 44` = "35",
`35 - 44` = "35 - 44 years old",
`35 - 44` = "35-39",
`35 - 44` = "36",
`35 - 44` = "37",
`35 - 44` = "38",
`35 - 44` = "39",
`35 - 44` = "40",
`35 - 44` = "40-44",
`35 - 44` = "41",
`35 - 44` = "42",
`35 - 44` = "43",
`35 - 44` = "44",
`45 - 54` = "45 - 54 years old",
`45 - 54` = "45",
`45 - 54` = "45-49",
`45 - 54` = "47",
`45 - 54` = "48",
`45 - 54` = "49",
`45 - 54` = "50",
`45 - 54` = "50-54",
`45 - 54` = "52",
`45 - 54` = "53",
                 `55 - 69` = "55-59",
                 `55 - 69` = "55",
`55 - 69` = "56",
`55 - 69` = "57",
`55 - 69` = "58",
`55 - 69` = "60-69",
`Brak/odmowa odp.` = "0")
demogr <- demogr %>% mutate(Wiek = fct_recode(.f = wiek,
!!!wiek_levels),
Wiek = fct_explicit_na(f = Wiek,
na_level = "Brak/odmowa odp."),
Wiek = fct_relevel(.f = Wiek, "Brak/odmowa odp.", after = Inf))
table(demogr$wiek, demogr$Wiek, useNA = "always")
prop.table(table(demogr$Źródło, demogr$Wiek), 1)
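# Alternative sketch (added; not used below): where the raw answer is a plain number,
# cut() can derive the same bins instead of enumerating every single age value.
# demogr %>%
#   mutate(wiek_num = suppressWarnings(as.numeric(wiek)),
#          Wiek_cut = cut(wiek_num, breaks = c(0, 17, 24, 34, 44, 54, 69),
#                         labels = c("mniej niż 18 lat", "18 - 24", "25 - 34",
#                                    "35 - 44", "45 - 54", "55 - 69")))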
demogr %>% mutate(older_than_34 = Wiek %in% c("35 - 44", "45 - 54", "55 - 69")) %>%
group_by(Źródło) %>% summarise(n = n(), n_older = sum(older_than_34),
prop_older = n_older / n)
wiek <- ggplot(demogr %>% filter(Źródło != "PyData 2018 [n = 284]")) +
geom_bar(aes(Źródło, fill = Wiek), position = "fill", colour = "black", width = 0.7) +
theme_minimal(base_family = "serif", base_size = 10) +
# theme(axis.text.x = element_text(angle = 25)) +
scale_fill_brewer(palette = "Set3") +
scale_y_continuous(labels = scales::percent) +
coord_flip() +
labs(title = "W każdym ze źródeł najczęściej wskazywano wiek od 25 do 34 lat",
caption = 'Połączono dane z ankiet wymienionych na osi "Źródło"',
x = "Źródło",
y = NULL)
# png("wiek.png", width = 160, height = 80, units = "mm", res = 300)
# plot(wiek) # Rys. 8. in chapter 5.1.
# dev.off()
# demogr %>% group_by(poziom_wykształcenia) %>% summarise(n = n()) %>% View()
poziom_wyksz_levels <- c(`Licencjat/inżynier` = "Bachelor's degree",
`Licencjat/inżynier` = "Bachelor’s degree",
`Licencjat/inżynier` = "Bachelor’s degree (BA, BS, B.Eng., etc.)",
Doktorat = "Doctoral degree",
Doktorat = "dr",
Doktorat = "dr hab.",
Doktorat = "dr inż.",
Średnie = "I did not complete any formal education past high school",
`Brak/odmowa odp.` = "I prefer not to answer",
`Licencjat/inżynier` = "inż.",
`Licencjat/inżynier` = "lic.",
Magister = "Master's degree",
Magister = "Master’s degree",
Magister = "Master’s degree (MA, MS, M.Eng., MBA, etc.)",
Magister = "mgr",
Magister = "mgr inż.",
Średnie = "No formal education past high school",
Doktorat = "Other doctoral degree (Ph.D, Ed.D., etc.)",
Podstawowe = "Primary/elementary school",
Doktorat = "prof. nzw. dr hab.",
Doktorat = "prof. nzw. dr hab. inż.",
Magister = "Professional degree",
Magister = "Professional degree (JD, MD, etc.)",
Średnie = "Secondary school (e.g. American high school, German Realschule or Gymnasium, etc.)",
Średnie = "Some college/university study without earning a bachelor's degree",
Średnie = "Some college/university study without earning a bachelor’s degree",
Średnie = "Some college/university study without earning a degree")
demogr <- demogr %>% mutate(`Poziom wykształcenia` = fct_recode(.f = poziom_wykształcenia,
!!!poziom_wyksz_levels),
`Poziom wykształcenia` = fct_explicit_na(f = `Poziom wykształcenia`,
na_level = "Brak/odmowa odp."),
`Poziom wykształcenia` = fct_inorder(f = `Poziom wykształcenia`),
`Poziom wykształcenia` = fct_relevel(.f = `Poziom wykształcenia`, "Brak/odmowa odp.",
after = Inf),
`Poziom wykształcenia` = fct_relevel(.f = `Poziom wykształcenia`, "Doktorat",
after = 0))
table(demogr$poziom_wykształcenia, demogr$`Poziom wykształcenia`, useNA = "always")
prop.table(table(demogr$Źródło, demogr$`Poziom wykształcenia`),1)
poziom <- ggplot(demogr %>% filter(Źródło != "PyData 2018 [n = 284]")) +
geom_bar(aes(Źródło, fill = `Poziom wykształcenia`),
position = "fill", colour = "black", width = 0.7) +
theme_minimal(base_family = "serif", base_size = 10) +
# theme(axis.text.x = element_text(angle = 20)) +
scale_fill_brewer(palette = "Set3") +
scale_y_continuous(labels = scales::percent) +
coord_flip() +
labs(title = "W każdym ze źródeł najczęściej wskazywano wykształcenie\nmagisterskie, ale udział osób z doktoratem, jak i wykszt. średnim\nsięga 1/6 wskazań",
caption = 'Połączono dane z ankiet wymienionych na osi "Źródło"',
x = "Źródło",
y = NULL,
fill = "Poziom wykształcenia")
# png("poziom.png", width = 160, height = 80, units = "mm", res = 300)
# plot(poziom) # Rys. 9. in chapter 5.1.
# dev.off()
# demogr %>% group_by(kierunek_wykształcenia) %>% summarise(n = n()) %>% View()
kier_wykszt_levels <- c(`Biznes/ekonomia` = "A business discipline (accounting, economics, finance, etc.)",
`Biznes/ekonomia` = "A business discipline (ex. accounting, finance, marketing)",
`Medyczne/przyrodnicze` = "A health science (ex. nursing, pharmacy, radiology)",
Humanistyczne = "A humanities discipline",
Humanistyczne = "A humanities discipline (ex. literature, history, philosophy)",
`Medyczne/przyrodnicze` = "A natural science (ex. biology, chemistry, physics)",
Społeczne = "A social science",
Społeczne = "A social science (ex. anthropology, psychology, political science)",
Techniczne = "Another engineering discipline (ex. civil, electrical, mechanical)",
Informatyczne = "Computer Science",
Informatyczne = "Computer science (software engineering, etc.)",
Informatyczne = "Computer science, computer engineering, or software engineering",
Techniczne = "Electrical Engineering",
Techniczne = "Engineering (non-computer focused)",
`Medyczne/przyrodnicze` = "Environmental science or geology",
Humanistyczne = "Humanities (history, literature, philosophy, etc.)",
Inne = "I never declared a major",
Informatyczne = "Information systems, information technology, or system administration",
Informatyczne = "Information technology, networking, or system administration",
`Matematyczne/statystyczne` = "Mathematics or statistics",
`Medyczne/przyrodnicze` = "Medical or life sciences (biology, chemistry, medicine, etc.)",
Inne = "Other",
`Medyczne/przyrodnicze` = "Physics",
`Medyczne/przyrodnicze` = "Physics or astronomy",
Społeczne = "Psychology",
Społeczne = "Social sciences (anthropology, psychology, sociology, etc.)")
demogr <- demogr %>% mutate(`Kierunek wykształcenia` = fct_recode(.f = kierunek_wykształcenia,
!!!kier_wykszt_levels),
`Kierunek wykształcenia` = fct_explicit_na(f = `Kierunek wykształcenia`,
na_level = "Brak/odmowa odp."),
`Kierunek wykształcenia` = fct_infreq(`Kierunek wykształcenia`),
`Kierunek wykształcenia` = fct_relevel(.f = `Kierunek wykształcenia`, "Inne", after = Inf),
`Kierunek wykształcenia` = fct_relevel(.f = `Kierunek wykształcenia`, "Brak/odmowa odp.",
after = Inf))
prop.table(table(demogr$Źródło, demogr$`Kierunek wykształcenia`), 1)
kierunek <- ggplot(demogr %>%
filter(Źródło != "PyData 2018 [n = 284]" & Źródło != "WhyR? 2017 [n = 202]")) +
geom_bar(aes(Źródło, fill = `Kierunek wykształcenia`),
position = "fill", colour = "black", width = 0.7) +
theme_minimal(base_family = "serif", base_size = 10) +
# theme(axis.text.x = element_text(angle = 20)) +
scale_fill_brewer(palette = "Set3") +
scale_y_continuous(labels = scales::percent) +
coord_flip() +
labs(title = "W każdym ze źródeł najczęściej wskazywano wykształcenie\ninformatyczne, w drugiej kolejności wskazywano wykszt.\nmatematyczne/statystyczne lub medyczne/przyrodnicze",
caption = 'Połączono dane z ankiet wymienionych na osi "Źródło"',
x = "Źródło",
y = NULL,
fill = "Kierunek wykształcenia")
# png("kierunek.png", width = 160, height = 95, units = "mm", res = 300)
# plot(kierunek) # Rys. 10. in chapter 5.1.
# dev.off()
##### to do
# demogr %>% mutate(older_than_34 = Wiek %in% c("35 - 44", "45 - 54", "55 - 69"))
ggplot(demogr %>%
filter(Źródło != "PyData 2018 [n = 284]")) +
geom_count(aes(x = `Płeć`, y = fct_rev(Wiek))) +
scale_size_continuous(range = c(2, 12)) +
facet_grid(rows = vars(Źródło))
ggplot(demogr %>%
filter(Źródło != "PyData 2018 [n = 284]") %>%
mutate(older_than_44 = Wiek %in% c("45 - 54", "55 - 69"))) +
geom_bar(aes(fill = older_than_44, x = Płeć), position = "dodge") +
scale_y_log10() +
facet_grid(rows = vars(Źródło))
ggplot(data = demogr %>%
filter(Płeć %in% c("Kobieta", "Mężczyzna")),
aes(x = fct_rev(Wiek), fill = Płeć)) +
geom_bar(data = subset(demogr, Płeć == "Kobieta"),
mapping = aes(y = ..count..)) +
geom_bar(data = subset(demogr, Płeć == "Mężczyzna"),
mapping = aes(y = -..count.. ),
position = "identity") +
scale_y_continuous(labels = abs) +
coord_flip()
ggplot(data = demogr %>%
filter(Płeć %in% c("Kobieta", "Mężczyzna"),
Wiek != "Brak/odmowa odp.")) +
geom_bar(aes(x = Płeć, fill = Wiek), position = "fill")
#### show diff
plec_wiek_df <- demogr %>%
filter(Płeć %in% c("Kobieta", "Mężczyzna"),
Wiek != "Brak/odmowa odp.",
Źródło != "WhyR? 2017 [n = 202]") %>%
group_by(Wiek) %>%
summarise(n = n(),
Mężczyźni = sum(Płeć == "Mężczyzna"),
Kobiety = sum(Płeć == "Kobieta")) %>%
mutate(pct_M = Mężczyźni / sum(Mężczyźni),
pct_K = Kobiety / sum(Kobiety),
pct_diff = pct_K - pct_M)
plec_wiek_lolipop <- plec_wiek_df %>%
ggplot() +
geom_point(aes(x = fct_rev(Wiek), y = pct_diff,
colour = pct_diff >= 0), size = 5) +
geom_segment(aes(x = Wiek,
xend = Wiek,
y = 0,
yend = pct_diff,
colour = pct_diff >= 0),
size = 1) +
geom_hline(yintercept = 0, colour = "gray") +
scale_colour_manual(values = c("#80b1d3", "#fdb462")) +
theme_minimal(base_family = "serif", base_size = 10) +
guides(colour = "none") +
coord_flip() +
labs(title = '11B',
caption = 'Połączono dane z ankiet Kaggle 2017 i 2018 oraz Stack Overflow 2018 i 2019',
x = 'Wiek\n[wykluczono kategorię "Brak/odmowa odp."]',
y = "Różnica w udziale kategorii wiekowej dla wybranych płci [frakcja kobiet - frakcja mężczyzn]")
plec_wiek_col <- plec_wiek_df %>%
select(Wiek, pct_M, pct_K) %>%
pivot_longer(cols = pct_M:pct_K,
names_to = 'plec',
values_to = 'pct') %>%
ggplot(aes(x = Wiek, y = pct, fill = plec)) +
geom_col(position = 'dodge', colour = "black") +
labs(title = '11A',
subtitle = 'Udział osób w wieku 18 - 24 jest o ponad 6 pp. wyższy wśród kobiet niż wśród mężczyzn.\nBrak kobiet w grupach wiekowych: "mniej niż 18 lat" i "55 - 69"',
x = 'Wiek [wykluczono kategorię "Brak/odmowa odp."]',
y = "Frakcja [każdy Wymiar = 1]") +
scale_fill_brewer(palette = "Set3", name = "Wymiar:",
labels = c('Kobiety [n = 71]', 'Mężczyźni [n = 487]')) +
theme_minimal(base_family = "serif", base_size = 10) +
theme(legend.position = "bottom", legend.direction = "horizontal", legend.box.just = "left")
plec_wiek <- plot_grid(plot_grid(plec_wiek_col,
plec_wiek_lolipop,
nrow = 2,
rel_heights = c(2, 1)))
# png("plec_wiek.png", width = 160, height = 180, units = "mm", res = 300)
# plot(plec_wiek) # Rys. 11. in chapter 5.1.
# dev.off()
plec_poziom_df <- demogr %>%
filter(Płeć %in% c("Kobieta", "Mężczyzna"),
`Poziom wykształcenia` != "Brak/odmowa odp.",
Źródło != "WhyR? 2017 [n = 202]") %>%
group_by(`Poziom wykształcenia`) %>%
summarise(n = n(),
Mężczyźni = sum(Płeć == "Mężczyzna"),
Kobiety = sum(Płeć == "Kobieta")) %>%
mutate(pct_M = Mężczyźni / sum(Mężczyźni),
pct_K = Kobiety / sum(Kobiety),
pct_diff = pct_K - pct_M)
plec_poziom_lolipop <- plec_poziom_df %>%
ggplot() +
geom_point(aes(x = fct_rev(`Poziom wykształcenia`), y = pct_diff,
colour = pct_diff >= 0), size = 5) +
geom_segment(aes(x = `Poziom wykształcenia`,
xend = `Poziom wykształcenia`,
y = 0,
yend = pct_diff,
colour = pct_diff >= 0),
size = 1) +
geom_hline(yintercept = 0, colour = "gray") +
scale_colour_manual(values = c("#80b1d3", "#fdb462")) +
theme_minimal(base_family = "serif", base_size = 10) +
guides(colour = "none") +
coord_flip() +
labs(title = "12B" ,
caption = 'Połączono dane z ankiet Kaggle 2017 i 2018 oraz Stack Overflow 2018 i 2019',
x = 'Poziom wykształcenia\n[wykluczono kategorię "Brak/odmowa odp."]',
y = "Różnica w udziale poziomu wykształcenia dla wybranych płci\n[frakcja kobiet - frakcja mężczyzn]")
plec_poziom_col <- plec_poziom_df %>%
select(`Poziom wykształcenia`, pct_M, pct_K) %>%
pivot_longer(cols = pct_M:pct_K,
names_to = 'plec',
values_to = 'pct') %>%
ggplot(aes(x = `Poziom wykształcenia`, y = pct, fill = plec)) +
geom_col(position = 'dodge', colour = "black") +
labs(title = '12A',
subtitle = 'Udział osób z wykształceniem magisterskim jest o ponad 9 pp. wyższy wśród kobiet\nniż wśród mężczyzn. Brak kobiet z wykształceniem podstawowym',
x = 'Poziom wykształcenia [wykluczono kategorię "Brak/odmowa odp."]',
y = "Frakcja [każdy Wymiar = 1]") +
scale_fill_brewer(palette = "Set3", name = "Wymiar:",
labels = c('Kobiety [n = 71]', 'Mężczyźni [n = 487]')) +
theme_minimal(base_family = "serif", base_size = 10) +
theme(legend.position = "bottom", legend.direction = "horizontal", legend.box.just = "left")
plec_poziom <- plot_grid(plot_grid(plec_poziom_col,
plec_poziom_lolipop,
nrow = 2,
rel_heights = c(2, 1)))
#
# png("plec_poziom.png", width = 160, height = 180, units = "mm", res = 300)
# plot(plec_poziom) # Rys. 12. in chapter 5.1.
# dev.off()
# exploration
demogr %>% filter(Płeć != "Brak/odmowa odp.",
Wiek != "Brak/odmowa odp.",
`Poziom wykształcenia` != "Brak/odmowa odp.",
`Kierunek wykształcenia` != "Brak/odmowa odp.") %>%
group_by(Płeć, Wiek, `Poziom wykształcenia`, `Kierunek wykształcenia`) %>%
summarise(n = n()) %>% ungroup() %>% mutate(pct = n / sum(n)) %>%
top_n(n = 5, wt = n)
demogr %>%
filter(`Kierunek wykształcenia` != "Brak/odmowa odp.") %>%
mutate(older_than_44 = Wiek %in% c("45 - 54", "55 - 69")) %>%
group_by(`Kierunek wykształcenia`) %>%
summarise(n = n(),
do_44 = sum(older_than_44 == FALSE),
powyzej_44 = sum(older_than_44 == TRUE)) %>%
mutate(pct_do_44 = do_44 / sum(do_44),
pct_powyzej_44 = powyzej_44 / sum(powyzej_44),
pct_diff = 100 * (pct_do_44 - pct_powyzej_44)) %>%
ggplot() +
geom_point(aes(x = fct_rev(`Kierunek wykształcenia`), y = pct_diff,
colour = pct_diff >= 0), size = 5) +
geom_segment(aes(x = `Kierunek wykształcenia`,
xend = `Kierunek wykształcenia`,
y = 0,
yend = pct_diff,
colour = pct_diff >= 0),
size = 1) +
geom_hline(yintercept = 0, colour = "gray") +
scale_colour_manual(values = c("#80b1d3", "#fdb462")) +
theme_minimal(base_family = "serif", base_size = 10) +
guides(colour = "none") +
coord_flip()
# labs(title = "Udział osób z wykształceniem magisterskim jest\no ponad 9 pp. wyższy wśród kobiet niż wśród mężczyzn",
# subtitle = 'brak kobiet z wykształceniem podstawowym' ,
# caption = 'Połączono dane z ankiet Kaggle 2017 i 2018 oraz Stack Overflow 2018 i 2019',
# x = 'Poziom wykształcenia\n[wykluczono kategorię "Brak/odmowa odp."]',
# y = "Różnica w udziale poziomu wykształcenia dla wybranych płci\n[pp. = % kobiet - % mężczyzn]")
wiek_kier_df <- demogr %>%
filter(`Kierunek wykształcenia` != "Brak/odmowa odp.") %>%
mutate(older_than_34 = Wiek %in% c("35 - 44", "45 - 54", "55 - 69")) %>%
group_by(`Kierunek wykształcenia`) %>%
summarise(n = n(),
do_34 = sum(older_than_34 == FALSE),
powyzej_34 = sum(older_than_34 == TRUE)) %>%
mutate(pct_do_34 = do_34 / sum(do_34),
pct_powyzej_34 = powyzej_34 / sum(powyzej_34),
pct_diff = pct_do_34 - pct_powyzej_34)
wiek_kier_lolipop <- wiek_kier_df %>%
ggplot() +
geom_point(aes(x = fct_rev(`Kierunek wykształcenia`), y = pct_diff,
colour = pct_diff >= 0), size = 5) +
geom_segment(aes(x = `Kierunek wykształcenia`,
xend = `Kierunek wykształcenia`,
y = 0,
yend = pct_diff,
colour = pct_diff >= 0),
size = 1) +
geom_hline(yintercept = 0, colour = "gray") +
scale_colour_manual(values = c("#80b1d3", "#fdb462")) +
theme_minimal(base_family = "serif", base_size = 10) +
guides(colour = "none") +
coord_flip() +
labs(title = "13B" ,
caption = 'Połączono dane z ankiet Kaggle 2017 i 2018 oraz Stack Overflow 2018 i 2019',
x = 'Kierunek wykształcenia\n[wykluczono kategorię "Brak/odmowa odp."]',
y = "Różnica w udziale kierunku wykształcenia dla grup wiekowych\n[frakcja do 34 r.ż. - frakcja od 35 r.ż.]")
wiek_kier_col <- wiek_kier_df %>%
select(`Kierunek wykształcenia`, pct_do_34, pct_powyzej_34) %>%
pivot_longer(cols = pct_do_34:pct_powyzej_34,
names_to = 'wiek_34',
values_to = 'pct') %>%
ggplot(aes(x = `Kierunek wykształcenia`, y = pct, fill = wiek_34)) +
geom_col(position = 'dodge', colour = "black") +
  labs(title = '13A',
       subtitle = 'Udział osób z wykształceniem informatycznym jest o prawie 13 pp. wyższy wśród osób do 34 r.ż.\nniż dla osób w wieku 35 lat i starszych. Odwrotnie jest dla wykształcenia medycznego/przyrodniczego',
x = 'Kierunek wykształcenia [wykluczono kategorię "Brak/odmowa odp."]',
y = "Frakcja [każdy Wymiar = 1]") +
scale_fill_brewer(palette = "Set3", name = "Wymiar:",
labels = c('do 34 r.ż. [n = 425]', 'od 35 r.ż. [n = 114]')) +
theme_minimal(base_family = "serif", base_size = 10) +
theme(legend.position = "bottom", legend.direction = "horizontal", legend.box.just = "left",
axis.text.x = element_text(angle = 20))
wiek_kier <- plot_grid(plot_grid(wiek_kier_col,
wiek_kier_lolipop,
nrow = 2,
rel_heights = c(2, 1)))
# png("wiek_kier.png", width = 160, height = 180, units = "mm", res = 300)
# plot(wiek_kier) # Rys. 13. in chapter 5.1.
# dev.off()
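# Refactoring sketch (added; not called anywhere above): the three nearly identical
# lollipop charts (plec_wiek, plec_poziom, wiek_kier) could share one helper.
# `var` is the grouping column passed unquoted; everything else mirrors the
# hard-coded versions above.
lollipop_diff <- function(df, var, title, x_lab, y_lab) {
  ggplot(df) +
    geom_point(aes(x = fct_rev({{ var }}), y = pct_diff, colour = pct_diff >= 0), size = 5) +
    geom_segment(aes(x = {{ var }}, xend = {{ var }}, y = 0, yend = pct_diff,
                     colour = pct_diff >= 0), size = 1) +
    geom_hline(yintercept = 0, colour = "gray") +
    scale_colour_manual(values = c("#80b1d3", "#fdb462")) +
    theme_minimal(base_family = "serif", base_size = 10) +
    guides(colour = "none") +
    coord_flip() +
    labs(title = title, x = x_lab, y = y_lab)
}
# e.g. lollipop_diff(plec_wiek_df, Wiek, "11B", "Wiek", "Frakcja kobiet - frakcja mężczyzn")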
|
/21_demographics_viz.R
|
permissive
|
zremek/survey-polish-data-science
|
R
| false | false | 24,951 |
r
|
## Download Zip file to your desire working directory
filesPath <- "F:/Data Science/GettingandCleaningData/Course Project - Getting and Cleaning Data"
setwd(filesPath)
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
# Unzip file
unzip(zipfile="./data/Dataset.zip",exdir="./data")
## Load required packages used below (data.table() plus melt()/dcast() need these)
library(data.table)
library(reshape2)
## Load Data
# The zip extracts into ./data/UCI HAR Dataset/, so read from there
test_x <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
test_y <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
train_x <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
train_y <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
activity_labels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
## Analysis
# 1. Merges the training and the test sets to create one data set.
# 1.1 Add column names
names(train_x) <- features$V2
names(test_x) <- features$V2
# 1.2 Merge two sets together
dataset <- rbind(train_x, test_x)
# 2. Extracts only the measurements on the mean and standard deviation
# for each measurement.
mean_std <- grepl("mean\\(\\)", names(dataset)) | grepl("std\\(\\)", names(dataset))
dataset <- dataset[, mean_std]
# 3. Uses descriptive activity names to name the activities in the data set
# 3.1 Add activity labels
dataset_y <- rbind(train_y, test_y)
dataset <- data.table(activity = dataset_y$V1, dataset)
# 3.2 Add subject
subject <- rbind(subject_train, subject_test)
dataset <- data.table(subjectID = subject$V1, dataset)
# 4. Appropriately labels the data set with descriptive variable names
# Replace numbers in activity with descriptive variable names
library(plyr)
dataset$activity <- mapvalues(dataset$activity, from = c(1:6),
to = c("walking", "walking upstairs", "walking downstairs",
"sitting", "standing", "laying"))
# 5. From the data set in step 4, creates a second, independent tidy data set with
# melt data
melt <- melt(dataset, id = c("subjectID", "activity"))
# reshape data
tidydata <- dcast(melt, subjectID + activity ~ variable, mean)
# Create txt file with tidy dataset
write.table(tidydata, file = "tidydata.txt", row.names = FALSE)
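## Quick check sketch (added; run after the script finishes):
## tidy <- read.table("tidydata.txt", header = TRUE, check.names = FALSE)
## dim(tidy)  # expect 180 rows (30 subjects x 6 activities) and 68 columns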
|
/run_analysis.R
|
no_license
|
ellazhoujx/CourseProject-GettingandCleaningData
|
R
| false | false | 2,344 |
r
|
##########################################################
## Parameters for the simulation, SIS without mortality ##
##########################################################
nt <- 1e5
num_points <- 600
rptfreq <- max(nt / num_points, 1)
num_runs <- 5
deterministic <- FALSE ## run the model via reaction-diffusion deterministic?
## Need to cover a range of: (over a number of replicate runs)
## mutation frequency
## mutation standard deviation
## starting mortality rate
## starting efficiency
## efficiency scale
## Also need to consider running across different shapes of the tradeoff curve
## power_c
## power_exp
## Set up parameters in a manner to correspond to output data frame
params <- expand.grid(
nt = nt
, rptfreq = rptfreq
, deterministic = deterministic
## ** !! key parameters.
## (TRUE, --, --) for tuning
## (FALSE, TRUE, --) for just tradeoff curve
  ## (FALSE, FALSE, FALSE) for _no_ parasite beta setback with evo change in alpha
  ## (FALSE, FALSE, TRUE) for parasite beta setback with evo change in alpha
## for nearly neutral see Ben's results and use Ben's script
, parasite_tuning = TRUE
, tradeoff_only = FALSE
, agg_eff_adjust = FALSE
, mut_var = "beta"
## Need to convert this to a rate of diffusion if deterministic == TRUE
, mu = if (deterministic == FALSE) {
c(0.01)
} else {
0.025
}
, mut_mean = 0
, mut_sd = 0.15
## If deterministic == TRUE, start with a whole array of possible strain values for virulence (done internally)
## Ignored if not
, alpha0 = 0.01
, tune0 = 0.03
## min recovery rate driven by the host. For SIS without host mortality can't allow parasites to drop recovery to 0 without
## any other background mortality or recovery or hosts will evolve to either 0 recovery or max recovery depending on whether
## the tradeoff gamma is > or < 1
, gamma0 = 0.2
, run = seq(1, num_runs, by = 1)
, power_c = if (deterministic == FALSE) {
0.01
} else {
10
}
, power_exp = 3
, N = 400
, eff_scale = 30
)
params <- transform(params
, param_num = seq(1, nrow(params))
, seed = sample(1:1e5, nrow(params), replace = FALSE))
## Also run AD version?
AD_also <- FALSE
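## Driver sketch (added; `run_sim()` is a placeholder name, not defined in this file):
## for (i in seq_len(nrow(params))) {
##   set.seed(params$seed[i])
##   out_i <- run_sim(params[i, ])
## }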
|
/parameter_setup_nm.R
|
no_license
|
morgankain/Stochastic_Virulence_Evolution
|
R
| false | false | 2,420 |
r
|
= class RedEye
--- RedEye#__alloc__
--- RedEye.new(GdkPixbuf* pixbuf, Integer minX, Integer minY, Integer maxX, Integer maxY)
--- RedEye#identify_blobs(double green_sensitivity, double blue_sensitivity, Integer min_red_val)
--- RedEye#correct_blob(Integer blob_id)
--- RedEye#highlight_blob(Integer blob_id, Integer col)
--- RedEye#preview_blob(Integer blob_id, Integer col, gboolean reset_preview)
--- RedEye#preview
--- RedEye#pixbuf
== enum RedEye::Region
|
/ext/redeye/redeye.rd
|
no_license
|
geoffyoungs/redeye
|
R
| false | false | 472 |
rd
|
### Project: Polio Mixing Experiment for Bangladesh Sample Sequencing
### Purpose: Empirically derive the optimal filtering criteria for variant calling
library(plyr)       # loaded before dplyr/tidyverse so dplyr's verbs are not masked
library(tidyverse)
library(shiny)
library(ggplot2)
library(pROC)
library(wesanderson)
library(gtable)
library(gridExtra)
library(reshape2)
library(showtext)
source("./supportFunctions.R")
ui <- fluidPage(
titlePanel("Benchmarking Experiment for Poliovirus Variant Calling v2.0"),
tags$head(tags$style('body {font-family: Verdana;}')),
fluidRow(
column(6, h4("Quality Distribution"), plotOutput("QualDist")),
column(6, h4("Distribution on Read"), plotOutput("ReadDist")),
column(6, h4("Frequency Distribution"), plotOutput("FreqDist")),
column(6, h4("Variant Frequency: Expected vs. Observed"), plotOutput("ObservedExpected")),
column(6, h4("Coverage"), plotOutput("Coverage")),
column(6, h4("Output Table"), tableOutput('table')),
column(6, h4("Expected Variants by Position"), plotOutput("Variants")),
column(6, h4("False Positives by Position"), plotOutput("FalseVariants")),
column(6, h4("True Variant Frequency by Position"), plotOutput("FreqByPosition"))
),
hr(),
fluidRow(
column(3,
sliderInput(inputId = "MapQ",
label = "Mean MapQ Cutoff",
min = 20, max = 44, value = 30),
sliderInput(inputId = "Phred",
label = "Mean Phred Cutoff",
min = 20, max = 39, value = 35),
sliderInput(inputId = "freq.var",
label = "Frequency Cutoff",
min = 0, max = 0.1, value = 0)
),
column(3,
sliderInput("pos",
label="Read Position Cutoff",
min = 0, max = 250, value = c(0, 250)),
sliderInput(inputId = "p.val",
label = "p-value Cutoff",
min = 0, max = 0.9, value = 0.9)
),
column(3,
radioButtons("dups",
label = "Sequencing Duplicates",
choices = list("Variants found in both replicates" = "collapsed",
"Using only the first replicate" = "first",
"Using only the second replicate" = "second"),
selected = "collapsed"),
radioButtons("disp",
label = "deepSNV Dispersion Model",
choices = list("Binomial" = "binomial",
"Betabinominal one-sided" = "onesided"),
#"Betabinominal two-sided" = "twosided"),
selected = "onesided"),
radioButtons("tna",
label = "Spiked into stool total nucleic acid",
choices = list("Spike-in" = "yes",
"No spike-in" = "no"),
selected = "yes")
),
column(3,
radioButtons("inputLevel",
label = "Genome Copy Input",
choices = list("4x10^4" = "4_E4", "9x10^3" = "9_E3", "9x10^2" = "9_E2", "9x10^1" = "9_E1"),
selected = "4_E4"),
radioButtons("subset",
label = "Subset of Reads",
choices = list("100%" = "100",
"50%" = "50",
"25%" = "25",
"10%" = "10"),
selected = "100"),
radioButtons("exSeg2",
label = "Exclude Segment 2?",
choices = list("Yes" = "yes",
"No" = "no"),
selected = "no")
)
)
)
server <- function(input, output)
{
dataInput <- reactive({
replicate = ifelse(input$dups == "first" | input$dups == "second", "notcollapsed", "collapsed")
file <- paste0("./data/processed/shiny2.", input$disp, ".", input$subset, ".", replicate, ".variants.csv")
variants_raw <- read_csv(file)
if(input$dups == "first") {
data <- filter(variants_raw, Rep == 1)
} else if(input$dups == "second") {
data <- filter(variants_raw, Rep == 2)
} else {
data <- variants_raw
}
data <- filter(data, InputLevel == input$inputLevel, inTNA == input$tna)
data <- mutate(data, SampleNumber = Id)
primer_fwd_outer <- 95 # 95 to 115
primer_rev_outer <- 7440 # 7415 to 7440
possible_vars <- (primer_rev_outer - primer_fwd_outer - 1)*3 # Positions in primer range, times 3.
data <- mutate(data, exp.freq = PercWT/100)
data <- mutate(data, Id = as.factor(as.character(PercWT)))
data <- filter(data, pos > primer_fwd_outer & pos < primer_rev_outer)
if(input$exSeg2 == "yes")
{
data <- filter(data, pos < 1500 | pos > 4100)
} else {}
range_factor <- 5 # this filter prevents there from being observed true positives that are due to lack of Sabin1 amplification in low copy number samples
#data <- mutate(data, category = ifelse(freq.var > exp.freq*range_factor | freq.var < exp.freq*(1/range_factor), FALSE, category))
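    # Worked example (added): with exp.freq = 0.02 and range_factor = 5, the commented-out
    # filter above would recode variants observed below 0.004 or above 0.10 as false positives.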
return(data)
})
covInput <- reactive({
file <- paste0("./data/processed/shiny2.", input$disp, ".", input$subset, ".coverage.csv")
coverage_raw <- read.csv(file)
return(coverage_raw)
})
# Plot MapQ and Phred, color-coded by TP/FP
output$QualDist <- renderPlot({
data <- dataInput()
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = Phred, y = MapQ, color = category)) + geom_point() + xlab("Phred") + ylab("MapQ") + theme_minimal() + scale_color_manual(values = palette[c(4,5)]) + ylim(c(30,44)) + xlim(c(35,39)) + geom_vline(xintercept = input$Phred, linetype = "dotted", color = "black", size = 1) + geom_hline(yintercept = input$MapQ, linetype = "dotted", color = "black", size = 1)
})
# Plot average read position, color-coded by TP/FP
output$ReadDist <- renderPlot({
data <- dataInput()
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = Read_pos, fill = category)) + geom_histogram(position = "dodge") + xlab("Read Position") + ylab("Count") + scale_fill_manual(values = palette[c(4,5)]) + theme_minimal() + geom_vline(xintercept = input$pos, linetype = "dotted", color = "black", size = 1)
})
# Plot frequency histogram, color-coded by TP/FP
output$FreqDist <- renderPlot({
data <- dataInput()
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = freq.var, fill = category)) + geom_histogram(binwidth = 0.001, position = "dodge") + xlab("Frequency") + ylab("Count") + scale_fill_manual(values = palette[c(4,5)]) + theme_minimal() + geom_vline(xintercept = input$freq.var, linetype = "dotted", color = "black", size = 1) + xlim(c(0, 0.1))
})
# Plot observed vs. expected frequency
output$ObservedExpected <- renderPlot({
data <- dataInput()
data <- mutate(data, exp.freq = PercWT/100)
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = exp.freq, y = freq.var, color = category)) +
geom_point(size = 1) +
scale_color_manual(values = palette[c(4,5)]) +
theme_minimal() +
geom_abline(intercept = 0, slope = 1,linetype = 2, size = 1) +
xlab("Expected Frequency") +
ylab("Observed Frequency") +
scale_y_log10(limits=c(0.001,1),breaks=c(0.001,0.002,0.005,0.01,0.02,0.05,0.1, 0.25, 0.5, 1)) +
scale_x_log10(limits=c(0.001,1),breaks=c(0.001,0.002,0.005,0.01,0.02,0.05,0.1, 0.25, 0.5, 1))
})
# Plot coverage for the given samples
output$Coverage <- renderPlot({
data <- dataInput()
coverage_raw <- covInput()
coverage <- filter(coverage_raw, Id %in% data$SampleNumber)
base = 12000
interval = 300
linesize = 1.2
ggplot(coverage, aes(x = chr.pos, y = coverage, group = Id)) + geom_line(aes(color = Id)) +
theme_bw() +
xlab("Genome Position") +
ylab("Coverage (Read Depth)") +
theme(legend.position = "none") +
xlim(c(95, 7440)) +
geom_segment(data = coverage, size = linesize, color = "orange1", aes(x = 98, y = base + interval*0, xend = 2436, yend = base + interval*0)) +
geom_segment(data = coverage, size = linesize, color = "mediumseagreen", aes(x = 1472, y = base + interval*1, xend = 4151, yend = base + interval*1)) +
geom_segment(data = coverage, size = linesize, color = "indianred4", aes(x = 3214, y = base + interval*2, xend = 5861, yend = base + interval*2)) +
geom_segment(data = coverage, size = linesize, color = "slateblue3", aes(x = 4966, y = base + interval*3, xend = 7400, yend = base + interval*3)) +
geom_abline(intercept = 200, slope = 0, linetype = 3, size = 0.5, color = "black") +
geom_abline(intercept = 1000, slope = 0, linetype = 3, size = 0.5, color = "black")
})
# Plot position of true positives and false negatives
output$Variants <- renderPlot({
data <- dataInput()
data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
data <- mutate(data, level = ifelse(PercWT == 1, "1_percent",
ifelse(PercWT == 2, "2_percent",
ifelse(PercWT == 5, "5_percent",
ifelse(PercWT == 10, "10_percent",
ifelse(PercWT == 100, "100_percent", NA))))))
data <- mutate(data, mutation_level = paste0(mutation, "_", level))
expectedVariants <- read_csv("./data/reference/MixingStudyExpectedVariants_ByLevel.csv")
expectedVariants <- mutate(expectedVariants, mutation_level = paste0(mutation, "_", level))
expectedVariants <- mutate(expectedVariants, found = ifelse(mutation_level %in% data$mutation_level, "True Positive", "False Negative"))
if(input$exSeg2 == "yes")
{
expectedVariants <- filter(expectedVariants, Position < 1500 | Position > 4100)
} else {}
levels <- c("100_percent", "10_percent", "5_percent", "2_percent", "1_percent")
chrs <- data.frame("level" = levels)
chrs <- mutate(chrs, start = 0, stop = 7440)
palette <- wes_palette("Darjeeling1")
expectedVariants$level <- factor(expectedVariants$level, levels = rev(c("100_percent","10_percent","5_percent","2_percent","1_percent")))
chrs$level <- factor(chrs$level, levels = levels(expectedVariants$level))
ggplot(expectedVariants, aes(x = Position, y = level)) +
geom_point(aes(color = found), size = 5, shape = 108) +
geom_segment(data = chrs, aes(x = start, y = level, xend = stop, yend = level)) +
ylab("Expected Frequency Group") +
xlab("") +
theme_minimal() +
scale_color_manual(name = "", values = palette[c(1,2)]) +
theme(axis.line.x = element_blank(), axis.line.y = element_blank()) +
scale_x_continuous(breaks = c()) +
theme(panel.grid.major = element_line(colour = "gray96"), panel.grid.minor = element_line(colour = "white")) + theme(legend.position = "bottom") +
scale_x_continuous(breaks = c(0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 7500))
})
# Plot position of false positives
output$FalseVariants <- renderPlot({
data <- dataInput()
data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
data <- mutate(data, level = ifelse(PercWT == 1, "1_percent",
ifelse(PercWT == 2, "2_percent",
ifelse(PercWT == 5, "5_percent",
ifelse(PercWT == 10, "10_percent",
ifelse(PercWT == 100, "100_percent", NA))))))
data <- mutate(data, mutation_level = paste0(mutation, "_", level))
data <- filter(data, category == FALSE & !is.na(level))
levels <- c("100_percent", "10_percent", "5_percent", "2_percent", "1_percent")
chrs <- data.frame("level" = levels)
chrs <- mutate(chrs, start = 0, stop = 7440)
palette <- wes_palette("Darjeeling1")
data$level <- factor(data$level, levels = rev(c("100_percent","10_percent","5_percent","2_percent","1_percent")))
chrs$level <- factor(chrs$level, levels = levels(data$level))
ggplot(data, aes(x = pos, y = level)) +
geom_point(aes(), size = 5, shape = 108, color = "violet") +
geom_segment(data = chrs, aes(x = start, y = level, xend = stop, yend = level)) +
ylab("Expected Frequency Group") +
xlab("") +
theme_minimal() +
scale_color_manual(name = "", values = palette[c(1,2)]) +
theme(axis.line.x = element_blank(), axis.line.y = element_blank()) +
scale_x_continuous(breaks = c()) +
theme(panel.grid.major = element_line(colour = "gray96"), panel.grid.minor = element_line(colour = "white")) + theme(legend.position = "bottom") +
scale_x_continuous(breaks = c(0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 7500))
})
# Plot frequency of true positive variants against position, color by expected frequency
output$FreqByPosition <- renderPlot({
data <- dataInput()
data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
data <- mutate(data, level = ifelse(PercWT == 1, "1_percent",
ifelse(PercWT == 2, "2_percent",
ifelse(PercWT == 5, "5_percent",
ifelse(PercWT == 10, "10_percent",
ifelse(PercWT == 100, "100_percent", NA))))))
data <- mutate(data, mutation_level = paste0(mutation, "_", level))
data <- filter(data, category == TRUE & !is.na(level))
levels <- c("100_percent", "10_percent", "5_percent", "2_percent", "1_percent")
chrs <- data.frame("level" = levels)
chrs <- mutate(chrs, start = 0, stop = 7440)
palette <- wes_palette("Darjeeling1")
data$level <- factor(data$level, levels = rev(c("100_percent","10_percent","5_percent","2_percent","1_percent")))
chrs$level <- factor(chrs$level, levels = levels(data$level))
base = 0.6
interval = 0.02
linesize = 1.2
ggplot(data) +
geom_point(aes(x = pos, y = freq.var, color = level)) +
ylab("Frequency") +
xlab("Genome Position") +
theme_minimal() +
theme(axis.line.x = element_blank(), axis.line.y = element_blank()) +
theme(panel.grid.major = element_line(colour = "gray96"), panel.grid.minor = element_line(colour = "white")) +
theme(legend.position = "right") +
ylim(c(0, 0.7)) +
scale_x_continuous(breaks = c(0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 7500)) +
scale_color_manual(values = palette, name = "Expected Frequency") +
geom_abline(intercept = 0.1, slope = 0, linetype = 3, size = 0.5, color = palette[4]) +
geom_abline(intercept = 0.05, slope = 0, linetype = 3, size = 0.5, color = palette[3]) +
geom_abline(intercept = 0.02, slope = 0, linetype = 3, size = 0.5, color = palette[2]) +
geom_abline(intercept = 0.01, slope = 0, linetype = 3, size = 0.5, color = palette[1]) +
geom_segment(data = data, size = linesize, color = "orange1", aes(x = 98, y = base + interval*0, xend = 2436, yend = base + interval*0)) +
geom_segment(data = data, size = linesize, color = "mediumseagreen", aes(x = 1472, y = base + interval*1, xend = 4151, yend = base + interval*1)) +
geom_segment(data = data, size = linesize, color = "indianred4", aes(x = 3214, y = base + interval*2, xend = 5861, yend = base + interval*2)) +
geom_segment(data = data, size = linesize, color = "slateblue3", aes(x = 4966, y = base + interval*3, xend = 7400, yend = base + interval*3))
})
# Make the table
output$table <- renderTable({
data <- dataInput()
primer_fwd_outer <- 95 # 95 to 115
primer_rev_outer <- 7440 # 7415 to 7440
expectedVariants <- read_csv("./data/reference/MixingStudyExpectedVariants.csv")
#expectedTruePos <- nrow(expectedVariants) # Expect to see 66 variants.
#possible_vars <- (primer_rev_outer - primer_fwd_outer - 1)*3 # Positions in primer range, times 3.
if(input$exSeg2 == "yes")
{
expectedVariants <- filter(expectedVariants, Position < 1500 | Position > 4100)
expectedTruePos <- nrow(expectedVariants) # Expect to see 66 variants.
possible_vars <- (primer_rev_outer - primer_fwd_outer - 1 - (4100-1500))*3 # Positions in primer range, times 3.
} else {
expectedTruePos <- nrow(expectedVariants) # Expect to see 66 variants.
possible_vars <- (primer_rev_outer - primer_fwd_outer - 1)*3 # Positions in primer range, times 3.
}
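    # Worked example (added): with primer_fwd_outer = 95 and primer_rev_outer = 7440,
    # possible_vars = (7440 - 95 - 1) * 3 = 22032 candidate variant sites when segment 2 is kept.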
filtered_data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
dd = 4
m.roc.table <- miseq.roc.table(filtered_data, 1, expectedTruePos, possible_vars, ">")
m.roc.table <- rename(m.roc.table, c("exp.freq"="Frequency","adj.sensitivity"="Sensitivity","TP"="True\nPositives","adj.specificity"="Specificity","FP"="False\nPositives"))
m.roc.table$Frequency <- c("100%", "10%", "5%", "2%", "1%")
m.roc.table$Sensitivity <- round(m.roc.table$Sensitivity, digits = dd)
m.roc.table$Specificity <- round(m.roc.table$Specificity, digits = dd)
m.roc.table
}, digits = 4)
}
shinyApp(ui = ui, server = server)
|
/app.R
|
permissive
|
andrewvalesano/PolioBenchmarking_Shiny
|
R
| false | false | 18,089 |
r
|
### Project: Polio Mixing Experiment for Bangladesh Sample Sequencing
### Purpose: Empirically derive the optimal filtering criteria for variant calling
library(tidyverse)
library(shiny)
library(ggplot2)
library(pROC)
library(wesanderson)
library(gtable)
library(gridExtra)
library(plyr)
library(reshape2)
library(showtext)
source("./supportFunctions.R")
ui <- fluidPage(
titlePanel("Benchmarking Experiment for Poliovirus Variant Calling v2.0"),
tags$head(tags$style('body {font-family: Verdana;}')),
fluidRow(
column(6, h4("Quality Distribution"), plotOutput("QualDist")),
column(6, h4("Distribution on Read"), plotOutput("ReadDist")),
column(6, h4("Frequency Distribution"), plotOutput("FreqDist")),
column(6, h4("Variant Frequency: Expected vs. Observed"), plotOutput("ObservedExpected")),
column(6, h4("Coverage"), plotOutput("Coverage")),
column(6, h4("Output Table"), tableOutput('table')),
column(6, h4("Expected Variants by Position"), plotOutput("Variants")),
column(6, h4("False Positives by Position"), plotOutput("FalseVariants")),
column(6, h4("True Variant Frequency by Position"), plotOutput("FreqByPosition"))
),
hr(),
fluidRow(
column(3,
sliderInput(inputId = "MapQ",
label = "Mean MapQ Cutoff",
min = 20, max = 44, value = 30),
sliderInput(inputId = "Phred",
label = "Mean Phred Cutoff",
min = 20, max = 39, value = 35),
sliderInput(inputId = "freq.var",
label = "Frequency Cutoff",
min = 0, max = 0.1, value = 0)
),
column(3,
sliderInput("pos",
label="Read Position Cutoff",
min = 0, max = 250, value = c(0, 250)),
sliderInput(inputId = "p.val",
label = "p-value Cutoff",
min = 0, max = 0.9, value = 0.9)
),
column(3,
radioButtons("dups",
label = "Sequencing Duplicates",
choices = list("Variants found in both replicates" = "collapsed",
"Using only the first replicate" = "first",
"Using only the second replicate" = "second"),
selected = "collapsed"),
radioButtons("disp",
label = "deepSNV Dispersion Model",
choices = list("Binomial" = "binomial",
"Betabinominal one-sided" = "onesided"),
#"Betabinominal two-sided" = "twosided"),
selected = "onesided"),
radioButtons("tna",
label = "Spiked into stool total nucleic acid",
choices = list("Spike-in" = "yes",
"No spike-in" = "no"),
selected = "yes")
),
column(3,
radioButtons("inputLevel",
label = "Genome Copy Input",
choices = list("4x10^4" = "4_E4", "9x10^3" = "9_E3", "9x10^2" = "9_E2", "9x10^1" = "9_E1"),
selected = "4_E4"),
radioButtons("subset",
label = "Subset of Reads",
choices = list("100%" = "100",
"50%" = "50",
"25%" = "25",
"10%" = "10"),
selected = "100"),
radioButtons("exSeg2",
label = "Exclude Segment 2?",
choices = list("Yes" = "yes",
"No" = "no"),
selected = "no")
)
)
)
server <- function(input, output)
{
dataInput <- reactive({
replicate = ifelse(input$dups == "first" | input$dups == "second", "notcollapsed", "collapsed")
file <- paste0("./data/processed/shiny2.", input$disp, ".", input$subset, ".", replicate, ".variants.csv")
variants_raw <- read_csv(file)
if(input$dups == "first") {
data <- filter(variants_raw, Rep == 1)
} else if(input$dups == "second") {
data <- filter(variants_raw, Rep == 2)
} else {
data <- variants_raw
}
data <- filter(data, InputLevel == input$inputLevel, inTNA == input$tna)
data <- mutate(data, SampleNumber = Id)
primer_fwd_outer <- 95 # 95 to 115
primer_rev_outer <- 7440 # 7415 to 7440
possible_vars <- (primer_rev_outer - primer_fwd_outer - 1)*3 # Positions in primer range, times 3.
data <- mutate(data, exp.freq = PercWT/100)
data <- mutate(data, Id = as.factor(as.character(PercWT)))
data <- filter(data, pos > primer_fwd_outer & pos < primer_rev_outer)
if(input$exSeg2 == "yes")
{
data <- filter(data, pos < 1500 | pos > 4100)
} else {}
range_factor <- 5 # this filter prevents there from being observed true positives that are due to lack of Sabin1 amplification in low copy number samples
#data <- mutate(data, category = ifelse(freq.var > exp.freq*range_factor | freq.var < exp.freq*(1/range_factor), FALSE, category))
return(data)
})
covInput <- reactive({
file <- paste0("./data/processed/shiny2.", input$disp, ".", input$subset, ".coverage.csv")
coverage_raw <- read.csv(file)
return(coverage_raw)
})
# Plot MapQ and Phred, color-coded by TP/FP
output$QualDist <- renderPlot({
data <- dataInput()
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = Phred, y = MapQ, color = category)) + geom_point() + xlab("Phred") + ylab("MapQ") + theme_minimal() + scale_color_manual(values = palette[c(4,5)]) + ylim(c(30,44)) + xlim(c(35,39)) + geom_vline(xintercept = input$Phred, linetype = "dotted", color = "black", size = 1) + geom_hline(yintercept = input$MapQ, linetype = "dotted", color = "black", size = 1)
})
# Plot average read position, color-coded by TP/FP
output$ReadDist <- renderPlot({
data <- dataInput()
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = Read_pos, fill = category)) + geom_histogram(position = "dodge") + xlab("Read Position") + ylab("Count") + scale_fill_manual(values = palette[c(4,5)]) + theme_minimal() + geom_vline(xintercept = input$pos, linetype = "dotted", color = "black", size = 1)
})
# Plot frequency histogram, color-coded by TP/FP
output$FreqDist <- renderPlot({
data <- dataInput()
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = freq.var, fill = category)) + geom_histogram(binwidth = 0.001, position = "dodge") + xlab("Frequency") + ylab("Count") + scale_fill_manual(values = palette[c(4,5)]) + theme_minimal() + geom_vline(xintercept = input$freq.var, linetype = "dotted", color = "black", size = 1) + xlim(c(0, 0.1))
})
# Plot observed vs. expected frequency
output$ObservedExpected <- renderPlot({
data <- dataInput()
data <- mutate(data, exp.freq = PercWT/100)
palette <- wes_palette("Darjeeling1")
ggplot(data, aes(x = exp.freq, y = freq.var, color = category)) +
geom_point(size = 1) +
scale_color_manual(values = palette[c(4,5)]) +
theme_minimal() +
geom_abline(intercept = 0, slope = 1,linetype = 2, size = 1) +
xlab("Expected Frequency") +
ylab("Observed Frequency") +
scale_y_log10(limits=c(0.001,1),breaks=c(0.001,0.002,0.005,0.01,0.02,0.05,0.1, 0.25, 0.5, 1)) +
scale_x_log10(limits=c(0.001,1),breaks=c(0.001,0.002,0.005,0.01,0.02,0.05,0.1, 0.25, 0.5, 1))
})
# Plot coverage for the given samples
output$Coverage <- renderPlot({
data <- dataInput()
coverage_raw <- covInput()
coverage <- filter(coverage_raw, Id %in% data$SampleNumber)
base = 12000
interval = 300
linesize = 1.2
ggplot(coverage, aes(x = chr.pos, y = coverage, group = Id)) + geom_line(aes(color = Id)) +
theme_bw() +
xlab("Genome Position") +
ylab("Coverage (Read Depth)") +
theme(legend.position = "none") +
xlim(c(95, 7440)) +
geom_segment(data = coverage, size = linesize, color = "orange1", aes(x = 98, y = base + interval*0, xend = 2436, yend = base + interval*0)) +
geom_segment(data = coverage, size = linesize, color = "mediumseagreen", aes(x = 1472, y = base + interval*1, xend = 4151, yend = base + interval*1)) +
geom_segment(data = coverage, size = linesize, color = "indianred4", aes(x = 3214, y = base + interval*2, xend = 5861, yend = base + interval*2)) +
geom_segment(data = coverage, size = linesize, color = "slateblue3", aes(x = 4966, y = base + interval*3, xend = 7400, yend = base + interval*3)) +
geom_abline(intercept = 200, slope = 0, linetype = 3, size = 0.5, color = "black") +
geom_abline(intercept = 1000, slope = 0, linetype = 3, size = 0.5, color = "black")
})
# Plot position of true positives and false negatives
output$Variants <- renderPlot({
data <- dataInput()
data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
data <- mutate(data, level = ifelse(PercWT == 1, "1_percent",
ifelse(PercWT == 2, "2_percent",
ifelse(PercWT == 5, "5_percent",
ifelse(PercWT == 10, "10_percent",
ifelse(PercWT == 100, "100_percent", NA))))))
data <- mutate(data, mutation_level = paste0(mutation, "_", level))
expectedVariants <- read_csv("./data/reference/MixingStudyExpectedVariants_ByLevel.csv")
expectedVariants <- mutate(expectedVariants, mutation_level = paste0(mutation, "_", level))
expectedVariants <- mutate(expectedVariants, found = ifelse(mutation_level %in% data$mutation_level, "True Positive", "False Negative"))
if(input$exSeg2 == "yes")
{
expectedVariants <- filter(expectedVariants, Position < 1500 | Position > 4100)
} else {}
levels <- c("100_percent", "10_percent", "5_percent", "2_percent", "1_percent")
chrs <- data.frame("level" = levels)
chrs <- mutate(chrs, start = 0, stop = 7440)
palette <- wes_palette("Darjeeling1")
expectedVariants$level <- factor(expectedVariants$level, levels = rev(c("100_percent","10_percent","5_percent","2_percent","1_percent")))
chrs$level <- factor(chrs$level, levels = levels(expectedVariants$level))
ggplot(expectedVariants, aes(x = Position, y = level)) +
geom_point(aes(color = found), size = 5, shape = 108) +
geom_segment(data = chrs, aes(x = start, y = level, xend = stop, yend = level)) +
ylab("Expected Frequency Group") +
xlab("") +
theme_minimal() +
scale_color_manual(name = "", values = palette[c(1,2)]) +
theme(axis.line.x = element_blank(), axis.line.y = element_blank()) +
scale_x_continuous(breaks = c()) +
theme(panel.grid.major = element_line(colour = "gray96"), panel.grid.minor = element_line(colour = "white")) + theme(legend.position = "bottom") +
scale_x_continuous(breaks = c(0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 7500))
})
# Plot position of false positives
output$FalseVariants <- renderPlot({
data <- dataInput()
data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
data <- mutate(data, level = ifelse(PercWT == 1, "1_percent",
ifelse(PercWT == 2, "2_percent",
ifelse(PercWT == 5, "5_percent",
ifelse(PercWT == 10, "10_percent",
ifelse(PercWT == 100, "100_percent", NA))))))
data <- mutate(data, mutation_level = paste0(mutation, "_", level))
data <- filter(data, category == FALSE & !is.na(level))
levels <- c("100_percent", "10_percent", "5_percent", "2_percent", "1_percent")
chrs <- data.frame("level" = levels)
chrs <- mutate(chrs, start = 0, stop = 7440)
palette <- wes_palette("Darjeeling1")
data$level <- factor(data$level, levels = rev(c("100_percent","10_percent","5_percent","2_percent","1_percent")))
chrs$level <- factor(chrs$level, levels = levels(data$level))
ggplot(data, aes(x = pos, y = level)) +
geom_point(aes(), size = 5, shape = 108, color = "violet") +
geom_segment(data = chrs, aes(x = start, y = level, xend = stop, yend = level)) +
ylab("Expected Frequency Group") +
xlab("") +
theme_minimal() +
scale_color_manual(name = "", values = palette[c(1,2)]) +
theme(axis.line.x = element_blank(), axis.line.y = element_blank()) +
scale_x_continuous(breaks = c()) +
theme(panel.grid.major = element_line(colour = "gray96"), panel.grid.minor = element_line(colour = "white")) + theme(legend.position = "bottom") +
scale_x_continuous(breaks = c(0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 7500))
})
# Plot frequency of true positive variants against position, color by expected frequency
output$FreqByPosition <- renderPlot({
data <- dataInput()
data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
data <- mutate(data, level = ifelse(PercWT == 1, "1_percent",
ifelse(PercWT == 2, "2_percent",
ifelse(PercWT == 5, "5_percent",
ifelse(PercWT == 10, "10_percent",
ifelse(PercWT == 100, "100_percent", NA))))))
data <- mutate(data, mutation_level = paste0(mutation, "_", level))
data <- filter(data, category == TRUE & !is.na(level))
levels <- c("100_percent", "10_percent", "5_percent", "2_percent", "1_percent")
chrs <- data.frame("level" = levels)
chrs <- mutate(chrs, start = 0, stop = 7440)
palette <- wes_palette("Darjeeling1")
data$level <- factor(data$level, levels = rev(c("100_percent","10_percent","5_percent","2_percent","1_percent")))
chrs$level <- factor(chrs$level, levels = levels(data$level))
base = 0.6
interval = 0.02
linesize = 1.2
ggplot(data) +
geom_point(aes(x = pos, y = freq.var, color = level)) +
ylab("Frequency") +
xlab("Genome Position") +
theme_minimal() +
theme(axis.line.x = element_blank(), axis.line.y = element_blank()) +
theme(panel.grid.major = element_line(colour = "gray96"), panel.grid.minor = element_line(colour = "white")) +
theme(legend.position = "right") +
ylim(c(0, 0.7)) +
scale_x_continuous(breaks = c(0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 7500)) +
scale_color_manual(values = palette, name = "Expected Frequency") +
geom_abline(intercept = 0.1, slope = 0, linetype = 3, size = 0.5, color = palette[4]) +
geom_abline(intercept = 0.05, slope = 0, linetype = 3, size = 0.5, color = palette[3]) +
geom_abline(intercept = 0.02, slope = 0, linetype = 3, size = 0.5, color = palette[2]) +
geom_abline(intercept = 0.01, slope = 0, linetype = 3, size = 0.5, color = palette[1]) +
geom_segment(data = data, size = linesize, color = "orange1", aes(x = 98, y = base + interval*0, xend = 2436, yend = base + interval*0)) +
geom_segment(data = data, size = linesize, color = "mediumseagreen", aes(x = 1472, y = base + interval*1, xend = 4151, yend = base + interval*1)) +
geom_segment(data = data, size = linesize, color = "indianred4", aes(x = 3214, y = base + interval*2, xend = 5861, yend = base + interval*2)) +
geom_segment(data = data, size = linesize, color = "slateblue3", aes(x = 4966, y = base + interval*3, xend = 7400, yend = base + interval*3))
})
# Make the table
output$table <- renderTable({
data <- dataInput()
primer_fwd_outer <- 95 # 95 to 115
primer_rev_outer <- 7440 # 7415 to 7440
expectedVariants <- read_csv("./data/reference/MixingStudyExpectedVariants.csv")
#expectedTruePos <- nrow(expectedVariants) # Expect to see 66 variants.
#possible_vars <- (primer_rev_outer - primer_fwd_outer - 1)*3 # Positions in primer range, times 3.
if(input$exSeg2 == "yes")
{
expectedVariants <- filter(expectedVariants, Position < 1500 | Position > 4100)
expectedTruePos <- nrow(expectedVariants) # Expect to see 66 variants.
possible_vars <- (primer_rev_outer - primer_fwd_outer - 1 - (4100-1500))*3 # Positions in primer range, times 3.
} else {
expectedTruePos <- nrow(expectedVariants) # Expect to see 66 variants.
possible_vars <- (primer_rev_outer - primer_fwd_outer - 1)*3 # Positions in primer range, times 3.
}
filtered_data <- filter(data, p.val < input$p.val, MapQ > input$MapQ & Phred > input$Phred & freq.var > input$freq.var & Read_pos <= input$pos[2] & Read_pos >= input$pos[1])
dd = 4
m.roc.table <- miseq.roc.table(filtered_data, 1, expectedTruePos, possible_vars, ">")
m.roc.table <- rename(m.roc.table, c("exp.freq"="Frequency","adj.sensitivity"="Sensitivity","TP"="True\nPositives","adj.specificity"="Specificity","FP"="False\nPositives"))
m.roc.table$Frequency <- c("100%", "10%", "5%", "2%", "1%")
m.roc.table$Sensitivity <- round(m.roc.table$Sensitivity, digits = dd)
m.roc.table$Specificity <- round(m.roc.table$Specificity, digits = dd)
m.roc.table
}, digits = 4)
}
shinyApp(ui = ui, server = server)
|
context("Bootstrap Diagnosands")
test_that("test diagnosands", {
my_population <- declare_population(N = 50, noise = rnorm(N))
my_potential_outcomes <-
declare_potential_outcomes(Y_Z_0 = noise,
Y_Z_1 = noise + rnorm(N, mean = 2, sd = 2))
my_assignment <- declare_assignment(m = 25)
pate <- declare_estimand(mean(Y_Z_1 - Y_Z_0), label = "pate")
pate_estimator1 <- declare_estimator(Y ~ Z, estimand = pate, label = "test1")
pate_estimator2 <- declare_estimator(Y ~ Z - 1, estimand = pate, label = "test2")
reveal <- declare_reveal()
my_design <- declare_design(my_population(),
my_potential_outcomes, pate,
my_assignment,
reveal,
pate_estimator1,
pate_estimator2)
# default set
diagnosis <- diagnose_design(my_design, sims = 2, bootstrap = 2)
expect_equal(dim(diagnosis$diagnosands), c(2,19))
expect_equal(dim(diagnosis$simulations), c(4,10))
})
|
/tests/testthat/test-bootstrap-diagnosands.R
|
no_license
|
antoshachekhonte/DeclareDesign
|
R
| false | false | 1,064 |
r
|
context("Bootstrap Diagnosands")
test_that("test diagnosands", {
my_population <- declare_population(N = 50, noise = rnorm(N))
my_potential_outcomes <-
declare_potential_outcomes(Y_Z_0 = noise,
Y_Z_1 = noise + rnorm(N, mean = 2, sd = 2))
my_assignment <- declare_assignment(m = 25)
pate <- declare_estimand(mean(Y_Z_1 - Y_Z_0), label = "pate")
pate_estimator1 <- declare_estimator(Y ~ Z, estimand = pate, label = "test1")
pate_estimator2 <- declare_estimator(Y ~ Z - 1, estimand = pate, label = "test2")
reveal <- declare_reveal()
my_design <- declare_design(my_population(),
my_potential_outcomes, pate,
my_assignment,
reveal,
pate_estimator1,
pate_estimator2)
# default set
diagnosis <- diagnose_design(my_design, sims = 2, bootstrap = 2)
expect_equal(dim(diagnosis$diagnosands), c(2,19))
expect_equal(dim(diagnosis$simulations), c(4,10))
})
|
question_rating <- function(data){
flat_res <- data$result$problem
graph_data <- flat_res %>%
group_by(rating) %>%
summarise(count = n()) %>%
mutate(percent = round(count/sum(count)*100, digits = 2))
graph <- plot_ly(
graph_data,
x = ~rating,
y = ~count,
color = ~rating,
type = 'bar',
text = ~paste('</br> Count: ',count,
'</br> Rating: ',rating,
'</br> Percent :',percent),
hoverinfo = 'text'
) %>%
layout(
showlegend = FALSE
# plot_bgcolor='transparent',
# paper_bgcolor='transparent'
)
  return(graph)
}
|
/app/analytics/question_rating.R
|
no_license
|
codesparsh/Codeforces-Visualizer
|
R
| false | false | 656 |
r
|
question_rating <- function(data){
flat_res <- data$result$problem
graph_data <- flat_res %>%
group_by(rating) %>%
summarise(count = n()) %>%
mutate(percent = round(count/sum(count)*100, digits = 2))
graph <- plot_ly(
graph_data,
x = ~rating,
y = ~count,
color = ~rating,
type = 'bar',
text = ~paste('</br> Count: ',count,
'</br> Rating: ',rating,
'</br> Percent :',percent),
hoverinfo = 'text'
) %>%
layout(
showlegend = FALSE
# plot_bgcolor='transparent',
# paper_bgcolor='transparent'
)
  return(graph)
}
|
\encoding{UTF-8}
\name{get.music.info}
\alias{get.music.info}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get album information from Douban Music
}
\description{
Gets album information from Douban Music, using the API V2 interface.
}
\usage{
get.music.info(musicid)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{musicid}{Douban Music album ID}
}
\value{
\item{title}{album title}
\item{author }{album author/artist}
\item{rating }{rating: minimum value, number of raters, average value, maximum value}
\item{summary }{summary of the album contents}
\item{tags }{a data.frame of common tags, with two columns: tag name and tag frequency}
\item{songs}{names of the songs included on the album}
\item{href}{URL of the album's home page}
\item{image }{URL of the cover image}
\item{attribute }{a list of other album information, such as the publishing company and release date}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
<\email{qxde01@gmail.com}>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## http://music.douban.com/subject/3843530/
\dontrun{get.music.info(musicid='3843530')}
}
\keyword{music}
\keyword{douban}
|
/man/get.music.info.Rd
|
no_license
|
ljtyduyu/Rdouban
|
R
| false | false | 1,325 |
rd
|
\encoding{UTF-8}
\name{get.music.info}
\alias{get.music.info}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get album information from Douban Music
}
\description{
Gets album information from Douban Music, using the API V2 interface.
}
\usage{
get.music.info(musicid)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{musicid}{Douban Music album ID}
}
\value{
\item{title}{album title}
\item{author }{album author/artist}
\item{rating }{rating: minimum value, number of raters, average value, maximum value}
\item{summary }{summary of the album contents}
\item{tags }{a data.frame of common tags, with two columns: tag name and tag frequency}
\item{songs}{names of the songs included on the album}
\item{href}{URL of the album's home page}
\item{image }{URL of the cover image}
\item{attribute }{a list of other album information, such as the publishing company and release date}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
<\email{qxde01@gmail.com}>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## http://music.douban.com/subject/3843530/
\dontrun{get.music.info(musicid='3843530')}
}
\keyword{music}
\keyword{douban}
|
library(shiny)
library(leaflet)
library(rCharts)
library(ggplot2)
library(dplyr)
library(scales)
library(DT)
source('global.R')
attach('./data/combined_movies.rda')
shinyServer(function(input, output){
reactive_dataset <- reactive({
if (input$menu1 != 'histogram'){
      dataset <- combined_movies[combined_movies$IMDB >= input$range[1] & combined_movies$IMDB <= input$range[2] & combined_movies$Year >= input$range_year[1] & combined_movies$Year <= input$range_year[2],]
    }
    else if (input$var == 'IMDB Score'){
      dataset <- combined_movies[combined_movies$IMDB >= input$range[1] & combined_movies$IMDB <= input$range[2],]
    }
    else if (input$var == 'Year') {
      dataset <- combined_movies[combined_movies$Year >= input$range_year[1] & combined_movies$Year <= input$range_year[2],]
}
else{
dataset <- combined_movies
}
dataset
})
output$map <- renderLeaflet({
leaflet(combined_movies) %>%
#addTiles() %>%
addProviderTiles('CartoDB.Positron') %>%
addMarkers(popup = paste(
        paste0("<img src = ./", combined_movies$Poster_new, " width='200' height = '300'>"), '<br>','<br>', '<br>',
combined_movies$Film, ",",
combined_movies$Year, "<br>",
'Location:', combined_movies$Location.Display.Text, '<br>',
'IMDB rating:', combined_movies$IMDB, '<br>',
paste0('<a href = ', combined_movies$IMDB.LINK, " target = '_blank'", '> IMDB Link </a>'),
"<style> div.leaflet-popup-content {width:auto !important;}</style>"
)
)
})
observe( {
new_data = reactive_dataset()
updated = new_data[!is.na(new_data$Film), ]
proxy = leafletProxy('map', data = updated)
proxy %>% clearMarkers() %>%
addMarkers(popup = paste(
        paste0("<img src = ./", updated$Poster_new, " width='200' height = '300'>"), '<br>', '<br>', '<br>',
updated$Film, ",",
updated$Year, "<br>",
'Location:', updated$Location.Display.Text, '<br>',
'IMDB rating:', updated$IMDB, '<br>',
paste0('<a href = ', updated$IMDB.LINK, " target = '_blank'", '> IMDB Link </a>'),
"<style> div.leaflet-popup-content {width:auto !important;}</style>"
)
)
})
observe({
output$table <- DT::renderDataTable({
data = reactive_dataset()
stripped_data <- data[!is.na(data$Film), c('Film', 'Year', 'Director', 'Budget', 'Gross', 'Duration', 'IMDB')]
cleaned_data <- stripped_data[!duplicated(stripped_data),]
DT::datatable(cleaned_data, rownames = FALSE) %>% DT::formatStyle(input$selected, background = 'skyblue', fontWeight = 'bold')
})
})
observe({
output$groups <- DT::renderDataTable({
data = reactive_dataset()
if (input$by_group == 'Director'){
df <- data %>% group_by(Director) %>% summarise(count = n()) %>% arrange(desc(count))
DT::datatable(df[!is.na(df$Director), ])
}
else if (input$by_group == 'Borough'){
df <- data %>% group_by(Borough) %>% summarise(count = n()) %>% arrange(desc(count))
DT::datatable(df[!is.na(df$Borough), ])
}
else if (input$by_group == 'Neighborhood'){
df <- data %>% group_by(Neighborhood) %>% summarise(count = n()) %>% arrange(desc(count))
DT::datatable(df[!is.na(df$Neighborhood), ])
}
})
})
observe({
output$histogram <- renderPlot({
data = reactive_dataset()
if(input$var == 'Year'){
g <- ggplot(data, aes(x = as.numeric(data$Year), fill = ..x..)) +
geom_histogram(breaks = seq(input$range_year[1], input$range_year[2], by = input$binsize_year)) +
xlab('Year') +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
scale_x_continuous(breaks = seq(input$range_year[1], input$range_year[2], input$binsize_year), expand = c(0, 0)) +
scale_y_continuous(breaks = seq(5, 200, 5), expand = c(0, 0))
print(g)
}
else if (input$var == 'IMDB Score'){
g <- ggplot(data, aes(x = data$IMDB, fill = ..x..)) +
geom_histogram(breaks = seq(input$range[1], input$range[2], by = input$binsize_IMDB)) +
xlab('IMDB Score') +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
scale_x_continuous(breaks = seq(input$range[1], input$range[2], input$binsize_IMDB), expand = c(0, 0)) +
scale_y_continuous(breaks = seq(5, 200, 5), expand = c(0, 0))
print(g)
}
else if (input$var == 'Duration'){
g <- ggplot(data, aes(x = data$Duration, fill = ..x..)) +
geom_histogram(breaks = seq(50, 250, by = input$binsize_duration)) +
xlab('Movie Duration') +
scale_x_continuous(breaks = seq(50, 250, input$binsize_duration), expand = c(0, 5)) +
scale_y_continuous(breaks = seq(0, 200, by = 10), expand = c(0, 0)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5))
print(g)
}
else if (input$var == 'Budget'){
g <- ggplot(data, aes(x = data$Budget, fill = ..x..)) +
geom_histogram(breaks = seq(0, 200000000, by = input$binsize_budget)) +
xlab('Budget') +
scale_x_continuous(breaks = seq(0, 200000000, input$binsize_budget), labels = comma, expand = c(0, 0)) +
scale_y_continuous(breaks = seq(0, 200, by = 5), expand = c(0, 0)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5))
print(g)
}
else if (input$var == 'Gross'){
g <- ggplot(data, aes(x = data$Gross, fill = ..x..)) +
geom_histogram(breaks = seq(0, 200000000, by = input$binsize_gross)) +
xlab('Gross') +
scale_x_continuous(breaks = seq(0, 200000000, input$binsize_gross), labels = comma, expand = c(0, 0)) +
scale_y_continuous(breaks = seq(0, 200, by = 5), expand = c(0, 0)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5))
print(g)
}
})
})
observe({
output$scatter <- renderPlot({
if (input$regression %% 2 != 0){
data = reactive_dataset()
g <- ggplot(na.omit(data[, c(input$xvar, input$yvar, input$factor)]), aes_string(x = input$xvar, y = input$yvar)) +
geom_point(aes_string(color = input$factor)) +
scale_color_gradient(low = 'blue') +
scale_colour_brewer() +
theme_dark() +
#scale_x_continuous() +
#scale_y_continuous() +
geom_smooth(method = 'lm', formula = y~x, se = FALSE)
}
else {
data = reactive_dataset()
g <- ggplot(na.omit(data[, c(input$xvar, input$yvar, input$factor)]), aes_string(x = input$xvar, y = input$yvar)) +
geom_point(aes_string(color = input$factor)) +
scale_colour_brewer() +
theme_dark()
#scale_x_continuous() +
#scale_y_continuous()
}
print(g)
})
})
observe({
output$box <- renderPlot({
data = reactive_dataset()
g <- ggplot(na.omit(data[, c(input$xvar_box, input$yvar_box)]), aes_string(x = input$xvar_box, y = input$yvar_box)) +
geom_boxplot(aes(fill = ..x..)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(legend.position = 'none')
#scale_x_continuous(expand = c(0,0)) +
#scale_y_continuous(expand = c(0, 0))
print(g)
})
})
})
|
/Project1-ExploreVis/Epstein_Shiny_Dashboard_Project/server.R
|
no_license
|
liyuhaojohn/bootcamp008_project
|
R
| false | false | 7,971 |
r
|
library(shiny)
library(leaflet)
library(rCharts)
library(ggplot2)
library(dplyr)
library(scales)
library(DT)
source('global.R')
attach('./data/combined_movies.rda')
shinyServer(function(input, output){
reactive_dataset <- reactive({
if (input$menu1 != 'histogram'){
      dataset <- combined_movies[combined_movies$IMDB >= input$range[1] & combined_movies$IMDB <= input$range[2] & combined_movies$Year >= input$range_year[1] & combined_movies$Year <= input$range_year[2],]
    }
    else if (input$var == 'IMDB Score'){
      dataset <- combined_movies[combined_movies$IMDB >= input$range[1] & combined_movies$IMDB <= input$range[2],]
    }
    else if (input$var == 'Year') {
      dataset <- combined_movies[combined_movies$Year >= input$range_year[1] & combined_movies$Year <= input$range_year[2],]
}
else{
dataset <- combined_movies
}
dataset
})
output$map <- renderLeaflet({
leaflet(combined_movies) %>%
#addTiles() %>%
addProviderTiles('CartoDB.Positron') %>%
addMarkers(popup = paste(
        paste0("<img src = ./", combined_movies$Poster_new, " width='200' height = '300'>"), '<br>','<br>', '<br>',
combined_movies$Film, ",",
combined_movies$Year, "<br>",
'Location:', combined_movies$Location.Display.Text, '<br>',
'IMDB rating:', combined_movies$IMDB, '<br>',
paste0('<a href = ', combined_movies$IMDB.LINK, " target = '_blank'", '> IMDB Link </a>'),
"<style> div.leaflet-popup-content {width:auto !important;}</style>"
)
)
})
observe( {
new_data = reactive_dataset()
updated = new_data[!is.na(new_data$Film), ]
proxy = leafletProxy('map', data = updated)
proxy %>% clearMarkers() %>%
addMarkers(popup = paste(
        paste0("<img src = ./", updated$Poster_new, " width='200' height = '300'>"), '<br>', '<br>', '<br>',
updated$Film, ",",
updated$Year, "<br>",
'Location:', updated$Location.Display.Text, '<br>',
'IMDB rating:', updated$IMDB, '<br>',
paste0('<a href = ', updated$IMDB.LINK, " target = '_blank'", '> IMDB Link </a>'),
"<style> div.leaflet-popup-content {width:auto !important;}</style>"
)
)
})
observe({
output$table <- DT::renderDataTable({
data = reactive_dataset()
stripped_data <- data[!is.na(data$Film), c('Film', 'Year', 'Director', 'Budget', 'Gross', 'Duration', 'IMDB')]
cleaned_data <- stripped_data[!duplicated(stripped_data),]
DT::datatable(cleaned_data, rownames = FALSE) %>% DT::formatStyle(input$selected, background = 'skyblue', fontWeight = 'bold')
})
})
observe({
output$groups <- DT::renderDataTable({
data = reactive_dataset()
if (input$by_group == 'Director'){
df <- data %>% group_by(Director) %>% summarise(count = n()) %>% arrange(desc(count))
DT::datatable(df[!is.na(df$Director), ])
}
else if (input$by_group == 'Borough'){
df <- data %>% group_by(Borough) %>% summarise(count = n()) %>% arrange(desc(count))
DT::datatable(df[!is.na(df$Borough), ])
}
else if (input$by_group == 'Neighborhood'){
df <- data %>% group_by(Neighborhood) %>% summarise(count = n()) %>% arrange(desc(count))
DT::datatable(df[!is.na(df$Neighborhood), ])
}
})
})
observe({
output$histogram <- renderPlot({
data = reactive_dataset()
if(input$var == 'Year'){
g <- ggplot(data, aes(x = as.numeric(data$Year), fill = ..x..)) +
geom_histogram(breaks = seq(input$range_year[1], input$range_year[2], by = input$binsize_year)) +
xlab('Year') +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
scale_x_continuous(breaks = seq(input$range_year[1], input$range_year[2], input$binsize_year), expand = c(0, 0)) +
scale_y_continuous(breaks = seq(5, 200, 5), expand = c(0, 0))
print(g)
}
else if (input$var == 'IMDB Score'){
g <- ggplot(data, aes(x = data$IMDB, fill = ..x..)) +
geom_histogram(breaks = seq(input$range[1], input$range[2], by = input$binsize_IMDB)) +
xlab('IMDB Score') +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
scale_x_continuous(breaks = seq(input$range[1], input$range[2], input$binsize_IMDB), expand = c(0, 0)) +
scale_y_continuous(breaks = seq(5, 200, 5), expand = c(0, 0))
print(g)
}
else if (input$var == 'Duration'){
g <- ggplot(data, aes(x = data$Duration, fill = ..x..)) +
geom_histogram(breaks = seq(50, 250, by = input$binsize_duration)) +
xlab('Movie Duration') +
scale_x_continuous(breaks = seq(50, 250, input$binsize_duration), expand = c(0, 5)) +
scale_y_continuous(breaks = seq(0, 200, by = 10), expand = c(0, 0)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5))
print(g)
}
else if (input$var == 'Budget'){
g <- ggplot(data, aes(x = data$Budget, fill = ..x..)) +
geom_histogram(breaks = seq(0, 200000000, by = input$binsize_budget)) +
xlab('Budget') +
scale_x_continuous(breaks = seq(0, 200000000, input$binsize_budget), labels = comma, expand = c(0, 0)) +
scale_y_continuous(breaks = seq(0, 200, by = 5), expand = c(0, 0)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5))
print(g)
}
else if (input$var == 'Gross'){
g <- ggplot(data, aes(x = data$Gross, fill = ..x..)) +
geom_histogram(breaks = seq(0, 200000000, by = input$binsize_gross)) +
xlab('Gross') +
scale_x_continuous(breaks = seq(0, 200000000, input$binsize_gross), labels = comma, expand = c(0, 0)) +
scale_y_continuous(breaks = seq(0, 200, by = 5), expand = c(0, 0)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(axis.title = element_text(size = 20), plot.title = element_text(size = 21, hjust = 0.5))
print(g)
}
})
})
observe({
output$scatter <- renderPlot({
if (input$regression %% 2 != 0){
data = reactive_dataset()
g <- ggplot(na.omit(data[, c(input$xvar, input$yvar, input$factor)]), aes_string(x = input$xvar, y = input$yvar)) +
geom_point(aes_string(color = input$factor)) +
scale_color_gradient(low = 'blue') +
scale_colour_brewer() +
theme_dark() +
#scale_x_continuous() +
#scale_y_continuous() +
geom_smooth(method = 'lm', formula = y~x, se = FALSE)
}
else {
data = reactive_dataset()
g <- ggplot(na.omit(data[, c(input$xvar, input$yvar, input$factor)]), aes_string(x = input$xvar, y = input$yvar)) +
geom_point(aes_string(color = input$factor)) +
scale_colour_brewer() +
theme_dark()
#scale_x_continuous() +
#scale_y_continuous()
}
print(g)
})
})
observe({
output$box <- renderPlot({
data = reactive_dataset()
g <- ggplot(na.omit(data[, c(input$xvar_box, input$yvar_box)]), aes_string(x = input$xvar_box, y = input$yvar_box)) +
geom_boxplot(aes(fill = ..x..)) +
scale_fill_gradient(low = "#56B1F7", high = "#132B43") +
theme(legend.position = 'none')
#scale_x_continuous(expand = c(0,0)) +
#scale_y_continuous(expand = c(0, 0))
print(g)
})
})
})
|
test_that("ifelse_censor_linter skips allowed usages", {
expect_lint("ifelse(x == 2, x, y)", NULL, ifelse_censor_linter())
expect_lint("ifelse(x > 2, x, y)", NULL, ifelse_censor_linter())
})
test_that("ifelse_censor_linter blocks simple disallowed usages", {
expect_lint(
"ifelse(x < 0, 0, x)",
rex::rex("pmax(x, y) is preferable to ifelse(x < y, y, x)"),
ifelse_censor_linter()
)
# other equivalents to base::ifelse()
expect_lint(
"if_else(x < 0, 0, x)",
rex::rex("pmax(x, y) is preferable to if_else(x < y, y, x)"),
ifelse_censor_linter()
)
expect_lint(
"fifelse(x < 0, 0, x)",
rex::rex("pmax(x, y) is preferable to fifelse(x < y, y, x)"),
ifelse_censor_linter()
)
# other equivalents for censoring
expect_lint(
"ifelse(x <= 0, 0, x)",
rex::rex("pmax(x, y) is preferable to ifelse(x <= y, y, x)"),
ifelse_censor_linter()
)
expect_lint(
"ifelse(x > 0, x, 0)",
rex::rex("pmax(x, y) is preferable to ifelse(x > y, x, y)"),
ifelse_censor_linter()
)
expect_lint(
"ifelse(x >= 0, x, 0)",
rex::rex("pmax(x, y) is preferable to ifelse(x >= y, x, y)"),
ifelse_censor_linter()
)
# pairwise min/max (similar to censoring)
expect_lint(
"ifelse(x < y, x, y)",
rex::rex("pmin(x, y) is preferable to ifelse(x < y, x, y)"),
ifelse_censor_linter()
)
expect_lint(
"ifelse(x >= y, y, x)",
rex::rex("pmin(x, y) is preferable to ifelse(x >= y, y, x)"),
ifelse_censor_linter()
)
# more complicated expression still matches
lines <- trim_some("
ifelse(2 + p + 104 + 1 > ncols,
ncols, 2 + p + 104 + 1
)
")
expect_lint(
lines,
rex::rex("pmin(x, y) is preferable to ifelse(x > y, y, x)"),
ifelse_censor_linter()
)
})
# TODO(michaelchirico): how easy would it be to strip parens when considering lint?
# e.g. ifelse(x < (kMaxIndex - 1), x, kMaxIndex - 1)
|
/tests/testthat/test-ifelse_censor_linter.R
|
permissive
|
cordis-dev/lintr
|
R
| false | false | 1,924 |
r
|
test_that("ifelse_censor_linter skips allowed usages", {
expect_lint("ifelse(x == 2, x, y)", NULL, ifelse_censor_linter())
expect_lint("ifelse(x > 2, x, y)", NULL, ifelse_censor_linter())
})
test_that("ifelse_censor_linter blocks simple disallowed usages", {
expect_lint(
"ifelse(x < 0, 0, x)",
rex::rex("pmax(x, y) is preferable to ifelse(x < y, y, x)"),
ifelse_censor_linter()
)
# other equivalents to base::ifelse()
expect_lint(
"if_else(x < 0, 0, x)",
rex::rex("pmax(x, y) is preferable to if_else(x < y, y, x)"),
ifelse_censor_linter()
)
expect_lint(
"fifelse(x < 0, 0, x)",
rex::rex("pmax(x, y) is preferable to fifelse(x < y, y, x)"),
ifelse_censor_linter()
)
# other equivalents for censoring
expect_lint(
"ifelse(x <= 0, 0, x)",
rex::rex("pmax(x, y) is preferable to ifelse(x <= y, y, x)"),
ifelse_censor_linter()
)
expect_lint(
"ifelse(x > 0, x, 0)",
rex::rex("pmax(x, y) is preferable to ifelse(x > y, x, y)"),
ifelse_censor_linter()
)
expect_lint(
"ifelse(x >= 0, x, 0)",
rex::rex("pmax(x, y) is preferable to ifelse(x >= y, x, y)"),
ifelse_censor_linter()
)
# pairwise min/max (similar to censoring)
expect_lint(
"ifelse(x < y, x, y)",
rex::rex("pmin(x, y) is preferable to ifelse(x < y, x, y)"),
ifelse_censor_linter()
)
expect_lint(
"ifelse(x >= y, y, x)",
rex::rex("pmin(x, y) is preferable to ifelse(x >= y, y, x)"),
ifelse_censor_linter()
)
# more complicated expression still matches
lines <- trim_some("
ifelse(2 + p + 104 + 1 > ncols,
ncols, 2 + p + 104 + 1
)
")
expect_lint(
lines,
rex::rex("pmin(x, y) is preferable to ifelse(x > y, y, x)"),
ifelse_censor_linter()
)
})
# TODO(michaelchirico): how easy would it be to strip parens when considering lint?
# e.g. ifelse(x < (kMaxIndex - 1), x, kMaxIndex - 1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmnet_utils.R
\name{make_roc2}
\alias{make_roc2}
\title{make_roc2}
\usage{
make_roc2(roc)
}
\description{
make_roc2
}
|
/man/make_roc2.Rd
|
no_license
|
kevinmhadi/khtools
|
R
| false | true | 197 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmnet_utils.R
\name{make_roc2}
\alias{make_roc2}
\title{make_roc2}
\usage{
make_roc2(roc)
}
\description{
make_roc2
}
|
# Poker hands dataset
#### load data ####
poker = read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-training-true.data",
header = FALSE, sep = ",", strip.white = TRUE, na.strings="NA", stringsAsFactors = TRUE)
names(poker)<- c("s1","c1","s2","c2","s3","c3","s4","c4","s5","c5","Class")
poker = na.omit(poker)
#Class as factor
poker$Class = as.factor(poker$Class)
summary(poker$Class)
#### process data ####
new <- poker[,c(2,4,6,8,10,1,3,5,7,9,11)]
new["d1"] <- NA
new["d2"] <- NA
new["d3"] <- NA
new["d4"] <- NA
new["d5"] <- NA
new["flush"] <- NA
for(i in 1:nrow(new)){
  #sort the card ranks, then calculate the differences between cards
  x <- sort(unlist(new[i, 1:5]))
  new[i, "d1"] <- x[2] - x[1]
  new[i, "d2"] <- x[3] - x[2]
  new[i, "d3"] <- x[4] - x[3]
  new[i, "d4"] <- x[5] - x[4]
  new[i, "d5"] <- x[5] - x[1]
#boolean flush
if(new[i,"s1"] == new[i,"s2"] & new[i,"s1"] == new[i,"s3"] & new[i,"s1"] == new[i,"s4"] & new[i,"s1"] == new[i,"s5"]){
new[i,"flush"] <- 1
}
else {
new[i,"flush"] <- 0
}
}
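## Vectorized alternative (illustrative sketch, added; not in the original script):
## computes the same rank-difference and flush features without the row-by-row loop.
if (FALSE) {
  ranks <- t(apply(new[, c("c1", "c2", "c3", "c4", "c5")], 1, sort))
  new$d1 <- ranks[, 2] - ranks[, 1]
  new$d2 <- ranks[, 3] - ranks[, 2]
  new$d3 <- ranks[, 4] - ranks[, 3]
  new$d4 <- ranks[, 5] - ranks[, 4]
  new$d5 <- ranks[, 5] - ranks[, 1]
  new$flush <- as.integer(new$s1 == new$s2 & new$s1 == new$s3 &
                            new$s1 == new$s4 & new$s1 == new$s5)
}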
#### write new data ####
myData <- new[, c(12,13,14,15,16,17,11)]
# write.csv(myData, file = "feature_train.csv", row.names = FALSE)
#sort by class
myData <- myData[order(myData$Class, decreasing = TRUE),]
write.csv(myData, file = "feature_train_sort.csv", row.names = FALSE)
|
/processing.R
|
no_license
|
Eulerbird/Poker-Hand-Prediction-R-
|
R
| false | false | 1,433 |
r
|
# Poker hands dataset
#### load data ####
poker = read.csv("http://archive.ics.uci.edu/ml/machine-learning-databases/poker/poker-hand-training-true.data",
header = FALSE, sep = ",", strip.white = TRUE, na.strings="NA", stringsAsFactors = TRUE)
names(poker)<- c("s1","c1","s2","c2","s3","c3","s4","c4","s5","c5","Class")
poker = na.omit(poker)
#Class as factor
poker$Class = as.factor(poker$Class)
summary(poker$Class)
#### process data ####
new <- poker[,c(2,4,6,8,10,1,3,5,7,9,11)]
new["d1"] <- NA
new["d2"] <- NA
new["d3"] <- NA
new["d4"] <- NA
new["d5"] <- NA
new["flush"] <- NA
for(i in 1:nrow(new)){
  #sort the card ranks, then calculate the differences between cards
  x <- sort(unlist(new[i, 1:5]))
  new[i, "d1"] <- x[2] - x[1]
  new[i, "d2"] <- x[3] - x[2]
  new[i, "d3"] <- x[4] - x[3]
  new[i, "d4"] <- x[5] - x[4]
  new[i, "d5"] <- x[5] - x[1]
#boolean flush
if(new[i,"s1"] == new[i,"s2"] & new[i,"s1"] == new[i,"s3"] & new[i,"s1"] == new[i,"s4"] & new[i,"s1"] == new[i,"s5"]){
new[i,"flush"] <- 1
}
else {
new[i,"flush"] <- 0
}
}
#### write new data ####
myData <- new[, c(12,13,14,15,16,17,11)]
# write.csv(myData, file = "feature_train.csv", row.names = FALSE)
#sort by class
myData <- myData[order(myData$Class, decreasing = TRUE),]
write.csv(myData, file = "feature_train_sort.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeOperator.R
\name{makeOperator}
\alias{makeOperator}
\title{Construct evolutionary operator.}
\usage{
makeOperator(operator, name, description = NULL,
supported = getAvailableRepresentations(), params = list())
}
\arguments{
\item{operator}{[\code{function}]\cr
Actual mutation operator.}
\item{name}{[\code{character(1)}]\cr
Name of the operator.}
\item{description}{[\code{character(1)}]\cr
Short description of how the mutator works.
Default is \code{NULL} which means no description at all.}
\item{supported}{[\code{character}]\cr
Vector of names of supported parameter representations. Possible choices:
\dQuote{permutation}, \dQuote{float}, \dQuote{binary} or \dQuote{custom}.}
\item{params}{[\code{list}]\cr
Named list of the parameters the operator has been initialized with.
Default is the empty list.}
}
\value{
[\code{ecr_operator}] Operator object.
}
\description{
Helper function which constructs an evolutionary operator.
}
\note{
In general you will not need this function, but rather one of its
deriviatives like \code{\link{makeMutator}} or \code{\link{makeSelector}}.
}
|
/man/makeOperator.Rd
|
no_license
|
niiwise/ecr2
|
R
| false | true | 1,176 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeOperator.R
\name{makeOperator}
\alias{makeOperator}
\title{Construct evolutionary operator.}
\usage{
makeOperator(operator, name, description = NULL,
supported = getAvailableRepresentations(), params = list())
}
\arguments{
\item{operator}{[\code{function}]\cr
Actual mutation operator.}
\item{name}{[\code{character(1)}]\cr
Name of the operator.}
\item{description}{[\code{character(1)}]\cr
Short description of how the mutator works.
Default is \code{NULL} which means no description at all.}
\item{supported}{[\code{character}]\cr
Vector of names of supported parameter representations. Possible choices:
\dQuote{permutation}, \dQuote{float}, \dQuote{binary} or \dQuote{custom}.}
\item{params}{[\code{list}]\cr
Named list of the parameters the operator has been initialized with.
Default is the empty list.}
}
\value{
[\code{ecr_operator}] Operator object.
}
\description{
Helper function which constructs an evolutionary operator.
}
\note{
In general you will not need this function, but rather one of its
derivatives like \code{\link{makeMutator}} or \code{\link{makeSelector}}.
}
|
train = read.csv("train.csv")
test = read.csv("test.csv")
str(train)
summary(train)
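# impute missing values with the column mean (simple baseline, applied column-wise to train and test)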
for(i in 1:ncol(train)){
train[is.na(train[,i]), i] <- mean(train[,i], na.rm = TRUE)
}
for(j in 1:ncol(test)){
test[is.na(test[,j]), j] <- mean(test[,j], na.rm = TRUE)
}
summary(train)
summary(test)
str(train)
trainid = train$ID
testid = test$ID
train$ID = NULL
test$ID = NULL
str(train)
train$Outcome = as.factor(train$Outcome)
library(caret)
library(nnet)
train2 = train[sample(nrow(train), 10000),]
index = createDataPartition(train2$Outcome , p = 0.5, list = FALSE)
trainSet <- train2[ index,]
testSet <- train2[-index,]
str(trainSet)
trainSet$Outcome = as.factor(trainSet$Outcome)
# single-hidden-layer neural net: 10 hidden units, weight decay 0.01
mynnet = nnet(Outcome~., data = train, size = 10,
              decay = 0.01, maxit = 100 )
# evaluate on the held-out split (predictions must come from testSet to match its labels)
pr_val = predict(mynnet, testSet)
table(testSet$Outcome, pr_val > 0.5)
# predict on the real test set for submission
pr = predict(mynnet, test)
dd = data.frame(ID = testid, Outcome = pr)
write.csv(dd, "dd.csv",row.names = FALSE)
|
/Analytics-Vidhya/Stock_Market.R
|
no_license
|
tinkudhull/Data_Science
|
R
| false | false | 951 |
r
|
train = read.csv("train.csv")
test = read.csv("test.csv")
str(train)
summary(train)
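# impute missing values with the column mean (simple baseline, applied column-wise to train and test)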
for(i in 1:ncol(train)){
train[is.na(train[,i]), i] <- mean(train[,i], na.rm = TRUE)
}
for(j in 1:ncol(test)){
test[is.na(test[,j]), j] <- mean(test[,j], na.rm = TRUE)
}
summary(train)
summary(test)
str(train)
trainid = train$ID
testid = test$ID
train$ID = NULL
test$ID = NULL
str(train)
train$Outcome = as.factor(train$Outcome)
library(caret)
library(nnet)
train2 = train[sample(nrow(train), 10000),]
index = createDataPartition(train2$Outcome , p = 0.5, list = FALSE)
trainSet <- train2[ index,]
testSet <- train2[-index,]
str(trainSet)
trainSet$Outcome = as.factor(trainSet$Outcome)
# single-hidden-layer neural net: 10 hidden units, weight decay 0.01
mynnet = nnet(Outcome~., data = train, size = 10,
              decay = 0.01, maxit = 100 )
# evaluate on the held-out split (predictions must come from testSet to match its labels)
pr_val = predict(mynnet, testSet)
table(testSet$Outcome, pr_val > 0.5)
# predict on the real test set for submission
pr = predict(mynnet, test)
dd = data.frame(ID = testid, Outcome = pr)
write.csv(dd, "dd.csv",row.names = FALSE)
|
getwd()
setwd("C:/Users/schnuri/Desktop/Neuer Ordner/Dataset/")
#get from dataset the monthly corrected values for NAs
#see what data you have
#eliminate columns you dont need
data = co2month[,c(3,5)]
colnames(data)= c("year", "co2")
#visualize our data
attach(data)
plot(year, co2, type="n",las=1, xlab="Year", ylab="CO2 conc. (ppm)", main="CO2 concentration in the atmosphere")
grid (NULL,NULL, lty = 6, col = "cornsilk2")
points(year, co2, col="cornflowerblue" )
#cant see much, maybe smooth the curve
k <- 5
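# centered moving average with window k to bring out the long-term trend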
lines(year,filter(co2, rep(1/k,k)),col = 'red', type="l", lwd = 3 )
#check for data mistakes, etc.: visualize our data
x <- co2
op <- par(mfrow = c(1,2),
mar = c(5,4,1,2)+.1,
oma = c(0,0,2,0))
hist(x, freq=F,
col = "light blue",
xlab = "",
main = "")
qqnorm(x,
main = "")
qqline(x,
col = 'red')
par(op)
mtext("CO2 Concentration (ppm)",
line = 2.5,
font = 2,
cex = 1.2)
#differencing our variable
x <- diff(co2)
op <- par(mfrow = c(1,2),
mar = c(5,4,1,2)+.1,
oma = c(0,0,2,0))
hist(x,
col = "light blue",
xlab = "",
main = "")
qqnorm(x,
main = "")
qqline(x,
col = 'red')
par(op)
mtext("CO2 Concentration (ppm) increments",
line = 2.5,
font = 2,
cex = 1.2)
op <- par(mfrow = c(3,1),
mar = c(2,4,1,2)+.1,
oma = c(0,0,2,0))
acf(x, xlab = "")
pacf(x, xlab = "")
spectrum(x, xlab = "", main = "")
par(op)
mtext("CO2 Concentration (ppm) diagnostics",
line = 2.5,
font = 2,
cex = 1.2)
#autocorrelation
#what it should look like, if you dont have autocorr. problems:
n <- 200
x <- rnorm(n)
acf(x)
#lag 0 is always y=1, because the data is compared with the true data, which are equal
x <- co2
acf(x)
#make a monthly time series
newco2=ts(co2, 1958,2014,12 )
class(newco2)
#decompose the time series into the trend, seasonal fluctuation and the random white noise
dec = decompose(newco2)
plot(dec)
#fit a linear model: CO2 concentration as a function of time (decimal years), 1958-2014
data.lm = lm( co2 ~ year)
#fit predict values
MyData=data.frame(year=seq(from=(1958),to=2014, by=0.1))
G=predict(data.lm, newdata=MyData, type="response", se=T) #poisson: type="link", binomial: type="response"
plot(year, co2, type="l", col="cornflowerblue", las=1, xlab="Year", ylab="CO2 Conc. (ppm)", main="CO2 Concentration in the Atmosphere")
#go for confidence interval
F=(G$fit)
FSUP=(G$fit+1.96*G$se.fit) # make upper conf. int.
FSLOW=(G$fit-1.96*G$se.fit) # make lower conf. int.
lines(MyData$year, F, lty=1, col="darkblue")
lines(MyData$year, FSUP, lty=2, col="darkblue")  # upper 95% confidence band
lines(MyData$year, FSLOW, lty=2, col="darkblue") # lower 95% confidence band
#visualize the residuals of the linear model:
par(mfrow=c(2,2))
plot(data.lm)
par(mfrow=c(1,1))
#problems in spread of variance and normal distribution
#check for normal distribution of residuals:
shapiro.test(data.lm$residuals)
#highly not normally distributed
#check for differences of co2 values to
qqnorm(diff(co2))
abline(h=0)
#look at estimated parameters: close to underlying parameters:
coef(data.lm)
#look at standard errors
sqrt(diag(vcov(data.lm)))
#highly underestimated, the true standard errors are larger!
summary(data.lm)
#the p-value is not reliable evidence here, misleading
#data transformation not useful!
#check for stationarity
library(tseries)
adf.test(co2, alternative = "stationary")
#is non stationary, has a trend
#look at correlogram
#of autocorrelation (seasonal trend)
print(acf(data.lm$residuals))
#of partial autocorrelation (eliminates intermediate correlation between x(t) and x(t-1))
pacf(data.lm$residuals)
data.pacf=(pacf(data.lm$residuals))
data.pacf[1]
#make gls
library(nlme)
data.gls = gls(co2 ~ year,cor= corAR1(0.94)) #use lag one data.acf[2]
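# corAR1(0.94) models AR(1)-correlated errors; 0.94 is roughly the lag-1 autocorrelation of the lm residuals seen above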
data.gls
summary(data.gls)
summary(data.lm)
#alright, RSE of new gls is less underestimated
#check confidence interval
confint(data.gls)
#bigger interval
#parameter estimates are significant, trend is significant
plot(data.gls)
plot(data.lm)
#the spread of the variances is already smaller
#look at correlogram
#of autocorrelation (seasonal trend)
datagls.acf=(acf(data.gls$residuals))
print(acf(data.gls$residuals))
#of partial autocorrelation (eliminates intermediate correlation between x(t) and x(t-1))
pacf(data.gls$residuals)
data.pacf=(pacf(data.gls$residuals))
datagls.acf[2]
|
/Rscript/visualization.R
|
no_license
|
Mallypop/Time-Series-Project
|
R
| false | false | 4,371 |
r
|
getwd()
setwd("C:/Users/schnuri/Desktop/Neuer Ordner/Dataset/")
#get from dataset the monthly corrected values for NAs
#see what data you have
#eliminate columns you dont need
data = co2month[,c(3,5)]
colnames(data)= c("year", "co2")
#visualize our data
attach(data)
plot(year, co2, type="n",las=1, xlab="Year", ylab="CO2 conc. (ppm)", main="CO2 concentration in the atmosphere")
grid (NULL,NULL, lty = 6, col = "cornsilk2")
points(year, co2, col="cornflowerblue" )
#cant see much, maybe smooth the curve
k <- 5
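# centered moving average with window k to bring out the long-term trend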
lines(year,filter(co2, rep(1/k,k)),col = 'red', type="l", lwd = 3 )
#check for data mistakes, etc.: visualize our data
x <- co2
op <- par(mfrow = c(1,2),
mar = c(5,4,1,2)+.1,
oma = c(0,0,2,0))
hist(x, freq=F,
col = "light blue",
xlab = "",
main = "")
qqnorm(x,
main = "")
qqline(x,
col = 'red')
par(op)
mtext("CO2 Concentration (ppm)",
line = 2.5,
font = 2,
cex = 1.2)
#differencing our variable
x <- diff(co2)
op <- par(mfrow = c(1,2),
mar = c(5,4,1,2)+.1,
oma = c(0,0,2,0))
hist(x,
col = "light blue",
xlab = "",
main = "")
qqnorm(x,
main = "")
qqline(x,
col = 'red')
par(op)
mtext("CO2 Concentration (ppm) increments",
line = 2.5,
font = 2,
cex = 1.2)
op <- par(mfrow = c(3,1),
mar = c(2,4,1,2)+.1,
oma = c(0,0,2,0))
acf(x, xlab = "")
pacf(x, xlab = "")
spectrum(x, xlab = "", main = "")
par(op)
mtext("CO2 Concentration (ppm) diagnostics",
line = 2.5,
font = 2,
cex = 1.2)
#autocorrelation
#what it should look like, if you dont have autocorr. problems:
n <- 200
x <- rnorm(n)
acf(x)
#lag 0 is always y=1, because the data is compared with the true data, which are equal
x <- co2
acf(x)
#make a monthly time series
newco2=ts(co2, 1958,2014,12 )
class(newco2)
#decompose the time series into the trend, seasonal fluctuation and the random white noise
dec = decompose(newco2)
plot(dec)
#fit a linear model: CO2 concentration as a function of time (decimal years), 1958-2014
data.lm = lm( co2 ~ year)
#fit predict values
MyData=data.frame(year=seq(from=(1958),to=2014, by=0.1))
G=predict(data.lm, newdata=MyData, type="response", se=T) #poisson: type="link", binomial: type="response"
plot(year, co2, type="l", col="cornflowerblue", las=1, xlab="Year", ylab="CO2 Conc. (ppm)", main="CO2 Concentration in the Atmosphere")
#go for confidence interval
F=(G$fit)
FSUP=(G$fit+1.96*G$se.fit) # make upper conf. int.
FSLOW=(G$fit-1.96*G$se.fit) # make lower conf. int.
lines(MyData$year, F, lty=1, col="darkblue")
lines(MyData$year, FSUP, lty=2, col="darkblue")  # upper 95% confidence band
lines(MyData$year, FSLOW, lty=2, col="darkblue") # lower 95% confidence band
#visualize the residuals of the linear model:
par(mfrow=c(2,2))
plot(data.lm)
par(mfrow=c(1,1))
#problems in spread of variance and normal distribution
#check for normal distribution of residuals:
shapiro.test(data.lm$residuals)
#highly not normally distributed
#check for differences of co2 values to
qqnorm(diff(co2))
abline(h=0)
#look at estimated parameters: close to underlying parameters:
coef(data.lm)
#look at standard errors
sqrt(diag(vcov(data.lm)))
#highly underestimated, the true standard errors are larger!
summary(data.lm)
#the p-value is not reliable evidence here, misleading
#data transformation not useful!
#check for stationarity
library(tseries)
adf.test(co2, alternative = "stationary")
#is non stationary, has a trend
#look at correlogram
#of autocorrelation (seasonal trend)
print(acf(data.lm$residuals))
#of partial autocorrelation (eliminates intermediate correlation between x(t) and x(t-1))
pacf(data.lm$residuals)
data.pacf=(pacf(data.lm$residuals))
data.pacf[1]
#make gls
library(nlme)
data.gls = gls(co2 ~ year,cor= corAR1(0.94)) #use lag one data.acf[2]
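# corAR1(0.94) models AR(1)-correlated errors; 0.94 is roughly the lag-1 autocorrelation of the lm residuals seen above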
data.gls
summary(data.gls)
summary(data.lm)
#alright, RSE of new gls is less underestimated
#check confidence interval
confint(data.gls)
#bigger interval
#parameter estimates are significant, trend is significant
plot(data.gls)
plot(data.lm)
#the spread of the variances is already smaller
#look at correlogram
#of autocorrelation (seasonal trend)
datagls.acf=(acf(data.gls$residuals))
print(acf(data.gls$residuals))
#of partial autocorrelation (eliminates intermediate correlation between x(t) and x(t-1))
pacf(data.gls$residuals)
data.pacf=(pacf(data.gls$residuals))
datagls.acf[2]
|
# This tests the mapCellsToEdges function.
# library(testthat); library(TSCAN); source('test-mapping.R')
set.seed(10001)
test_that("mapCellsToEdges works correctly in general", {
y <- matrix(rnorm(1000), ncol=10)
clust <- kmeans(y,5)$cluster
mst <- createClusterMST(y, cluster=clust)
mapping <- mapCellsToEdges(y, mst, clusters=clust)
expect_true(all(mapping$left.cluster == clust | mapping$right.cluster ==clust))
# Expect the edge lengths to match up.
mat <- mst[]
edge.lens <- mat[cbind(
match(mapping$left.cluster, colnames(mat)),
match(mapping$right.cluster, colnames(mat))
)]
expect_equal(edge.lens, mapping$left.distance + mapping$right.distance)
# Expect distances along the MST to be shorter than the actual distances.
centered <- rowmean(y, clust)
expect_true(all(
mapping$left.distance <= sqrt(rowSums((y - centered[mapping$left.cluster,])^2))
))
expect_true(all(
mapping$right.distance <= sqrt(rowSums((y - centered[mapping$right.cluster,])^2))
))
})
test_that("mapCellsToEdges maps elements onto vertices correctly", {
y <- matrix(rnorm(100), ncol=10)
rownames(y) <- 1:10
mst <- createClusterMST(y, cluster=NULL)
mapping <- mapCellsToEdges(y, mst, clusters=rownames(y))
expect_true(all(
(mapping$left.cluster==1:10 & mapping$left.distance < 1e-8) |
(mapping$right.cluster==1:10 & mapping$right.distance < 1e-8)
))
# Works on extremes correctly.
y <- matrix(1:10, nrow=10, ncol=2)
rownames(y) <- 1:10
mst <- createClusterMST(y, cluster=NULL)
mapping <- mapCellsToEdges(rbind(c(0,0), c(100, 100)), mst, clusters=NULL)
expect_true(mapping$right.cluster[1]==1 && mapping$right.distance[1]==0)
expect_true(mapping$left.cluster[2]==10 && mapping$left.distance[2]==0)
})
set.seed(10002)
test_that("mapCellsToEdges handles free mapping correctly", {
y <- matrix(rnorm(1000), ncol=10)
clust <- kmeans(y,5)$cluster
mst <- createClusterMST(y, cluster=clust)
y2 <- matrix(rnorm(1000), ncol=10)
free <- mapCellsToEdges(y2, mst, clusters=NULL)
# Expect the edge lengths to match up.
mat <- mst[]
edge.lens <- mat[cbind(
match(free$left.cluster, colnames(mat)),
match(free$right.cluster, colnames(mat))
)]
expect_equal(edge.lens, free$left.distance + free$right.distance)
# Expect the same behavior if we forced it to a cluster.
forced1 <- mapCellsToEdges(y2, mst, clusters=free$left.cluster)
expect_equal(free, forced1)
forced2 <- mapCellsToEdges(y2, mst, clusters=free$right.cluster)
expect_equal(free, forced2)
})
set.seed(100100)
test_that("mapCellsToEdges works with SE and SCE objects", {
y <- matrix(rnorm(1000), ncol=10)
clust <- kmeans(y,5)$cluster
library(SingleCellExperiment)
se <- SummarizedExperiment(t(y))
mst <- createClusterMST(se, cluster=clust, assay.type=1)
map <- mapCellsToEdges(se, mst, cluster=clust, assay.type=1)
expect_identical(map, mapCellsToEdges(y, mst, cluster=clust))
sce <- SingleCellExperiment(t(y))
map2 <- mapCellsToEdges(sce, mst, cluster=clust, assay.type=1)
expect_identical(map, map2)
assay(sce) <- 2*assay(sce) # check it isn't just pulling it out of the assays.
reducedDim(sce, "PCA") <- y
map3 <- mapCellsToEdges(sce, mst, cluster=clust, use.dimred="PCA")
expect_identical(map, map3)
})
|
/tests/testthat/test-mapping.R
|
no_license
|
vd4mmind/TSCAN
|
R
| false | false | 3,442 |
r
|
# This tests the mapCellsToEdges function.
# library(testthat); library(TSCAN); source('test-mapping.R')
set.seed(10001)
test_that("mapCellsToEdges works correctly in general", {
y <- matrix(rnorm(1000), ncol=10)
clust <- kmeans(y,5)$cluster
mst <- createClusterMST(y, cluster=clust)
mapping <- mapCellsToEdges(y, mst, clusters=clust)
expect_true(all(mapping$left.cluster == clust | mapping$right.cluster ==clust))
# Expect the edge lengths to match up.
mat <- mst[]
edge.lens <- mat[cbind(
match(mapping$left.cluster, colnames(mat)),
match(mapping$right.cluster, colnames(mat))
)]
expect_equal(edge.lens, mapping$left.distance + mapping$right.distance)
# Expect distances along the MST to be shorter than the actual distances.
centered <- rowmean(y, clust)
expect_true(all(
mapping$left.distance <= sqrt(rowSums((y - centered[mapping$left.cluster,])^2))
))
expect_true(all(
mapping$right.distance <= sqrt(rowSums((y - centered[mapping$right.cluster,])^2))
))
})
test_that("mapCellsToEdges maps elements onto vertices correctly", {
y <- matrix(rnorm(100), ncol=10)
rownames(y) <- 1:10
mst <- createClusterMST(y, cluster=NULL)
mapping <- mapCellsToEdges(y, mst, clusters=rownames(y))
expect_true(all(
(mapping$left.cluster==1:10 & mapping$left.distance < 1e-8) |
(mapping$right.cluster==1:10 & mapping$right.distance < 1e-8)
))
# Works on extremes correctly.
y <- matrix(1:10, nrow=10, ncol=2)
rownames(y) <- 1:10
mst <- createClusterMST(y, cluster=NULL)
mapping <- mapCellsToEdges(rbind(c(0,0), c(100, 100)), mst, clusters=NULL)
expect_true(mapping$right.cluster[1]==1 && mapping$right.distance[1]==0)
expect_true(mapping$left.cluster[2]==10 && mapping$left.distance[2]==0)
})
set.seed(10002)
test_that("mapCellsToEdges handles free mapping correctly", {
y <- matrix(rnorm(1000), ncol=10)
clust <- kmeans(y,5)$cluster
mst <- createClusterMST(y, cluster=clust)
y2 <- matrix(rnorm(1000), ncol=10)
free <- mapCellsToEdges(y2, mst, clusters=NULL)
# Expect the edge lengths to match up.
mat <- mst[]
edge.lens <- mat[cbind(
match(free$left.cluster, colnames(mat)),
match(free$right.cluster, colnames(mat))
)]
expect_equal(edge.lens, free$left.distance + free$right.distance)
# Expect the same behavior if we forced it to a cluster.
forced1 <- mapCellsToEdges(y2, mst, clusters=free$left.cluster)
expect_equal(free, forced1)
forced2 <- mapCellsToEdges(y2, mst, clusters=free$right.cluster)
expect_equal(free, forced2)
})
set.seed(100100)
test_that("mapCellsToEdges works with SE and SCE objects", {
y <- matrix(rnorm(1000), ncol=10)
clust <- kmeans(y,5)$cluster
library(SingleCellExperiment)
se <- SummarizedExperiment(t(y))
mst <- createClusterMST(se, cluster=clust, assay.type=1)
map <- mapCellsToEdges(se, mst, cluster=clust, assay.type=1)
expect_identical(map, mapCellsToEdges(y, mst, cluster=clust))
sce <- SingleCellExperiment(t(y))
map2 <- mapCellsToEdges(sce, mst, cluster=clust, assay.type=1)
expect_identical(map, map2)
assay(sce) <- 2*assay(sce) # check it isn't just pulling it out of the assays.
reducedDim(sce, "PCA") <- y
map3 <- mapCellsToEdges(sce, mst, cluster=clust, use.dimred="PCA")
expect_identical(map, map3)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/senCloudMask.R
\name{senCloudMask}
\alias{senCloudMask}
\title{Create cloud masks for Sentinel-2 images}
\usage{
senCloudMask(
src,
AppRoot,
out.name,
resbands,
sensitivity = 50,
overwrite = FALSE,
...
)
}
\arguments{
\item{src}{the path to the folder with the "\code{S2MSI2A}" images.}
\item{AppRoot}{the directory where the cloud masks are saved.}
\item{out.name}{the name of the folder that stores the outputs.
If the argument is not defined, the folder will be named "CloudMask".}
\item{resbands}{a \code{character} vector argument. Defines the band resolution
used to create the cloud mask. Ex "20m" or "60m".}
\item{sensitivity}{a \code{numeric} argument. Defines the sensitivity of the
cloud detection method.}
\item{overwrite}{logical argument. If \code{TRUE}, overwrites the existing
images with the same name.}
\item{...}{arguments for nested functions.
\itemize{
\item \code{dates} a vector with the capturing dates being considered
for mosaicking. If not supplied, all dates are mosaicked.
}}
}
\value{
this function does not return anything. It saves the cloud masks (CLD)
as GTiff files in the \code{AppRoot} directory.
}
\description{
\code{senCloudMask} creates cloud masks derived from the cloud probability
band (\code{CLDPRB}) band from the "\code{S2MSI2A}" product.
}
\details{
The valid threshold range for \code{sensitivity} is 0-100. By default,
the argument is set to 50.
}
\examples{
\dontrun{
# load a spatial polygon object of Navarre
data(ex.navarre)
# Download S2MSI1C products sensed by Sentinel-2
# between the julian days 210 and 218, 2018
wdir <- file.path(tempdir(),"Path_for_downloading_folder")
print(wdir)
senDownSearch(startDate = as.Date("2018210", "\%Y\%j"),
endDate = as.Date("2018218", "\%Y\%j"),
platform = "Sentinel-2",
extent = ex.navarre,
product = "S2MSI2A",
pathrow = c("R094"),
username = "username",
password = "password",
AppRoot = wdir)
# define the paths to the Sentinel-2 images and the
# folder with the unzipped images
wdir.sen <- file.path(wdir, "Sentinel-2")
wdir.sen.unzip <- file.path(wdir.sen, "unzip")
# mosaic the Sentinel-2 images
senMosaic(wdir.sen.unzip,
AppRoot = wdir.sen,
gutils = TRUE,
out.name = "Navarre")
# calculate the cloud mask
wdir.sen.navarre <- file.path(wdir.sen, "Navarre")
senCloudMask(src = wdir.sen.navarre,
resbands = "60m",
overwrite = TRUE,
sensitivity = 98,
AppRoot = wdir.sen)
# define the path for the Sentinel-2 cloud mask
wdir.sen.cloud <- file.path(wdir.sen, "CloudMask")
# select B02 images of 60 meters
tiles.sen.navarre <- list.files(wdir.sen.navarre,
full.names = TRUE,
recursive = TRUE,
pattern = "\\\\.tif$")
tiles.sen.navarre.b2 <- tiles.sen.navarre[grepl("B02",tiles.sen.navarre)]
tiles.sen.navarre.b2 <- tiles.sen.navarre.b2[grepl("60m",tiles.sen.navarre.b2)]
# generate a 60-meter resolution cloud mask
tiles.sen.cloud <- list.files(wdir.sen.cloud,
full.names = TRUE,
pattern = "\\\\.tif$")
tiles.sen.cloud.60 <- tiles.sen.cloud[grepl("60m",tiles.sen.cloud)]
# remove the cloud mask from b02 tiles
img.sen.navarre.b2 <- stack(tiles.sen.navarre.b2)
img.sen.cloud.60 <- stack(tiles.sen.cloud.60)
img.sen.navarre.b2.cloud.free <- img.sen.navarre.b2*img.sen.cloud.60
# plot b2 cloud free layers
spplot(img.sen.navarre.b2.cloud.free)
}
}
|
/man/senCloudMask.Rd
|
no_license
|
cran/RGISTools
|
R
| false | true | 3,821 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/senCloudMask.R
\name{senCloudMask}
\alias{senCloudMask}
\title{Create cloud masks for Sentinel-2 images}
\usage{
senCloudMask(
src,
AppRoot,
out.name,
resbands,
sensitivity = 50,
overwrite = FALSE,
...
)
}
\arguments{
\item{src}{the path to the folder with the "\code{S2MSI2A}" images.}
\item{AppRoot}{the directory where the cloud masks are saved.}
\item{out.name}{the name of the folder that stores the outputs.
If the argument is not defined, the folder will be named "CloudMask".}
\item{resbands}{a \code{character} vector argument. Defines the band resolution
used to create the cloud mask. Ex "20m" or "60m".}
\item{sensitivity}{a \code{numeric} argument. Defines the sensitivity of the
cloud detection method.}
\item{overwrite}{logical argument. If \code{TRUE}, overwrites the existing
images with the same name.}
\item{...}{arguments for nested functions.
\itemize{
\item \code{dates} a vector with the capturing dates being considered
for mosaicking. If not supplied, all dates are mosaicked.
}}
}
\value{
this function does not return anything. It saves the cloud masks (CLD)
as GTiff files in the \code{AppRoot} directory.
}
\description{
\code{senCloudMask} creates cloud masks derived from the cloud probability
band (\code{CLDPRB}) band from the "\code{S2MSI2A}" product.
}
\details{
The valid threshold range for \code{sensitivity} is 0-100. By default,
the argument is set to 50.
}
\examples{
\dontrun{
# load a spatial polygon object of Navarre
data(ex.navarre)
# Download S2MSI1C products sensed by Sentinel-2
# between the julian days 210 and 218, 2018
wdir <- file.path(tempdir(),"Path_for_downloading_folder")
print(wdir)
senDownSearch(startDate = as.Date("2018210", "\%Y\%j"),
endDate = as.Date("2018218", "\%Y\%j"),
platform = "Sentinel-2",
extent = ex.navarre,
product = "S2MSI2A",
pathrow = c("R094"),
username = "username",
password = "password",
AppRoot = wdir)
# define the paths to the Sentinel-2 images and the
# folder with the unzipped images
wdir.sen <- file.path(wdir, "Sentinel-2")
wdir.sen.unzip <- file.path(wdir.sen, "unzip")
# mosaic the Sentinel-2 images
senMosaic(wdir.sen.unzip,
AppRoot = wdir.sen,
gutils = TRUE,
out.name = "Navarre")
# calculate the cloud mask
wdir.sen.navarre <- file.path(wdir.sen, "Navarre")
senCloudMask(src = wdir.sen.navarre,
resbands = "60m",
overwrite = TRUE,
sensitivity = 98,
AppRoot = wdir.sen)
# define the path for the Sentinel-2 cloud mask
wdir.sen.cloud <- file.path(wdir.sen, "CloudMask")
# select B02 images of 60 meters
tiles.sen.navarre <- list.files(wdir.sen.navarre,
full.names = TRUE,
recursive = TRUE,
pattern = "\\\\.tif$")
tiles.sen.navarre.b2 <- tiles.sen.navarre[grepl("B02",tiles.sen.navarre)]
tiles.sen.navarre.b2 <- tiles.sen.navarre.b2[grepl("60m",tiles.sen.navarre.b2)]
# generate a 60-meter resolution cloud mask
tiles.sen.cloud <- list.files(wdir.sen.cloud,
full.names = TRUE,
pattern = "\\\\.tif$")
tiles.sen.cloud.60 <- tiles.sen.cloud[grepl("60m",tiles.sen.cloud)]
# remove the cloud mask from b02 tiles
img.sen.navarre.b2 <- stack(tiles.sen.navarre.b2)
img.sen.cloud.60 <- stack(tiles.sen.cloud.60)
img.sen.navarre.b2.cloud.free <- img.sen.navarre.b2*img.sen.cloud.60
# plot b2 cloud free layers
spplot(img.sen.navarre.b2.cloud.free)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pullGeno.R
\name{pullSnpGeno}
\alias{pullSnpGeno}
\title{Pull SNP genotypes}
\usage{
pullSnpGeno(pop, snpChip = 1, chr = NULL, asRaw = FALSE, simParam = NULL)
}
\arguments{
\item{pop}{an object of \code{\link{Pop-class}}}
\item{snpChip}{an integer. Indicates which SNP
chip's genotypes to retrieve.}
\item{chr}{a vector of chromosomes to retrieve. If NULL,
all chromosomes are retrieved.}
\item{asRaw}{return in raw (byte) format}
\item{simParam}{an object of \code{\link{SimParam}}}
}
\value{
Returns a matrix of SNP genotypes.
}
\description{
Retrieves SNP genotype data
}
\examples{
#Create founder haplotypes
founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#Set simulation parameters
SP = SimParam$new(founderPop)
SP$addTraitA(10)
SP$addSnpChip(5)
#Create population
pop = newPop(founderPop, simParam=SP)
pullSnpGeno(pop, simParam=SP)
}
|
/man/pullSnpGeno.Rd
|
no_license
|
cran/AlphaSimR
|
R
| false | true | 972 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pullGeno.R
\name{pullSnpGeno}
\alias{pullSnpGeno}
\title{Pull SNP genotypes}
\usage{
pullSnpGeno(pop, snpChip = 1, chr = NULL, asRaw = FALSE, simParam = NULL)
}
\arguments{
\item{pop}{an object of \code{\link{Pop-class}}}
\item{snpChip}{an integer. Indicates which SNP
chip's genotypes to retrieve.}
\item{chr}{a vector of chromosomes to retrieve. If NULL,
all chromosomes are retrieved.}
\item{asRaw}{return in raw (byte) format}
\item{simParam}{an object of \code{\link{SimParam}}}
}
\value{
Returns a matrix of SNP genotypes.
}
\description{
Retrieves SNP genotype data
}
\examples{
#Create founder haplotypes
founderPop = quickHaplo(nInd=10, nChr=1, segSites=15)
#Set simulation parameters
SP = SimParam$new(founderPop)
SP$addTraitA(10)
SP$addSnpChip(5)
#Create population
pop = newPop(founderPop, simParam=SP)
pullSnpGeno(pop, simParam=SP)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beta.div.r
\name{beta.div}
\alias{beta.div}
\title{Make a distance matrix of samples vs samples.}
\usage{
beta.div(biom, method, weighted = TRUE, tree = NULL)
}
\arguments{
\item{biom}{A \code{matrix}, \code{simple_triplet_matrix}, or \code{BIOM}
object, as returned from \link{read.biom}. For matrices, the rows and
columns are assumed to be the taxa and samples, respectively.}
\item{method}{The distance algorithm to use. Options are:
\bold{\dQuote{manhattan}}, \bold{\dQuote{euclidean}},
\bold{\dQuote{bray-curtis}}, \bold{\dQuote{jaccard}}, and
  \bold{\dQuote{unifrac}}. Non-ambiguous abbreviations of the method
  names are also accepted. A phylogenetic tree must be present in 
\code{biom} or explicitly provided via \code{tree=} to use the UniFrac methods.}
\item{weighted}{Take relative abundances into account. When
\code{weighted=FALSE}, only presence/absence is considered.}
\item{tree}{A \code{phylo} object representing the phylogenetic
relationships of the taxa in \code{biom}. Will be taken from the tree
embedded in the \code{biom} object if not explicitly specified. Only
required for computing UniFrac distance matrices.}
}
\value{
A distance matrix.
}
\description{
Make a distance matrix of samples vs samples.
}
\examples{
library(rbiom)
infile <- system.file("extdata", "hmp50.bz2", package = "rbiom")
biom <- read.biom(infile)
biom <- select(biom, 1:10)
dm <- beta.div(biom, 'unifrac')
as.matrix(dm)[1:4,1:4]
plot(hclust(dm))
}
|
/man/beta.div.Rd
|
no_license
|
cran/rbiom
|
R
| false | true | 1,624 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/beta.div.r
\name{beta.div}
\alias{beta.div}
\title{Make a distance matrix of samples vs samples.}
\usage{
beta.div(biom, method, weighted = TRUE, tree = NULL)
}
\arguments{
\item{biom}{A \code{matrix}, \code{simple_triplet_matrix}, or \code{BIOM}
object, as returned from \link{read.biom}. For matrices, the rows and
columns are assumed to be the taxa and samples, respectively.}
\item{method}{The distance algorithm to use. Options are:
\bold{\dQuote{manhattan}}, \bold{\dQuote{euclidean}},
\bold{\dQuote{bray-curtis}}, \bold{\dQuote{jaccard}}, and
  \bold{\dQuote{unifrac}}. Non-ambiguous abbreviations of the method
  names are also accepted. A phylogenetic tree must be present in 
\code{biom} or explicitly provided via \code{tree=} to use the UniFrac methods.}
\item{weighted}{Take relative abundances into account. When
\code{weighted=FALSE}, only presence/absence is considered.}
\item{tree}{A \code{phylo} object representing the phylogenetic
relationships of the taxa in \code{biom}. Will be taken from the tree
embedded in the \code{biom} object if not explicitly specified. Only
required for computing UniFrac distance matrices.}
}
\value{
A distance matrix.
}
\description{
Make a distance matrix of samples vs samples.
}
\examples{
library(rbiom)
infile <- system.file("extdata", "hmp50.bz2", package = "rbiom")
biom <- read.biom(infile)
biom <- select(biom, 1:10)
dm <- beta.div(biom, 'unifrac')
as.matrix(dm)[1:4,1:4]
plot(hclust(dm))
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinymaterial)
library(shinydashboard)
library(tidyverse)
library(MASS)
library(fBasics)
library(urca)
library(TSA)
library(networkD3)
library(readr)
ui <- dashboardPage(
dashboardHeader(title = "Madres solteras"),
dashboardSidebar(
sidebarMenu(
menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")),
menuItem("Grafos", icon = icon("th"), tabName = "Grafos",
badgeLabel = "new", badgeColor = "green")
)
),
dashboardBody(
tabItems(
tabItem(tabName = "dashboard",
h2("Parámetros para hallar el resultado de la predicción de la satisfacción de una madre soltera"),
numericInput("textAge", h3("Ingrese la edad de la madre soltera:"), 0, min = 15, max = 100),
numericInput("textAgeMunicipality", h3("Ingrese el número de años que ha vivido la madre soltera en un municipio:"), 1, min = 1, max = 100),
selectInput("textReasonForDisplacement", h3("Ingrese el número que indica la razón del desplazamiento:"), list('Indique la razón de desplazamiento' = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))),
numericInput("textHealth", h3("Elija el nivel de satisfacción con los servicios de salud prestados:"), 0, min = 0, max = 10),
numericInput("textLevelSecurity", h3("Nivel de seguridad:"), 0, min = 0, max = 10),
numericInput("textWork", h3("Nivel de satisfacción con el trabajo que poseen:"), 0, min = 0, max = 10),
numericInput("textLevelHappy", h3("Nivel de felicidad de la semana anterior:"), 0, min = 0, max = 10),
numericInput("textLive", h3("Nivel de deseo de vivir por parte de la madre soltera:"), 0, min = 0, max = 10),
numericInput("textCompletHome", h3("Elija el número de familias que viven en el hogar:"), 0, min = 0, max = 10),
numericInput("textEnterEconomic", h3("Nivel de sagtisfacción por el ingreso económico:"), 0, min = 0, max = 10),
numericInput("textTranquility", h3("Elija el nivel de tranquilidad de la madre soltera:"), 0, min = 0, max = 10),
selectInput("textDisplaced", h3("¿La madre soltera es desplazada?:"),
list('Elija si o no si la madre soltera es desplazada' = c(1, 2))
),
numericInput("textBoys", h3("Elija el número de hijos en el hogar:"), 0, min = 0, max = 10),
selectInput("textFather", h3("¿Actualmente el padre de la madre soltera vive con ella? :"),
                            list('Elija si el padre de la madre soltera está vivo o muerto' = c(1, 2, 3))
),
textOutput("resultPredicction"),
tags$head(
tags$style
(
HTML
(
'#run{background-color:#8BC5ED}'
)
)
),
actionButton("run","Run Analysis")
),
tabItem(tabName = "Grafos",
h2("Grafos"),
numericInput("textDocument", h3("Escriba el documento:"), 0, min = 0, max = 60000000),
textOutput("resultTextDocument")
)
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
observeEvent(input$run, {
output$resultPredicction <- renderText({
#-------------------------------------------------------------------------------------------------------
      ## llamado de la base de datos y cálculo de la predicción
setwd("/home/jose/shinyapp/taeunalmed2018/files/") ## esta es la posicion de la base de datos.txt
data<-read.csv("/home/jose/shinyapp/taeunalmed2018/files/data.txt", header = TRUE, sep = " ")
## transformar variable
data$satisfecho<-as.numeric(data$satisfecho)
y<-data$satisfecho+1
z<-y^2.414084
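      # the response is modelled on a power-transformed scale, (satisfecho + 1)^2.414084
      # (the exponent presumably comes from a Box-Cox style estimate); predictions are
      # back-transformed below with p^(1/2.414084) - 1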
### convertir variables
data$edad<-as.numeric(data$edad)
data$desplazado_municipio<-as.factor(data$desplazado_municipio)
data$anios_viviendo_municipio<-as.numeric(data$anios_viviendo_municipio)
data$razon_desplazamiento<-as.factor(data$razon_desplazamiento)
data$padre_vive_hogar<-as.factor(data$padre_vive_hogar)
data$ingreso_economico<-as.numeric(data$ingreso_economico)
data$salud<-as.numeric(data$salud)
data$nivel_seguridad<-as.numeric(data$nivel_seguridad)
data$trabajo<-as.numeric(data$trabajo)
data$feliz<-as.numeric(data$feliz)
data$tranquilidad<-as.numeric(data$tranquilidad)
data$vale_vivir<-as.numeric(data$vale_vivir)
data$hogares_completos<-as.numeric(data$hogares_completos)
## Prediccion formula
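      # NOTE: the fitted model object 'modback' used below is assumed to be already
      # loaded in the environment; it is not created in this script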
p <- predict(object = modback,
newdata=data.frame(edad = input$textAge,
anios_viviendo_municipio = input$textAgeMunicipality,
razon_desplazamiento = input$textReasonForDisplacement,
salud = input$textHealth,
nivel_seguridad = input$textLevelSecurity,
trabajo = input$textWork,
feliz = input$textLevelHappy,
vale_vivir = input$textLive,
hogares_completos = input$textCompletHome,
ingreso_economico = input$textEnterEconomic,
tranquilidad = input$textTranquility,
desplazado_municipio = input$textDisplaced,
ninos = input$textBoys,
padre_vive_hogar = input$textFather),
type = "response")
prediccion <- p^(1/2.414084) - 1
prediccion <-round(prediccion,0) - 1 ### revisar si es -1
prediccion
#-------------------------------------------------------------------------------------------------------
paste("El nivel de satisfacción por parte de la madre soltera es de", prediccion, ", donde de [0, 3] es baja, de [4, 6] es media y [7, 10] es alta")
})
})
output$resultTextDocument <- renderForceNetwork({
setwd("/home/jose/shinyapp/taeunalmed2018/files/")
Caractersticas <- read.csv("/home/jose/shinyapp/taeunalmed2018/files/CaracteristicasYComposicionDelHogar.txt", header = TRUE, sep = ",")
tabla <- Caractersticas[,c(1,4,9,11,12,20,21,23,24)]
grafo <- data.frame(Source=numeric(1),Target=numeric(1))
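    # build an edge list (Source, Target) linking each household member to the person
    # referenced by the relationship columns P6071/P6081/P6083 (partner/father/mother
    # indicators in the survey, as used below)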
familiaN <- which(tabla$DIRECTORIO == input$textDocument)
Flia <- tabla[familiaN,]
aux <- which(Flia$P6071 == 1)
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(relacion[i,5]))
colnames(aux2)<-c('Source','Target')
aux<-which(grafo$Source==aux2$Target[1])
if(length(aux)==0){
grafo<-rbind(grafo,aux2)
}
}
aux <- which(Flia$P6081 == 1)
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(relacion[i,7]))
colnames(aux2)<-c('Source','Target')
grafo<-rbind(grafo,aux2)
}
aux <- which(Flia$P6083 == 1)
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(relacion[i,9]))
colnames(aux2)<-c('Source','Target')
grafo<-rbind(grafo,aux2)
}
aux <- which(Flia$P6051 > 9 )
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
if(observaciones!=0){
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(1))
colnames(aux2)<-c('Source','Target')
grafo<-rbind(grafo,aux2)
}
}
for (i in 1:nrow(grafo)){
if(grafo[i,1] > grafo[i,2]){
aux<-grafo[i,1]
grafo[i,1]<-grafo[i,2]
grafo[i,2]<- aux
}
}
nodes <- data.frame(ID=as.numeric(Flia$ORDEN))
grafo2<-data.frame(name=as.character(100))
for (i in 1:nrow(nodes)){
aux <- which(Flia$ORDEN == nodes[i,1])
aux <- Flia[aux,3]
aux <- ifelse(aux== 1,'Cabeza_Hogar',
ifelse(aux== 2,'Pareja',
ifelse(aux== 3,'Hij@',
ifelse(aux== 4,'Niet@',
ifelse(aux== 5,'Padre-Madre',
ifelse(aux== 6,'Suegr@',
ifelse(aux== 7,'Herman@',
ifelse(aux== 8, 'Yerno-Nuera',
ifelse(aux== 9,'Otro_f',
ifelse(aux== 10,'Emplead@',
ifelse(aux== 11,'Pariente_Empleado',
ifelse(aux== 12,'Trabajador',
ifelse(aux== 13,'pensionista','Otro')))))))))))))
grafo2 <- rbind(grafo2,data.frame(name=as.character(aux)))
}
grafo2 <- grafo2[-1,]
grafo <- grafo[-1,]
nodes <- cbind(nodes,grafo2)
colnames(nodes)<-c('ID','name')
for(i in 1:nrow(nodes)){
nodes[i,1]<-nodes[i,1]-1
}
for(i in 1:nrow(grafo)){
grafo[i,1]<-grafo[i,1]-1
grafo[i,2]<-grafo[i,2]-1
}
nodes<-cbind(nodes,data.frame(Group=as.numeric(1)))
forceNetwork(Links = grafo, Nodes = nodes,
Source = "Source", Target = "Target",
NodeID = "name",Group = 'Group', opacity = 0.8)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/taeunalmed2018/app.R
|
no_license
|
joaerazogo/shinyapp
|
R
| false | false | 10,430 |
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinymaterial)
library(shinydashboard)
library(tidyverse)
library(MASS)
library(fBasics)
library(urca)
library(TSA)
library(networkD3)
library(readr)
ui <- dashboardPage(
dashboardHeader(title = "Madres solteras"),
dashboardSidebar(
sidebarMenu(
menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")),
menuItem("Grafos", icon = icon("th"), tabName = "Grafos",
badgeLabel = "new", badgeColor = "green")
)
),
dashboardBody(
tabItems(
tabItem(tabName = "dashboard",
h2("Parámetros para hallar el resultado de la predicción de la satisfacción de una madre soltera"),
numericInput("textAge", h3("Ingrese la edad de la madre soltera:"), 0, min = 15, max = 100),
numericInput("textAgeMunicipality", h3("Ingrese el número de años que ha vivido la madre soltera en un municipio:"), 1, min = 1, max = 100),
selectInput("textReasonForDisplacement", h3("Ingrese el número que indica la razón del desplazamiento:"), list('Indique la razón de desplazamiento' = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))),
numericInput("textHealth", h3("Elija el nivel de satisfacción con los servicios de salud prestados:"), 0, min = 0, max = 10),
numericInput("textLevelSecurity", h3("Nivel de seguridad:"), 0, min = 0, max = 10),
numericInput("textWork", h3("Nivel de satisfacción con el trabajo que poseen:"), 0, min = 0, max = 10),
numericInput("textLevelHappy", h3("Nivel de felicidad de la semana anterior:"), 0, min = 0, max = 10),
numericInput("textLive", h3("Nivel de deseo de vivir por parte de la madre soltera:"), 0, min = 0, max = 10),
numericInput("textCompletHome", h3("Elija el número de familias que viven en el hogar:"), 0, min = 0, max = 10),
numericInput("textEnterEconomic", h3("Nivel de sagtisfacción por el ingreso económico:"), 0, min = 0, max = 10),
numericInput("textTranquility", h3("Elija el nivel de tranquilidad de la madre soltera:"), 0, min = 0, max = 10),
selectInput("textDisplaced", h3("¿La madre soltera es desplazada?:"),
list('Elija si o no si la madre soltera es desplazada' = c(1, 2))
),
numericInput("textBoys", h3("Elija el número de hijos en el hogar:"), 0, min = 0, max = 10),
selectInput("textFather", h3("¿Actualmente el padre de la madre soltera vive con ella? :"),
                            list('Elija si el padre de la madre soltera está vivo o muerto' = c(1, 2, 3))
),
textOutput("resultPredicction"),
tags$head(
tags$style
(
HTML
(
'#run{background-color:#8BC5ED}'
)
)
),
actionButton("run","Run Analysis")
),
tabItem(tabName = "Grafos",
h2("Grafos"),
numericInput("textDocument", h3("Escriba el documento:"), 0, min = 0, max = 60000000),
textOutput("resultTextDocument")
)
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
observeEvent(input$run, {
output$resultPredicction <- renderText({
#-------------------------------------------------------------------------------------------------------
      ## llamado de la base de datos y cálculo de la predicción
setwd("/home/jose/shinyapp/taeunalmed2018/files/") ## esta es la posicion de la base de datos.txt
data<-read.csv("/home/jose/shinyapp/taeunalmed2018/files/data.txt", header = TRUE, sep = " ")
## transformar variable
data$satisfecho<-as.numeric(data$satisfecho)
y<-data$satisfecho+1
z<-y^2.414084
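      # the response is modelled on a power-transformed scale, (satisfecho + 1)^2.414084
      # (the exponent presumably comes from a Box-Cox style estimate); predictions are
      # back-transformed below with p^(1/2.414084) - 1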
### convertir variables
data$edad<-as.numeric(data$edad)
data$desplazado_municipio<-as.factor(data$desplazado_municipio)
data$anios_viviendo_municipio<-as.numeric(data$anios_viviendo_municipio)
data$razon_desplazamiento<-as.factor(data$razon_desplazamiento)
data$padre_vive_hogar<-as.factor(data$padre_vive_hogar)
data$ingreso_economico<-as.numeric(data$ingreso_economico)
data$salud<-as.numeric(data$salud)
data$nivel_seguridad<-as.numeric(data$nivel_seguridad)
data$trabajo<-as.numeric(data$trabajo)
data$feliz<-as.numeric(data$feliz)
data$tranquilidad<-as.numeric(data$tranquilidad)
data$vale_vivir<-as.numeric(data$vale_vivir)
data$hogares_completos<-as.numeric(data$hogares_completos)
## Prediccion formula
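      # NOTE: the fitted model object 'modback' used below is assumed to be already
      # loaded in the environment; it is not created in this script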
p <- predict(object = modback,
newdata=data.frame(edad = input$textAge,
anios_viviendo_municipio = input$textAgeMunicipality,
razon_desplazamiento = input$textReasonForDisplacement,
salud = input$textHealth,
nivel_seguridad = input$textLevelSecurity,
trabajo = input$textWork,
feliz = input$textLevelHappy,
vale_vivir = input$textLive,
hogares_completos = input$textCompletHome,
ingreso_economico = input$textEnterEconomic,
tranquilidad = input$textTranquility,
desplazado_municipio = input$textDisplaced,
ninos = input$textBoys,
padre_vive_hogar = input$textFather),
type = "response")
prediccion <- p^(1/2.414084) - 1
prediccion <-round(prediccion,0) - 1 ### revisar si es -1
prediccion
#-------------------------------------------------------------------------------------------------------
paste("El nivel de satisfacción por parte de la madre soltera es de", prediccion, ", donde de [0, 3] es baja, de [4, 6] es media y [7, 10] es alta")
})
})
output$resultTextDocument <- renderForceNetwork({
setwd("/home/jose/shinyapp/taeunalmed2018/files/")
Caractersticas <- read.csv("/home/jose/shinyapp/taeunalmed2018/files/CaracteristicasYComposicionDelHogar.txt", header = TRUE, sep = ",")
tabla <- Caractersticas[,c(1,4,9,11,12,20,21,23,24)]
grafo <- data.frame(Source=numeric(1),Target=numeric(1))
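    # build an edge list (Source, Target) linking each household member to the person
    # referenced by the relationship columns P6071/P6081/P6083 (partner/father/mother
    # indicators in the survey, as used below)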
familiaN <- which(tabla$DIRECTORIO == input$textDocument)
Flia <- tabla[familiaN,]
aux <- which(Flia$P6071 == 1)
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(relacion[i,5]))
colnames(aux2)<-c('Source','Target')
aux<-which(grafo$Source==aux2$Target[1])
if(length(aux)==0){
grafo<-rbind(grafo,aux2)
}
}
aux <- which(Flia$P6081 == 1)
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(relacion[i,7]))
colnames(aux2)<-c('Source','Target')
grafo<-rbind(grafo,aux2)
}
aux <- which(Flia$P6083 == 1)
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(relacion[i,9]))
colnames(aux2)<-c('Source','Target')
grafo<-rbind(grafo,aux2)
}
aux <- which(Flia$P6051 > 9 )
relacion <- Flia[aux,]
observaciones<-nrow(relacion)
if(observaciones!=0){
for (i in 1:observaciones) {
aux2 <- data.frame(as.numeric(relacion[i,2]),as.numeric(1))
colnames(aux2)<-c('Source','Target')
grafo<-rbind(grafo,aux2)
}
}
for (i in 1:nrow(grafo)){
if(grafo[i,1] > grafo[i,2]){
aux<-grafo[i,1]
grafo[i,1]<-grafo[i,2]
grafo[i,2]<- aux
}
}
nodes <- data.frame(ID=as.numeric(Flia$ORDEN))
grafo2<-data.frame(name=as.character(100))
for (i in 1:nrow(nodes)){
aux <- which(Flia$ORDEN == nodes[i,1])
aux <- Flia[aux,3]
aux <- ifelse(aux== 1,'Cabeza_Hogar',
ifelse(aux== 2,'Pareja',
ifelse(aux== 3,'Hij@',
ifelse(aux== 4,'Niet@',
ifelse(aux== 5,'Padre-Madre',
ifelse(aux== 6,'Suegr@',
ifelse(aux== 7,'Herman@',
ifelse(aux== 8, 'Yerno-Nuera',
ifelse(aux== 9,'Otro_f',
ifelse(aux== 10,'Emplead@',
ifelse(aux== 11,'Pariente_Empleado',
ifelse(aux== 12,'Trabajador',
ifelse(aux== 13,'pensionista','Otro')))))))))))))
grafo2 <- rbind(grafo2,data.frame(name=as.character(aux)))
}
grafo2 <- grafo2[-1,]
grafo <- grafo[-1,]
nodes <- cbind(nodes,grafo2)
colnames(nodes)<-c('ID','name')
for(i in 1:nrow(nodes)){
nodes[i,1]<-nodes[i,1]-1
}
for(i in 1:nrow(grafo)){
grafo[i,1]<-grafo[i,1]-1
grafo[i,2]<-grafo[i,2]-1
}
nodes<-cbind(nodes,data.frame(Group=as.numeric(1)))
forceNetwork(Links = grafo, Nodes = nodes,
Source = "Source", Target = "Target",
NodeID = "name",Group = 'Group', opacity = 0.8)
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
\name{mc.se}
\alias{mc.se}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Standard error for h2 and corr
}
\description{
This function computes the standard error (se) of heritability (h2) and correlation estimates from the MCMCglmm package.
}
\usage{
mc.se(object = NULL, Nmc = NULL, confinterval = NULL, lv = NULL,
uv = NULL, n = NULL, conf.level = NULL, sigf = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
MCMCglmm model results or h2/corr formula results
}
\item{Nmc}{
Use MCMCglmm result directly (T) or not (F)
}
\item{confinterval}{
Confidence interval for heritability or corr
}
\item{lv}{
Lower value from confidence interval
}
\item{uv}{
Upper value from confidence interval
}
\item{n}{
Total number of aim trait observation
}
\item{conf.level}{
Confidence level
}
\item{sigf}{
Output significance level (T) or not (F, default)
}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
\item{Nmc }{Default is T to use MCMCglmm results directly, F for not.}
\item{n }{Total number of aim trait observation,default value is 1000.}
\item{conf.level }{Confidence level, default value is 0.95.}
\item{sigf}{Output significant level (T) or not (default F).}
}
\author{
Yuanzhen Lin <yzhlinscau@163.com>
}
\references{
Yuanzhen Lin. R & ASReml-R Statistics. China Forestry Publishing House. 2016
}
\seealso{
Website for instant update: yzhlin-asreml.ys168.com
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
library(AAfun)
library(MCMCglmm)
data(PrSpa)
df<-subset(PrSpa,Spacing=='3')
######## single trait model ########
m1.glmm <- MCMCglmm(h5 ~ 1 + Rep,
random = ~ Fam, pr = TRUE,
family = 'gaussian',
data = df)
h2.glmm<-4*m1.glmm$VCV[,'Fam']/(m1.glmm$VCV[,'Fam']+m1.glmm$VCV[,'units'])
posterior.mode(h2.glmm)
mc.se(h2.glmm)
confinterval<-HPDinterval(h2.glmm)
mc.se(confinterval=confinterval,Nmc=F)
mc.se(confinterval=confinterval,Nmc=F,n=559,conf.level=0.95)
### Second method has the same result ###
lv<-HPDinterval(h2.glmm)[1]
uv<-HPDinterval(h2.glmm)[2]
mc.se(lv=lv,uv=uv,Nmc=F)
######## bi-trait model ########
df$dj<-1000*df$dj
phen.var<-matrix(c(var(df$dj,na.rm=TRUE),0,0,
var(df$h5,na.rm=TRUE)),2,2)
prior<-list(G=list(G1=list(V=phen.var/2,n=2)),
R=list(V=phen.var/2,n=2))
set.seed(1234)
m2.glmm <- MCMCglmm(cbind(dj,h5)~ trait-1+trait:Rep, random=~ us(trait):Fam,
rcov=~ us(trait):units, data=df, family=c("gaussian", "gaussian"),
nitt=130000,thin=100,burnin=30000,
prior=prior,verbose=FALSE,pr=TRUE)
posterior.mode(m2.glmm$VCV)
HPDinterval(m2.glmm$VCV)
#### count se for variance component
mc.se(m2.glmm$VCV)
#### count se for fixed and random effects
# mc.se(m2.glmm$Sol)
posterior.mode(m2.glmm$Sol)[c(1:5,40:45,80:85)]
mc.se(m2.glmm$Sol)[c(1:5,40:45,80:85),]
#### count se for heritability
A.h2.glmm<-4*m2.glmm$VCV[,'dj:dj.Fam']/(m2.glmm$VCV[,'dj:dj.Fam']+m2.glmm$VCV[,'dj:dj.units'])
posterior.mode(A.h2.glmm)
mc.se(A.h2.glmm)
confinterval<-HPDinterval(A.h2.glmm)
mc.se(confinterval=confinterval,Nmc=F)
mc.se(confinterval=confinterval,n=559,conf.level=0.95,Nmc=F)
### Second method has the same result for h2 ###
lv<-HPDinterval(A.h2.glmm)[,1]
uv<-HPDinterval(A.h2.glmm)[,2]
mc.se(lv=lv,uv=uv,Nmc=F)
#### count se for corr
gCorr.glmm<-m2.glmm$VCV[,'h5:dj.Fam']/sqrt(m2.glmm$VCV[,'dj:dj.Fam']*m2.glmm$VCV[,'h5:h5.Fam'])
mc.se(gCorr.glmm,sigf=T)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ MCMCglmm.se }
\keyword{ mc.se }% __ONLY ONE__ keyword per line
|
/man/mc.se.Rd
|
no_license
|
yzhlinscau/AAfun
|
R
| false | false | 3,876 |
rd
|
\name{mc.se}
\alias{mc.se}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Standard error for h2 and corr
}
\description{
This function computes the standard error (se) of heritability (h2) and correlation estimates from the MCMCglmm package.
}
\usage{
mc.se(object = NULL, Nmc = NULL, confinterval = NULL, lv = NULL,
uv = NULL, n = NULL, conf.level = NULL, sigf = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{
MCMCglmm model results or h2/corr formula results
}
\item{Nmc}{
Use MCMCglmm result directly (T) or not (F)
}
\item{confinterval}{
Confidence interval for heritability or corr
}
\item{lv}{
Lower value from confidence interval
}
\item{uv}{
Upper value from confidence interval
}
\item{n}{
Total number of aim trait observation
}
\item{conf.level}{
Confidence level
}
\item{sigf}{
Output significance level (T) or not (F, default)
}
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
\item{Nmc }{Default is T to use MCMCglmm results directly, F for not.}
\item{n }{Total number of aim trait observation,default value is 1000.}
\item{conf.level }{Confidence level, default value is 0.95.}
\item{sigf}{Output significant level (T) or not (default F).}
}
\author{
Yuanzhen Lin <yzhlinscau@163.com>
}
\references{
Yuanzhen Lin. R & ASReml-R Statistics. China Forestry Publishing House. 2016
}
\seealso{
Website for instant update: yzhlin-asreml.ys168.com
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
library(AAfun)
library(MCMCglmm)
data(PrSpa)
df<-subset(PrSpa,Spacing=='3')
######## single trait model ########
m1.glmm <- MCMCglmm(h5 ~ 1 + Rep,
random = ~ Fam, pr = TRUE,
family = 'gaussian',
data = df)
h2.glmm<-4*m1.glmm$VCV[,'Fam']/(m1.glmm$VCV[,'Fam']+m1.glmm$VCV[,'units'])
posterior.mode(h2.glmm)
mc.se(h2.glmm)
confinterval<-HPDinterval(h2.glmm)
mc.se(confinterval=confinterval,Nmc=F)
mc.se(confinterval=confinterval,Nmc=F,n=559,conf.level=0.95)
### Second method has the same result ###
lv<-HPDinterval(h2.glmm)[1]
uv<-HPDinterval(h2.glmm)[2]
mc.se(lv=lv,uv=uv,Nmc=F)
######## bi-trait model ########
df$dj<-1000*df$dj
phen.var<-matrix(c(var(df$dj,na.rm=TRUE),0,0,
var(df$h5,na.rm=TRUE)),2,2)
prior<-list(G=list(G1=list(V=phen.var/2,n=2)),
R=list(V=phen.var/2,n=2))
set.seed(1234)
m2.glmm <- MCMCglmm(cbind(dj,h5)~ trait-1+trait:Rep, random=~ us(trait):Fam,
rcov=~ us(trait):units, data=df, family=c("gaussian", "gaussian"),
nitt=130000,thin=100,burnin=30000,
prior=prior,verbose=FALSE,pr=TRUE)
posterior.mode(m2.glmm$VCV)
HPDinterval(m2.glmm$VCV)
#### count se for variance component
mc.se(m2.glmm$VCV)
#### count se for fixed and random effects
# mc.se(m2.glmm$Sol)
posterior.mode(m2.glmm$Sol)[c(1:5,40:45,80:85)]
mc.se(m2.glmm$Sol)[c(1:5,40:45,80:85),]
#### count se for heritability
A.h2.glmm<-4*m2.glmm$VCV[,'dj:dj.Fam']/(m2.glmm$VCV[,'dj:dj.Fam']+m2.glmm$VCV[,'dj:dj.units'])
posterior.mode(A.h2.glmm)
mc.se(A.h2.glmm)
confinterval<-HPDinterval(A.h2.glmm)
mc.se(confinterval=confinterval,Nmc=F)
mc.se(confinterval=confinterval,n=559,conf.level=0.95,Nmc=F)
### Second method has the same result for h2 ###
lv<-HPDinterval(A.h2.glmm)[,1]
uv<-HPDinterval(A.h2.glmm)[,2]
mc.se(lv=lv,uv=uv,Nmc=F)
#### count se for corr
gCorr.glmm<-m2.glmm$VCV[,'h5:dj.Fam']/sqrt(m2.glmm$VCV[,'dj:dj.Fam']*m2.glmm$VCV[,'h5:h5.Fam'])
mc.se(gCorr.glmm,sigf=T)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ MCMCglmm.se }
\keyword{ mc.se }% __ONLY ONE__ keyword per line
|
# NamSor API v2
#
# NamSor API v2 : endpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 1000 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it!
#
# OpenAPI spec version: 2.0.2-beta
# Contact: contact@namsor.com
# Generated by: https://openapi-generator.tech
#' FirstLastNameGenderedOut Class
#'
#' @field id
#' @field firstName
#' @field lastName
#' @field likelyGender
#' @field genderScale
#' @field score
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
FirstLastNameGenderedOut <- R6::R6Class(
'FirstLastNameGenderedOut',
public = list(
`id` = NULL,
`firstName` = NULL,
`lastName` = NULL,
`likelyGender` = NULL,
`genderScale` = NULL,
`score` = NULL,
initialize = function(`id`, `firstName`, `lastName`, `likelyGender`, `genderScale`, `score`){
if (!missing(`id`)) {
stopifnot(is.character(`id`), length(`id`) == 1)
self$`id` <- `id`
}
if (!missing(`firstName`)) {
stopifnot(is.character(`firstName`), length(`firstName`) == 1)
self$`firstName` <- `firstName`
}
if (!missing(`lastName`)) {
stopifnot(is.character(`lastName`), length(`lastName`) == 1)
self$`lastName` <- `lastName`
}
if (!missing(`likelyGender`)) {
stopifnot(is.character(`likelyGender`), length(`likelyGender`) == 1)
self$`likelyGender` <- `likelyGender`
}
if (!missing(`genderScale`)) {
stopifnot(is.numeric(`genderScale`), length(`genderScale`) == 1)
self$`genderScale` <- `genderScale`
}
if (!missing(`score`)) {
stopifnot(is.numeric(`score`), length(`score`) == 1)
self$`score` <- `score`
}
},
toJSON = function() {
FirstLastNameGenderedOutObject <- list()
if (!is.null(self$`id`)) {
FirstLastNameGenderedOutObject[['id']] <-
self$`id`
}
if (!is.null(self$`firstName`)) {
FirstLastNameGenderedOutObject[['firstName']] <-
self$`firstName`
}
if (!is.null(self$`lastName`)) {
FirstLastNameGenderedOutObject[['lastName']] <-
self$`lastName`
}
if (!is.null(self$`likelyGender`)) {
FirstLastNameGenderedOutObject[['likelyGender']] <-
self$`likelyGender`
}
if (!is.null(self$`genderScale`)) {
FirstLastNameGenderedOutObject[['genderScale']] <-
self$`genderScale`
}
if (!is.null(self$`score`)) {
FirstLastNameGenderedOutObject[['score']] <-
self$`score`
}
FirstLastNameGenderedOutObject
},
fromJSON = function(FirstLastNameGenderedOutJson) {
FirstLastNameGenderedOutObject <- jsonlite::fromJSON(FirstLastNameGenderedOutJson)
if (!is.null(FirstLastNameGenderedOutObject$`id`)) {
self$`id` <- FirstLastNameGenderedOutObject$`id`
}
if (!is.null(FirstLastNameGenderedOutObject$`firstName`)) {
self$`firstName` <- FirstLastNameGenderedOutObject$`firstName`
}
if (!is.null(FirstLastNameGenderedOutObject$`lastName`)) {
self$`lastName` <- FirstLastNameGenderedOutObject$`lastName`
}
if (!is.null(FirstLastNameGenderedOutObject$`likelyGender`)) {
self$`likelyGender` <- FirstLastNameGenderedOutObject$`likelyGender`
}
if (!is.null(FirstLastNameGenderedOutObject$`genderScale`)) {
self$`genderScale` <- FirstLastNameGenderedOutObject$`genderScale`
}
if (!is.null(FirstLastNameGenderedOutObject$`score`)) {
self$`score` <- FirstLastNameGenderedOutObject$`score`
}
},
toJSONString = function() {
sprintf(
'{
"id":
"%s",
"firstName":
"%s",
"lastName":
"%s",
"likelyGender":
"%s",
"genderScale":
        %f,
      "score":
        %f
}',
self$`id`,
self$`firstName`,
self$`lastName`,
self$`likelyGender`,
self$`genderScale`,
self$`score`
)
},
fromJSONString = function(FirstLastNameGenderedOutJson) {
FirstLastNameGenderedOutObject <- jsonlite::fromJSON(FirstLastNameGenderedOutJson)
self$`id` <- FirstLastNameGenderedOutObject$`id`
self$`firstName` <- FirstLastNameGenderedOutObject$`firstName`
self$`lastName` <- FirstLastNameGenderedOutObject$`lastName`
self$`likelyGender` <- FirstLastNameGenderedOutObject$`likelyGender`
self$`genderScale` <- FirstLastNameGenderedOutObject$`genderScale`
self$`score` <- FirstLastNameGenderedOutObject$`score`
self
}
)
)
|
/R/first_last_name_gendered_out.R
|
no_license
|
wing328/namsor-r-client
|
R
| false | false | 4,899 |
r
|
# NamSor API v2
#
# NamSor API v2 : endpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 1000 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it!
#
# OpenAPI spec version: 2.0.2-beta
# Contact: contact@namsor.com
# Generated by: https://openapi-generator.tech
#' FirstLastNameGenderedOut Class
#'
#' @field id
#' @field firstName
#' @field lastName
#' @field likelyGender
#' @field genderScale
#' @field score
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
FirstLastNameGenderedOut <- R6::R6Class(
'FirstLastNameGenderedOut',
public = list(
`id` = NULL,
`firstName` = NULL,
`lastName` = NULL,
`likelyGender` = NULL,
`genderScale` = NULL,
`score` = NULL,
initialize = function(`id`, `firstName`, `lastName`, `likelyGender`, `genderScale`, `score`){
if (!missing(`id`)) {
stopifnot(is.character(`id`), length(`id`) == 1)
self$`id` <- `id`
}
if (!missing(`firstName`)) {
stopifnot(is.character(`firstName`), length(`firstName`) == 1)
self$`firstName` <- `firstName`
}
if (!missing(`lastName`)) {
stopifnot(is.character(`lastName`), length(`lastName`) == 1)
self$`lastName` <- `lastName`
}
if (!missing(`likelyGender`)) {
stopifnot(is.character(`likelyGender`), length(`likelyGender`) == 1)
self$`likelyGender` <- `likelyGender`
}
if (!missing(`genderScale`)) {
stopifnot(is.numeric(`genderScale`), length(`genderScale`) == 1)
self$`genderScale` <- `genderScale`
}
if (!missing(`score`)) {
stopifnot(is.numeric(`score`), length(`score`) == 1)
self$`score` <- `score`
}
},
toJSON = function() {
FirstLastNameGenderedOutObject <- list()
if (!is.null(self$`id`)) {
FirstLastNameGenderedOutObject[['id']] <-
self$`id`
}
if (!is.null(self$`firstName`)) {
FirstLastNameGenderedOutObject[['firstName']] <-
self$`firstName`
}
if (!is.null(self$`lastName`)) {
FirstLastNameGenderedOutObject[['lastName']] <-
self$`lastName`
}
if (!is.null(self$`likelyGender`)) {
FirstLastNameGenderedOutObject[['likelyGender']] <-
self$`likelyGender`
}
if (!is.null(self$`genderScale`)) {
FirstLastNameGenderedOutObject[['genderScale']] <-
self$`genderScale`
}
if (!is.null(self$`score`)) {
FirstLastNameGenderedOutObject[['score']] <-
self$`score`
}
FirstLastNameGenderedOutObject
},
fromJSON = function(FirstLastNameGenderedOutJson) {
FirstLastNameGenderedOutObject <- jsonlite::fromJSON(FirstLastNameGenderedOutJson)
if (!is.null(FirstLastNameGenderedOutObject$`id`)) {
self$`id` <- FirstLastNameGenderedOutObject$`id`
}
if (!is.null(FirstLastNameGenderedOutObject$`firstName`)) {
self$`firstName` <- FirstLastNameGenderedOutObject$`firstName`
}
if (!is.null(FirstLastNameGenderedOutObject$`lastName`)) {
self$`lastName` <- FirstLastNameGenderedOutObject$`lastName`
}
if (!is.null(FirstLastNameGenderedOutObject$`likelyGender`)) {
self$`likelyGender` <- FirstLastNameGenderedOutObject$`likelyGender`
}
if (!is.null(FirstLastNameGenderedOutObject$`genderScale`)) {
self$`genderScale` <- FirstLastNameGenderedOutObject$`genderScale`
}
if (!is.null(FirstLastNameGenderedOutObject$`score`)) {
self$`score` <- FirstLastNameGenderedOutObject$`score`
}
},
toJSONString = function() {
sprintf(
'{
"id":
"%s",
"firstName":
"%s",
"lastName":
"%s",
"likelyGender":
"%s",
"genderScale":
%d,
"score":
%d
}',
self$`id`,
self$`firstName`,
self$`lastName`,
self$`likelyGender`,
self$`genderScale`,
self$`score`
)
},
fromJSONString = function(FirstLastNameGenderedOutJson) {
FirstLastNameGenderedOutObject <- jsonlite::fromJSON(FirstLastNameGenderedOutJson)
self$`id` <- FirstLastNameGenderedOutObject$`id`
self$`firstName` <- FirstLastNameGenderedOutObject$`firstName`
self$`lastName` <- FirstLastNameGenderedOutObject$`lastName`
self$`likelyGender` <- FirstLastNameGenderedOutObject$`likelyGender`
self$`genderScale` <- FirstLastNameGenderedOutObject$`genderScale`
self$`score` <- FirstLastNameGenderedOutObject$`score`
self
}
)
)
|
library(prism)
### Name: prism_image
### Title: Quick image plot
### Aliases: prism_image
### ** Examples
## Not run:
##D get_prism_dailys(type="tmean", minDate = "2013-06-01", maxDate = "2013-06-14", keepZip=FALSE)
##D prism_image(ls_prism_data()[1])
## End(Not run)
|
/data/genthat_extracted_code/prism/examples/prism_image.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 277 |
r
|
## Script to carry out sensitivity analysis:
## The effect of the mean seasonal variations
## Geographical variations
## Long-term trends
rm(list=ls())
library(esd)
ar1 <- function(x) {
n <- length(x)
x0 <- x[1:(n-1)]
x1 <- x[2:n]
ok <- is.finite(x0) & is.finite(x1)
ar1 <- cor(x0[ok],x1[ok])
return(ar1)
}
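## Quick sanity check for ar1() (illustrative only): white noise should give a
## lag-1 autocorrelation near 0, while a random walk should give a value near 1.
## ar1(rnorm(1000))          # ~0
## ar1(cumsum(rnorm(1000)))  # ~1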
analysetrends <- function(param='pr',pal=NULL,col=NULL,FUN='wetmean',prop=FALSE,pattern=1) {
load(paste(param,'.aaca.rda',sep=''))
X <- eval(parse(text=paste(param,'.aaca',sep='')))
X <- subset(X,it=c(1960,2015))
if (FUN=='sd') X <- anomaly(X)
if ( (FUN!='wet.spell') & (FUN!='dry.spell'))
Xc <- aggregate(X,year,FUN) else {
ispell <- switch(FUN,'wet.spell'=1,'dry.spell'=2)
      ## For spell-length statistics, a more specialised treatment is needed
## Remove records with many missing data (more than 2.5%).
nok <- (apply(X,2,nv) > 0.975*dim(X)[1])
X <- subset(X,is=nok)
x <- list()
for (i in 1:dim(X)[2]) x[[i]] <- subset(spell(subset(X,is=i),threshold=1),is=ispell)
xx <- do.call('merge',x)
class(xx) <- class(X)
xx <- attrcp(X,xx)
attr(xx,'variable') <- FUN
attr(xx,'unit') <- 'days'
Xc <- aggregate(xx,year,"mean")
}
if (FUN=='wetmean') {Xcc <- coredata(Xc); Xcc[Xcc > 20] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (FUN=='wetfreq') {Xcc <- coredata(Xc); Xcc[Xcc <= 0] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (FUN=='wet.spell') {Xcc <- coredata(Xc); Xcc[Xcc > 10] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (FUN=='dry.spell') {Xcc <- coredata(Xc); Xcc[Xcc > 20] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (prop) {
coredata(Xc) <- 100*t(t(coredata(Xc))/apply(coredata(subset(Xc,it=c(1961,1990))),2,'mean',na.rm=TRUE))
FUN <- paste(FUN,'prop',sep='')
attr(Xc,'unit') <- '%'
}
trends <- apply(Xc,2,'trend.coef')
nval <- apply(Xc,2,'nv')
trends[nval < 30] <- NA
varnm <- switch(paste(param,FUN),
't2m mean'=expression(T[2*m]),
't2m sd'=expression(sigma[T]),
't2m ar1'=expression(R(tau)),
'pr wetmean'=expression(mu),
'pr wetfreq'=expression(f[w]),
'pr wet.spell'=expression(n[c*w*d]),
'pr dry.spell'=expression(n[c*d*d]),
'pr wetmeanprop'=expression(mu))
unitnm <- switch(paste(param,FUN),
't2m mean'=expression(degree*C),
't2m sd'=expression(degree*C),
't2m ar1'='',
'pr wetmean'='mm/day',
'pr wetfreq'='',
'pr wet.spell'='days',
'pr dry.spell'='days',
'pr wetmeanprop'="'%'")
longnm <- switch(paste(param,FUN),
't2m mean'='Mean temperature',
't2m sd'='Temperature variability',
't2m ar1'='Temperature persistence',
'pr wetmean'='Precipitation intensity',
'pr wetfreq'='Wet-day frequency',
'pr wet.spell'='Mean wet-spell length',
'pr dry.spell'='Mean dry-spell length',
'pr wetmeanprop'='Proportional precipitation intensity')
nv <- apply(X,2,nv)
xlim <- range(lon(X)); ylim <- range(lat(X))
col <- rgb(0.75*(1-(lat(X)-ylim[1])/diff(ylim)),0.75*(lon(X)-xlim[1])/diff(xlim),
0.75*(lat(X)-ylim[1])/diff(ylim),0.75*nv/dim(X)[1])
attr(Xc,'longname') <- longnm
plot(Xc,col=col)
grid()
dev.copy2pdf(file=paste('paper59_Fig_trend_',
param,'_',FUN,'.pdf',sep=''))
pca <- PCA(pcafill(Xc),n=12)
attr(pca,'variable') <- varnm
attr(pca,'unit') <- unitnm
attr(pca,'longname') <- FUN
plot(pca,pattern=pattern)
dev.copy2pdf(file=paste('paper59_Fig_pcatrend_',pattern,'_',
param,'_',FUN,'.pdf',sep=''))
z <- attr(pca,'pattern')[,pattern]*attr(pca,'eigenvalues')[pattern]
## Get elevation data
data(etopo5)
etopo5 <- subset(etopo5,is=list(lon=range(lon(X)),lat=range(lat(X))))
etopo5[etopo5<=0] <- NA
## Grid the PCA pattern
require(LatticeKrig)
## Set the grid to be the same as that of etopo5:
grid <- structure(list(x=lon(etopo5),y=lat(etopo5)),class='gridList')
## Flag dubplicated stations:
ok <- !(duplicated(lon(X)) & duplicated(lat(X)))
##
obj <- LatticeKrig( x=cbind(lon(X)[ok],lat(X)[ok]),
y=z[ok],Z=alt(X)[ok])
## obj <- LatticeKrig( x=cbind(lon[ok],lat[ok]), y=z[2,ok],Z=alt[ok])
w <- predictSurface(obj, grid.list = grid,Z=etopo5)
w$z[is.na(etopo5)] <- NA
dev.new()
#surface( w )
## Get rid of packages that have functions of same name:
detach("package:LatticeKrig")
detach("package:fields")
detach("package:spam")
detach("package:grid")
detach("package:maps")
## Convert the results from LatticeKrig to esd:
W <- w$z
attr(W,'variable') <- varnm
attr(W,'unit') <- unitnm
attr(W,'longitude') <- w$x
attr(W,'latitude') <- w$y
class(W) <- class(etopo5)
## Make a projection that zooms in on the Barents region
#map(W,xlim=range(lon(W)),ylim=range(lat(W)),projection='sphere',colbar=list(n=21))
rev <- switch(param,'t2m'=TRUE,'pr'=FALSE)
Wx <- max(abs(W),na.rm=TRUE)
if (min(W,na.rm=TRUE) < 0) {
breaks <- round(seq(-Wx,Wx,length=31),2)
pal <- switch(param,'t2m'='t2m','pr'='precip')
} else {
breaks <- round(seq(0,Wx,length=31),2)
pal <- switch(param,'t2m'='warm','pr'='precip')
}
mapcol <- colscal(n=length(breaks)-1,col=pal)
#attr(W,'variable') <- varid(X)[1]
#attr(W,'unit') <- unit(X)[1]
## If temperature and all values are of same sign, use a one-signed color scheme
map(W,xlim=range(lon(W)),ylim=range(lat(W)),projection='sphere',
colbar=list(col=mapcol,breaks=breaks,pal=pal,rev=rev))
dev.copy2pdf(file=paste('paper59_Fig_trend_',param,'_',FUN,'map-pca',pattern,'.pdf',sep=''))
tx <- 1.2*quantile(abs(trends),0.97,na.rm=TRUE)
breaks <- seq(-tx,tx,length=31)
trends[abs(trends) > tx] <- NA
dev.new()
h <- hist(trends,breaks=breaks,col=colscal(30),freq=FALSE,
main=paste(varid(Xc)[1],'trends'),xlab=paste(unit(Xc)[1],'/decade'))
grid()
polygon(c(-tx,0,0,-tx,-tx),c(0,0,2*rep(max(h$density),2),0),col=rgb(0.5,0.5,0.5,0.2))
lines(h$mids,dnorm(h$mids,mean=mean(trends,na.rm=TRUE),sd=sd(trends,na.rm=TRUE)),
lwd=5,col=rgb(0.5,0.3,0.3,0.25))
p.gt.0 <- round(pnorm(0,mean=mean(trends,na.rm=TRUE),sd=sd(trends,na.rm=TRUE)),3)
figlab(paste('Pr(X > 0)=',1-p.gt.0))
dev.copy2pdf(file=paste('paper59_Fig_trend_',param,'_',FUN,'pdf-pca',pattern,'.pdf',sep=''))
}
## Inter-annual variability
iav <- function(param='pr',FUN='wetmean',FUNX='sd',pal=NULL,rev=NULL,col=NULL,prop=FALSE) {
load(paste(param,'.aaca.rda',sep=''))
X <- eval(parse(text=paste(param,'.aaca',sep='')))
X <- subset(X,it=c(1960,2015))
if (FUN=='sd') X <- anomaly(X)
if ( (FUN!='wet.spell') & (FUN!='dry.spell'))
Xc <- aggregate(X,year,FUN) else {
ispell <- switch(FUN,'wet.spell'=1,'dry.spell'=2)
      ## For spell-length statistics, a more specialised treatment is needed
## Remove records with many missing data (more than 2.5%).
nok <- (apply(X,2,nv) > 0.975*dim(X)[1])
X <- subset(X,is=nok)
x <- list()
for (i in 1:dim(X)[2]) x[[i]] <- subset(spell(subset(X,is=i),threshold=1),is=ispell)
xx <- do.call('merge',x)
class(xx) <- class(X)
xx <- attrcp(X,xx)
attr(xx,'variable') <- FUN
attr(xx,'unit') <- 'days'
Xc <- aggregate(xx,year,"mean")
}
if (FUN=='wetmean') {Xcc <- coredata(Xc); Xcc[Xcc > 20] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (FUN=='wetfreq') {Xcc <- coredata(Xc); Xcc[Xcc <= 0] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (FUN=='wet.spell') {Xcc <- coredata(Xc); Xcc[Xcc > 10] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (FUN=='dry.spell') {Xcc <- coredata(Xc); Xcc[Xcc > 20] <- NA; Xcc -> coredata(Xc); rm('Xcc')}
if (prop) {
Xc <- round(100*t(t(Xc)/apply(coredata(subset(Xc,it=c(1961,1990))),2,'mean',na.rm=TRUE)),1)
FUN <- paste(FUN,'prop',sep='')
attr(Xc,'unit') <- "'%'"
}
nval <- apply(Xc,2,'nv')
z <- apply(Xc,2,FUNX,na.rm=TRUE)
z[nval < 30] <- NA
varnm <- switch(paste(param,FUN),
't2m mean'=expression(T[2*m]),
't2m sd'=expression(sigma[T]),
't2m ar1'=expression(R(tau)),
'pr wetmean'=expression(mu),
'pr wetfreq'=expression(f[w]),
'pr wet.spell'=expression(n[c*w*d]),
'pr dry.spell'=expression(n[c*d*d]),
'pr wetmeanprop'=expression(mu),
'pr wetfreqprop'=expression(f[w]))
unitnm <- switch(paste(param,FUN),
't2m mean'=expression(degree*C),
't2m sd'=expression(degree*C),
't2m ar1'='',
'pr wetmean'='mm/day',
'pr wetfreq'='fraction',
'pr wet.spell'='days',
'pr dry.spell'='days',
'pr wetmeanprop'="'%'",
'pr wetfreqprop'="'%'")
longnm <- switch(paste(param,FUN),
't2m mean'='Mean temperature',
't2m sd'='Temperature variability',
't2m ar1'='Temperature persistence',
'pr wetmean'='Precipitation intensity',
'pr wetfreq'='Wet-day frequency',
'pr wet.spell'='Mean wet-spell length',
'pr dry.spell'='Mean dry-spell length',
'pr wetmeanprop'='Proportional precipitation intensity',
'pr wetfreqprop'='Proportional precipitation frequency')
## Get elevation data
data(etopo5)
etopo5 <- subset(etopo5,is=list(lon=range(lon(X)),lat=range(lat(X))))
etopo5[etopo5<=0] <- NA
## Grid the PCA pattern
require(LatticeKrig)
## Set the grid to be the same as that of etopo5:
grid <- structure(list(x=lon(etopo5),y=lat(etopo5)),class='gridList')
  ## Flag duplicated stations:
ok <- !(duplicated(lon(X)) & duplicated(lat(X))) & is.finite(z)
##
obj <- LatticeKrig( x=cbind(lon(X)[ok],lat(X)[ok]),
y=z[ok],Z=alt(X)[ok])
## obj <- LatticeKrig( x=cbind(lon[ok],lat[ok]), y=z[2,ok],Z=alt[ok])
w <- predictSurface(obj, grid.list = grid,Z=etopo5)
w$z[is.na(etopo5)] <- NA
dev.new()
#surface( w )
## Get rid of packages that have functions of same name:
detach("package:LatticeKrig")
detach("package:fields")
detach("package:spam")
detach("package:grid")
detach("package:maps")
## Convert the results from LatticeKrig to esd:
W <- w$z
attr(W,'variable') <- varnm
attr(W,'unit') <- attr(Xc,'unit')
attr(W,'longitude') <- w$x
attr(W,'latitude') <- w$y
class(W) <- class(etopo5)
## Make a projection that zooms in on the Barents region
#map(W,xlim=range(lon(W)),ylim=range(lat(W)),projection='sphere',colbar=list(n=21))
if (is.null(rev)) rev <- switch(param,'t2m'=TRUE,'pr'=FALSE)
Wx <- max(abs(W),na.rm=TRUE)
if (min(W,na.rm=TRUE) < 0) {
breaks <- round(seq(-Wx,Wx,length=31),2)
if (is.null(pal)) pal <- switch(param,'t2m'='t2m','pr'='precip')
} else {
breaks <- round(seq(0,Wx,length=31),2)
if (is.null(pal)) pal <- switch(param,'t2m'='warm','pr'='precip')
}
mapcol <- colscal(n=length(breaks)-1,col=pal)
if (rev) mapcol <- rev(mapcol)
#attr(W,'variable') <- varid(X)[1]
#attr(W,'unit') <- unit(X)[1]
## If temperature and all values are of same sign, use a one-signed color scheme
map(W,xlim=range(lon(W)),ylim=range(lat(W)),projection='sphere',
colbar=list(col=mapcol,breaks=breaks,pal=pal))
if (FUNX=='sd') figlab('Magnitude of interannual variations',ypos=0.97) else
if (FUNX=='trend.coef') figlab(paste('Historic trends',start(X),'-',end(X)),ypos=0.97)
#lab <- eval(parse(text=paste('expression(',varnm,'*(',unitnm,'))')))
#figlab(lab)
dev.copy2pdf(file=paste('paper59_Fig_iav_',param,'_',FUN,'_',FUNX,'map.pdf',sep=''))
}
if (FALSE) {
analysetrends(param='t2m',FUN='mean',pattern=1)
analysetrends(param='t2m',FUN='mean',pattern=2)
analysetrends(param='t2m',FUN='sd',pattern=1)
analysetrends(param='t2m',FUN='sd',pattern=2)
analysetrends(param='t2m',FUN='ar1',pattern=1)
analysetrends(param='t2m',FUN='ar1',pattern=2)
analysetrends(param='pr',FUN='wetmean',prop=TRUE,pattern=1)
analysetrends(param='pr',FUN='wetmean',prop=TRUE,pattern=2)
analysetrends(param='pr',FUN='wetfreq',prop=TRUE,pattern=1)
analysetrends(param='pr',FUN='wetfreq',prop=TRUE,pattern=2)
analysetrends(param='pr',FUN='wet.spell',pattern=1)
analysetrends(param='pr',FUN='wet.spell',pattern=2)
analysetrends(param='pr',FUN='dry.spell',pattern=1)
analysetrends(param='pr',FUN='dry.spell',pattern=2)
}
#iav(param='t2m',FUN='mean')
#iav(param='t2m',FUN='sd')
#iav(param='pr',FUN='sum',pal='t2m',rev=TRUE)
#iav(param='pr',FUN='wetmean',pal='t2m',rev=TRUE)
#iav(param='pr',FUN='wetfreq',pal='t2m',rev=TRUE)
#iav(param='pr',FUN='wet.spell',pal='t2m',rev=TRUE)
#iav(param='pr',FUN='dry.spell',pal='t2m',rev=TRUE)
#iav(param='pr',FUN='sum',FUNX='trend.coef',pal='t2m',rev=TRUE)
iav(param='pr',FUN='wetmean',FUNX='trend.coef',pal='t2m',rev=TRUE,prop=TRUE)
iav(param='pr',FUN='wetfreq',FUNX='trend.coef',pal='t2m',rev=TRUE,prop=TRUE)
#iav(param='pr',FUN='wet.spell',FUNX='trend.coef',pal='t2m',rev=TRUE)
#iav(param='pr',FUN='dry.spell',FUNX='trend.coef',pal='t2m',rev=TRUE)
load('t2m.aaca.rda')
X <- coredata(annual(t2m.aaca,'mean'))
Y <- coredata(annual(anomaly(t2m.aaca),'sd'))
ok <- (apply(X,2,nv)>30)
x <- apply(X[,ok],2,'trend.coef')
y <- apply(Y[,ok],2,'trend.coef')
dev.new()
par(bty='n')
mx <- mean(x,na.rm=TRUE); sx <- sd(x,na.rm=TRUE)
my <- mean(y,na.rm=TRUE); sy <- sd(y,na.rm=TRUE)
s <- sin(seq(0,2*pi,length.out=360)); c <- cos(seq(0,2*pi,length.out=360))
plot(x,y,pch=19,xlim=c(-2,2),ylim=c(-0.5,0.5),cex=0.5,
main='Temperature trends: mean and variability',
xlab=expression(bar(x)),ylab=expression(sigma[T]))
rect(-2,-0.5,0,0.5,col=rgb(0.5,0.5,1,0.2))
rect(-2,-0.5,2,0,col=rgb(0.5,0.5,0,0.2))
rect(0,0,2,0.5,col=rgb(1,0.5,0.5,0.1))
for (p in seq(0.9,0.1,by=-0.1)) {
rx <- qnorm(p,sd=sx); ry <- qnorm(p,sd=sy)
polygon(mx+rx*s,my+ry*c,col=rgb(0.5,0.5,+.5,0.2),border='grey')
}
points(x,y,pch=19,cex=0.5)
lines(c(-2,mx),rep(my,2),lty=2)
text(-1.25,my,pos=3,paste(round(my/mx,2),'degree/degree'))
dev.copy2pdf(file='paper59_Fig_trend_t2m_meansigma.pdf')
|
/AACA/paper59trends.R
|
no_license
|
metno/esd_Rmarkdown
|
R
| false | false | 13,917 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracesMethods.R
\name{plot.traces}
\alias{plot.traces}
\title{Plot traces}
\usage{
\method{plot}{traces}(traces, log = FALSE, legend = TRUE,
PDF = FALSE, name = "Traces", plot = TRUE, colour_by = "id",
highlight = NULL, highlight_col = NULL, colorMap = NULL,
monomer_MW = TRUE)
}
\arguments{
\item{traces}{Object of class traces.}
\item{log}{Logical, whether the intensities should be plotted in log scale. Default is \code{FALSE}.}
\item{legend}{Logical, whether a legend of the traces should be plotted. Should be set to \code{FALSE}
if many chromatograms are plotted. Default is \code{TRUE}.}
\item{PDF}{Logical, whether to plot to PDF. PDF file is saved in working directory. Default is \code{FALSE}.}
\item{name}{Character string with name of the plot, only used if \code{PDF=TRUE}.
PDF file is saved under name.pdf. Default is "Traces".}
\item{colorMap}{named character vector containing valid color specifications for plotting.
The names of the vector must correspond to the ids of the peptides to be plotted.}
\item{monomer_MW}{Logical, whether monomer molecular weights should be indicated.}
}
\description{
Plot all chromatograms in a traces object. Most generic plotting function.
}
\examples{
# Protein traces
proteinTraces=exampleProteinTraces
plot(proteinTraces)
# Annotate traces with molecular weight to include molecular weight information in plot.
calibrationTable <- exampleCalibrationTable
# Perform molecular weight calibration:
calibration = calibrateMW(calibrationTable)
# Perform molecular weight annotation:
mwProteinTraces <- annotateMolecularWeight(proteinTraces, calibration)
plot(mwProteinTraces)
# Plot all peptides of a specific protein across a defined chromatographic region
peptideTraces <- examplePeptideTraces
subsetPeptideTraces <- subset(peptideTraces,trace_subset_ids="Q9UHV9",trace_subset_type="protein_id",fraction_ids=c(30:70))
plot(subsetPeptideTraces,legend=FALSE)
}
|
/man/plot.traces.Rd
|
permissive
|
kiahalespractice/CCprofiler
|
R
| false | true | 1,980 |
rd
|
###############################################################
###### Catch illegal input for GLM w/ Beta Constraints #######
###############################################################
test <- function() {
## Import data
h2oData <- h2o.importFile("/mnt/0xcustomer-datasets/c27/data.csv")
betaConstraints <- h2o.importFile("/mnt/0xcustomer-datasets/c27/constraints_indices.csv")
betaConstraints <- betaConstraints[1:(nrow(betaConstraints)-1),] # remove intercept
bc <- as.data.frame(betaConstraints)
## Set Parameters
indVars <- as.character(bc[1:nrow(bc), "names"])
depVars <- "C3"
lambda <- 1e-8
alpha <- 0
family_type <- "binomial"
## Function to run GLM with specific beta_constraints
run_glm <- function(bc) {
h2o.glm(x = indVars, y = depVars, training_frame = h2oData, family = family_type,
lambda = lambda, alpha = alpha, beta_constraints = bc)
}
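  ## For reference (illustrative, not executed as part of this test): a legal call
  ## simply passes the unmodified constraints frame, e.g. run_glm(bc), and should
  ## train without raising an exception.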
Log.info("Illegal input case: Duplicate beta constraint entries.")
a <- rbind(bc[1,],bc)
checkException(run_glm(a), "Did not catch duplicate constraint.")
Log.info("Illegal input case: No such predictor.")
b <- data.frame(names = "fakeFeature", lower_bounds = -10000, upper_bounds = 10000, beta_given = 1, rho =1)
b <- rbind(bc, b)
checkException(run_glm(b), "Did not catch fake feature.")
#CNC - Tomas comments that an empty frame is fine, and should not throw an exception
#Log.info("Illegal input case: Empty beta constraints frame.")
#empty <- betaConstraints[betaConstraints$lower_bounds == 22,]
#checkException(run_glm(empty), "Did not reject empty frame.", silent = T)
Log.info("Illegal input case: Typo in beta constraint column name.")
c <- bc
names(c) <- gsub("lower_bounds", replacement = "lowerbounds", x = names(bc))
checkException(run_glm(c), "Did not detect beta constraint column name typo.", silent = T)
}
doTest("GLM Test: Beta Constraints Illegal Argument Exceptions", test)
|
/h2o-r/tests/testdir_algos/glm/runit_INTERNAL_GLM_bc_illegal_input.R
|
permissive
|
StephRoark/h2o-3
|
R
| false | false | 1,940 |
r
|
# Read in the raw data
# Install packages
pacman::p_load("tidyverse", "DECIPHER", "Biostrings", "skimr", "caret",
"cowplot", "tidymodels", "ranger", "tree", "rsample",
"randomForest", "gbm","nnet","e1071","svmpath","lars",
"glmnet", "svmpath", "readxl")
# Set working directory
setwd("~/Documents/University_of_Minnesota/Wackett_Lab/github/synbio-data-analysis/")
# Read in the principal componenets of molecular features
molec_fts <- read_csv("data/machine_learning/PC7_molecular_descriptors.csv") %>%
dplyr::rename(substrate = V1)
molec_fts$substrate <- gsub(" ", "\\.", molec_fts$substrate)
molec_fts$substrate[molec_fts$substrate == "oxidazole"] <- "oxadiazole"
# Read in the raw molecular features
chem_descr <- read_csv("data/substrate_comparisons/15pNPs_159_selected_molecular_properties.csv") %>%
dplyr::select(cmpnd_abbrev, nB, MW, AROMATIC, O, N, Cl, MLogP, nRotB, VABC, nAtomLAC) %>%
dplyr::mutate(substrate = gsub(" ", "\\.", cmpnd_abbrev)) %>%
dplyr::select(-cmpnd_abbrev)
chem_descr$substrate[chem_descr$substrate == "oxidazole"] <- "oxadiazole"
# Read in the cavity volumes
cavity_fts <- read_excel("data/caver_models/CastP_volume_rawdata.xlsx") %>%
janitor::clean_names() %>%
dplyr::mutate(genus = gsub("\\.pdb", "", word(filename, sep = "_", 4))) %>%
dplyr::mutate(species = gsub("\\.pdb", "", word(filename, sep = "_", 5))) %>%
dplyr::mutate(org = paste0(genus, " ", species)) %>%
dplyr::select(org, volume_sa, area_sa)
# Read in the sequence features
seq_fts <- read_csv("data/machine_learning/73_12angstrom_4aa_features.csv") %>%
dplyr::mutate(raw_org = word(enzyme, sep = "\\.1", 2)) %>%
dplyr::mutate(org = paste0(word(raw_org, sep = "_", 2), " ", word(raw_org, sep = "_", 3))) %>%
dplyr::select(-raw_org) # remove enzyme
seq_fts$org[seq_fts$enzyme == "4KU5_Xanthomonas_campestris"] <- "Xanthomonas campestris"
# Read in the activity data
activity <- read_csv("data/machine_learning/20191218_all_cmpnds_avg_log_slopes_for_modeling.csv")
# Fix discrepancies in merging names
pseudo1 <- seq_fts$org[grep("Pseudoxanthomonas", seq_fts$org)]
pseudo2 <- activity$org[grep("Pseudoxanthomonas", activity$org)][1]
seq_fts$org[grep("Pseudoxanthomonas", seq_fts$org)] <- pseudo2
leif1 <- seq_fts$org[grep("Leifsonia", seq_fts$org)]
leif2 <- activity$org[grep("Leifsonia", activity$org)][1]
seq_fts$org[grep("Leifsonia", seq_fts$org)] <- leif2
# Now merge everything...
comb <- activity %>%
dplyr::left_join(., molec_fts, by = "substrate") %>%
dplyr::left_join(., chem_descr, by = "substrate") %>%
dplyr::left_join(., cavity_fts, by = "org") %>%
dplyr::left_join(., seq_fts, by = "org")
# Keep complete cases and remove any duplicate rows (hopefully there aren't any)
dedup <- comb[complete.cases(comb),]
dedup <- dedup[!duplicated(dedup),]
# Only keep variables with nonzero variance
nozdat <- nearZeroVar(dedup, saveMetrics = TRUE)
which_rem <- rownames(nozdat)[nozdat[,"nzv"] == TRUE]
which_rem
# write_csv(dedup, "data/machine_learning/20191228_1095_training_examples_12angstrom_features.csv")
dat <- dedup %>%
dplyr::mutate(id = paste0(org, "_", substrate)) %>%
dplyr::mutate(is_active = as.factor(case_when(activity == 0 ~ "N",
TRUE ~ "Y"))) %>%
dplyr::select(-which_rem, -org, -substrate) #%>%
#dplyr::select(id, contains("PC"), contains("_"), is_active)
# Set random seed
set.seed(20200104)
# Split into test and training data
dat_split <- initial_split(dat, strata = "is_active")
dat_train <- training(dat_split)
dat_test <- testing(dat_split)
nrow(dat_train)/nrow(dat) # 75 %
##### Test RF with only raw chemical descriptors and sequence features
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(-contains("PC"), -volume_sa, -area_sa, -enzyme, -activity, -id, -is_active)
x_test <- dat_test %>%
dplyr::select(-contains("PC"), -volume_sa, -area_sa, -enzyme, -activity, -id, -is_active)
colnames(x_train)
colnames(x_test)
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
# Quick test
rf_1 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 5,
verboseIter = T, classProbs = T,
savePredictions = "final"),
# tuneGrid = tgrid,
num.trees = 1000,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_1) # Training set accuracy is 81.7% for rf1
# Try prediction
rf_ml_1 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_1$bestTune$splitrule),
mtry = rf_1$bestTune$mtry, min.node.size = rf_1$bestTune$min.node.size,
importance = "permutation")
rf_1_pred <- predict(rf_1, newdata = form_test)
cm_rf_1 <- confusionMatrix(rf_1_pred, as.factor(dat_test$is_active))
cm_rf_1 # 76.6% test classification accuracy with limited chemical features
sink("output/machine_learning/20200104_rf1_rawchem_and_seq_binary_classification_results.txt")
cm_rf_1
sink()
vimp_1 <- data.frame(cbind(sort(rf_ml_1$variable.importance, decreasing = T),
names(sort(rf_ml_1$variable.importance, decreasing = T))), stringsAsFactors = F) %>%
dplyr::rename(importance = X1,
variable = X2) %>%
mutate(importance = as.numeric(importance)) %>%
dplyr::slice(1:30)
vimp_1
pdf("output/machine_learning/rf1_chem_descr_only_binary_classification_vimp.pdf", width = 6, height = 6)
vp1 <- ggplot(data = vimp_1,
aes(x=reorder(variable,importance), y=importance, fill=importance))+
geom_bar(stat="identity", position="dodge")+ coord_flip()+
ylab("Variable Importance")+
xlab("")+
guides(fill=F)+
scale_fill_gradient(low="red", high="blue")
vp1
dev.off()
##### RF with only chem PCs and sequence feature
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(contains("PC"), contains("4KU5"))
x_test <- dat_test %>%
dplyr::select(contains("PC"), contains("4KU5"))
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
# Quick test
rf_2 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 5,
verboseIter = T, classProbs = T,
savePredictions = "final"),
# tuneGrid = tgrid,
num.trees = 1000,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_2) # Training set accuracy is 82.9% for random forest
# Try prediction
rf_2$bestTune$splitrule
rf_2$bestTune$mtry
rf_2$bestTune$min.node.size
rf_ml_2 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_2$bestTune$splitrule),
mtry = rf_2$bestTune$mtry, min.node.size = rf_2$bestTune$min.node.size,
importance = "permutation")
rf_2_pred <- predict(rf_2, newdata = form_test)
cm_rf_2 <- confusionMatrix(rf_2_pred, as.factor(dat_test$is_active))
cm_rf_2 # 79.5% test accuracy
channel_a <- c(253, 258, 261, 284, 291, 292, 295, 343, 345, 349, 351, 353)
channel_b <- c(176, 173, 172, 242, 243, 112, 111, 171, 117, 316, 203, 246)
channels <- c(channel_a, channel_b)
rf_ml_2$variable.importance != 0
imps <- word(names(rf_ml_2$variable.importance)[rf_ml_2$variable.importance != 0], -1, sep = "_")
imps
imps <- as.numeric(imps[!grepl("PC", imps)])
imps[imps %in% channel_a]
imps[imps %in% channel_b]
sink("output/machine_learning/20200104_rf2_PC_and_seq_binary_classification_results.txt")
cm_rf_2
sink()
vimp_2 <- data.frame(cbind(sort(rf_ml_2$variable.importance, decreasing = T),
names(sort(rf_ml_2$variable.importance, decreasing = T))), stringsAsFactors = F) %>%
dplyr::rename(importance = X1,
variable = X2) %>%
mutate(importance = as.numeric(importance)) %>%
dplyr::slice(1:30)
vimp_2
pdf("output/machine_learning/rf2_PC_seqfeats_binary_classification_vimp.pdf", width = 6, height = 6)
vp2 <- ggplot(data = vimp_2,
aes(x=reorder(variable,importance), y=importance, fill=importance))+
geom_bar(stat="identity", position="dodge")+ coord_flip()+
ylab("Variable Importance")+
xlab("")+
guides(fill=F)+
scale_fill_gradient(low="red", high="blue")
vp2
dev.off()
##### RF with chem PCs, sequence features, and surface areas
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(contains("PC"), contains("4KU5"), contains("_sa"))
x_test <- dat_test %>%
dplyr::select(contains("PC"), contains("4KU5"), contains("_sa"))
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
# Quick test
rf_3 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 5,
verboseIter = T, classProbs = T,
savePredictions = "final"),
# tuneGrid = tgrid,
num.trees = 1000,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_3) # Training set accuracy is 82.4% for random forest
rf_3$bestTune$splitrule
rf_3$bestTune$mtry
rf_3$bestTune$min.node.size
# Try prediction
rf_ml_3 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_3$bestTune$splitrule),
mtry = rf_3$bestTune$mtry, min.node.size = rf_3$bestTune$min.node.size,
importance = "permutation")
rf_3_pred <- predict(rf_3, newdata = form_test)
cm_rf_3 <- confusionMatrix(rf_3_pred, as.factor(dat_test$is_active))
cm_rf_3 # 79.5% test accuracy
sink("output/machine_learning/20200104_rf3_PC_and_seq_and_volume_binary_classification_results.txt")
cm_rf_3
sink()
vimp_3 <- data.frame(cbind(sort(rf_ml_3$variable.importance, decreasing = T),
names(sort(rf_ml_3$variable.importance, decreasing = T))), stringsAsFactors = F) %>%
dplyr::rename(importance = X1,
variable = X2) %>%
mutate(importance = as.numeric(importance)) %>%
dplyr::slice(1:30)
vimp_3
pdf("output/machine_learning/rf3_PC_seqfeats_sa_binary_classification_vimp.pdf", width = 6, height = 6)
vp3 <- ggplot(data = vimp_3,
aes(x=reorder(variable,importance), y=importance, fill=importance))+
geom_bar(stat="identity", position="dodge")+ coord_flip()+
ylab("Variable Importance")+
xlab("")+
guides(fill=F)+
scale_fill_gradient(low="red", high="blue")
vp3
dev.off()
plot_grid(vp1, vp2, vp3, ncol = 3)
#### Now try tuning the mtry
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(contains("PC"), contains("4KU5"))
x_test <- dat_test %>%
dplyr::select(contains("PC"), contains("4KU5"))
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
rf_grid <- expand.grid(mtry = c(2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50),
splitrule = "gini",
min.node.size = 1)
# Quick test
rf_4 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 3,
verboseIter = T, classProbs = T,
savePredictions = "final"),
tuneGrid = rf_grid,
num.trees = 500,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
rf_grid2 <- expand.grid(mtry = c(75, 100, 125, 150),
splitrule = "gini",
min.node.size = 1)
# Quick test
rf_5 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 3,
verboseIter = T, classProbs = T,
savePredictions = "final"),
tuneGrid = rf_grid2,
num.trees = 500,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_5) # Training set accuracy is 82.9% for random forest
# Try prediction
rf_5$bestTune$splitrule
rf_5$bestTune$mtry
rf_5$bestTune$min.node.size
rf_ml_5 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_5$bestTune$splitrule),
mtry = rf_5$bestTune$mtry, min.node.size = rf_5$bestTune$min.node.size,
importance = "permutation")
rf_5_pred <- predict(rf_5, newdata = form_test)
cm_rf_5 <- confusionMatrix(rf_5_pred, as.factor(dat_test$is_active))
cm_rf_5 # 79.5% test accuracy
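## NOTE: bal_mean, bal_sd and pal2 are used by the plot below but are never defined
## above. A minimal reconstruction (an assumption, not the original code): summarise
## the per-mtry cross-validation results that caret stores in rf_4$results and
## rf_5$results as an error rate (1 - Accuracy) with its resampling SD.
res_all  <- rbind(rf_4$results, rf_5$results)
bal_mean <- data.frame(Var2 = res_all$mtry, mean_acc = 1 - res_all$Accuracy)
bal_sd   <- data.frame(Var2 = res_all$mtry, mean_sd = res_all$AccuracySD)
pal2     <- c("#1B9E77", "#D95F02")  # hypothetical palette; only used if colouring by a grouping variable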
pdf("output/machine_learning/impact_of_mtry_on_pred_error.pdf", width = 5, height = 5)
ggplot(bal_mean, aes(x = Var2, y = mean_acc)) + #color = Var1, group = Var1)) +
geom_point() +
geom_path() +
geom_errorbar(aes(ymin = bal_mean$mean_acc - bal_sd$mean_sd, ymax = bal_mean$mean_acc + bal_sd$mean_sd )) +
ylab("OOB Prediction Error") +
xlab("mtry") +
scale_color_manual(values = pal2) +
theme(legend.position = "top",
axis.text.x = element_text(angle = 90),
legend.text = element_text(size = 8),
legend.title = element_blank()) +
ylim(0.15, 0.25)
dev.off()
|
/bin/machine_learning/20200103_binary_classificaton_test_molec_features.r
|
no_license
|
serina-robinson/synbio-data-analysis
|
R
| false | false | 14,893 |
r
|
# Read in the raw data
# Install packages
pacman::p_load("tidyverse", "DECIPHER", "Biostrings", "skimr", "caret",
"cowplot", "tidymodels", "ranger", "tree", "rsample",
"randomForest", "gbm","nnet","e1071","svmpath","lars",
"glmnet", "svmpath", "readxl")
# Set working directory
setwd("~/Documents/University_of_Minnesota/Wackett_Lab/github/synbio-data-analysis/")
# Read in the principal componenets of molecular features
molec_fts <- read_csv("data/machine_learning/PC7_molecular_descriptors.csv") %>%
dplyr::rename(substrate = V1)
molec_fts$substrate <- gsub(" ", "\\.", molec_fts$substrate)
molec_fts$substrate[molec_fts$substrate == "oxidazole"] <- "oxadiazole"
# Read in the raw molecular features
chem_descr <- read_csv("data/substrate_comparisons/15pNPs_159_selected_molecular_properties.csv") %>%
dplyr::select(cmpnd_abbrev, nB, MW, AROMATIC, O, N, Cl, MLogP, nRotB, VABC, nAtomLAC) %>%
dplyr::mutate(substrate = gsub(" ", "\\.", cmpnd_abbrev)) %>%
dplyr::select(-cmpnd_abbrev)
chem_descr$substrate[chem_descr$substrate == "oxidazole"] <- "oxadiazole"
# Read in the cavity volumes
cavity_fts <- read_excel("data/caver_models/CastP_volume_rawdata.xlsx") %>%
janitor::clean_names() %>%
dplyr::mutate(genus = gsub("\\.pdb", "", word(filename, sep = "_", 4))) %>%
dplyr::mutate(species = gsub("\\.pdb", "", word(filename, sep = "_", 5))) %>%
dplyr::mutate(org = paste0(genus, " ", species)) %>%
dplyr::select(org, volume_sa, area_sa)
# Read in the sequence features
seq_fts <- read_csv("data/machine_learning/73_12angstrom_4aa_features.csv") %>%
dplyr::mutate(raw_org = word(enzyme, sep = "\\.1", 2)) %>%
dplyr::mutate(org = paste0(word(raw_org, sep = "_", 2), " ", word(raw_org, sep = "_", 3))) %>%
dplyr::select(-raw_org) # remove enzyme
seq_fts$org[seq_fts$enzyme == "4KU5_Xanthomonas_campestris"] <- "Xanthomonas campestris"
# Read in the activity data
activity <- read_csv("data/machine_learning/20191218_all_cmpnds_avg_log_slopes_for_modeling.csv")
# Fix discrepancies in merging names
pseudo1 <- seq_fts$org[grep("Pseudoxanthomonas", seq_fts$org)]
pseudo2 <- activity$org[grep("Pseudoxanthomonas", activity$org)][1]
seq_fts$org[grep("Pseudoxanthomonas", seq_fts$org)] <- pseudo2
leif1 <- seq_fts$org[grep("Leifsonia", seq_fts$org)]
leif2 <- activity$org[grep("Leifsonia", activity$org)][1]
seq_fts$org[grep("Leifsonia", seq_fts$org)] <- leif2
# Now merge everything...
comb <- activity %>%
dplyr::left_join(., molec_fts, by = "substrate") %>%
dplyr::left_join(., chem_descr, by = "substrate") %>%
dplyr::left_join(., cavity_fts, by = "org") %>%
dplyr::left_join(., seq_fts, by = "org")
# Now remove duplicate rows (hopefully there aren't any)
dedup <- comb[complete.cases(comb),] # no duplicates
dedup <- dedup[!duplicated(dedup),]
# Only keep variables with nonzero variance
nozdat <- nearZeroVar(dedup, saveMetrics = TRUE)
which_rem <- rownames(nozdat)[nozdat[,"nzv"] == TRUE]
which_rem
# write_csv(dedup, "data/machine_learning/20191228_1095_training_examples_12angstrom_features.csv")
dat <- dedup %>%
dplyr::mutate(id = paste0(org, "_", substrate)) %>%
dplyr::mutate(is_active = as.factor(case_when(activity == 0 ~ "N",
TRUE ~ "Y"))) %>%
dplyr::select(-which_rem, -org, -substrate) #%>%
#dplyr::select(id, contains("PC"), contains("_"), is_active)
# Set random seed
set.seed(20200104)
# Split into test and training data
dat_split <- initial_split(dat, strata = "is_active")
dat_train <- training(dat_split)
dat_test <- testing(dat_split)
nrow(dat_train)/nrow(dat) # 75 %
##### Test RF with only raw chemical descriptors and sequence features
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(-contains("PC"), -volume_sa, -area_sa, -enzyme, -activity, -id, -is_active)
x_test <- dat_test %>%
dplyr::select(-contains("PC"), -volume_sa, -area_sa, -enzyme, -activity, -id, -is_active)
colnames(x_train)
colnames(x_test)
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
# Quick test
rf_1 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 5,
verboseIter = T, classProbs = T,
savePredictions = "final"),
# tuneGrid = tgrid,
num.trees = 1000,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_1) # Training set accuracy is 81.7% for rf1
# Try prediction
rf_ml_1 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_1$bestTune$splitrule),
mtry = rf_1$bestTune$mtry, min.node.size = rf_1$bestTune$min.node.size,
importance = "permutation")
rf_1_pred <- predict(rf_1, newdata = form_test)
cm_rf_1 <- confusionMatrix(rf_1_pred, as.factor(dat_test$is_active))
cm_rf_1 # 76.6% test classification accuracy with limited chemical features
sink("output/machine_learning/20200104_rf1_rawchem_and_seq_binary_classification_results.txt")
cm_rf_1
sink()
vimp_1 <- data.frame(cbind(sort(rf_ml_1$variable.importance, decreasing = T),
names(sort(rf_ml_1$variable.importance, decreasing = T))), stringsAsFactors = F) %>%
dplyr::rename(importance = X1,
variable = X2) %>%
mutate(importance = as.numeric(importance)) %>%
dplyr::slice(1:30)
vimp_1
pdf("output/machine_learning/rf1_chem_descr_only_binary_classification_vimp.pdf", width = 6, height = 6)
vp1 <- ggplot(data = vimp_1,
aes(x=reorder(variable,importance), y=importance, fill=importance))+
geom_bar(stat="identity", position="dodge")+ coord_flip()+
ylab("Variable Importance")+
xlab("")+
guides(fill=F)+
scale_fill_gradient(low="red", high="blue")
vp1
dev.off()
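# Optional extension (added for illustration; not in the original script): because
# rf_1 was trained with classProbs = TRUE, test-set class probabilities can be used
# for a ROC/AUC summary alongside the confusion matrix. Assumes the pROC package is
# installed.
rf_1_prob <- predict(rf_1, newdata = form_test, type = "prob")
roc_rf_1 <- pROC::roc(response = dat_test$is_active, predictor = rf_1_prob$Y,
                      levels = c("N", "Y"), direction = "<")
roc_rf_1$auc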
##### RF with only chem PCs and sequence feature
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(contains("PC"), contains("4KU5"))
x_test <- dat_test %>%
dplyr::select(contains("PC"), contains("4KU5"))
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
# Quick test
rf_2 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 5,
verboseIter = T, classProbs = T,
savePredictions = "final"),
# tuneGrid = tgrid,
num.trees = 1000,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_2) # Training set accuracy is 82.9% for random forest
# Try prediction
rf_2$bestTune$splitrule
rf_2$bestTune$mtry
rf_2$bestTune$min.node.size
rf_ml_2 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_2$bestTune$splitrule),
mtry = rf_2$bestTune$mtry, min.node.size = rf_2$bestTune$min.node.size,
importance = "permutation")
rf_2_pred <- predict(rf_2, newdata = form_test)
cm_rf_2 <- confusionMatrix(rf_2_pred, as.factor(dat_test$is_active))
cm_rf_2 # 79.5% test accuracy
channel_a <- c(253, 258, 261, 284, 291, 292, 295, 343, 345, 349, 351, 353)
channel_b <- c(176, 173, 172, 242, 243, 112, 111, 171, 117, 316, 203, 246)
channels <- c(channel_a, channel_b)
rf_ml_2$variable.importance != 0
imps <- word(names(rf_ml_2$variable.importance)[rf_ml_2$variable.importance != 0], -1, sep = "_")
imps
imps <- as.numeric(imps[!grepl("PC", imps)])
imps[imps %in% channel_a]
imps[imps %in% channel_b]
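# Small summary (added for clarity; not in the original script): count how many of
# the residues with non-zero permutation importance fall in each channel list.
channel_counts <- c(channel_a = sum(imps %in% channel_a),
                    channel_b = sum(imps %in% channel_b),
                    other = sum(!imps %in% channels))
channel_counts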
sink("output/machine_learning/20200104_rf2_PC_and_seq_binary_classification_results.txt")
cm_rf_2
sink()
vimp_2 <- data.frame(cbind(sort(rf_ml_2$variable.importance, decreasing = T),
names(sort(rf_ml_2$variable.importance, decreasing = T))), stringsAsFactors = F) %>%
dplyr::rename(importance = X1,
variable = X2) %>%
mutate(importance = as.numeric(importance)) %>%
dplyr::slice(1:30)
vimp_2
pdf("output/machine_learning/rf2_PC_seqfeats_binary_classification_vimp.pdf", width = 6, height = 6)
vp2 <- ggplot(data = vimp_2,
aes(x=reorder(variable,importance), y=importance, fill=importance))+
geom_bar(stat="identity", position="dodge")+ coord_flip()+
ylab("Variable Importance")+
xlab("")+
guides(fill=F)+
scale_fill_gradient(low="red", high="blue")
vp2
dev.off()
##### RF with chem PCs, sequence features, and surface areas
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(contains("PC"), contains("4KU5"), contains("_sa"))
x_test <- dat_test %>%
dplyr::select(contains("PC"), contains("4KU5"), contains("_sa"))
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
# Quick test
rf_3 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 5,
verboseIter = T, classProbs = T,
savePredictions = "final"),
# tuneGrid = tgrid,
num.trees = 1000,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_3) # Training set accuracy is 82.4% for random forest
rf_3$bestTune$splitrule
rf_3$bestTune$mtry
rf_3$bestTune$min.node.size
# Try prediction
rf_ml_3 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_3$bestTune$splitrule),
mtry = rf_3$bestTune$mtry, min.node.size = rf_3$bestTune$min.node.size,
importance = "permutation")
rf_3_pred <- predict(rf_3, newdata = form_test)
cm_rf_3 <- confusionMatrix(rf_3_pred, as.factor(dat_test$is_active))
cm_rf_3 # 79.5% test accuracy
sink("output/machine_learning/20200104_rf3_PC_and_seq_and_volume_binary_classification_results.txt")
cm_rf_3
sink()
vimp_3 <- data.frame(cbind(sort(rf_ml_3$variable.importance, decreasing = T),
names(sort(rf_ml_3$variable.importance, decreasing = T))), stringsAsFactors = F) %>%
dplyr::rename(importance = X1,
variable = X2) %>%
mutate(importance = as.numeric(importance)) %>%
dplyr::slice(1:30)
vimp_3
pdf("output/machine_learning/rf3_PC_seqfeats_sa_binary_classification_vimp.pdf", width = 6, height = 6)
vp3 <- ggplot(data = vimp_3,
aes(x=reorder(variable,importance), y=importance, fill=importance))+
geom_bar(stat="identity", position="dodge")+ coord_flip()+
ylab("Variable Importance")+
xlab("")+
guides(fill=F)+
scale_fill_gradient(low="red", high="blue")
vp3
dev.off()
plot_grid(vp1, vp2, vp3, ncol = 3)
#### Now try tuning the mtry
# Define variables of interest
x_train <- dat_train %>%
dplyr::select(contains("PC"), contains("4KU5"))
x_test <- dat_test %>%
dplyr::select(contains("PC"), contains("4KU5"))
y_train <- dat_train$is_active
y_test <- dat_test$is_active
# Make a data frame for prediction
df_train <- data.frame(x_train, stringsAsFactors = F, row.names = dat_train$id)
# Complete dataset for training and testing
form_train <- data.frame(cbind(x_train, y_train), stringsAsFactors = F, row.names = dat_train$id)
form_test <- data.frame(cbind(x_test, y_test), stringsAsFactors = F, row.names = dat_test$id)
rf_grid <- expand.grid(mtry = c(2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50),
splitrule = "gini",
min.node.size = 1)
# Quick test
rf_4 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 3,
verboseIter = T, classProbs = T,
savePredictions = "final"),
tuneGrid = rf_grid,
num.trees = 500,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
rf_grid2 <- expand.grid(mtry = c(75, 100, 125, 150),
splitrule = "gini",
min.node.size = 1)
# Quick test
rf_5 <- train(
x = df_train,
y = y_train,
method = "ranger",
trControl = trainControl(method = "repeatedcv", number = 10,
repeats = 3,
verboseIter = T, classProbs = T,
savePredictions = "final"),
tuneGrid = rf_grid2,
num.trees = 500,
# respect.unordered.factors = FALSE,
verbose = TRUE,
preProcess = c("center", "scale"),
importance = "permutation")
# Confusion matrix
getTrainPerf(rf_5) # Training set accuracy is 82.9% for random forest
# Try prediction
rf_5$bestTune$splitrule
rf_5$bestTune$mtry
rf_5$bestTune$min.node.size
rf_ml_5 <- ranger(y_train ~., data = form_train, num.trees = 1000, splitrule = as.character(rf_5$bestTune$splitrule),
mtry = rf_5$bestTune$mtry, min.node.size = rf_5$bestTune$min.node.size,
importance = "permutation")
rf_5_pred <- predict(rf_5, newdata = form_test)
cm_rf_5 <- confusionMatrix(rf_5_pred, as.factor(dat_test$is_active))
cm_rf_5 # 79.5% test accuracy
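# The plot below expects `bal_mean` (columns Var2 and mean_acc), `bal_sd` (column
# mean_sd) and a palette `pal2`, none of which are created anywhere in this script.
# The block below is one assumed reconstruction, not the original code: refit ranger
# over the combined mtry grid several times and summarise the OOB prediction error.
mtry_grid <- c(2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 75, 100, 125, 150)
mtry_grid <- mtry_grid[mtry_grid <= ncol(form_train) - 1] # mtry cannot exceed the number of predictors
n_reps <- 5
oob_mat <- sapply(mtry_grid, function(m) {
  sapply(seq_len(n_reps), function(r) {
    ranger(y_train ~ ., data = form_train, num.trees = 500,
           mtry = m, min.node.size = 1, seed = r)$prediction.error
  })
})
bal_mean <- data.frame(Var2 = mtry_grid, mean_acc = colMeans(oob_mat)) # column name kept to match the plot code; the values are OOB errors
bal_sd <- data.frame(Var2 = mtry_grid, mean_sd = apply(oob_mat, 2, sd))
pal2 <- c("#1B9E77", "#D95F02") # placeholder palette; unused because no colour aesthetic is mapped in the plot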
pdf("output/machine_learning/impact_of_mtry_on_pred_error.pdf", width = 5, height = 5)
ggplot(bal_mean, aes(x = Var2, y = mean_acc)) + #color = Var1, group = Var1)) +
geom_point() +
geom_path() +
geom_errorbar(aes(ymin = bal_mean$mean_acc - bal_sd$mean_sd, ymax = bal_mean$mean_acc + bal_sd$mean_sd )) +
ylab("OOB Prediction Error") +
xlab("mtry") +
scale_color_manual(values = pal2) +
theme(legend.position = "top",
axis.text.x = element_text(angle = 90),
legend.text = element_text(size = 8),
legend.title = element_blank()) +
ylim(0.15, 0.25)
dev.off()
|
#############################################################################################################################
rm(list=ls())
#############################################################################################################################
test <- TRUE # set test to FALSE to run the real simulation
scenarioName <- "MLA"
outDir <- file.path("code", "scenarios_micro", "micro_noecig", "outData")
#############################################################################################################################
source(file.path("code", "scenarios_micro", "util", "microsimulation_preparation.R"))
init <- micro_prep()
startAge <- 11
endAge <- 80
startYear <- 2017
endYear <- 2067
numYear <- endYear - startYear + 1
# Scenario-specific parameters
MLAge <- 21
fMLA <- 1/2
v.NC.primer <- init$NCprimer
v.CQ.primer <- init$CQprimer
v.QC.primer <- init$QCprimer
rowMLAge <- (MLAge-1) - startAge + 1
row18 <- 17 - startAge + 1
v.NC.primer[row18:(rowMLAge-1)] <- v.NC.primer[row18:(rowMLAge-1)]*fMLA
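# Quick check (added for clarity; not in the original script): print the single-year
# ages whose never-smoker -> current-smoker initiation primer is scaled by fMLA (1/2)
# under this minimum-legal-age scenario.
affected_ages <- (row18:(rowMLAge - 1)) + startAge - 1
affected_ages # ages 17-19 with startAge = 11, MLAge = 21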
if(test){
n.i <- 10
v.M_1 <- rep(c("C", "N", "N", "C", "Q"), n.i/5)
v.age <- rep(c(19, 4, 35, 180, 20), n.i/5)
} else {
n.i <- length(init$sin.ages) # number of simulated individuals
v.M_1 <- init$sin.states # beginning states
v.age <- init$sin.ages # initialize age
}
n.t <- 50 # time horizon, 50 cycles
v.n <- c("N","C","Q","X")                 # the model states: Never Smoker (N), Current Smoker (C), Quitter (Q), Dead (X)
n.s <- length(v.n) # the number of states
d.e <- 0.03 # equal discounting of costs and QALYs by 3%
d.x <- 0.03 # death rate decrease 3% annually
# Cost and utility inputs
u.N <- 1 # utility when not smoking
u.Cbase <- c(rep(0.91, 19), rep(c(0.88, 0.86, 0.83, 0.81, 0.78, 0.76, 0.74), each=5), rep(0.71, 16)) # utility when smoking
u.bTbase <- c(rep(0.98, 19), rep(0.96, 10), rep(0.97, 5), rep(0.96, 5), rep(0.97, 15), rep(0.98, 16))
u.Qbase <- 1 - (1-u.Cbase) * 0.05
v.NXbase <- init$deathRateN
v.RRCbase <- c(rep(2.8, 49), rep(2.5, 10), rep(2.0, 11))
v.bTbase <- c(rep(0.92, 5), rep(0.93, 5), rep(0.94, 14), rep(0.95, 21), rep(0.96, 25))
v.RRQbase <- 1 + (v.RRCbase - 1) * 0.05
##################################### Functions ###########################################
##################################### Helper functions ####################################
getNiVec <- function(v.base, v.index){
v.ni <- v.base[v.index]
v.ni[is.na(v.ni)] <- 0
v.ni
}
##################################### Main functions ####################################
# THE NEW samplev() FUNCTION
# efficient implementation of the rMultinom() function of the Hmisc package ####
samplev <- function (probs, m) {
d <- dim(probs)
n <- d[1]
k <- d[2]
lev <- dimnames(probs)[[2]]
if (!length(lev))
lev <- 1:k
ran <- matrix(lev[1], ncol = m, nrow = n)
U <- t(probs)
for(i in 2:k) {
U[i, ] <- U[i, ] + U[i - 1, ]
}
if (any((U[k, ] - 1) > 1e-05))
stop("error in multinom: probabilities do not sum to 1")
for (j in 1:m) {
un <- rep(runif(n), rep(k, n))
ran[, j] <- lev[1 + colSums(un > U)]
}
ran
}
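# Illustration (added; wrapped in if (FALSE) so it does not execute as part of the
# scenario run): samplev() draws one next state per row from a matrix of
# per-individual transition probabilities whose columns are the state names.
if (FALSE) {
  p.demo <- matrix(c(0.70, 0.20, 0.05, 0.05,
                     0.00, 0.60, 0.30, 0.10),
                   nrow = 2, byrow = TRUE,
                   dimnames = list(NULL, c("N", "C", "Q", "X")))
  samplev(p.demo, m = 1) # one sampled state ("N"/"C"/"Q"/"X") per individual
}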
# The MicroSim function for the simple microsimulation of the 'Sick-Sicker' model keeps track of what happens to each individual during each cycle.
MicroSim <- function(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c, d.e, TR.out = TRUE, TS.out = FALSE, Trt = FALSE, seed = 1) {
# Arguments:
# v.M_1: vector of initial states for individuals
# n.i: number of individuals
# n.t: total number of cycles to run the model
# v.n: vector of health state names
# X: vector or matrix of individual characteristics
# d.c: discount rate for costs
# d.e: discount rate for health outcome (QALYs)
# TR.out: should the output include a Microsimulation trace? (default is TRUE)
  # TS.out: should the output include a matrix of transitions between states? (default is FALSE)
# Trt: are the n.i individuals receiving treatment? (scalar with a Boolean value, default is FALSE)
# seed: starting seed number for random number generator (default is 1)
# Makes use of:
# Probs: function for the estimation of transition probabilities
# Costs: function for the estimation of cost state values
# Effs: function for the estimation of state specific health outcomes (QALYs)
startAge <- 11
v.index <- v.age - startAge + 1
v.dwe <- 1 / (1 + d.e) ^ (0:n.t)
# Create the matrix capturing the state name/costs/health outcomes for all individuals at each time point
m.M <- m.E <- matrix(nrow = n.i, ncol = n.t + 1,
dimnames = list(paste("ind", 1:n.i, sep = " "),
paste("cycle", 0:n.t, sep = " ")))
if(TR.out == TRUE) {
TR = matrix(NA, n.s, n.t)
}
m.M[, 1] <- v.M_1 # indicate the initial health state
v.RR <- getInitRR(v.M_1, v.index)
u <- getInitU(v.M_1, v.index)
m.E[, 1] <- Effs (u, cl=1)
set.seed(seed) # set the seed for every individual for the random number generator
for (t in 1:n.t) { # t <- 3
# print(v.index)
if (TR.out == TRUE) {
TR[,t] <- table(factor((m.M[,t])[v.age>=12 & v.age<=80], levels=v.n, ordered=TRUE))
}
if(t>1){
v.RR <- getRR(v.RR, m.M[,t], v.index)
}
# print(t)
# print(v.RR)
m.p <- Probs(m.M[, t], v.index, v.RR) # calculate the transition probabilities at cycle t
m.M[, t + 1] <- samplev(prob = m.p, m = 1) # sample the next health state and store that state in matrix m.M
cat('\r', paste(round(t/n.t * 100), "% done", sep = " ")) # display the progress of the simulation
v.age <- v.age + 1
v.index <- v.index + 1
    v.NXbase <<- v.NXbase * (1-d.x)  # note: <<- rescales v.NXbase in the global environment, so re-running MicroSim without re-sourcing the setup compounds the reduction
u <- getU(u, m.M[,t+1], v.index)
m.E[,t + 1] <- Effs( u, cl=1)
} # close the loop for the time points
if (TS.out == TRUE) { # create a matrix of transitions across states
TS <- paste(m.M, cbind(m.M[, -1], NA), sep = "->") # transitions from one state to the other
TS <- matrix(TS, nrow = n.i)
rownames(TS) <- paste("Ind", 1:n.i, sep = " ") # name the rows
colnames(TS) <- paste("Cycle", 0:n.t, sep = " ") # name the columns
} else {
TS <- NULL
}
if(TR.out==TRUE){
TR <- prop.table(t(TR), margin = 1)
} else {
TR <- NULL
}
te <- m.E %*% v.dwe # total (discounted) QALYs per individual
te_hat <- mean(te) # average (discounted) QALYs
colSumME <- colSums(m.E)
results <- list(m.M = m.M, TS = TS, TR = TR, m.E = m.E, te = te, te_hat = te_hat, colSumME = colSumME) # store the results from the simulation in a list
return(results) # return the results
} # end of the MicroSim function
#### Probability function
# The Probs function that updates the transition probabilities of every cycle is shown below.
getInitRR <- function(M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RR <- rep(1, n.i)
v.RR[M_it=="N"] <- 1
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- v.RRQ.ni[M_it=="Q"]
v.RR
}
getRR <- function(v.RRold, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
v.bT.ni <- getNiVec(v.bTbase, v.index)
v.RRC.ni <- getNiVec(v.RRCbase, v.index)
v.RRQ.ni <- getNiVec(v.RRQbase, v.index)
v.RR <- getInitRR(M_it, v.index)
v.RR[M_it=="N"] <- 1
v.RR[M_it=="C"] <- v.RRC.ni[M_it=="C"]
v.RR[M_it=="Q"] <- 1 + (v.RRold[M_it=="Q"] - 1) * v.bT.ni[M_it=="Q"]
v.RR
}
Probs <- function(M_it, v.index, v.RR) {
# M_it: state occupied by individual i at cycle t (character variable)
v.index[v.index<=0] <- length(v.NXbase) + 10
m.p.it <- matrix(NA, n.s, n.i) # create vector of state transition probabilities
rownames(m.p.it) <- v.n # assign names to the vector
# Update base transition rates
v.NX.ni <- getNiVec(v.NXbase, v.index)
v.toX.ni <- v.RR * v.NX.ni
v.NC.primer.ni <- getNiVec(v.NC.primer, v.index)
v.NC.ni <- v.NC.primer.ni * (1-v.toX.ni)
v.CQ.primer.ni <- getNiVec(v.CQ.primer, v.index)
v.CQ.ni <- v.CQ.primer.ni * (1-v.toX.ni)
v.QC.primer.ni <- getNiVec(v.QC.primer, v.index)
v.QC.ni <- v.QC.primer.ni * (1-v.toX.ni)
m.p.it[,M_it == "N"] <- rbind(1-v.NC.ni[M_it=="N"]-v.toX.ni[M_it=="N"], v.NC.ni[M_it=="N"], 0, v.toX.ni[M_it=="N"]) # transition probabilities when never smoke
m.p.it[,M_it == "C"] <- rbind(0, 1-v.CQ.ni[M_it=="C"]-v.toX.ni[M_it=="C"], v.CQ.ni[M_it=="C"], v.toX.ni[M_it=="C"]) # transition probabilities when current smoke
m.p.it[,M_it == "Q"] <- rbind(0, v.QC.ni[M_it=="Q"], 1-v.QC.ni[M_it=="Q"]-v.toX.ni[M_it=="Q"], v.toX.ni[M_it=="Q"]) # transition probabilities when quit smoke
m.p.it[,M_it == "X"] <- c(0, 0, 0, 1) # transition probabilities when dead
# cat("\n")
# print(m.p.it)
# cat("\n")
ifelse(colSums(m.p.it) == 1, return(t(m.p.it)), print("Probabilities do not sum to 1")) # return the transition probabilities or produce an error
}
### Costs function
# The Costs function estimates the costs at every cycle.
getInitU <- function(M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u <- rep(0, n.i)
u[M_it=="N"] <- 1
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- u.Q.ni[M_it=="Q"]
u[v.index<1 | v.index>70] <- 0
u
}
# Get utility, i.e. unit QALY for the individual at time t
getU <- function(u.old, M_it, v.index){
v.index[v.index<=0] <- length(v.NXbase) + 10
u.bT.ni <- getNiVec(u.bTbase, v.index)
u.C.ni <- getNiVec(u.Cbase, v.index)
u.Q.ni <- getNiVec(u.Qbase, v.index)
u <- getInitU(M_it, v.index)
u[M_it=="N"] <- 1
u[M_it=="C"] <- u.C.ni[M_it=="C"]
u[M_it=="Q"] <- 1 - (1 - u.old[M_it=="Q"]) * u.bT.ni[M_it=="Q"]
u[M_it == "X"] <- 0 # update the utility if dead
u[v.index<1 | v.index>70] <- 0
u
}
Effs <- function (u, cl = 1) {
# cl: cycle length (default is 1)
QALYs <- u * cl # calculate the QALYs during cycle t
return(QALYs) # return the QALYs
}
##################################### Run the simulation ##################################
# START SIMULATION
p = Sys.time()
sim_no_trt <- MicroSim(v.M_1, v.age, n.i, n.t, v.n, X = NULL, d.c = NA, d.e = d.e, Trt = FALSE, seed = 200)  # X and d.c are never used inside MicroSim; the original call passed the undefined objects v.x and d.c, which only worked because of lazy evaluation
comp.time = Sys.time() - p
comp.time
# PRINT DATA
sim_no_trt$TR
# sim_no_trt$m.M
# SAVE DATA
saveRDS(sim_no_trt$TR, file.path(outDir, paste0(scenarioName, "_TR.rds")))
saveRDS(sim_no_trt$colSumME, file.path(outDir, paste0(scenarioName, "_colSumME.rds")))
# saveRDS(sim_no_trt, file.path(outDir, paste0(scenarioName, "_sim_no_trt.rds")))
|
/code/scenarios_micro/micro_noecig/MLA_micro.R
|
no_license
|
KateDoan/gice
|
R
| false | false | 10,824 |
r
|
# param is a named numeric vector or a matrix.
# if param is a vector, it specifies a 5PL curve.
# if param is a matrix, each row of it specifies a 5PL curve
# param can be in either the classical parameterization or the g-h parameterization
# param can come from drm fit or bcrm fit
# the ed50 parameterization is not currently supported
# return a list with 5 elements: b, c, d, e, f
get.curve.param.list=function(param){
if (is.matrix(param)) {
# do nothing
} else if (is.vector(param)) {
param=matrix(param, nrow=1, dimnames=list(NULL, names(param))) # as.matrix turns a vector into a column
} else {
stop ("param is not matrix or vector")
}
tmp=substr(colnames(param),1,1)
tmp["logtao"==colnames(param)]="logtao" # logtao can not be shortened to l
colnames(param)=tmp
if (!"c" %in% colnames(param)) stop("param does not have d")
if (!"d" %in% colnames(param)) stop("param does not have d")
if("b" %in% tmp & "e" %in% tmp) {
# classical parameterization
} else if ("g" %in% tmp & "h" %in% tmp) {
# gh parameterization
param=gh2cla(param)
} else if ("logtao" %in% tmp & "b" %in% tmp) {
# ED50 parameterization
param=ed502cla(param)
} else if ("logtao" %in% tmp & ("h" %in% tmp | "logh" %in% tmp)) {
# ED50 parameterization
param=ed50b2cla(param)
} else {
stop("not gh not cla not ED50: "%+%concatList(colnames(param),","))
}
b=param[,"b"]; c=param[,"c"]; d=param[,"d"]; e=param[,"e"]
names(b)=NULL; names(c)=NULL; names(d)=NULL; names(e)=NULL; # so that the outcome is not named incorrectly
if (any(e<0)) stop("e cannot be negative")
res=list(b=b, c=c, d=d, e=e)
if ("f" %in% colnames(param)) {
f=param[,"f"]
names(f)=NULL
if (any(f<0)) stop("f cannot be negative")
res=c(res, f=list(f))
} else {
res=c(res, f=list(rep(1, length(e))))
}
res
}
############################################################################
# classical parameterization
# following functions are vectorized for t, but not for param
FivePL.t=function (t,param) {
param.list=get.curve.param.list(param)
if (!(length(t)==1 | length(param.list$c)==1 | length(t)==length(param.list$c))) stop ("t or x and param not match in length")
out=with(param.list, (d-c)/{1+exp(b*t-b*log(e))}^f+c)
names(out)=rep("y", length(out))
out
}
FivePL.t.func=function (param) {
param.list=get.curve.param.list(param)
return (with(param.list, function (t) (d-c)/{1+exp(b*t-b*log(e))}^f+c))
}
FivePL.x=function (x,param) {
FivePL.t(log(x), param)
}
# returns the concentration for a given set of y, if out of bound, returns either 0 or Inf
FivePL.t.inv = function (y,param) {
param.list=get.curve.param.list(param)
if (!(length(y)==1 | length(param.list$c)==1 | length(y)==length(param.list$c))) stop ("y and param not match in length")
with(param.list, {
out=suppressWarnings(1/b*log( ((d-c)/(y-c))^(1/f) - 1 ) + log(e))
# deal with NaN, need to do this b/c of the need to differentiate between out of left bound and out of right bound,
# which is needed when a replacement value will be used
out = ifelse(y>c & y<d, out, {
ifelse(y<c, -Inf, Inf)*ifelse(b<0,1,-1)
})
names(out)=rep("x", length(out))
out
})
}
FivePL.t.inv.func=function (param) {
param.list=get.curve.param.list(param)
with(param.list, {
function (y) {
out=suppressWarnings(1/b*log( ((d-c)/(y-c))^(1/f) - 1 ) + log(e))
# deal with NaN, need to do this b/c of the need to differentiate between out of left bound and out of right bound,
# which is needed when a replacement value will be used
out = ifelse(y>c & y<d, out, {
ifelse(y<c, -Inf, Inf)*ifelse(b<0,1,-1)
})
names(out)=rep("x", length(out))
out
}
})
}
FivePL.x.inv = function (y,param) exp(FivePL.t.inv(y,param))
FivePL.x.inv.func=function (param) {
function (y) {
exp(FivePL.t.inv.func(param)(y))
}
}
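# Worked illustration (added; guarded with if (FALSE) so it never runs when the
# package file is sourced). For a classical-parameterization vector with f = 1,
# FivePL.t() at t = log(e) returns the midpoint between the asymptotes c and d,
# and FivePL.x.inv() recovers the concentration.
if (FALSE) {
  p.demo <- c(b = -1.2, c = 50, d = 30000, e = 500, f = 1)
  get.curve.param.list(p.demo) # list with b, c, d, e, f
  FivePL.t(log(500), p.demo)   # (50 + 30000) / 2 = 15025
  FivePL.x.inv(15025, p.demo)  # 500
}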
FourPL.x <- function (x, param) {
# convert param to a matrix if necessary
if (is.matrix(param)) {
# do nothing
} else if (is.vector(param)) {
param=matrix(param, nrow=1, dimnames=list(NULL, substr(names(param),1,1))) # as.matrix turns a vector into a column
} else {
stop ("param is not matrix or vector")
}
# add f=1
param=cbind(param, f=1)
FivePL.x(x,param)
}
FourPL.x.inv <- function (y, param) {
# convert param to a matrix if necessary
if (is.matrix(param)) {
# do nothing
} else if (is.vector(param)) {
param=matrix(param, nrow=1, dimnames=list(NULL, substr(names(param),1,1))) # as.matrix turns a vector into a column
} else {
stop ("param is not matrix or vector")
}
# add f=1
param=cbind(param, f=1)
FivePL.x.inv(y,param)
}
FourPL.t.func=function (param) {
# convert param to a matrix if necessary
if (is.matrix(param)) {
# do nothing
} else if (is.vector(param)) {
param=matrix(param, nrow=1, dimnames=list(NULL, substr(names(param),1,1))) # as.matrix turns a vector into a column
} else {
stop ("param is not matrix or vector")
}
# add f=1
param=cbind(param, f=1)
FivePL.t.func(param)
}
ED5PL = function (param, tao) {
names(param)=substr(names(param),1,1)
b=param["b"]; c=param["c"]; d=param["d"]; e=param["e"]; f=param["f"]; g=param["g"]; h=param["h"]; logtao=param["logtao"]
if(is.na(b) | is.na(e)) {
if(!is.na(g) & !is.na(h)) {
pp=gh2cla(c(c,d,g,h=h,f=f))
b=pp["b"]
e=pp["e"]
} else if (!is.na(logtao)) {
pp=ed502cla(c(c,d,b=b,logtao=unname(logtao),f=f))
e=pp["e"]
} else {
stop("parameterization not recognized")
}
}
unname(e*(tao^{-1/f}-1)^{1/b})
}
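# Added note (guarded so it never executes on sourcing): for a 4PL curve (f = 1),
# ED5PL(param, 0.5) reduces to e * (0.5^-1 - 1)^(1/b) = e, i.e. the ED50 equals the
# inflection parameter e.
if (FALSE) {
  ED5PL(c(b = -1.2, c = 50, d = 30000, e = 500, f = 1), tao = 0.5) # 500
}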
#Treat out of bound concentration estimates
#y: a number. The readout
#p: a vector of number. Parameters for a 5pl/4pl curve.
#t.range: a vector of two numbers. The range of log standard samples concentrations.
#If y is less than lower asymptote, return t.range[1]+log(1/2), i.e. log of half of smallest standard samples concentration.
#If y is higher than upper asymptote, return t.range[2], i.e. log of largest standard samples concentration
treat.out.of.bound=function(y, p, t.range){
if (y<get.curve.param.list(p)$c) {
t.0=t.range[1]+log(1/2) # half of the smallest standard concentration
} else if (y>get.curve.param.list(p)$d) {
t.0=t.range[2] # the largest standard concentration
} else stop("treat.out.of.bound: this cannot be right.")
t.0
}
###########################################################################################################
# simulate one curve, return FI, a vectorized function
simulate1curve=function(param, t, sd.e=0.1, expy=TRUE, gamma=0) {
if (expy) {
.mean=FivePL.t(t, param)
y = rnorm (n=length(.mean), mean=.mean, sd=sd.e*.mean^(gamma/2))
exp(y)
} else {
.mean=(FivePL.t(t, param))
y = rnorm (n=length(.mean), mean=.mean, sd=sd.e*.mean^(gamma/2))
y
}
}
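# Illustration (added; guarded so it never executes on sourcing): simulate noisy
# responses along a 3-fold dilution series for a known curve, on the response scale
# (expy = FALSE) with constant noise (gamma = 0).
if (FALSE) {
  p.demo <- c(b = -1.2, c = 50, d = 30000, e = 500, f = 1)
  t.grid <- log(500 * 3^(-(0:7))) # log concentrations of the dilution series
  set.seed(1)
  simulate1curve(p.demo, t.grid, sd.e = 0.1, expy = FALSE, gamma = 0)
}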
###########################################################################################################
# summary measure for comparing two curves
get.abs.dev = function(p1, p2, t.range, y.range) {
if (!"f" %in% names(p1)) p1=c(p1, f=1)
if (!"f" %in% names(p2)) p2=c(p2, f=1)
f.0 =FivePL.t.inv.func(p1)
f.hat=FivePL.t.inv.func(p2)
integrate( function(y) {
t.0 = f.0 (y)
    t.0 = sapply (1:length(y), function (i) { # as y is a vector and treat.out.of.bound is not vectorized, we need to loop through the length of y
if (is.nan(t.0[i])) treat.out.of.bound (y[i], p1, t.range) else t.0[i]
})
t.hat = f.hat(y)
t.hat = sapply (1:length(y), function (i) {
if (is.nan(t.hat[i]) | Inf==abs(t.hat[i])) treat.out.of.bound (y[i], p2, t.range) else t.hat[i]
})
abs(t.hat - t.0)
}, lower=y.range[1], upper=y.range[2], subdivisions=1000 )$value/(y.range[2]-y.range[1])
}
# get area between two curves betwee two curves
get.abc = function(p1, p2, t.range) {
if (!"f" %in% names(p1)) p1=c(p1, f=1)
if (!"f" %in% names(p2)) p2=c(p2, f=1)
f1=FivePL.t.func( p1 )
f0=FivePL.t.func( p2 )
integrate( function(t) abs(f1(t)-f0(t)) , lower=t.range[1], upper=t.range[2], subdivisions=1000 )$value/(t.range[2]-t.range[1])
}
# get S1 betwee two curves
get.S1 = function(p1, p2, t.range) {
if (!"f" %in% names(p1)) p1=c(p1, f=1)
if (!"f" %in% names(p2)) p2=c(p2, f=1)
f1=FivePL.t.func( p1 )
f0=FivePL.t.func( p2 )
integrate( function(t) (f1(t)-f0(t))^2 , lower=t.range[1], upper=t.range[2], subdivisions=1000 )$value/(t.range[2]-t.range[1])
}
# get S2 betwee two curves, percent bias
get.S2 = function(p1, p2, t.range) {
if (!"f" %in% names(p1)) p1=c(p1, f=1)
if (!"f" %in% names(p2)) p2=c(p2, f=1)
f0=FivePL.t.func( p1 )
f1.inv=FivePL.x.inv.func(p2)
integrate( function(t) {
x0= exp(t)
x1= f1.inv(f0(t))
abs(x1-x0)/x0 * 100
}, lower=t.range[1], upper=t.range[2], subdivisions=1000 )$value/(t.range[2]-t.range[1])
}
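# Illustration (added; guarded so it never executes on sourcing): the summary
# measures above compare two curves over a log-concentration range; identical
# parameters give zero discrepancy.
if (FALSE) {
  p1 <- c(b = -1.2, c = 50, d = 30000, e = 500, f = 1)
  p2 <- c(b = -1.1, c = 50, d = 30000, e = 550, f = 1)
  t.range <- log(c(1, 10000))
  get.abc(p1, p1, t.range) # 0 for identical curves
  get.abc(p1, p2, t.range) # average absolute vertical gap between the curves
  get.S2(p1, p2, t.range)  # average percent bias in back-calculated concentration
}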
|
/nCal/R/5pl.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 9,930 |
r
|
#EXAM!!!!!!
gala_data = read.table("gala.txt", header = TRUE)
gala_data
attach(gala_data)
gala_glm = glm(Species~Area+Elevation+Nearest+Scruz+Adjacent, family= poisson, data = gala_data)
summary(gala_glm)
# The full model contains several predictors that are not significant (Nearest has
# the largest p-value), so we apply the step-down method below.
# For reference, the fitted full model is:
# Species = 3.15 - 5.80e-04 * Area + 3.54e-03 * Elevation + 8.83e-03 * Nearest - 5.71e-03 * Scruz - 6.63e-04 * Adjacent + error
######################## END #########################################
## For lm
# Nearest p = 0.9932 > 0.05
# Omit it
# Step Down
gala_glm_sd = glm(Species~Area+Elevation+Scruz+Adjacent, family= poisson, data = gala_data)
summary(gala_glm_sd)
# Area p = 0.27555 > 0.05
# Omit it
gala_glm_sd = glm(Species~Elevation+Scruz+Adjacent, family= poisson, data = gala_data)
summary(gala_glm_sd)
# Scruz p = 0.19632 > 0.05
# Omit it
gala_glm_sd = glm(Species~Elevation+Adjacent, family= poisson, data = gala_data)
summary(gala_glm_sd)
# As we can see, all the p-values are smaller than 0.05 now
# That means all the variables are significant for our model.
# The resulting model of the step-down method is:
# Species = 1.4329 + 0.2766 * Elevation - 0.0689 * Adjacent + error
### Task 2
galapagos=read.table("gala.txt", header=TRUE)
#With this method, we start with all of the possible variables in our model. Then, we choose the one that gives the highest p-value. If this p-value is bigger than 0.05, we will discard the variable and repeat the process without it.
galaglm=glm(sqrt(Species)~Area+Elevation+Nearest+Scruz+Adjacent,family=poisson,data=galapagos)
summary(galaglm)
#The variable Nearest has the highest p-value with a p-value of 0.411. Since this p-value is bigger than 0.05, we discard it for our model and continue to the next iteration.
galaglm=glm(sqrt(Species)~Area+Elevation+Scruz+Adjacent,family=poisson,data=galapagos)
summary(galaglm)
#The variable Scruz has the highest p-value with a p-value of 0.2466. Since this p-value is bigger than 0.05, we discard it for our model and continue to the next iteration.
galaglm=glm(sqrt(Species)~Area+Elevation+Adjacent,family=poisson,data=galapagos)
summary(galaglm)
#As we can see, all the p-values are smaller than 0.05, thus meaning that all the variables are significant for our model.
#The resulting model of the step-down method is:
#sqrt(Species) = 1.314e+00 + -3.262e-04\*Area + 2.018e-03\*Elevation -3.987e-04\*Adjacent + error
|
/Exam/Exam-scriptC.R
|
no_license
|
MglMX/EDDA-VU
|
R
| false | false | 2,611 |
r
|
# Importing dataset
dataset <- read.csv('Mall_Customers.csv')
X = dataset[4:5]
# Using elbow method to find the optimal number of clusters
set.seed(6)
wcss = vector()
for(i in 1:10) wcss[i] = sum(kmeans(X, i)$withinss)
plot(1:10,
wcss,
type='b',
     main = paste('The Elbow Method'),
xlab = 'Number of clusters',
ylab = 'WCSS')
# Applying k-means to dataset
set.seed(29)
kmeans = kmeans(X, 5, iter.max = 300, nstart = 10)
y_kmeans = kmeans$cluster
# Visualising the clusters
library(cluster)
clusplot(X,
y_kmeans,
lines = 0,
shade = TRUE,
color = TRUE,
labels = 2,
plotchar = FALSE,
span = TRUE,
main = paste('Clusters of clients'),
xlab = 'Annual income',
ylab = 'Spending Score')
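# Optional inspection (added; not part of the original script): cluster sizes and
# centres on the original feature scale help with labelling the customer segments.
kmeans$size    # number of customers in each cluster
kmeans$centers # mean annual income and spending score per cluster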
|
/4_Clustering/KMeans.R
|
no_license
|
saimahesh-geek/machine-learning
|
R
| false | false | 803 |
r
|
%% File Name: TAM-package.Rd
%% File Version: 2.62
\name{TAM-package}
\alias{TAM-package}
\alias{TAM}
\docType{package}
\title{
Test Analysis Modules
}
\description{
Includes marginal maximum likelihood estimation and joint maximum
likelihood estimation for unidimensional and multidimensional
item response models. The package functionality covers the
Rasch model, 2PL model, 3PL model, generalized partial credit model,
multi-faceted Rasch model, nominal item response model,
structured latent class model, mixture distribution IRT models,
and located latent class models. Latent regression models and
plausible value imputation are also supported. For details see
Adams, Wilson, and Wang, 1997, <doi:10.1177/0146621697211001>,
Adams, Wilson, and Wu, 1997, <doi:10.3102/10769986022001047>,
Formann, 1982, <doi:10.1002/bimj.4710240209>,
Formann, 1992, <doi:10.1080/01621459.1992.10475229>.
}
\details{
See \url{http://www.edmeasurementsurveys.com/TAM/Tutorials/} for
tutorials of the \pkg{TAM} package.
}
\author{
Alexander Robitzsch [aut, cre], Thomas Kiefer [aut], Margaret Wu [aut]
Maintainer: Alexander Robitzsch <robitzsch@ipn.uni-kiel.de>
}
\references{
Adams, R. J., Wilson, M., & Wang, W. C. (1997). The multidimensional random coefficients
multinomial logit model. \emph{Applied Psychological Measurement, 21}(1), 1-23.
Adams, R. J., Wilson, M., & Wu, M. (1997).
Multilevel item response models: An approach to errors in
variables regression. \emph{Journal of Educational and Behavioral
Statistics, 22}, 47-76.
Adams, R. J., & Wu, M. L. (2007). The mixed-coefficients multinomial logit model.
A generalized form of the Rasch model. In M. von Davier & C. H. Carstensen (Eds.):
\emph{Multivariate and mixture distribution Rasch models: Extensions and applications}
(pp. 55-76). New York: Springer.
Formann, A. K. (1982). Linear logistic latent class analysis.
\emph{Biometrical Journal, 24}(2), 171-190.
Formann, A. K. (1992). Linear logistic latent class analysis for polytomous data.
\emph{Journal of the American Statistical Association, 87}, 476-486.
}
\keyword{package}
%\seealso{
%%~~ Optional links to other man pages, e.g. ~~
%%~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
%}
%\examples{
%% ## ~~ simple examples of the most important functions ~~
%}
|
/man/TAM-package.Rd
|
no_license
|
markdly/TAM
|
R
| false | false | 2,325 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/e2e_plot_ycurve.R
\name{e2e_plot_ycurve}
\alias{e2e_plot_ycurve}
\title{Plot fishery yield curve data for planktivorous or demersal fish.}
\usage{
e2e_plot_ycurve(
model,
selection = "",
use.saved = FALSE,
use.example = FALSE,
results = NULL,
title = ""
)
}
\arguments{
\item{model}{R-list object defining the baseline model configuration used to generate the data and compiled by the e2e_read() function.}
\item{selection}{Text string from a list identifying the fish guild for which a yield curve is to be generated. Select from: "PLANKTIV", "DEMERSAL". Remember to include the phrase within "" quotes.}
\item{use.saved}{Logical. If TRUE use data from a prior user-defined run held as csv files data in the current results folder as set by an e2e_read() function call (default=FALSE).}
\item{use.example}{Logical. If TRUE use pre-computed example data from the internal North Sea model rather than user-generated data (default=FALSE).}
\item{results}{Dataframe generated by the function e2e_run_ycurve(). Only needed if use.saved=FALSE and use.example=FALSE. (Default=NULL).}
\item{title}{Optional free text (enclosed in "") to be added as a header for the plot (default = "").}
}
\value{
Dataframe of results from which the plot is created, graphical display in a new graphics window
}
\description{
Plot planktivorous or demersal fish yield curve data generated by the function e2e_run_ycurve().
}
\details{
In the function e2e_run_ycurve(), the baseline for the sequence of runs (harvest ratio multiplier = 1.0) is a model name and variant as loaded by the e2e_read() function.
The function then generates a set of biomass, landings and discards data for multiples of the target fish (planktivorous or demersal) harvest ratios relative to this baseline. This is done for a given value of
the alternative (demersal or planktivorous) harvest ratio (also a multiple of the baseline).
This function plots two graphs - the annual average fish biomass in the whole model domain (mMN/m2) as a function of harvest ratio multiplier, and the yield curve, i.e. the annual catch (and discards) (mMN/m2/y) as functions of the multiplier.
The yield curve represents the catch that would be generated from the stationary state of the model attained with long-term repeating annual cycles of all driving data including fishing.
Arguments for this function permit the input data to be drawn from an existing data object generated by the function e2e_run_ycurve(), a previously generated csv file, or example data
provided with the package for versions of the internal North Sea models.
}
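% A hedged sketch of the 'use.saved' route described above (it assumes a prior
% e2e_run_ycurve() call has already written its csv output to the results folder
% configured through e2e_read(); not part of the official examples below):
%   model <- e2e_read("North_Sea", "1970-1999")
%   e2e_plot_ycurve(model, selection = "DEMERSAL", use.saved = TRUE)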
\examples{
\donttest{
# Load the 1970-1999 version of the North Sea model supplied with the package and
# generate a yield data object:
# WARNING - this example will take several minutes to run even though the model is
# only run for 3 years per step in harvest ratio. A realistic run would require
# at least 50 years per step and take much longer.
model <- e2e_read("North_Sea", "1970-1999")
pfhr=c(0,0.5,0.75,1.0,1.25,1.5,2.0,2.5,3.0)
pf_yield_data <- e2e_run_ycurve(model,selection="PLANKTIV",nyears=3, HRvector=pfhr,
HRfixed=1,csv.output=FALSE)
data <- e2e_plot_ycurve(model,selection="PLANKTIV", results=pf_yield_data,
title="Planktivorous yield with baseline demersal fishing")
# Users can then plot other biomass, landings and discards data in the results
# object by, for example:
par(mfrow=c(2,1))
par(mar=c(3.2,5,2,0.8))
ym<-1.1*max(pf_yield_data$Cetaceanbiom)
plot(pf_yield_data$PlankFishHRmult, pf_yield_data$Cetaceanbiom, ylim=c(0,ym),
type="l",lwd=3,yaxt="n",xaxt="n",ann=FALSE)
abline(v=1,lty="dashed")
axis(side=1,las=1,cex.axis=0.9)
axis(side=2,las=1,cex.axis=0.9)
mtext("Planktiv. fish harvest ratio multiplier",cex=1,side=1,line=2)
mtext("Cetacean biomass",cex=1,side=2,line=3.5)
mtext(bquote("mMN.m"^-2),cex=0.7,side=3,line=-0.05,adj=-0.18)
ym<-1.1*max(pf_yield_data$Cetaceandisc)
plot(pf_yield_data$PlankFishHRmult, pf_yield_data$Cetaceandisc, ylim=c(0,ym),
type="l",lwd=3,yaxt="n",xaxt="n",ann=FALSE)
abline(v=1,lty="dashed")
axis(side=1,las=1,cex.axis=0.9)
axis(side=2,las=1,cex.axis=0.9)
mtext("Planktiv. fish harvest ratio multiplier",cex=1,side=1,line=2)
mtext("Cetacean by-catch",cex=1,side=2,line=3.5)
mtext(bquote("mMN.m"^-2 ~ ".y"^-1),cex=0.7,side=3,line=-0.05,adj=-0.18)
}
# Using example data generated with selection="PLANKTIV" ...
# Plot example data for one of the North Sea model versions internal to the package
# This example requires the Strathe2E2examples supplementary data package.
if(require(StrathE2E2examples)){
model <- e2e_read("North_Sea", "1970-1999")
pf_yield_data<-e2e_plot_ycurve(model, selection="PLANKTIV", use.example=TRUE)
}
# Using example data generated with selection="DEMERSAL"...
# This example requires the Strathe2E2examples supplementary data package.
if(require(StrathE2E2examples)){
model <- e2e_read("North_Sea", "1970-1999")
df_yield_data<-e2e_plot_ycurve(model, selection="DEMERSAL", use.example=TRUE)
}
}
\seealso{
\code{\link{e2e_read}}, \code{\link{e2e_run}}, \code{\link{e2e_run_ycurve}}
}
|
/man/e2e_plot_ycurve.Rd
|
no_license
|
cran/StrathE2E2
|
R
| false | true | 5,373 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/e2e_plot_ycurve.R
\name{e2e_plot_ycurve}
\alias{e2e_plot_ycurve}
\title{Plot fishery yield curve data for planktivorous or demersal fish.}
\usage{
e2e_plot_ycurve(
model,
selection = "",
use.saved = FALSE,
use.example = FALSE,
results = NULL,
title = ""
)
}
\arguments{
\item{model}{R-list object defining the baseline model configuration used to generate the data and compiled by the e2e_read() function.}
\item{selection}{Text string from a list identifying the fish guild for which a yield curve is to be generated. Select from: "PLANKTIV", "DEMERSAL". Remember to include the phrase within "" quotes.}
\item{use.saved}{Logical. If TRUE use data from a prior user-defined run held as csv files data in the current results folder as set by an e2e_read() function call (default=FALSE).}
\item{use.example}{Logical. If TRUE use pre-computed example data from the internal North Sea model rather than user-generated data (default=FALSE).}
\item{results}{Dataframe generated by the function e2e_run_ycurve(). Only needed if use.saved=FALSE and use.example=FALSE. (Default=NULL).}
\item{title}{Optional free text (enclosed in "") to be added as a header for the plot (default = "").}
}
\value{
Dataframe of results from which the plot is created, graphical display in a new graphics window
}
\description{
Plot planktivorous or demersal fish yield curve data generated by the function e2e_run_ycurve().
}
\details{
In the function e2e_run_ycurve(), the baseline for the sequence of runs (harvest ratio multiplier = 1.0) is a model name and variant as loaded by the e2e_read() function.
The function then generates a set of biomass, landings and discards data for multiples of the target fish (planktivorous or demersal) harvest ratios relative to this baseline. This is done for a given value of
the alternative (demersal or planktivorous) harvest ratio (also a multiple of the baseline).
This function plots two graphs - the annual average fish biomass in the whole model domain (mMN/m2) as a function of harvest ratio multiplier, and the yield curve, i.e. the annual catch (and discards) (mMN/m2/y) as functions of the multiplier.
The yield curve represents the catch that would be generated from the stationary state of the model attained with long-term repeating annual cycles of all driving data including fishing.
Arguments for this function permit the input data to be drawn from an existing data object generated by the function e2e_run_ycurve(), a previously generated csv file, or example data
provided with the package for versions of the internal North Sea models.
}
\examples{
\donttest{
# Load the 1970-1999 version of the North Sea model supplied with the package and
# generate a yield data object:
# WARNING - this example will take several minutes to run even though the model is
# only run for 3 years per step in harvest ratio. A realistic run would require
# at least 50 years per step and take much longer.
model <- e2e_read("North_Sea", "1970-1999")
pfhr=c(0,0.5,0.75,1.0,1.25,1.5,2.0,2.5,3.0)
pf_yield_data <- e2e_run_ycurve(model,selection="PLANKTIV",nyears=3, HRvector=pfhr,
HRfixed=1,csv.output=FALSE)
data <- e2e_plot_ycurve(model,selection="PLANKTIV", results=pf_yield_data,
title="Planktivorous yield with baseline demersal fishing")
# Users can then plot other biomass, landings and discards data in the results
# object by, for example:
par(mfrow=c(2,1))
par(mar=c(3.2,5,2,0.8))
ym<-1.1*max(pf_yield_data$Cetaceanbiom)
plot(pf_yield_data$PlankFishHRmult, pf_yield_data$Cetaceanbiom, ylim=c(0,ym),
type="l",lwd=3,yaxt="n",xaxt="n",ann=FALSE)
abline(v=1,lty="dashed")
axis(side=1,las=1,cex.axis=0.9)
axis(side=2,las=1,cex.axis=0.9)
mtext("Planktiv. fish harvest ratio multiplier",cex=1,side=1,line=2)
mtext("Cetacean biomass",cex=1,side=2,line=3.5)
mtext(bquote("mMN.m"^-2),cex=0.7,side=3,line=-0.05,adj=-0.18)
ym<-1.1*max(pf_yield_data$Cetaceandisc)
plot(pf_yield_data$PlankFishHRmult, pf_yield_data$Cetaceandisc, ylim=c(0,ym),
type="l",lwd=3,yaxt="n",xaxt="n",ann=FALSE)
abline(v=1,lty="dashed")
axis(side=1,las=1,cex.axis=0.9)
axis(side=2,las=1,cex.axis=0.9)
mtext("Planktiv. fish harvest ratio multiplier",cex=1,side=1,line=2)
mtext("Cetacean by-catch",cex=1,side=2,line=3.5)
mtext(bquote("mMN.m"^-2 ~ ".y"^-1),cex=0.7,side=3,line=-0.05,adj=-0.18)
}
# Using example data generated with selection="PLANKTIV" ...
# Plot example data for one of the North Sea model versions internal to the package
# This example requires the Strathe2E2examples supplementary data package.
if(require(StrathE2E2examples)){
model <- e2e_read("North_Sea", "1970-1999")
pf_yield_data<-e2e_plot_ycurve(model, selection="PLANKTIV", use.example=TRUE)
}
# Using example data generated with selection="DEMERSAL"...
# This example requires the Strathe2E2examples supplementary data package.
if(require(StrathE2E2examples)){
model <- e2e_read("North_Sea", "1970-1999")
df_yield_data<-e2e_plot_ycurve(model, selection="DEMERSAL", use.example=TRUE)
}
}
\seealso{
\code{\link{e2e_read}}, \code{\link{e2e_run}}, \code{\link{e2e_run_ycurve}}
}
|
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682966013L, 546746628L, 599542L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853834136L, 2030715618L, -1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L))
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_depth/AFL_rcpp_depth/rcpp_depth_valgrind_files/1609858392-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 713 |
r
|
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682966013L, 546746628L, 599542L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853834136L, 2030715618L, -1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L))
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result)
|
# plot3.R
plot3 <- function() {
library("data.table")
hhData = loadData() # function below...
# Open the png file as a device, plot the graph there, and close it...
png(filename = "plot3.png", width = 480, height = 480, units = "px")
with(hhData, plot(DateTime, Sub_metering_1, type = "l", col="black",
main = "", xlab = "",
ylab = "Energy sub metering"))
with(hhData, lines(DateTime, Sub_metering_2, col="red"))
with(hhData, lines(DateTime, Sub_metering_3, col="blue"))
legend("topright",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lwd = 1)
dev.off()
}
# This function should really be factored out into its own source file,
# but copies are included in each Plot*.R file for easier evaluation.
loadData <- function() {
# ASSUMES that working directory is set and data file is present.
# load data file into data.table (not a data.frame) for performance
fullDT <- suppressWarnings(fread(
"household_power_consumption.txt", na.strings = "?"))
# coerce the date and time into data.table's formats,
# adding a consolidated date/time column
fullDT[,Date := as.IDate(Date, format = "%d/%m/%Y")]
fullDT[,Time := as.ITime(Time)]
    fullDT[,DateTime := as.POSIXct(Date, time = Time)] # pass ITime via 'time' so it is not taken as tz
# subset for the dates we want
setkey(fullDT, Date)
targetDT <- subset(fullDT, Date == '2007-02-01' | Date == '2007-02-02')
# coerce numeric columns into appropriate format
targetDT[,Global_active_power := as.numeric(Global_active_power)]
targetDT[,Global_reactive_power := as.numeric(Global_reactive_power)]
targetDT[,Voltage := as.numeric(Voltage)]
targetDT[,Global_intensity := as.numeric(Global_intensity)]
targetDT[,Sub_metering_1 := as.numeric(Sub_metering_1)]
targetDT[,Sub_metering_2 := as.numeric(Sub_metering_2)]
targetDT[,Sub_metering_3 := as.numeric(Sub_metering_3)]
setkey(targetDT, DateTime)
return (targetDT)
}
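# Hedged usage sketch (not part of the original assignment script): guard against
# a missing data file before calling plot3(); the file name matches loadData().
if (file.exists("household_power_consumption.txt")) {
    plot3()   # writes plot3.png to the working directory
} else {
    message("Download and unzip the UCI 'household power consumption' data first.")
}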
|
/plot3.R
|
no_license
|
htornitram/ExData_Plotting1
|
R
| false | false | 2,180 |
r
|
# plot3.R
plot3 <- function() {
library("data.table")
hhData = loadData() # function below...
# Open the png file as a device, plot the graph there, and close it...
png(filename = "plot3.png", width = 480, height = 480, units = "px")
with(hhData, plot(DateTime, Sub_metering_1, type = "l", col="black",
main = "", xlab = "",
ylab = "Energy sub metering"))
with(hhData, lines(DateTime, Sub_metering_2, col="red"))
with(hhData, lines(DateTime, Sub_metering_3, col="blue"))
legend("topright",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lwd = 1)
dev.off()
}
# This function should really be factored out into its own source file,
# but copies are included in each Plot*.R file for easier evaluation.
loadData <- function() {
# ASSUMES that working directory is set and data file is present.
# load data file into data.table (not a data.frame) for performance
fullDT <- suppressWarnings(fread(
"household_power_consumption.txt", na.strings = "?"))
# coerce the date and time into data.table's formats,
# adding a consolidated date/time column
fullDT[,Date := as.IDate(Date, format = "%d/%m/%Y")]
fullDT[,Time := as.ITime(Time)]
    fullDT[,DateTime := as.POSIXct(Date, time = Time)] # pass ITime via 'time' so it is not taken as tz
# subset for the dates we want
setkey(fullDT, Date)
targetDT <- subset(fullDT, Date == '2007-02-01' | Date == '2007-02-02')
# coerce numeric columns into appropriate format
targetDT[,Global_active_power := as.numeric(Global_active_power)]
targetDT[,Global_reactive_power := as.numeric(Global_reactive_power)]
targetDT[,Voltage := as.numeric(Voltage)]
targetDT[,Global_intensity := as.numeric(Global_intensity)]
targetDT[,Sub_metering_1 := as.numeric(Sub_metering_1)]
targetDT[,Sub_metering_2 := as.numeric(Sub_metering_2)]
targetDT[,Sub_metering_3 := as.numeric(Sub_metering_3)]
setkey(targetDT, DateTime)
return (targetDT)
}
|
preprocesamiento<-{
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
div(actionButton("guardar", label = "Guardar .csv", icon = icon("save"))),
div(actionButton("normalizar", label = "Normalizar Columnas", icon = icon("database")))
),
mainPanel(
tableOutput("tablaN")
)
)
}
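# Hedged sketch (not part of the original app): one way the inputs above could be
# wired on the server side; the reactive data source and the normalisation rule
# are assumptions, not taken from this repository.
library(shiny)
servidor_ejemplo <- function(input, output, session) {
  datos <- reactiveVal(iris)                  # placeholder data set
  observeEvent(input$normalizar, {
    d <- datos()
    num <- vapply(d, is.numeric, logical(1))
    d[num] <- scale(d[num])                   # z-score the numeric columns
    datos(d)
  })
  observeEvent(input$guardar, {
    write.csv(datos(), "datos_normalizados.csv", row.names = FALSE)
  })
  output$tablaN <- renderTable(head(datos(), 20))
}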
|
/TF Shiny/www/Tabs/TabPreprocesamiento.R
|
no_license
|
adbxd/kickstarter-projects-Adminfo
|
R
| false | false | 358 |
r
|
preprocesamiento<-{
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
div(actionButton("guardar", label = "Guardar .csv", icon = icon("save"))),
div(actionButton("normalizar", label = "Normalizar Columnas", icon = icon("database")))
),
mainPanel(
tableOutput("tablaN")
)
)
}
|
#!/share/nas2/genome/biosoft/R/3.1.1/lib64/R/bin/Rscript
# usage function
usage <- function() {
print("-------------------------------------------------------------------------------")
print("Usage: Rscript bin/cog_anno_plot.r in.stat out.png")
print("1) in.stat: the stat file for COG anno")
print("2) out.png: the filename for output")
print("-------------------------------------------------------------------------------")
}
# get args
args <-commandArgs(TRUE)
# check args length
if( length(args) != 2 ) {
print(args)
usage()
stop("the length of args != 2")
}
# load library
library(ggplot2)
library(grid)
# get args
args <-commandArgs(TRUE)
# reading data
data <- read.delim(args[1], header=TRUE, sep="\t")
head(data)
# plot
df <- data.frame(Frequency=data[,3], group=data[,1])
data$ration=round(100*as.vector(data[,3])/sum(as.vector(data[,3])),digits=2)
labels <- paste(data[,1],": " ,data[,2]," [",data[,3],"~",data$ration,"%","]",sep="")
p <- ggplot(data=df, aes(x=group, y=Frequency)) + geom_bar(aes(fill=group), stat="identity")
p <- p + scale_fill_discrete(name="", breaks=sort(data[,1]), labels=sort(labels))
p <- p + theme(legend.key.size=unit(0.5, "cm"))
p <- p + labs(x="Function Class", title="COG Function Classification of Consensus Sequence")
# output plot
png(filename=args[2], height = 3000, width = 5000, res = 500, units = "px")
print(p)
dev.off()
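# Hedged usage note (not part of the original script): the stat file is expected to
# be tab-separated with a header and at least three columns -- class code, class
# description, frequency -- and the call looks like (hypothetical file names):
#   Rscript bin/cog_anno_plot.r Unigene.Cog.class.stat Unigene.Cog.class.png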
|
/bin/annotation/v1.5/bin/cog_anno_plot.r
|
no_license
|
baibaijingjing/LncRNA
|
R
| false | false | 1,401 |
r
|
#!/share/nas2/genome/biosoft/R/3.1.1/lib64/R/bin/Rscript
# usage function
usage <- function() {
print("-------------------------------------------------------------------------------")
print("Usage: Rscript bin/cog_anno_plot.r in.stat out.png")
print("1) in.stat: the stat file for COG anno")
print("2) out.png: the filename for output")
print("-------------------------------------------------------------------------------")
}
# get args
args <-commandArgs(TRUE)
# check args length
if( length(args) != 2 ) {
print(args)
usage()
stop("the length of args != 2")
}
# load library
library(ggplot2)
library(grid)
# get args
args <-commandArgs(TRUE)
# reading data
data <- read.delim(args[1], header=TRUE, sep="\t")
head(data)
# plot
df <- data.frame(Frequency=data[,3], group=data[,1])
data$ration=round(100*as.vector(data[,3])/sum(as.vector(data[,3])),digits=2)
labels <- paste(data[,1],": " ,data[,2]," [",data[,3],"~",data$ration,"%","]",sep="")
p <- ggplot(data=df, aes(x=group, y=Frequency)) + geom_bar(aes(fill=group), stat="identity")
p <- p + scale_fill_discrete(name="", breaks=sort(data[,1]), labels=sort(labels))
p <- p + theme(legend.key.size=unit(0.5, "cm"))
p <- p + labs(x="Function Class", title="COG Function Classification of Consensus Sequence")
# output plot
png(filename=args[2], height = 3000, width = 5000, res = 500, units = "px")
print(p)
dev.off()
|
library(sandwich)
### Name: estfun
### Title: Extract Empirical Estimating Functions
### Aliases: estfun estfun.lm estfun.glm estfun.mlm estfun.rlm estfun.polr
### estfun.clm estfun.survreg estfun.coxph estfun.nls estfun.hurdle
### estfun.zeroinfl estfun.mlogit
### Keywords: regression
### ** Examples
## linear regression
x <- sin(1:10)
y <- rnorm(10)
fm <- lm(y ~ x)
## estimating function: (y - x'beta) * x
estfun(fm)
residuals(fm) * cbind(1, x)
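## A hedged extension (not from the package example): the same check for a Poisson
## GLM with canonical log link, where the estimating function is (y - mu) * x
yp <- rpois(10, exp(0.5 * x))
fmp <- glm(yp ~ x, family = poisson)
estfun(fmp)
residuals(fmp, type = "response") * cbind(1, x)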
|
/data/genthat_extracted_code/sandwich/examples/estfun.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 462 |
r
|
library(sandwich)
### Name: estfun
### Title: Extract Empirical Estimating Functions
### Aliases: estfun estfun.lm estfun.glm estfun.mlm estfun.rlm estfun.polr
### estfun.clm estfun.survreg estfun.coxph estfun.nls estfun.hurdle
### estfun.zeroinfl estfun.mlogit
### Keywords: regression
### ** Examples
## linear regression
x <- sin(1:10)
y <- rnorm(10)
fm <- lm(y ~ x)
## estimating function: (y - x'beta) * x
estfun(fm)
residuals(fm) * cbind(1, x)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim10GDINA.R
\docType{data}
\name{sim10GDINA}
\alias{sim10GDINA}
\title{Simulated data (10 items, G-DINA model)}
\format{
A list with components:
\describe{
\item{\code{simdat}}{simulated responses of 1000 examinees}
\item{\code{simQ}}{artificial Q-matrix}
\item{\code{simItempar}}{artificial item parameters (probability of success for each latent group)}
}
}
\usage{
sim10GDINA
}
\description{
Simulated data, Q-matrix and item parameters for a 10-item test with 3 attributes.
}
\references{
Ma, W., & de la Torre, J. (2020). GDINA: An R Package for Cognitive Diagnosis Modeling. \emph{Journal of Statistical Software, 93(14)}, 1-26.
}
\author{
{Wenchao Ma, The University of Alabama, \email{wenchao.ma@ua.edu}}
}
\keyword{datasets}
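% A hedged illustration (comment only, not an official example): the components can
% be passed straight to the main fitting function, e.g.
% fit <- GDINA::GDINA(dat = sim10GDINA$simdat, Q = sim10GDINA$simQ, model = "GDINA")
% summary(fit)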
|
/man/sim10GDINA.Rd
|
no_license
|
Wenchao-Ma/GDINA
|
R
| false | true | 813 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim10GDINA.R
\docType{data}
\name{sim10GDINA}
\alias{sim10GDINA}
\title{Simulated data (10 items, G-DINA model)}
\format{
A list with components:
\describe{
\item{\code{simdat}}{simulated responses of 1000 examinees}
\item{\code{simQ}}{artificial Q-matrix}
\item{\code{simItempar}}{artificial item parameters (probability of success for each latent group)}
}
}
\usage{
sim10GDINA
}
\description{
Simulated data, Q-matrix and item parameters for a 10-item test with 3 attributes.
}
\references{
Ma, W., & de la Torre, J. (2020). GDINA: An R Package for Cognitive Diagnosis Modeling. \emph{Journal of Statistical Software, 93(14)}, 1-26.
}
\author{
{Wenchao Ma, The University of Alabama, \email{wenchao.ma@ua.edu}}
}
\keyword{datasets}
|
library(POT)
### Name: qq
### Title: Quantile Quantile Plot
### Aliases: qq qq.uvpot
### Keywords: hplot
### ** Examples
x <- rgpd(75, 1, 2, 0.1)
pwmu <- fitgpd(x, 1, "pwmu")
qq(pwmu)
|
/data/genthat_extracted_code/POT/examples/qq.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 191 |
r
|
library(POT)
### Name: qq
### Title: Quantile Quantile Plot
### Aliases: qq qq.uvpot
### Keywords: hplot
### ** Examples
x <- rgpd(75, 1, 2, 0.1)
pwmu <- fitgpd(x, 1, "pwmu")
qq(pwmu)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_MiniMax_drivers.R
\name{MiniMax_calculateDrivers}
\alias{MiniMax_calculateDrivers}
\title{Mark which Platforms are Driving the MiniMax Statistics}
\usage{
MiniMax_calculateDrivers(
res_df,
orderStat = 2L,
drivers_char = colnames(res_df),
sortLabels = TRUE,
separator = " and "
)
}
\arguments{
\item{res_df}{A data frame of \emph{p}-values. The rows correspond to gene
sets / pathways and the columns correspond to a data platform for the
disease of interest.}
\item{orderStat}{How many platforms should show a biological signal for a
pathway / gene set to have multi-omic "enrichment"? Defaults to 2. See
"Details" for more information.}
\item{drivers_char}{What labels should be given to the driving platforms?
Defaults to the column names of \code{res_df}. If you supply custom labels,
make sure to match them to the column order of \code{res_df}.}
\item{sortLabels}{Should the driver labels be sorted alphabetically before
concatenation? Defaults to \code{TRUE}; that is, a multi-omics result
driven first by protein expression then by DNA methylation will have the
same label as a result driven first by DNA methylation then by protein
expression. If you would like the magnitude of the \emph{p}-value to set
the label order, then use \code{sortLabels = FALSE}.}
\item{separator}{What character string should be used to separate the names
of the driving platforms? Defaults to \code{" and "}; for example, if the
platform driver labels are \code{"protein"} and \code{"cnv"}, and if
\code{sortLabels = TRUE}, then the label of drivers would be
\code{"cnv and protein"}.}
}
\value{
A vector of the names of the platforms driving the MiniMax statistic
values.
}
\description{
Given a data frame of pathway-level \emph{p}-values, mark which
of the multi-omics platforms are driving the MiniMax statistic.
}
\details{
The MiniMax statistic is defined as the minimum of all pairwise
maxima of pathway \emph{p}-values. This operation is arithmetically
equivalent to sorting the \emph{p}-values and taking the second smallest.
In our experience, setting this "order statistic" cutoff to 2 is
appropriate for <= 5 data platforms. Biologically, this is equivalent to
saying "if this pathway is dysregulated in at least two data types for
this disease / condition, it is worthy of additional consideration". In
situations where more than 5 data platforms are available for the disease
of interest, we recommend increasing the \code{orderStat} value to 3.
NOTE: this result does not depend on the pathway significance level at all.
This result will simply show you which platforms had the smallest
\emph{p}-values for a particular pathway, even if the MiniMax statistic is
not statistically significant for that pathway. Therefore, we recommend
that this function be used only for interpretation of results post-hoc.
}
\examples{
MiniMax_calculateDrivers(
multiOmicsHighSignalResults_df[, -(1:2)],
drivers_char = c("cnv", "rnaSeq", "protein")
)
}
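% A hedged, self-contained sketch of the statistic described in Details
% (illustration only, not from the package):
% p <- matrix(runif(15), nrow = 5, ncol = 3)      # 5 pathways x 3 platforms
% miniMax <- apply(p, 1, function(z) sort(z)[2])  # second-smallest p-value per pathway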
|
/man/MiniMax_calculateDrivers.Rd
|
no_license
|
TransBioInfoLab/pathwayMultiomics
|
R
| false | true | 3,054 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_MiniMax_drivers.R
\name{MiniMax_calculateDrivers}
\alias{MiniMax_calculateDrivers}
\title{Mark which Platforms are Driving the MiniMax Statistics}
\usage{
MiniMax_calculateDrivers(
res_df,
orderStat = 2L,
drivers_char = colnames(res_df),
sortLabels = TRUE,
separator = " and "
)
}
\arguments{
\item{res_df}{A data frame of \emph{p}-values. The rows correspond to gene
sets / pathways and the columns correspond to a data platform for the
disease of interest.}
\item{orderStat}{How many platforms should show a biological signal for a
pathway / gene set to have multi-omic "enrichment"? Defaults to 2. See
"Details" for more information.}
\item{drivers_char}{What labels should be given to the driving platforms?
Defaults to the column names of \code{res_df}. If you supply custom labels,
make sure to match them to the column order of \code{res_df}.}
\item{sortLabels}{Should the driver labels be sorted alphabetically before
concatenation? Defaults to \code{TRUE}; that is, a multi-omics result
driven first by protein expression then by DNA methylation will have the
same label as a result driven first by DNA methylation then by protein
expression. If you would like the magnitude of the \emph{p}-value to set
the label order, then use \code{sortLabels = FALSE}.}
\item{separator}{What character string should be used to separate the names
of the driving platforms? Defaults to \code{" and "}; for example, if the
platform driver labels are \code{"protein"} and \code{"cnv"}, and if
\code{sortLabels = TRUE}, then the label of drivers would be
\code{"cnv and protein"}.}
}
\value{
A vector of the names of the platforms driving the MiniMax statistic
values.
}
\description{
Given a data frame of pathway-level \emph{p}-values, mark which
of the multi-omics platforms are driving the MiniMax statistic.
}
\details{
The MiniMax statistic is defined as the minimum of all pairwise
maxima of pathway \emph{p}-values. This operation is arithmetically
equivalent to sorting the \emph{p}-values and taking the second smallest.
In our experience, setting this "order statistic" cutoff to 2 is
appropriate for <= 5 data platforms. Biologically, this is equivalent to
saying "if this pathway is dysregulated in at least two data types for
this disease / condition, it is worthy of additional consideration". In
situations where more than 5 data platforms are available for the disease
of interest, we recommend increasing the \code{orderStat} value to 3.
NOTE: this result does not depend on the pathway significance level at all.
This result will simply show you which platforms had the smallest
\emph{p}-values for a particular pathway, even if the MiniMax statistic is
not statistically significant for that pathway. Therefore, we recommend
that this function be used only for interpretation of results post-hoc.
}
\examples{
MiniMax_calculateDrivers(
multiOmicsHighSignalResults_df[, -(1:2)],
drivers_char = c("cnv", "rnaSeq", "protein")
)
}
|
library(MASS)
get_mu <- function(xl)
{
m <- dim(xl)[2]
mu <- matrix(NA, 1, m)
for(i in 1:m)
{
mu[1,i] <- mean(xl[,i])
}
return(mu)
}
get_matrix <- function(xl,mu)
{
n <- dim(xl)[1]
m <- dim(xl)[2]
sigma <- matrix(0, m, m)
xl <- as.matrix(xl)
for(i in 1:n)
{
sigma <- sigma + (t(xl[i,]-mu) %*% (xl[i,]-mu))
}
return(sigma/(n-1))
}
get_coef <- function(mu1,mu2,sigma1,sigma2)
{
d1 <- det(sigma1)
d2 <- det(sigma2)
invs1 <- solve(sigma1)
invs2 <- solve(sigma2)
a <- invs1 - invs2
b <- invs1 %*% t(mu1) - invs2 %*% t(mu2)
A <- a[1,1] # x^2
B <- a[2,2] # y^2
C <- 2 * a[1, 2] # xy
D <- -2 * b[1, 1] # x
E <- -2 * b[2, 1] # y
G <- c(mu1 %*% invs1 %*% t(mu1) - mu2 %*% invs2 %*% t(mu2)) + log(abs(det(sigma1))) - log(abs(det(sigma2)))
func <- function(x, y) {
x^2 * A + y^2 * B + x*y*C + x*D + y*E + G
}
return(func)
}
xl <- iris[,3:5]
# Plot the training sample
colors <- c("setosa" = "red", "versicolor" = "green3","virginica" = "blue")
plot(iris[, 3:4], pch = 21, bg = colors[iris$Species],col = colors[iris$Species])
# Parameter estimation
f <- xl[xl[,3] == "setosa",1:2]
s <- xl[xl[,3] == "versicolor",1:2]
fs <- xl[xl[,3] == "virginica",1:2]
mu1 <- get_mu(f)
mu2 <- get_mu(s)
mu3 <- get_mu(fs)
sigma1 <- get_matrix(f, mu1)
sigma2 <- get_matrix(s, mu2)
sigma3 <- get_matrix(fs, mu3)
mu <- rbind(mu1,mu2,mu3)
sigma <- rbind(sigma1,sigma2,sigma3)
classif <- function(l,sigma,mu,classes,lamda=1,P=0.5){
m <- length(classes)
max <- -100000
class <- "unknown"
for(i in 1:m){
k <- log(lamda*P)-0.5*t(l-mu[i,]) %*% solve(sigma[(2*i-1):(2*i),1:2]) %*% (l-mu[i,])-0.5*log(abs(det(sigma[(2*i-1):(2*i),1:2])))
if( k > max ){
max <- k
class <- classes[i]
}
}
return(class)
}
for(i in seq(0,8,0.1)){
for(j in seq(0,2.5,0.1)){
l<-c(i,j)
class <- classif(l,sigma,mu,colors)
points(l[1],l[2],pch = 21, col=class, asp = 1)
}
}
func <- get_coef(mu1,mu3,sigma1,sigma3)
# Plot the discriminant function
y <- seq(0, 3, len = 100)
x <- seq(0, 8, len = 100)
z <- outer(x, y, func)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "green", add = TRUE)
func <- get_coef(mu1,mu2,sigma1,sigma2)
# Plot the discriminant function
y <- seq(0, 3, len = 100)
x <- seq(0, 8, len = 100)
z <- outer(x, y, func)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "blue", add = TRUE)
func <- get_coef(mu2,mu3,sigma2,sigma3)
# Plot the discriminant function
y <- seq(0, 3, len = 100)
x <- seq(0, 8, len = 100)
z <- outer(x, y, func)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "red", add = TRUE)
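# Hedged cross-check (not part of the original script): since MASS is loaded above,
# the hand-rolled plug-in QDA can be compared against MASS::qda() on the same
# two features used here (Petal.Length, Petal.Width).
qda_fit <- qda(Species ~ Petal.Length + Petal.Width, data = iris)
qda_pred <- predict(qda_fit, iris)$class
table(predicted = qda_pred, actual = iris$Species)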
library(MASS)
get_mu <- function(xl)
{
m <- dim(xl)[2]
mu <- matrix(NA, 1, m)
for(i in 1:m)
{
mu[1,i] <- mean(xl[,i])
}
return(mu)
}
get_matrix <- function(xl1,xl2,xl3,mu1,mu2,mu3)
{
xl1 <- as.matrix(xl1)
xl2 <- as.matrix(xl2)
xl3 <- as.matrix(xl3)
n <- dim(xl1)[1]
m <- dim(xl2)[1]
d <- dim(xl3)[1]
nm <- n+m+d
col <- dim(xl1)[2]
sigma <- matrix(0, col, col)
for(i in 1:n)
{
sigma <- sigma + (t(xl1[i,]-mu1) %*% (xl1[i,]-mu1))
}
for(i in 1:m)
{
sigma <- sigma + (t(xl2[i,]-mu2) %*% (xl2[i,]-mu2))
}
for(i in 1:d)
{
sigma <- sigma + (t(xl3[i,]-mu3) %*% (xl3[i,]-mu3))
}
return(sigma/(nm+3))
}
get_coef <- function(mu1,mu2,sigma1,sigma2)
{
d1 <- det(sigma1)
d2 <- det(sigma2)
invs1 <- solve(sigma1)
invs2 <- solve(sigma2)
a <- invs1 - invs2
b <- invs1 %*% t(mu1) - invs2 %*% t(mu2)
D <- -2 * b[1, 1] # x
E <- -2 * b[2, 1] # y
G <- c(mu1 %*% invs1 %*% t(mu1) - mu2 %*% invs2 %*% t(mu2)) + log(abs(det(sigma1))) - log(abs(det(sigma2)))
func <- function(x, y) {
x*D + y*E + G
}
return(func)
}
xl <- iris[,3:5]
colors <- c("setosa" = "red", "versicolor" = "green3","virginica" = "blue")
plot(iris[, 3:4], pch = 21, bg = colors[iris$Species],col = colors[iris$Species])
# Parameter estimation
f <- xl[xl[,3] == "setosa",1:2]
s <- xl[xl[,3] == "versicolor",1:2]
fs <- xl[xl[,3] == "virginica",1:2]
mu1 <- get_mu(f)
mu2 <- get_mu(s)
mu3 <- get_mu(fs)
sigma <- get_matrix(f,s,fs,mu1,mu2,mu3)
mu <- rbind(mu1,mu2,mu3)
classif <- function(l,sigma,mu,classes,lamda=1,P=0.5){
m <- length(classes)
max <- -100000
class <- "unknown"
for(i in 1:m){
k <- log(lamda*P)-0.5*t(mu[i,]) %*% solve(sigma) %*% mu[i,]+t(l) %*% solve(sigma) %*% mu[i,]
if( k > max ){
max <- k
class <- classes[i]
}
}
return(class)
}
for(i in seq(0,8,0.1)){
for(j in seq(0,2.5,0.1)){
l<-c(i,j)
class <- classif(l,sigma,mu,colors)
points(l[1],l[2],pch = 21, col=class, asp = 1)
}
}
f <- get_coef(mu1,mu3,sigma,sigma)
x <- seq(0, 7, len = 200)
y <- seq(0, 5, len = 40)
z <- outer(x, y, f)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "green", add = TRUE)
f <- get_coef(mu1,mu2,sigma,sigma)
x <- seq(0, 7, len = 200)
y <- seq(0, 5, len = 40)
z <- outer(x, y, f)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "blue", add = TRUE)
f <- get_coef(mu2,mu3,sigma,sigma)
x <- seq(0, 7, len = 200)
y <- seq(0, 5, len = 40)
z <- outer(x, y, f)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "red", add = TRUE)
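# Hedged cross-check (not part of the original script): the pooled-covariance
# classifier above is the plug-in LDA rule, so MASS::lda() should give very
# similar class assignments on the same two features.
lda_fit <- lda(Species ~ Petal.Length + Petal.Width, data = iris)
lda_pred <- predict(lda_fit, iris)$class
table(predicted = lda_pred, actual = iris$Species)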
|
/bayes/iris.R
|
no_license
|
TIR13/ML0
|
R
| false | false | 5,284 |
r
|
library(MASS)
get_mu <- function(xl)
{
m <- dim(xl)[2]
mu <- matrix(NA, 1, m)
for(i in 1:m)
{
mu[1,i] <- mean(xl[,i])
}
return(mu)
}
get_matrix <- function(xl,mu)
{
n <- dim(xl)[1]
m <- dim(xl)[2]
sigma <- matrix(0, m, m)
xl <- as.matrix(xl)
for(i in 1:n)
{
sigma <- sigma + (t(xl[i,]-mu) %*% (xl[i,]-mu))
}
return(sigma/(n-1))
}
get_coef <- function(mu1,mu2,sigma1,sigma2)
{
d1 <- det(sigma1)
d2 <- det(sigma2)
invs1 <- solve(sigma1)
invs2 <- solve(sigma2)
a <- invs1 - invs2
b <- invs1 %*% t(mu1) - invs2 %*% t(mu2)
A <- a[1,1] # x^2
B <- a[2,2] # y^2
C <- 2 * a[1, 2] # xy
D <- -2 * b[1, 1] # x
E <- -2 * b[2, 1] # y
G <- c(mu1 %*% invs1 %*% t(mu1) - mu2 %*% invs2 %*% t(mu2)) + log(abs(det(sigma1))) - log(abs(det(sigma2)))
func <- function(x, y) {
x^2 * A + y^2 * B + x*y*C + x*D + y*E + G
}
return(func)
}
xl <- iris[,3:5]
# Plot the training sample
colors <- c("setosa" = "red", "versicolor" = "green3","virginica" = "blue")
plot(iris[, 3:4], pch = 21, bg = colors[iris$Species],col = colors[iris$Species])
# Parameter estimation
f <- xl[xl[,3] == "setosa",1:2]
s <- xl[xl[,3] == "versicolor",1:2]
fs <- xl[xl[,3] == "virginica",1:2]
mu1 <- get_mu(f)
mu2 <- get_mu(s)
mu3 <- get_mu(fs)
sigma1 <- get_matrix(f, mu1)
sigma2 <- get_matrix(s, mu2)
sigma3 <- get_matrix(fs, mu3)
mu <- rbind(mu1,mu2,mu3)
sigma <- rbind(sigma1,sigma2,sigma3)
classif <- function(l,sigma,mu,classes,lamda=1,P=0.5){
m <- length(classes)
max <- -100000
class <- "unknown"
for(i in 1:m){
k <- log(lamda*P)-0.5*t(l-mu[i,]) %*% solve(sigma[(2*i-1):(2*i),1:2]) %*% (l-mu[i,])-0.5*log(abs(det(sigma[(2*i-1):(2*i),1:2])))
if( k > max ){
max <- k
class <- classes[i]
}
}
return(class)
}
for(i in seq(0,8,0.1)){
for(j in seq(0,2.5,0.1)){
l<-c(i,j)
class <- classif(l,sigma,mu,colors)
points(l[1],l[2],pch = 21, col=class, asp = 1)
}
}
func <- get_coef(mu1,mu3,sigma1,sigma3)
# Plot the discriminant function
y <- seq(0, 3, len = 100)
x <- seq(0, 8, len = 100)
z <- outer(x, y, func)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "green", add = TRUE)
func <- get_coef(mu1,mu2,sigma1,sigma2)
# Plot the discriminant function
y <- seq(0, 3, len = 100)
x <- seq(0, 8, len = 100)
z <- outer(x, y, func)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "blue", add = TRUE)
func <- get_coef(mu2,mu3,sigma2,sigma3)
# Plot the discriminant function
y <- seq(0, 3, len = 100)
x <- seq(0, 8, len = 100)
z <- outer(x, y, func)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "red", add = TRUE)
library(MASS)
get_mu <- function(xl)
{
m <- dim(xl)[2]
mu <- matrix(NA, 1, m)
for(i in 1:m)
{
mu[1,i] <- mean(xl[,i])
}
return(mu)
}
get_matrix <- function(xl1,xl2,xl3,mu1,mu2,mu3)
{
xl1 <- as.matrix(xl1)
xl2 <- as.matrix(xl2)
xl3 <- as.matrix(xl3)
n <- dim(xl1)[1]
m <- dim(xl2)[1]
d <- dim(xl3)[1]
nm <- n+m+d
col <- dim(xl1)[2]
sigma <- matrix(0, col, col)
for(i in 1:n)
{
sigma <- sigma + (t(xl1[i,]-mu1) %*% (xl1[i,]-mu1))
}
for(i in 1:m)
{
sigma <- sigma + (t(xl2[i,]-mu2) %*% (xl2[i,]-mu2))
}
for(i in 1:d)
{
sigma <- sigma + (t(xl3[i,]-mu3) %*% (xl3[i,]-mu3))
}
return(sigma/(nm+3))
}
get_coef <- function(mu1,mu2,sigma1,sigma2)
{
d1 <- det(sigma1)
d2 <- det(sigma2)
invs1 <- solve(sigma1)
invs2 <- solve(sigma2)
a <- invs1 - invs2
b <- invs1 %*% t(mu1) - invs2 %*% t(mu2)
D <- -2 * b[1, 1] # x
E <- -2 * b[2, 1] # y
G <- c(mu1 %*% invs1 %*% t(mu1) - mu2 %*% invs2 %*% t(mu2)) + log(abs(det(sigma1))) - log(abs(det(sigma2)))
func <- function(x, y) {
x*D + y*E + G
}
return(func)
}
xl <- iris[,3:5]
colors <- c("setosa" = "red", "versicolor" = "green3","virginica" = "blue")
plot(iris[, 3:4], pch = 21, bg = colors[iris$Species],col = colors[iris$Species])
# Parameter estimation
f <- xl[xl[,3] == "setosa",1:2]
s <- xl[xl[,3] == "versicolor",1:2]
fs <- xl[xl[,3] == "virginica",1:2]
mu1 <- get_mu(f)
mu2 <- get_mu(s)
mu3 <- get_mu(fs)
sigma <- get_matrix(f,s,fs,mu1,mu2,mu3)
mu <- rbind(mu1,mu2,mu3)
classif <- function(l,sigma,mu,classes,lamda=1,P=0.5){
m <- length(classes)
max <- -100000
class <- "unknown"
for(i in 1:m){
k <- log(lamda*P)-0.5*t(mu[i,]) %*% solve(sigma) %*% mu[i,]+t(l) %*% solve(sigma) %*% mu[i,]
if( k > max ){
max <- k
class <- classes[i]
}
}
return(class)
}
for(i in seq(0,8,0.1)){
for(j in seq(0,2.5,0.1)){
l<-c(i,j)
class <- classif(l,sigma,mu,colors)
points(l[1],l[2],pch = 21, col=class, asp = 1)
}
}
f <- get_coef(mu1,mu3,sigma,sigma)
x <- seq(0, 7, len = 200)
y <- seq(0, 5, len = 40)
z <- outer(x, y, f)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "green", add = TRUE)
f <- get_coef(mu1,mu2,sigma,sigma)
x <- seq(0, 7, len = 200)
y <- seq(0, 5, len = 40)
z <- outer(x, y, f)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "blue", add = TRUE)
f <- get_coef(mu2,mu3,sigma,sigma)
x <- seq(0, 7, len = 200)
y <- seq(0, 5, len = 40)
z <- outer(x, y, f)
contour(x, y, z, levels = 0, drawlabels = FALSE, lwd = 2.5, col = "red", add = TRUE)
|
#
#
# HAI Modeling
# All analysis are in log2 scale
#
# Developed by Saeid Parvandeh 12/27/2016
#
#-----------------------------------------
rm(list=ls())
library(ggplot2)
library(EnvStats)
library(gridExtra)
# read Baylor titers
load("baylor_titers.RData")
bay.d0 <- baylor_titers$Matched.Max.day0
bay.d28 <- baylor_titers$Max.day28
bay.age <- baylor_titers$Age
bay.max.fc <- baylor_titers$MAX.FC
bay.fc <- bay.max.fc
# log2 scale
bay.d0.log2 <- log2(bay.d0)
bay.d28.log2 <- log2(bay.d28)
bay.fc.log2 <- log2(bay.fc)
# read Emory titers
load("emory_titers.RData")
emory.d0 <- emory_titers$Matched.Max.day0
emory.d28 <- emory_titers$MAX.day28
emory.age <- emory_titers$age_reported
emory.max.fc <- emory_titers$MAX.FC
emory.fc <- emory.max.fc
# log2 scale
emory.d0.log2 <- log2(emory.d0)
emory.d28.log2 <- log2(emory.d28)
emory.fc.log2 <- log2(emory.fc)
# read Mayo titers
load("mayo_titers.RData")
mayo.d0 <- mayo_titers$day0
mayo.d28 <- mayo_titers$day28
mayo.age <- mayo_titers$age
mayo.fc <- mayo.d28/mayo.d0
# log2 scale
mayo.d0.log2 <- log2(mayo.d0)
mayo.d28.log2 <- log2(mayo.d28)
mayo.fc.log2 <- log2(mayo.fc)
# ----- fit baylor data -----
bay.df <- data.frame(d0=bay.d0.log2,fc=bay.fc.log2)
bay.logfit <- lm(fc~d0, data=bay.df) # need intercept
bay.logfit.sum <- summary(bay.logfit)
cat("R-squared")
bay.logfit.sum$r.squared
b1<-bay.logfit$coefficients[1]
b2<-bay.logfit$coefficients[2]
bay.logfitfn <- function(x) {b1*x^b2}
bay.line <- bay.logfitfn(bay.d0.log2)
plot(bay.d0.log2,bay.fc.log2, main="Baylor")
points(bay.d0.log2,bay.line,col="red",pch="x",cex=1.5)
legend(5,4.5,c("original data", "model"),pch=c("o","x"),col=c("black","red"))
bay.age.log2 <- log2(bay.age)
hist(bay.age.log2)
bay.age.quants <- quantile(bay.age.log2)
sum(bay.age.log2>bay.age.quants[4]) # median
bay.d0.log2[bay.age.log2>bay.age.quants[4]]
# effect of age: negative correlation with fc (and day28)
# the older you are the lower your fold change
cor(bay.age.log2,bay.fc.log2)
plot(bay.age.log2,bay.fc.log2)
abline(lm(bay.fc.log2~bay.age.log2))
# effect of age: no correlation with d0
cor(bay.age.log2,bay.d0.log2)
plot(bay.age.log2,bay.d0.log2)
abline(lm(bay.d0.log2~bay.age.log2))
# boxplot of all day-0 titers - log2 scale
par(mfrow = c(1, 2))
boxplot(bay.d0.log2, bay.d28.log2, emory.d0.log2, emory.d28.log2,
mayo.d0.log2, mayo.d28.log2,
main = "Range of D0 and D28 HAI titers", ylab = "HAI Fold Change",
names = c("Baylor-D0", "Baylor-D28", "Emory-D0", "Emory-D28",
"Mayo-D0", "Mayo-D28"),
col = c("yellow", "yellow", "palevioletred1", "palevioletred1",
"royalblue2", "royalblue2"),
cex.axis = .95, las=2, cex.main = 1.0, cex.lab = 1.5)
boxplot(bay.fc.log2, emory.fc.log2, mayo.fc.log2,
main = "Range of Fold-Change HAI titers", ylab = "HAI Fold Change",
names = c("Baylor-FC", "Emory-FC", "Mayo-FC"),
col = c("yellow", "palevioletred1", "royalblue2"),
cex.axis = .95, las=2, cex.main = 1.0, cex.lab = 1.5)
# ------ Train Baylor Data -------
# we start with training Baylor and test on Baylor
bay.log.predict<-predict(bay.logfit, newdata=data.frame(d0=bay.d0.log2), interval="confidence", level=.95, se.fit=T)
bay.ci <- pointwise(bay.log.predict, coverage=0.95, individual=T)
bay.plot.df <- data.frame(d0=bay.d0.log2,fc=bay.fc.log2,yhat=bay.ci$fit[,1],
lwr=bay.ci$fit[,2],
upr=bay.ci$fit[,3])
bay.plot.df$Age <- bay.age
bay.logfit.sum <- summary(bay.logfit)
bay.r2 <- bay.logfit.sum$r.squared
bay.b1<-bay.logfit$coefficients[1]
bay.b2<-bay.logfit$coefficients[2]
lb1 <- paste("R^2 ==", round(bay.r2,digits=2))
eq <- bquote("fc = " ~ .(round(bay.b1,digits=2)) ~ day[0]^{.(round(bay.b2,digits=3))})
lb2<- as.character(as.expression(eq))
# Find the linear model coefficients
lmc <- coef(lm(yhat ~ d0, data = bay.plot.df))
# Create a function to produce the fitted line
bay_fun_line <- function(x) lmc[1] + lmc[2] * x
g1 <- ggplot(bay.plot.df, aes(x=d0, y = fc)) +
geom_point(aes(colour = "Baylor Data"), size = 5, shape = 1) +
geom_line(aes(linetype = "Baylor Data"), alpha = 0) +
stat_function(fun = bay_fun_line, aes(colour = "Baylor Model", linetype = "Baylor Model"), size = 1) +
xlab("log2(Day 0) HAI") +
ylab("log2(Day 28 / Day 0) HAI") +
ggtitle("Training: Baylor Data") +
annotate("text", x=5.0, y=4.7, hjust=0, label=lb1, parse=TRUE,size=5) +
annotate("text", x=5.0, y=5.0, hjust=0, label=lb2, parse=TRUE,size=5) +
theme(axis.text.x = element_text(size = 20),
axis.title.x = element_text(size=20),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
panel.background = element_rect(fill = 'white', colour = 'black'), #element_blank(),
panel.grid.major = element_line(colour = "gray"),
panel.grid.minor = element_line(colour = "gray"),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"),
legend.text = element_text(size = 13),
legend.position = c(.77,.9)) +
scale_linetype_manual(values = c("blank", "dashed"), guide = FALSE) +
scale_colour_manual(name ="HAI", values = c("black", "blue"),
guide = guide_legend(override.aes = list(
linetype = c("dashed", "blank"), size = c(1, 5))),
labels = c("Baylor Model", "Baylor Data"),
breaks = c("Baylor Model", "Baylor Data"))
# ------- Test on Emory with different colors for each year -------
emory.log.predict<-predict(bay.logfit, newdata=data.frame(d0=emory.d0.log2), interval="confidence", level=.95, se.fit=T)
emory.ci <- pointwise(emory.log.predict, coverage=0.95, individual=T)
emory.plot.df <- data.frame(d0=emory.d0.log2,fc=emory.fc.log2,yhat=emory.ci$fit[,1],
lwr=emory.ci$fit[,2],
upr=emory.ci$fit[,3])
# Function for cbind data frames with different rows
cbind.na<-function(df1, df2){
#Collect all unique rownames
total.rownames<-union(x = rownames(x = df1),y = rownames(x=df2))
#Create a new dataframe with rownames
df<-data.frame(row.names = total.rownames)
#Get absent rownames for both of the dataframe
absent.names.1<-setdiff(x = rownames(df1),y = rownames(df))
absent.names.2<-setdiff(x = rownames(df2),y = rownames(df))
#Fill absents with NAs
df1.fixed<-data.frame(row.names = absent.names.1,matrix(data = NA,nrow = length(absent.names.1),ncol=ncol(df1)))
colnames(df1.fixed)<-colnames(df1)
df1<-rbind(df1,df1.fixed)
df2.fixed<-data.frame(row.names = absent.names.2,matrix(data = NA,nrow = length(absent.names.2),ncol=ncol(df2)))
colnames(df2.fixed)<-colnames(df2)
df2<-rbind(df2,df2.fixed)
#Finally cbind into new dataframe
df<-cbind(df,df1[rownames(df),],df2[rownames(df),])
return(df)
}
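# A tiny hedged check of cbind.na() on toy data (not part of the original analysis):
# rows are matched by rowname and the shorter frame is padded with NA through the
# rowname indexing in the final cbind().
toy1 <- data.frame(a = 1:3, row.names = c("r1", "r2", "r3"))
toy2 <- data.frame(b = 4:5, row.names = c("r1", "r2"))
cbind.na(toy1, toy2)   # third row of the second column comes back as NA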
emory_2007.df <- data.frame(d01=emory.d0.log2[1:28], fc1=emory.fc.log2[1:28])
emory_2009.df <- data.frame(d02=emory.d0.log2[29:86], fc2=emory.fc.log2[29:86])
emory.prebind.df <- cbind.na(emory_2007.df, emory_2009.df)
emory.plot.df_2 <- cbind.na(emory.prebind.df, emory.plot.df)
legend.fit <- "Baylor Model"
legend.dat1 <- "Emory Data 2007-2009"
legend.dat2 <- "Emory Data 2009-2011"
override.shape <- c(1, 2, 4)
override.color <- c("purple","black","red")
emory.logfit<-predict(bay.logfit, newdata=data.frame(d0=emory.d0.log2))
emory.r2 <- summary(lm(emory.logfit~emory.fc.log2))$r.squared
lb1 <- paste("R^2 ==", round(emory.r2,digits=2))
# Find the linear model coefficients
lmc <- coef(lm(yhat ~ d0, data = emory.plot.df_2))
# Create a function to produce the fitted line
emory_fun_line <- function(x) lmc[1] + lmc[2] * x
g2 <- ggplot(emory.plot.df_2, aes(x = d0, y = fc)) +
geom_point(aes(x = d01, y = fc1, color="Emory Data 2007-2009"), shape = 4, size = 5, show.legend = F) +
# geom_line(aes(x = d01, y = fc1, linetype = "Emory Data 2007-2009"), alpha = 0) +
geom_point(aes(x = d02, y = fc2, color="Emory Data 2009-2011"), shape = 1, size = 5, show.legend = F) +
# geom_line(aes(x = d02, y = fc2, linetype = "Emory Data 2009-2011"), alpha = 0) +
stat_function(fun = emory_fun_line, aes(colour = "Baylor Model", linetype = "Baylor Model"), size = 1) +
xlab("log2(Day 0) HAI") +
ylab("log2(Day 28 / Day 0) HAI") +
ggtitle("Testing: Emory Data") +
annotate("text", x=6.5, y=6.5, hjust=0, label=lb1, parse=TRUE,size=5) +
annotate("text", x=6.5, y=7.0, hjust=0, label=lb2, parse=TRUE,size=5) +
theme(axis.text.x = element_text(size = 20),
axis.title.x = element_text(size=20),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
panel.background = element_rect(fill = 'white', colour = 'black'), #element_blank(),
panel.grid.major = element_line(colour = "gray"),
panel.grid.minor = element_line(colour = "gray"),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"),
legend.text = element_text(size = 13),
legend.position = c(.8,.9)) +
scale_linetype_manual(values = c("dashed", "blank", "blank"), guide = FALSE) +
scale_colour_manual(name ="HAI", values = c("blue", "purple", "black"),
guide = guide_legend(override.aes = list(
linetype = c("dashed", "blank", "blank"), size = c(1, 5, 5))))
# ------ Test on Mayo ------
mayo.log.predict <- predict(bay.logfit, newdata = data.frame(d0=mayo.d0.log2), interval = "confidence", level = .95, se.fit = T)
mayo.ci <- pointwise(mayo.log.predict, coverage = 0.95, individual = T)
mayo.plot.df <- data.frame(d0=mayo.d0.log2, fc=mayo.fc.log2, yhat=mayo.ci$fit[,1],
lwr=mayo.ci$fit[, 2],
upr=mayo.ci$fit[, 3])
mayo.logfit<-predict(bay.logfit, newdata=data.frame(d0=mayo.d0.log2))
mayo.r2 <- summary(lm(mayo.logfit~mayo.fc.log2))$r.squared
lb1 <- paste("R^2 ==", round(mayo.r2,digits=2))
# Find the linear model coefficients
lmc <- coef(lm(yhat ~ d0, data = mayo.plot.df))
# Create a function to produce the fitted line
mayo_fun_line <- function(x) lmc[1] + lmc[2] * x
g3 <- ggplot(mayo.plot.df, aes(x=d0, y = fc)) +
geom_point(aes(colour = "Mayo Data"), size = 5, shape = 1) +
geom_line(aes(linetype = "Mayo Data"), alpha = 0) +
stat_function(fun = mayo_fun_line, aes(colour = "Baylor Model", linetype = "Baylor Model"), size = 1) +
xlab("log2(Day 0) HAI") +
ylab("log2(Day 28 / Day 0) HAI") +
ggtitle("Testing: Mayo Data") +
annotate("text", x=8.0, y=3.7, hjust=0, label=lb1, parse=TRUE,size=5) +
annotate("text", x=8.0, y=4.0, hjust=0, label=lb2, parse=TRUE,size=5) +
theme(axis.text.x = element_text(size = 20),
axis.title.x = element_text(size=20),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
panel.background = element_rect(fill = 'white', colour = 'black'), #element_blank(),
panel.grid.major = element_line(colour = "gray"),
panel.grid.minor = element_line(colour = "gray"),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"),
legend.text = element_text(size = 13),
legend.position = c(.77,.9)) +
scale_linetype_manual(values = c("dashed", "blank"), guide = FALSE) +
scale_colour_manual(name ="HAI", values = c("blue", "black"),
guide = guide_legend(override.aes = list(
linetype = c("dashed", "blank"), size = c(1, 5))))
# barplot of number of subjects
barplot_data <- data.frame(Data = c("Baylor", "Emory_2007", "Emory_2009", "Mayo"),
Subjects = c(length(bay.d0), dim(emory_2007.df)[1], dim(emory_2009.df)[1],
length(mayo.d0)))
g4 <- ggplot(barplot_data, aes(x=Data, y=Subjects)) + geom_bar(stat="identity") +
ggtitle("Number of Subjects in Each Data") +
theme(axis.text.x = element_text(size = 18),
axis.title.x = element_text(size=18),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"))
grid.arrange(g1, g2, g3, g4, ncol = 2, nrow = 2)
|
/R script/5_Modeling_log2(HAI).R
|
no_license
|
saeid651/predictHAI
|
R
| false | false | 12,446 |
r
|
#
#
# HAI Modeling
# All analysis are in log2 scale
#
# Developed by Saeid Parvandeh 12/27/2016
#
#-----------------------------------------
rm(list=ls())
library(ggplot2)
library(EnvStats)
library(gridExtra)
# read Baylor titers
load("baylor_titers.RData")
bay.d0 <- baylor_titers$Matched.Max.day0
bay.d28 <- baylor_titers$Max.day28
bay.age <- baylor_titers$Age
bay.max.fc <- baylor_titers$MAX.FC
bay.fc <- bay.max.fc
# log2 scale
bay.d0.log2 <- log2(bay.d0)
bay.d28.log2 <- log2(bay.d28)
bay.fc.log2 <- log2(bay.fc)
# read Emory titers
load("emory_titers.RData")
emory.d0 <- emory_titers$Matched.Max.day0
emory.d28 <- emory_titers$MAX.day28
emory.age <- emory_titers$age_reported
emory.max.fc <- emory_titers$MAX.FC
emory.fc <- emory.max.fc
# log2 scale
emory.d0.log2 <- log2(emory.d0)
emory.d28.log2 <- log2(emory.d28)
emory.fc.log2 <- log2(emory.fc)
# read Mayo titers
load("mayo_titers.RData")
mayo.d0 <- mayo_titers$day0
mayo.d28 <- mayo_titers$day28
mayo.age <- mayo_titers$age
mayo.fc <- mayo.d28/mayo.d0
# log2 scale
mayo.d0.log2 <- log2(mayo.d0)
mayo.d28.log2 <- log2(mayo.d28)
mayo.fc.log2 <- log2(mayo.fc)
# ----- fit baylor data -----
bay.df <- data.frame(d0=bay.d0.log2,fc=bay.fc.log2)
bay.logfit <- lm(fc~d0, data=bay.df) # need intercept
bay.logfit.sum <- summary(bay.logfit)
cat("R-squared")
bay.logfit.sum$r.squared
b1<-bay.logfit$coefficients[1]
b2<-bay.logfit$coefficients[2]
bay.logfitfn <- function(x) {b1*x^b2}
bay.line <- bay.logfitfn(bay.d0.log2)
plot(bay.d0.log2,bay.fc.log2, main="Baylor")
points(bay.d0.log2,bay.line,col="red",pch="x",cex=1.5)
legend(5,4.5,c("original data", "model"),pch=c("o","x"),col=c("black","red"))
bay.age.log2 <- log2(bay.age)
hist(bay.age.log2)
bay.age.quants <- quantile(bay.age.log2)
sum(bay.age.log2>bay.age.quants[4]) # median
bay.d0.log2[bay.age.log2>bay.age.quants[4]]
# effect of age: negative correlation with fc (and day28)
# the older you are the lower your fold change
cor(bay.age.log2,bay.fc.log2)
plot(bay.age.log2,bay.fc.log2)
abline(lm(bay.fc.log2~bay.age.log2))
# effect of age: no correlation with d0
cor(bay.age.log2,bay.d0.log2)
plot(bay.age.log2,bay.d0.log2)
abline(lm(bay.d0.log2~bay.age.log2))
# boxplot of all day-0 titers - log2 scale
par(mfrow = c(1, 2))
boxplot(bay.d0.log2, bay.d28.log2, emory.d0.log2, emory.d28.log2,
mayo.d0.log2, mayo.d28.log2,
main = "Range of D0 and D28 HAI titers", ylab = "HAI Fold Change",
names = c("Baylor-D0", "Baylor-D28", "Emory-D0", "Emory-D28",
"Mayo-D0", "Mayo-D28"),
col = c("yellow", "yellow", "palevioletred1", "palevioletred1",
"royalblue2", "royalblue2"),
cex.axis = .95, las=2, cex.main = 1.0, cex.lab = 1.5)
boxplot(bay.fc.log2, emory.fc.log2, mayo.fc.log2,
main = "Range of Fold-Change HAI titers", ylab = "HAI Fold Change",
names = c("Baylor-FC", "Emory-FC", "Mayo-FC"),
col = c("yellow", "palevioletred1", "royalblue2"),
cex.axis = .95, las=2, cex.main = 1.0, cex.lab = 1.5)
# ------ Train Baylor Data -------
# we start with training Baylor and test on Baylor
bay.log.predict<-predict(bay.logfit, newdata=data.frame(d0=bay.d0.log2), interval="confidence", level=.95, se.fit=T)
bay.ci <- pointwise(bay.log.predict, coverage=0.95, individual=T)
bay.plot.df <- data.frame(d0=bay.d0.log2,fc=bay.fc.log2,yhat=bay.ci$fit[,1],
lwr=bay.ci$fit[,2],
upr=bay.ci$fit[,3])
bay.plot.df$Age <- bay.age
bay.logfit.sum <- summary(bay.logfit)
bay.r2 <- bay.logfit.sum$r.squared
bay.b1<-bay.logfit$coefficients[1]
bay.b2<-bay.logfit$coefficients[2]
lb1 <- paste("R^2 ==", round(bay.r2,digits=2))
eq <- bquote("fc = " ~ .(round(bay.b1,digits=2)) ~ day[0]^{.(round(bay.b2,digits=3))})
lb2<- as.character(as.expression(eq))
# Find the linear model coefficients
lmc <- coef(lm(yhat ~ d0, data = bay.plot.df))
# Create a function to produce the fitted line
bay_fun_line <- function(x) lmc[1] + lmc[2] * x
g1 <- ggplot(bay.plot.df, aes(x=d0, y = fc)) +
geom_point(aes(colour = "Baylor Data"), size = 5, shape = 1) +
geom_line(aes(linetype = "Baylor Data"), alpha = 0) +
stat_function(fun = bay_fun_line, aes(colour = "Baylor Model", linetype = "Baylor Model"), size = 1) +
xlab("log2(Day 0) HAI") +
ylab("log2(Day 28 / Day 0) HAI") +
ggtitle("Training: Baylor Data") +
annotate("text", x=5.0, y=4.7, hjust=0, label=lb1, parse=TRUE,size=5) +
annotate("text", x=5.0, y=5.0, hjust=0, label=lb2, parse=TRUE,size=5) +
theme(axis.text.x = element_text(size = 20),
axis.title.x = element_text(size=20),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
panel.background = element_rect(fill = 'white', colour = 'black'), #element_blank(),
panel.grid.major = element_line(colour = "gray"),
panel.grid.minor = element_line(colour = "gray"),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"),
legend.text = element_text(size = 13),
legend.position = c(.77,.9)) +
scale_linetype_manual(values = c("blank", "dashed"), guide = FALSE) +
scale_colour_manual(name ="HAI", values = c("black", "blue"),
guide = guide_legend(override.aes = list(
linetype = c("dashed", "blank"), size = c(1, 5))),
labels = c("Baylor Model", "Baylor Data"),
breaks = c("Baylor Model", "Baylor Data"))
# ------- Test on Emory with different colors for each year -------
emory.log.predict<-predict(bay.logfit, newdata=data.frame(d0=emory.d0.log2), interval="confidence", level=.95, se.fit=T)
emory.ci <- pointwise(emory.log.predict, coverage=0.95, individual=T)
emory.plot.df <- data.frame(d0=emory.d0.log2,fc=emory.fc.log2,yhat=emory.ci$fit[,1],
lwr=emory.ci$fit[,2],
upr=emory.ci$fit[,3])
# Function for cbind data frames with different rows
cbind.na<-function(df1, df2){
#Collect all unique rownames
total.rownames<-union(x = rownames(x = df1),y = rownames(x=df2))
#Create a new dataframe with rownames
df<-data.frame(row.names = total.rownames)
#Get absent rownames for both of the dataframe
absent.names.1<-setdiff(x = rownames(df1),y = rownames(df))
absent.names.2<-setdiff(x = rownames(df2),y = rownames(df))
#Fill absents with NAs
df1.fixed<-data.frame(row.names = absent.names.1,matrix(data = NA,nrow = length(absent.names.1),ncol=ncol(df1)))
colnames(df1.fixed)<-colnames(df1)
df1<-rbind(df1,df1.fixed)
df2.fixed<-data.frame(row.names = absent.names.2,matrix(data = NA,nrow = length(absent.names.2),ncol=ncol(df2)))
colnames(df2.fixed)<-colnames(df2)
df2<-rbind(df2,df2.fixed)
#Finally cbind into new dataframe
df<-cbind(df,df1[rownames(df),],df2[rownames(df),])
return(df)
}
emory_2007.df <- data.frame(d01=emory.d0.log2[1:28], fc1=emory.fc.log2[1:28])
emory_2009.df <- data.frame(d02=emory.d0.log2[29:86], fc2=emory.fc.log2[29:86])
emory.prebind.df <- cbind.na(emory_2007.df, emory_2009.df)
emory.plot.df_2 <- cbind.na(emory.prebind.df, emory.plot.df)
legend.fit <- "Baylor Model"
legend.dat1 <- "Emory Data 2007-2009"
legend.dat2 <- "Emory Data 2009-2011"
override.shape <- c(1, 2, 4)
override.color <- c("purple","black","red")
emory.logfit<-predict(bay.logfit, newdata=data.frame(d0=emory.d0.log2))
emory.r2 <- summary(lm(emory.logfit~emory.fc.log2))$r.squared
lb1 <- paste("R^2 ==", round(emory.r2,digits=2))
# Find the linear model coefficients
lmc <- coef(lm(yhat ~ d0, data = emory.plot.df_2))
# Create a function to produce the fitted line
emory_fun_line <- function(x) lmc[1] + lmc[2] * x
g2 <- ggplot(emory.plot.df_2, aes(x = d0, y = fc)) +
geom_point(aes(x = d01, y = fc1, color="Emory Data 2007-2009"), shape = 4, size = 5, show.legend = F) +
# geom_line(aes(x = d01, y = fc1, linetype = "Emory Data 2007-2009"), alpha = 0) +
geom_point(aes(x = d02, y = fc2, color="Emory Data 2009-2011"), shape = 1, size = 5, show.legend = F) +
# geom_line(aes(x = d02, y = fc2, linetype = "Emory Data 2009-2011"), alpha = 0) +
stat_function(fun = emory_fun_line, aes(colour = "Baylor Model", linetype = "Baylor Model"), size = 1) +
xlab("log2(Day 0) HAI") +
ylab("log2(Day 28 / Day 0) HAI") +
ggtitle("Testing: Emory Data") +
annotate("text", x=6.5, y=6.5, hjust=0, label=lb1, parse=TRUE,size=5) +
annotate("text", x=6.5, y=7.0, hjust=0, label=lb2, parse=TRUE,size=5) +
theme(axis.text.x = element_text(size = 20),
axis.title.x = element_text(size=20),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
panel.background = element_rect(fill = 'white', colour = 'black'), #element_blank(),
panel.grid.major = element_line(colour = "gray"),
panel.grid.minor = element_line(colour = "gray"),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"),
legend.text = element_text(size = 13),
legend.position = c(.8,.9)) +
scale_linetype_manual(values = c("dashed", "blank", "blank"), guide = FALSE) +
scale_colour_manual(name ="HAI", values = c("blue", "purple", "black"),
guide = guide_legend(override.aes = list(
linetype = c("dashed", "blank", "blank"), size = c(1, 5, 5))))
# ------ Test on Mayo ------
mayo.log.predict <- predict(bay.logfit, newdata = data.frame(d0=mayo.d0.log2), interval = "confidence", level = .95, se.fit = T)
mayo.ci <- pointwise(mayo.log.predict, coverage = 0.95, individual = T)
mayo.plot.df <- data.frame(d0=mayo.d0.log2, fc=mayo.fc.log2, yhat=mayo.ci$fit[,1],
lwr=mayo.ci$fit[, 2],
upr=mayo.ci$fit[, 3])
mayo.logfit<-predict(bay.logfit, newdata=data.frame(d0=mayo.d0.log2))
mayo.r2 <- summary(lm(mayo.logfit~mayo.fc.log2))$r.squared
lb1 <- paste("R^2 ==", round(mayo.r2,digits=2))
# Find the linear model coefficients
lmc <- coef(lm(yhat ~ d0, data = mayo.plot.df))
# Create a function to produce the fitted line
mayo_fun_line <- function(x) lmc[1] + lmc[2] * x
g3 <- ggplot(mayo.plot.df, aes(x=d0, y = fc)) +
geom_point(aes(colour = "Mayo Data"), size = 5, shape = 1) +
geom_line(aes(linetype = "Mayo Data"), alpha = 0) +
stat_function(fun = mayo_fun_line, aes(colour = "Baylor Model", linetype = "Baylor Model"), size = 1) +
xlab("log2(Day 0) HAI") +
ylab("log2(Day 28 / Day 0) HAI") +
ggtitle("Testing: Mayo Data") +
annotate("text", x=8.0, y=3.7, hjust=0, label=lb1, parse=TRUE,size=5) +
annotate("text", x=8.0, y=4.0, hjust=0, label=lb2, parse=TRUE,size=5) +
theme(axis.text.x = element_text(size = 20),
axis.title.x = element_text(size=20),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
panel.background = element_rect(fill = 'white', colour = 'black'), #element_blank(),
panel.grid.major = element_line(colour = "gray"),
panel.grid.minor = element_line(colour = "gray"),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"),
legend.text = element_text(size = 13),
legend.position = c(.77,.9)) +
scale_linetype_manual(values = c("dashed", "blank"), guide = FALSE) +
scale_colour_manual(name ="HAI", values = c("blue", "black"),
guide = guide_legend(override.aes = list(
linetype = c("dashed", "blank"), size = c(1, 5))))
# barplot of number of subjects
barplot_data <- data.frame(Data = c("Baylor", "Emory_2007", "Emory_2009", "Mayo"),
Subjects = c(length(bay.d0), dim(emory_2007.df)[1], dim(emory_2009.df)[1],
length(mayo.d0)))
g4 <- ggplot(barplot_data, aes(x=Data, y=Subjects)) + geom_bar(stat="identity") +
ggtitle("Number of Subjects in Each Data") +
theme(axis.text.x = element_text(size = 18),
axis.title.x = element_text(size=18),
axis.text.y = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.text=element_text(size=20),
plot.title = element_text(size = 20, face = "bold"))
grid.arrange(g1, g2, g3, g4, ncol = 2, nrow = 2)
|
############################
# S3 method for gmnl package
#############################
#' @rdname gmnl
#' @method print gmnl
#' @import stats
#' @export
print.gmnl <- function(x, digits = max(3, getOption("digits") - 3),
width = getOption("width"), ...){
cat("\nCall:\n", deparse(x$call),"\n\n", sep = "")
cat("\nCoefficients:\n")
print.default(format(coef(x), digits = digits), print.gap = 2,
quote = FALSE)
cat("\n")
invisible(x)
}
#' @rdname gmnl
#' @method summary gmnl
#' @import stats
#' @export
summary.gmnl <- function(object,...){
b <- object$coefficients
std.err <- sqrt(diag(vcov(object)))
z <- b / std.err
p <- 2 * (1 - pnorm(abs(z)))
CoefTable <- cbind(b, std.err, z, p)
colnames(CoefTable) <- c("Estimate", "Std. Error", "z-value", "Pr(>|z|)")
object$CoefTable <- CoefTable
class(object) <- c("summary.gmnl", "gmnl")
return(object)
}
#' @rdname gmnl
#' @method print summary.gmnl
#' @import stats
#' @export
print.summary.gmnl <- function(x, digits = max(3, getOption("digits") - 2),
width = getOption("width"),
...){
cat(paste("\nModel estimated on:", format(Sys.time(), "%a %b %d %X %Y"), "\n"))
cat("\nCall:\n")
cat(paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n", sep = "")
cat("\nFrequencies of categories:\n")
print(prop.table(x$freq), digits = digits)
cat("\n")
cat(paste("The estimation took:", make.time(x) ,"\n"))
cat("\nCoefficients:\n")
printCoefmat(x$CoefTable, digits = digits)
cat(paste("\nOptimization of log-likelihood by", x$logLik$type))
cat(paste("\nLog Likelihood:", signif(x$logLik$maximum, digits)))
cat(paste("\nNumber of observations:", x$logLik$nobs))
cat(paste("\nNumber of iterations:" , x$logLik$iterations))
cat(paste("\nExit of MLE:", x$logLik$message))
if (!(x$model == "mnl" | x$model == "lc")) cat(paste("\nSimulation based on", x$R, "draws"))
invisible(x)
}
#' vcov method for gmnl objects
#'
#' The \code{vcov} method for \code{gmnl} objects extracts the covariance matrix of the coefficients or of the random parameters. It also allows one to obtain the standard errors for the variance-covariance matrix of the random parameters.
#'
#' @param object a fitted model of class \code{gmnl},
#' @param what indicates which covariance matrix has to be extracted. The default is \code{coefficient}; in this case \code{vcov} behaves as usual. If \code{what = "ranp"}, the covariance matrix of the random parameters is returned by default,
#' @param type if the model is estimated with random parameters, then this argument indicates what matrix should be returned. If \code{type = "cov"}, then the covariance matrix of the random parameters is returned; if \code{type = "cor"} then the correlation matrix of the random parameters is returned; if \code{type = "sd"} then the standard deviation of the random parameters is returned,
#' @param se if \code{TRUE} and \code{type = "cov"}, the standard error of the covariance matrix of the random parameters is returned; if \code{TRUE} and \code{type = "sd"}, the standard error of the standard deviation of the random parameters is returned. This argument is valid only if the model is estimated using correlated random parameters,
#' @param Q this argument is only valid if the "\code{mm}" (MM-MNL) model is estimated. It indicates the class for which the variance-covariance matrix is computed,
#' @param digits number of digits,
#' @param ... further arguments
#' @details This new interface replaces the \code{cor.gmnl}, \code{cov.gmnl} and \code{se.cov.gmnl} functions which are deprecated.
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of multinomial logit models with random parameters.
#' @method vcov gmnl
#' @import stats
#' @export
vcov.gmnl <- function(object, what = c('coefficient', 'ranp'), type = c('cov', 'cor', 'sd'),
se = FALSE, Q = NULL, digits = max(3, getOption("digits") - 2), ...)
{
what <- match.arg(what)
type <- match.arg(type)
if (what == 'coefficient') {
H <- object$logLik$hessian
vcov <- solve(-H)
rownames(vcov) <- colnames(vcov) <- names(coef(object))
return(vcov)
}
if (what == 'ranp') {
if (se) {
if (type == 'cov') se.cov.gmnl(object, sd = FALSE, Q = Q, digits = digits)
if (type == 'sd') se.cov.gmnl(object, sd = TRUE, Q = Q, digits = digits)
if (type == 'cor') stop("standard error for correlation coefficients not implemented yet")
} else {
if (type == 'cov') print(cov.gmnl(object, Q = Q))
if (type == 'cor') print(cor.gmnl(object, Q = Q))
if (type == 'sd') print(sqrt(diag(cov.gmnl(object, Q))))
}
}
}
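# Illustrative sketch of the what/type/se interface (hedged: `fit` is a
# hypothetical gmnl model estimated with correlated random parameters, and the
# block is wrapped in `if (FALSE)` so it is never executed).
if (FALSE) {
  vcov(fit)                                         # covariance of the estimates
  vcov(fit, what = "ranp")                          # covariance of the random parameters
  vcov(fit, what = "ranp", type = "cor")            # their correlation matrix
  vcov(fit, what = "ranp", type = "sd", se = TRUE)  # standard deviations with std. errors
}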
#' @rdname gmnl
#' @method update gmnl
#' @import stats
#' @export
update.gmnl <- function(object, new, ...){
call <- object$call
if (is.null(call))
stop("need an object with call component")
extras <- match.call(expand.dots = FALSE)$...
if (!missing(new))
call$formula <- update(formula(object), new)
if (length(extras) > 0) {
existing <- !is.na(match(names(extras), names(call)))
for (a in names(extras)[existing]) call[[a]] <- extras[[a]]
if (any(!existing)) {
call <- c(as.list(call), extras[!existing])
call <- as.call(call)
}
}
eval(call, parent.frame())
}
#' @rdname gmnl
#' @export
coef.gmnl <- function(object, ...){
result <- object$coefficients
return(result)
}
#' @rdname gmnl
#' @export
model.matrix.gmnl <- function(object, ...){
model.matrix(object$formula, object$mf)
}
model.response.gmnl <- function(object, ...){
y.name <- paste(deparse(object$formula[[2]]))
object$mf[[y.name]]
}
#' @rdname gmnl
#' @export
residuals.gmnl <- function(object, outcome = TRUE, ...){
if (!outcome) {
result <- object$residuals
}
else{
J <- ncol(object$residuals)
y <- matrix(model.response.gmnl(object), ncol = J, byrow = T)
result <- apply(y * object$residuals, 1, sum)
}
result
}
#' @rdname gmnl
#' @import stats
df.residual.gmnl <- function(object, ...){
n <- length(residuals(object))
K <- length(coef(object))
return(n - K)
}
#' @rdname gmnl
#' @export
fitted.gmnl <- function(object, outcome = TRUE, ...){
if (outcome) result <- object$prob.ind
else result <- object$prob.alt
result
}
#' @rdname gmnl
#' @export
logLik.gmnl <- function(object,...){
structure(object$logLik$maximum[[1]], df = length(object$coefficients),
nobs = object$logLik$nobs, class = "logLik")
}
#' Get Model Summaries for Use with "mtable"
#'
#' A generic function to collect coefficients and summary statistics from a \code{gmnl} object. It is used in \code{mtable}.
#'
#' @param obj a \code{gmnl} object,
#' @param alpha level of the confidence intervals,
#' @param ... further arguments,
#'
#' @details For more details see package \pkg{memisc}
#' @examples
#' ## Estimate MNL models
#' data("TravelMode", package = "AER")
#' library(mlogit)
#' TM <- mlogit.data(TravelMode, choice = "choice", shape = "long",
#' alt.levels = c("air", "train", "bus", "car"), chid.var = "individual")
#'
#' mnl.1 <- gmnl(choice ~ wait + vcost + travel + gcost | 0, data = TM)
#' mnl.2 <- gmnl(choice ~ wait + vcost | 0, data = TM)
#'
#' ## Table
#' library(memisc)
#' mtable("MNL 1"= mnl.1, "MNL 2" = mnl.2,
#' summary.stats = c("N", "Log-likelihood", "BIC", "AIC"))
#' @import stats
#' @export getSummary.gmnl
getSummary.gmnl <- function(obj, alpha = 0.05, ...){
smry <- summary(obj)
coef <- smry$CoefTable
lower <- coef[, 1] - coef[, 2] * qnorm(alpha / 2)
upper <- coef[, 1] + coef[, 2] * qnorm(alpha / 2)
coef <- cbind(coef, lower, upper)
colnames(coef) <- c("est", "se", "stat", "p", "lwr", "upr")
N <- obj$logLik$nobs
ll <- logLik(obj)
sumstat <- c(logLik = ll, deviance = NA, AIC = AIC(obj), BIC = BIC(obj), N = N,
LR = NA, df = NA, p = NA, Aldrich.Nelson = NA, McFadden = NA, Cox.Snell = NA,
Nagelkerke = NA)
list(coef = coef, sumstat = sumstat, contrasts = obj$contrasts,
xlevels = NULL, call = obj$call)
}
#' Akaike's Information Criterion
#'
#' Calculate Akaike's Information Criterion (AIC) or the Bayesian
#' Information Criterion (BIC) for an object of class \code{gmnl}.
#'
#' @param object a fitted model of class \code{gmnl}.
#' @param ... additional arguments to be passed to or from other functions.
#' @param k a numeric value, used as the penalty coefficient for the number of parameters
#' in the fitted model.
#' @details For more information see \code{\link[stats]{AIC}} or \code{\link[stats]{BIC}}
#' @return A numeric value with the corresponding AIC or BIC value.
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of multinomial logit models with observed and unobserved individual heterogeneity.
#'
#' @import stats
#' @method AIC gmnl
#' @export
AIC.gmnl <- function(object, ..., k = 2){
return(-2 * object$logLik$maximum[[1]] + k * length(coef(object)))
}
#' @rdname AIC.gmnl
#' @import stats
#' @method BIC gmnl
#' @export
#' @examples
#'
#' ## Estimate MNL model
#' data("TravelMode", package = "AER")
#' library(mlogit)
#' TM <- mlogit.data(TravelMode, choice = "choice", shape = "long",
#' alt.levels = c("air", "train", "bus", "car"), chid.var = "individual")
#'
#' mnl <- gmnl(choice ~ wait + vcost + travel + gcost | 0 , data = TM)
#' AIC(mnl)
#' BIC(mnl)
BIC.gmnl <- function(object, ...){
return(AIC(object, k = log(object$logLik$nobs)))
}
#### Methods for sandwiches
#' Bread for Sandwiches
#'
#' Computes the ``bread'' of the sandwich covariance matrix for objects of class \code{gmnl}.
#'
#' @param x a fitted model of class \code{gmnl}.
#' @param ... other arguments when \code{bread} is applied to another
#' class object.
#' @return The covariance matrix multiplied by the number of observations.
#' @details For more information see \code{\link[sandwich]{bread}} from the package \pkg{sandwich}.
#' @references Zeileis A (2006), Object-oriented Computation of Sandwich
#' Estimators. Journal of Statistical Software, 16(9), 1--16.
#' @method bread gmnl
#' @import stats
#' @export bread.gmnl
bread.gmnl <- function(x, ... ){
return( vcov( x ) * x$logLik$nobs)
}
#' Gradient for Observations
#'
#' It extracts the gradient for each observation evaluated at the estimated parameters for an object of class \code{gmnl}.
#'
#' @param x a fitted model of class \code{gmnl}.
#' @param ... other arguments. Ignored.
#' @return The gradient matrix of dimension \eqn{n \times K}
#' @references Zeileis A (2006), Object-oriented Computation of Sandwich
#' Estimators. Journal of Statistical Software, 16(9), 1--16.
#' @details For more information see \code{\link[sandwich]{estfun}} from package \pkg{sandwich}.
#' @method estfun gmnl
#' @export estfun.gmnl
estfun.gmnl <- function(x, ... ){
return(x$logLik$gradientObs )
}
#' @rdname gmnl
#' @export nObs.gmnl
nObs.gmnl <- function(x, ... ){
return(x$logLik$nobs)
}
#' Get the Conditional Individual Coefficients
#'
#' This is a helper function to obtain the individuals' conditional estimates of either the random parameters or the willingness-to-pay.
#' @param x an object of class \code{gmnl}.
#' @param par a string giving the name of the variable with a random parameter.
#' @param effect a string indicating what should be computed: the conditional expectation of the individual coefficients "\code{ce}", or the conditional expectation of the willingness-to-pay "\code{wtp}".
#' @param wrt a string indicating with respect to which variable the willingness-to-pay should be computed.
#' @param ... further arguments. Ignored.
#'
#' @return A named list where "\code{mean}" contains the individuals' conditional mean for the random parameter or willingness-to-pay, and where "\code{sd.est}" contains standard errors.
#' @export
#' @author Mauricio Sarrias.
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of multinomial Logit models with individual parameters.
#' @import stats
#' @examples
#' \dontrun{
#' ## Data
#' data("TravelMode", package = "AER")
#' library(mlogit)
#' TM <- mlogit.data(TravelMode, choice = "choice", shape = "long",
#' alt.levels = c("air", "train", "bus", "car"), chid.var = "individual")
#'
#' ## MIXL model with observed heterogeneity
#' mixl.hier <- gmnl(choice ~ vcost + gcost + travel + wait | 1 | 0 | income + size - 1,
#' data = TM,
#' model = "mixl",
#' ranp = c(travel = "t", wait = "n"),
#' mvar = list(travel = c("income","size"), wait = c("income")),
#' R = 30,
#' haltons = list("primes"= c(2, 17), "drop" = rep(19, 2)))
#'
#' ## Get the individuals' conditional mean and their standard errors for lwage
#' bi.travel <- effect.gmnl(mixl.hier, par = "travel", effect = "ce")
#' summary(bi.travel$mean)
#' summary(bi.travel$sd.est)
#'
#' ## Get the individuals' conditional WTP of travel with respect to gcost
#' wtp.travel <- effect.gmnl(mixl.hier, par = "travel", effect = "wtp", wrt = "gcost")
#' summary(wtp.travel$mean)
#' summary(wtp.travel$sd.est)
#' }
effect.gmnl <- function(x, par = NULL, effect = c("ce", "wtp"), wrt = NULL, ... ){
if (!inherits(x, "gmnl")) stop("not a \"gmnl\" object")
model <- x$model
if (model == "mnl") stop("This function is valid only for models with individual heterogeneity")
type <- match.arg(effect)
ranp <- x$ranp
#if (model != "lc" && !is.null(par) && !(par %in% names(ranp))) stop("This parameter is not random: ", par)
#if (model != "lc" || model!= "smnl") if (!(par %in% names(ranp))) stop("This parameter is not random: ", par)
if (type == "wtp" & is.null(wrt)) stop("you need to specify wrt")
bi <- x$bi
Qir <- x$Qir
if (model == "mixl" || model == "gmnl" || model == "smnl") {
N <- nrow(Qir)
K <- dim(bi)[[3]]
var_coefn <- dimnames(bi)[[3]]
mean <- mean.sq <- matrix(NA, N, K)
if (type == "wtp") {
if (model != "smnl") {
is.ran <- any(names(ranp) %in% wrt)
gamma <- if (is.ran) bi[, , wrt] else coef(x)[wrt]
} else gamma <- bi[, , wrt]
for (j in 1:K) {
mean[, j] <- rowSums((bi[, , j] / gamma) * Qir)
mean.sq[, j] <- rowSums(((bi[, , j] / gamma) ^ 2) * Qir)
}
} else {
for (j in 1:K) {
mean[, j] <- rowSums(bi[, , j] * Qir)
mean.sq[, j] <- rowSums(bi[, , j] ^ 2 * Qir)
}
}
}
if (model == "lc") {
N <- nrow(Qir)
K <- ncol(bi)
var_coefn <- colnames(bi)
mean <- mean.sq <- matrix(NA, N, K)
if (type == "wtp") {
gamma <- bi[, wrt]
for (j in 1:K) {
mean[, j] <- rowSums(repRows(bi[, j] / gamma, N) * Qir)
mean.sq[, j] <- rowSums(repRows((bi[, j] / gamma) ^ 2, N) * Qir)
}
} else {
for (j in 1:K) {
mean[, j] <- rowSums(repRows(bi[, j], N) * Qir)
mean.sq[, j] <- rowSums(repRows(bi[, j] ^ 2, N) * Qir)
}
}
}
if (model == "mm") {
wnq <- Qir$wnq
Ln <- Qir$Ln
Pnrq <- Qir$Pnrq
N <- length(Ln)
K <- dim(bi)[[4]]
mean <- mean.sq <- matrix(NA, N, K)
var_coefn <- dimnames(bi)[[4]]
if (type == "wtp") {
gamma <- bi[,,,wrt]
for (j in 1:K) {
mean[, j] <- rowSums(wnq * apply((bi[,,,j] / gamma) * Pnrq, c(1, 3), mean) / Ln)
mean.sq[, j] <- rowSums(wnq * apply((bi[,,,j] / gamma) ^ 2 * Pnrq, c(1, 3), mean) / Ln)
}
} else {
for (j in 1:K) {
mean[, j] <- rowSums(wnq * apply(bi[,,,j] * Pnrq, c(1, 3), mean) / Ln)
mean.sq[, j] <- rowSums(wnq * apply(bi[,,,j] ^ 2 * Pnrq, c(1, 3), mean) / Ln)
}
}
}
sd.est <- suppressWarnings(sqrt(mean.sq - mean ^ 2))
colnames(mean) <- colnames(sd.est) <- var_coefn
if (!is.null(par)) {
mean <- mean[, par]
sd.est <- sd.est[, par]
}
effe <- list(
mean = mean,
sd.est = sd.est)
return(effe)
}
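# Illustrative sketch of the conditional-mean step above (hedged: the numbers
# are invented and the block is wrapped in `if (FALSE)` so it is never executed).
# For one individual, the conditional mean weights the simulated coefficient
# draws by the posterior weights Qir, and the conditional sd follows from the
# weighted second moment.
if (FALSE) {
  beta_draws <- c(0.5, 1.0, 1.5)   # hypothetical draws of one coefficient
  q_weights  <- c(0.2, 0.5, 0.3)   # posterior weights summing to one
  cond_mean  <- sum(beta_draws * q_weights)                        # 1.05
  cond_sd    <- sqrt(sum(beta_draws^2 * q_weights) - cond_mean^2)
}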
#' Plot of the Distribution of the Conditional Expectation of Random Parameters
#'
#' Methods for \code{gmnl} objects which provide a plot of the distribution of the conditional expectation of the random parameters or the distribution of the conditional willingness-to-pay.
#'
#'
#' @param x an object of class \code{gmnl}.
#' @param par a string giving the name of the variable with a random parameter.
#' @param type a string indicating the type of distribution: it can be a \code{histogram} or a \code{density} of the conditional expectation of the random coefficients or WTP.
#' @param ind a boolean. If \code{TRUE}, a 95\% interval of the conditional distribution for each individual is plotted. By default, the conditional expectation of \code{par} for the first 10 individuals is plotted.
#' @param id only relevant if \code{ind} is not \code{NULL}. This is a vector indicating the individuals for whom the user wants to plot the conditional coefficients.
#' @param effect a string indicating whether the conditional expectation, "\code{ce}", or the WTP, "\code{wtp}" should be plotted.
#' @param wrt a string indicating with respect to which variable the WTP should be computed if \code{effect = "wtp"}.
#' @param adjust bandwidth for the kernel density.
#' @param main an overall title for the plot.
#' @param xlab a title for the x axis.
#' @param ylab a title for the y axis.
#' @param col color for the graph.
#' @param breaks number of breaks for the histogram if \code{type = "histogram"}.
#' @param ... further arguments to be passed to \code{plot} or \code{plotCI}.
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of different multinomial models with individual heterogeneity and \code{\link[gmnl]{effect.gmnl}}.
#' @importFrom plotrix plotCI
#' @method plot gmnl
#' @author Mauricio Sarrias
#' @export
#' @import graphics
#' @import stats
#' @examples
#' \dontrun{
#' ## Examples using the Electricity data set from the mlogit package
#' library(mlogit)
#' data("Electricity", package = "mlogit")
#' Electr <- mlogit.data(Electricity, id.var = "id", choice = "choice",
#' varying = 3:26, shape = "wide", sep = "")
#'
#' ## Estimate a MIXL model with correlated random parameters
#' Elec.cor <- gmnl(choice ~ pf + cl + loc + wk + tod + seas| 0, data = Electr,
#' subset = 1:3000,
#' model = 'mixl',
#' R = 10,
#' panel = TRUE,
#' ranp = c(cl = "n", loc = "n", wk = "n", tod = "n", seas = "n"),
#' correlation = TRUE)
#'
#' ## Plot the density of the conditional expectation distribution of loc
#' plot(Elec.cor, par = "loc", effect = "ce", type = "density", col = "grey")
#'
#' ## Plot the conditional expectation of loc for each individual
#' plot(Elec.cor, par = "loc", effect = "ce", ind = TRUE, id = 1:30)
#'
#' ## Plot the WTP for cl
#' plot(Elec.cor, par = "loc", effect = "wtp", wrt = "pf")
#'}
plot.gmnl <- function(x, par = NULL, effect = c("ce", "wtp"), wrt = NULL,
type = c("density", "histogram"), adjust = 1,
main = NULL, col = "indianred1", breaks = 10, ylab = NULL,
xlab = NULL, ind = FALSE, id = NULL, ...){
model <- x$model
if (model == "mnl") stop("The plot is valid only for models with individual heterogeneity")
  if (is.null(par)) stop("Must specify the name of the parameter")
type <- match.arg(type)
effect <- match.arg(effect)
  # Keep a user-supplied xlab; otherwise build a default label from the effect
  if (is.null(xlab)) {
    xlab <- switch(effect,
                   "wtp" = expression(E(hat(wtp[i]))),
                   "ce" = expression(E(hat(beta[i]))))
  }
if (!ind) {
if (is.null(main)) main <- paste("Conditional Distribution for", par)
if (is.null(ylab)) {
ylab <- switch(type,
"density" = "Density",
"histogram" = "Frequency")
}
rpar <- effect.gmnl(x, par, effect = effect, wrt = wrt)$mean
if (type == "density") {
pdens <- density(rpar, adjust = adjust)
plot(pdens, ylab = ylab, xlab = xlab, main = main, col = col)
has.pos <- any(pdens$x > 0)
if (has.pos) {
x1 <- min(which(pdens$x >= 0))
x2 <- max(which(pdens$x < max(pdens$x)))
with(pdens, polygon(x = c(x[c(x1, x1:x2, x2)]), y = c(0, y[x1:x2], 0), col = col, border = NA))
}
} else {
minb <- round(min(rpar), 2)
maxb <- round(max(rpar), 2)
hist(rpar, xlab = xlab, main = main, col = col, breaks = breaks,
xaxs = "i", yaxs = "i", las = 1, xaxt = 'n', ylab = ylab)
axis(1, at = seq(minb, maxb, (maxb - minb) * .05))
}
} else {
if (is.null(main)) main <- paste("95% Probability Intervals for ", par)
if (is.null(id)) id <- seq(1, 10, 1)
if (is.null(ylab)) ylab <- "Individuals"
f.bran <- effect.gmnl(x, par, effect = effect, wrt = wrt)$mean
f.sran <- effect.gmnl(x, par, effect = effect, wrt = wrt)$sd.est
lower <- f.bran - qnorm(0.975) * f.sran
upper <- f.bran + qnorm(0.975) * f.sran
plotrix::plotCI(as.numeric(id), f.bran[id], ui = upper[id], li = lower[id],
xlab = ylab, ylab = xlab,
lty = 2, main = main,
pch = 21, col = col)
}
}
#' Functions for Correlated Random Parameters
#'
#' These are a set of functions that help to extract the variance-covariance matrix, the correlation matrix, and the standard error of the random parameters for models of class \code{gmnl}.
#'
#' @param x an object of class \code{gmnl} where \code{ranp} is not \code{NULL}.
#' @param Q this argument is only valid if the "\code{mm}" (MM-MNL) model is estimated. It indicates the class for which the variance-covariance matrix is computed.
#' @param sd if \code{TRUE}, then the standard deviations of the random parameters along with their standard errors are computed.
#' @param digits the number of digits.
#'
#' @return \code{cov.gmnl} returns a matrix with the variance of the random parameters if the model is fitted with random coefficients. If the model is fitted with \code{correlation = TRUE}, then the variance-covariance matrix is returned.
#'
#'
#' If \code{correlation = TRUE} in the fitted model, then \code{se.cov.gmnl} returns a coefficient matrix for the elements of the variance-covariance matrix or the standard deviations if \code{sd = TRUE}.
#'
#'
#' @details The variance-covariance matrix is computed using the Cholesky decomposition \eqn{LL'=\Sigma}.
#'
#'
#' The \code{se.cov.gmnl} function is a wrapper for the \code{\link[msm]{deltamethod}} function of the \pkg{msm} package.
#' @author Mauricio Sarrias \email{msarrias86@@gmail.com}
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of different multinomial models with individual heterogeneity.
#' @examples
#' \dontrun{
#' ## Examples using Electricity data set from mlogit package
#' library(mlogit)
#' data("Electricity", package = "mlogit")
#' Electr <- mlogit.data(Electricity, id.var = "id", choice = "choice",
#' varying = 3:26, shape = "wide", sep = "")
#'
#' ## Estimate a MIXL model with correlated random parameters
#' Elec.cor <- gmnl(choice ~ pf + cl + loc + wk + tod + seas| 0, data = Electr,
#' subset = 1:3000,
#' model = 'mixl',
#' R = 10,
#' panel = TRUE,
#' ranp = c(cl = "n", loc = "n", wk = "n", tod = "n", seas = "n"),
#' correlation = TRUE)
#'
#' ## Use functions for correlated random parameters
#' cov.gmnl(Elec.cor)
#' se.cov.gmnl(Elec.cor)
#' se.cov.gmnl(Elec.cor, sd = TRUE)
#' cor.gmnl(Elec.cor)
#' }
#' @export
cov.gmnl <- function(x, Q = NULL){
if (!inherits(x, "gmnl")) stop("not a \"gmnl\" object")
if (is.null(x$ranp)) stop('cov.gmnl only relevant for random coefficient model')
model <- x$model
if (!is.null(Q) & model != "mm") stop("Q is only relevant for MM-MNL model")
if (model == "mm") {
if (is.null(Q)) stop("MM-MNL model requires Q")
if (Q > x$Q) stop("Q is greater than the number of classes in the fitted model")
}
beta.hat <- x$coefficients
K <- length(x$ranp)
nr <- names(x$ranp)
if (x$correlation) {
names.stds <- c()
if (model == "mm") {
for (i in 1:K) names.stds <- c(names.stds, paste('class', Q, 'sd', nr[i], nr[i:K], sep = '.'))
} else {
for (i in 1:K) names.stds <- c(names.stds, paste('sd', nr[i], nr[i:K], sep = '.'))
}
v <- beta.hat[names.stds]
V <- tcrossprod(makeL(v))
colnames(V) <- rownames(V) <- nr
} else{
names.stds <- if (model != "mm") paste("sd", nr, sep = ".") else paste("class", Q, "sd", nr, sep = ".")
sv <- beta.hat[names.stds]
V <- matrix(0, K, K)
diag(V) <- sv ^ 2
colnames(V) <- rownames(V) <- nr
}
V
}
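# Illustrative sketch of the Cholesky reconstruction used above (hedged: the
# 2 x 2 numbers are invented and the block is wrapped in `if (FALSE)` so it is
# never executed). The estimated elements form the lower-triangular factor L,
# and the reported covariance is Sigma = L %*% t(L).
if (FALSE) {
  L <- matrix(c(0.8, 0.0,
                0.3, 0.5), nrow = 2, byrow = TRUE)  # lower-triangular factor
  Sigma <- L %*% t(L)
  # Sigma = [0.64 0.24; 0.24 0.34]
}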
#' @rdname cov.gmnl
#' @export
cor.gmnl <- function(x, Q = NULL){
if (!x$correlation) stop('cor.gmnl only relevant for correlated random coefficient')
V <- cov.gmnl(x, Q = Q)
nr <- names(x$ranp)
D <- diag(sqrt(diag(V)))
Rho <- solve(D) %*% V %*% solve(D)
colnames(Rho) <- rownames(Rho) <- nr
Rho
}
#' @rdname cov.gmnl
#' @importFrom msm deltamethod
#' @import stats
#' @export
se.cov.gmnl <- function(x, sd = FALSE, Q = NULL, digits = max(3, getOption("digits") - 2)){
if (!inherits(x, "gmnl")) stop("not a \"gmnl\" object")
if (!x$correlation) stop('se.cov.gmnl only relevant for correlated random coefficient')
model <- x$model
if (!is.null(Q) & model != "mm") stop("Q is only relevant for MM-MNL model")
if (model == "mm") {
if (is.null(Q)) stop("MM-MNL model requires Q")
if (Q > x$Q) stop("Q is greater than the number of classes in the fitted model")
}
beta.hat <- x$coefficients
Ka <- length(x$ranp)
nr <- names(x$ranp)
names.stds <- c()
if (model == "mm") {
for (i in 1:Ka) names.stds <- c(names.stds, paste('class', Q, 'sd', nr[i], nr[i:Ka], sep = '.'))
} else {
for (i in 1:Ka) names.stds <- c(names.stds, paste('sd', nr[i], nr[i:Ka], sep = '.'))
}
stds.hat <- beta.hat[names.stds]
sel.vcov <- vcov(x)[names.stds, names.stds]
form <- c()
if (sd) {
for (i in 1:Ka) {
k <- i
if (i == 1) {
form <- paste("~ sqrt(", c(form, paste(paste("x", i, sep = ""), paste("x", k, sep = ""), sep = "*")), ")")
} else {
temp <- paste(paste("x", i, sep = ""), paste("x", k, sep = ""), sep = "*")
j <- 2
while(j <= i) {
temp <- paste(temp, make.add(row = j, col = k, Ka = Ka)[1], sep = "+")
j <- j + 1
}
form <- c(form, paste("~ sqrt(", temp, ")"))
}
}
b <- sqrt(diag(cov.gmnl(x, Q)))
names(b) <- colnames(cov.gmnl(x, Q))
} else {
for (i in 1:Ka) {
if (i == 1) {
form <- paste("~", c(form, paste(paste("x", i:Ka, sep = ""), paste("x", i, sep = ""), sep = "*")))
} else {
temp <- paste(paste("x", i:Ka, sep = ""), paste("x", i, sep = ""), sep = "*")
j <- 2
while(j <= i) {
temp <- paste(temp, make.add(row = j, col = i, Ka = Ka), sep = "+")
j <- j + 1
}
form <- c(form, paste("~", temp))
}
}
names.vcov <- c()
for (i in 1:Ka) names.vcov <- c(names.vcov, paste('v', nr[i], nr[i:Ka], sep = '.'))
b <- drop(cov.gmnl(x, Q)[lower.tri(cov.gmnl(x, Q), diag = TRUE)])
names(b) <- names.vcov
}
std.err <- c()
for (i in 1:length(form)) {
std.err <- c(std.err, msm::deltamethod(as.formula(form[i]), stds.hat, sel.vcov, ses = TRUE))
}
z <- b / std.err
p <- 2 * (1 - pnorm(abs(z)))
tableChol <- cbind(b, std.err, z, p)
if (!sd) cat(paste("\nElements of the variance-covariance matrix \n\n"))
else cat(paste("\nStandard deviations of the random parameters \n\n"))
#colnames(tableChol) <- c("Estimate", "Std. Error", "t-value", "Pr(>|t|)")
colnames(tableChol) <- c("Estimate", "Std. Error", "z-value", "Pr(>|z|)")
printCoefmat(tableChol, digits = digits)
}
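# Illustrative sketch of the delta-method formulas built above (hedged: the
# exact strings depend on the internal make.add() helper, so the two-parameter
# case below is only an assumed example; the block is wrapped in `if (FALSE)`
# so it is never executed). With Cholesky elements (x1, x2, x3) ordered as
# sd.a.a, sd.a.b, sd.b.b, the standard deviations would be expressed roughly as:
if (FALSE) {
  forms <- c("~ sqrt(x1*x1)",          # sd of the first random parameter
             "~ sqrt(x2*x2+x3*x3)")    # sd of the second random parameter
  lapply(forms, as.formula)
}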
#' Compute Willingness-to-pay
#'
#' Compute the willingness-to-pay.
#'
#' @param object an object of class \code{gmnl}.
#' @param wrt a string indicating the variable with respect to which the WTP is computed,
#' @param digits number of significant digits to be used for most numbers.
#' @return A coefficient matrix with the WTP point estimates and standard errors.
#' @export
#' @details For each coefficient, this function computes both the point estimate and standard error of WTP with respect to the variable specified in the argument \code{wrt}. Specifically, let \eqn{\beta_k} be the coefficient for variable \eqn{k}, then \deqn{WTP_{k}=-\beta_k/\beta_p}
#'
#'
#' where \eqn{\beta_p} is the coefficient for the variable specified with the argument \code{wrt}. Note that \code{wtp.gmnl} does not include the negative sign.
#'
#'
#' The \code{wtp.gmnl} function is a wrapper for the \code{\link[msm]{deltamethod}} function of the \pkg{msm} package.
#' @seealso \code{\link[msm]{deltamethod}} for the estimation of the standard errors.
#' @author Mauricio Sarrias.
#' @examples
#'
#' ## Examples using the Electricity data set from the mlogit package
#' library(mlogit)
#' data("Electricity", package = "mlogit")
#' Electr <- mlogit.data(Electricity, id.var = "id", choice = "choice",
#' varying = 3:26, shape = "wide", sep = "")
#'
#' ## Estimate a conditional logit model
#' clogit <- gmnl(choice ~ pf + cl + loc + wk + tod + seas| 0,
#' data = Electr)
#' wtp.gmnl(clogit, wrt = "pf")
#' @import stats
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
wtp.gmnl <- function(object, wrt = NULL, digits = max(3, getOption("digits") - 2)){
if (is.null(wrt)) stop("WTP needs the variable in the denominator: wrt")
beta.hat <- coef(object)
posi <- match(wrt, names(beta.hat))
form <- c()
b <- c()
namesb <- names(beta.hat)[-c(posi)]
for (i in 1:length(beta.hat)) {
if (i != posi) {
b <- c(b, beta.hat[i]/ beta.hat[posi])
form <- c(form, paste("~", "x", i, "/", "x", posi, sep = ""))
}
}
names(b) <- namesb
std.err <- c()
for (i in 1:length(form)) {
std.err <- c(std.err, msm::deltamethod(as.formula(form[i]), beta.hat, vcov(object), ses = TRUE))
}
z <- b / std.err
p <- 2 * (1 - pnorm(abs(z)))
tablewtp <- cbind(b, std.err, z, p)
  colnames(tablewtp) <- c("Estimate", "Std. Error", "z-value", "Pr(>|z|)")
  cat(paste("\nWillingness-to-pay with respect to: ", wrt, "\n\n"))
printCoefmat(tablewtp, digits = digits)
}
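# Illustrative sketch of the ratio-plus-delta-method computation above (hedged:
# every number is invented and the block is wrapped in `if (FALSE)` so it is
# never executed).
if (FALSE) {
  b_hat <- c(wait = -0.10, vcost = -0.05)   # hypothetical coefficient estimates
  V     <- diag(c(0.0004, 0.0001))          # hypothetical covariance matrix
  wtp_wait <- b_hat["wait"] / b_hat["vcost"]          # point estimate = 2
  se_wait  <- msm::deltamethod(~ x1 / x2, b_hat, V)   # delta-method std. error
}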
|
/gmnl/R/gmnl.methods.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 31,475 |
r
|
############################
# S3 method for gmnl package
#############################
#' @rdname gmnl
#' @method print gmnl
#' @import stats
#' @export
print.gmnl <- function(x, digits = max(3, getOption("digits") - 3),
width = getOption("width"), ...){
cat("\nCall:\n", deparse(x$call),"\n\n", sep = "")
cat("\nCoefficients:\n")
print.default(format(coef(x), digits = digits), print.gap = 2,
quote = FALSE)
cat("\n")
invisible(x)
}
#' @rdname gmnl
#' @method summary gmnl
#' @import stats
#' @export
summary.gmnl <- function(object,...){
b <- object$coefficients
std.err <- sqrt(diag(vcov(object)))
z <- b / std.err
p <- 2 * (1 - pnorm(abs(z)))
CoefTable <- cbind(b, std.err, z, p)
colnames(CoefTable) <- c("Estimate", "Std. Error", "z-value", "Pr(>|z|)")
object$CoefTable <- CoefTable
class(object) <- c("summary.gmnl", "gmnl")
return(object)
}
#' @rdname gmnl
#' @method print summary.gmnl
#' @import stats
#' @export
print.summary.gmnl <- function(x, digits = max(3, getOption("digits") - 2),
width = getOption("width"),
...){
cat(paste("\nModel estimated on:", format(Sys.time(), "%a %b %d %X %Y"), "\n"))
cat("\nCall:\n")
cat(paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n", sep = "")
cat("\nFrequencies of categories:\n")
print(prop.table(x$freq), digits = digits)
cat("\n")
cat(paste("The estimation took:", make.time(x) ,"\n"))
cat("\nCoefficients:\n")
printCoefmat(x$CoefTable, digits = digits)
cat(paste("\nOptimization of log-likelihood by", x$logLik$type))
cat(paste("\nLog Likelihood:", signif(x$logLik$maximum, digits)))
cat(paste("\nNumber of observations:", x$logLik$nobs))
cat(paste("\nNumber of iterations:" , x$logLik$iterations))
cat(paste("\nExit of MLE:", x$logLik$message))
if (!(x$model == "mnl" | x$model == "lc")) cat(paste("\nSimulation based on", x$R, "draws"))
invisible(x)
}
#' vcov method for gmnl objects
#'
#' The \code{vcov} method for \code{gmnl} objects extracts the covariance matrix of the coefficients or the random parameters. It also allows to get the standard errors for the variance-covariance matrix of the random parameters
#'
#' @param object a fitted model of class \code{gmnl},
#' @param what indicates which covariance matrix has to be extracted. The default is \code{coefficient}, in this case the \code{vcov} behaves as usual. If \code{what = "ranp"} the covariance matrix of the random parameters is returned as default,
#' @param type if the model is estimated with random parameters, then this argument indicates what matrix should be returned. If \code{type = "cov"}, then the covariance matrix of the random parameters is returned; if \code{type = "cor"} then the correlation matrix of the random parameters is returned; if \code{type = "sd"} then the standard deviation of the random parameters is returned,
#' @param se if \code{TRUE} \code{type = "cov"} then the standard error of the covariance matrix of the random parameters is returned; if \code{TRUE} \code{type = "sd"} the standard error of the standard deviation of the random parameter is returned. This argument if valid only if the model is estimated using correlated random parameters,
#' @param Q this argument is only valid if the "\code{mm}" (MM-MNL) model is estimated. It indicates the class for which the variance-covariance matrix is computed,
#' @param digits number of digits,
#' @param ... further arguments
#' @details This new interface replaces the \code{cor.gmnl}, \code{cov.gmnl} and \code{se.cov.gmnl} functions which are deprecated.
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of multinomial logit models with random parameters.
#' @method vcov gmnl
#' @import stats
#' @export
vcov.gmnl <- function(object, what = c('coefficient', 'ranp'), type = c('cov', 'cor', 'sd'),
se = FALSE, Q = NULL, digits = max(3, getOption("digits") - 2), ...)
{
what <- match.arg(what)
type <- match.arg(type)
if (what == 'coefficient') {
H <- object$logLik$hessian
vcov <- solve(-H)
rownames(vcov) <- colnames(vcov) <- names(coef(object))
return(vcov)
}
if (what == 'ranp') {
if (se) {
if (type == 'cov') se.cov.gmnl(object, sd = FALSE, Q = Q, digits = digits)
if (type == 'sd') se.cov.gmnl(object, sd = TRUE, Q = Q, digits = digits)
if (type == 'cor') stop("standard error for correlation coefficients not implemented yet")
} else {
if (type == 'cov') print(cov.gmnl(object, Q = Q))
if (type == 'cor') print(cor.gmnl(object, Q = Q))
if (type == 'sd') print(sqrt(diag(cov.gmnl(object, Q))))
}
}
}
#' @rdname gmnl
#' @method update gmnl
#' @import stats
#' @export
update.gmnl <- function(object, new, ...){
call <- object$call
if (is.null(call))
stop("need an object with call component")
extras <- match.call(expand.dots = FALSE)$...
if (!missing(new))
call$formula <- update(formula(object), new)
if (length(extras) > 0) {
existing <- !is.na(match(names(extras), names(call)))
for (a in names(extras)[existing]) call[[a]] <- extras[[a]]
if (any(!existing)) {
call <- c(as.list(call), extras[!existing])
call <- as.call(call)
}
}
eval(call, parent.frame())
}
#' @rdname gmnl
#' @export
coef.gmnl <- function(object, ...){
result <- object$coefficients
return(result)
}
#' @rdname gmnl
#' @export
model.matrix.gmnl <- function(object, ...){
model.matrix(object$formula, object$mf)
}
model.response.gmnl <- function(object, ...){
y.name <- paste(deparse(object$formula[[2]]))
object$mf[[y.name]]
}
#' @rdname gmnl
#' @export
residuals.gmnl <- function(object, outcome = TRUE, ...){
if (!outcome) {
result <- object$residuals
}
else{
J <- ncol(object$residuals)
y <- matrix(model.response.gmnl(object), ncol = J, byrow = T)
result <- apply(y * object$residuals, 1, sum)
}
result
}
#' @rdname gmnl
#' @import stats
df.residual.gmnl <- function(object, ...){
n <- length(residuals(object))
K <- length(coef(object))
return(n - K)
}
#' @rdname gmnl
#' @export
fitted.gmnl <- function(object, outcome = TRUE, ...){
if (outcome) result <- object$prob.ind
else result <- object$prob.alt
result
}
#' @rdname gmnl
#' @export
logLik.gmnl <- function(object,...){
structure(object$logLik$maximum[[1]], df = length(object$coefficients),
nobs = object$logLik$nobs, class = "logLik")
}
#' Get Model Summaries for Use with "mtable"
#'
#' A generic function to collect coefficients and summary statistics from a \code{gmnl} object. It is used in \code{mtable}.
#'
#' @param obj a \code{gmnl} object,
#' @param alpha level of the confidence intervals,
#' @param ... further arguments,
#'
#' @details For more details see package \pkg{memisc}
#' @examples
#' ## Estimate MNL models
#' data("TravelMode", package = "AER")
#' library(mlogit)
#' TM <- mlogit.data(TravelMode, choice = "choice", shape = "long",
#' alt.levels = c("air", "train", "bus", "car"), chid.var = "individual")
#'
#' mnl.1 <- gmnl(choice ~ wait + vcost + travel + gcost | 0, data = TM)
#' mnl.2 <- gmnl(choice ~ wait + vcost | 0, data = TM)
#'
#' ## Table
#' library(memisc)
#' mtable("MNL 1"= mnl.1, "MNL 2" = mnl.2,
#' summary.stats = c("N", "Log-likelihood", "BIC", "AIC"))
#' @import stats
#' @export getSummary.gmnl
getSummary.gmnl <- function(obj, alpha = 0.05, ...){
smry <- summary(obj)
coef <- smry$CoefTable
lower <- coef[, 1] - coef[, 2] * qnorm(alpha / 2)
upper <- coef[, 1] + coef[, 2] * qnorm(alpha / 2)
coef <- cbind(coef, lower, upper)
colnames(coef) <- c("est", "se", "stat", "p", "lwr", "upr")
N <- obj$logLik$nobs
ll <- logLik(obj)
sumstat <- c(logLik = ll, deviance = NA, AIC = AIC(obj), BIC = BIC(obj), N = N,
LR = NA, df = NA, p = NA, Aldrich.Nelson = NA, McFadden = NA, Cox.Snell = NA,
Nagelkerke = NA)
list(coef = coef, sumstat = sumstat, contrasts = obj$contrasts,
xlevels = NULL, call = obj$call)
}
#' Akaike's Information Criterion
#'
#' Calculate the Akaike's information Criterion (AIC) or the Bayesian
#' information Criterion (BIC) for an object of class \code{gmnl}.
#'
#' @param object a fitted model of class \code{gmnl}.
#' @param ... additional arguments to be passed to or from other functions.
#' @param k a numeric value, use as penalty coefficient for number of parameters
#' in the fitted model.
#' @details For more information see \code{\link[stats]{AIC}} or \code{\link[stats]{BIC}}
#' @return A numeric value with the corresponding AIC or BIC value.
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of multinomial logit models with observed and unobserved individual heterogeneity.
#'
#' @import stats
#' @method AIC gmnl
#' @export
AIC.gmnl <- function(object, ..., k = 2){
return(-2 * object$logLik$maximum[[1]] + k * length(coef(object)))
}
#' @rdname AIC.gmnl
#' @import stats
#' @method BIC gmnl
#' @export
#' @examples
#'
#' ## Estimate MNL model
#' data("TravelMode", package = "AER")
#' library(mlogit)
#' TM <- mlogit.data(TravelMode, choice = "choice", shape = "long",
#' alt.levels = c("air", "train", "bus", "car"), chid.var = "individual")
#'
#' mnl <- gmnl(choice ~ wait + vcost + travel + gcost | 0 , data = TM)
#' AIC(mnl)
#' BIC(mnl)
BIC.gmnl <- function(object, ...){
return(AIC(object, k = log(object$logLik$nobs)))
}
#### Methods for sandwiches
#' Bread for Sandwiches
#'
#' Computes the ``bread'' of the sandwich covariance matrix for objects of class \code{gmnl}.
#'
#' @param x a fitted model of class \code{gmnl}.
#' @param ... other arguments when \code{bread} is applied to another
#' class object.
#' @return The covariance matrix times observations
#' @details For more information see \code{\link[sandwich]{bread}} from the package \pkg{sandwich}.
#' @references Zeileis A (2006), Object-oriented Computation of Sandwich
#' Estimators. Journal of Statistical Software, 16(9), 1--16.
#' @method bread gmnl
#' @import stats
#' @export bread.gmnl
bread.gmnl <- function(x, ... ){
return( vcov( x ) * x$logLik$nobs)
}
#' Gradient for Observations
#'
#' It extracts the gradient for each observation evaluated at the estimated parameters for an object of class \code{gmnl}.
#'
#' @param x a fitted model of class \code{gmnl}.
#' @param ... other arguments. Ignored.
#' @return The gradient matrix of dimension \eqn{n \times K}
#' @references Zeileis A (2006), Object-oriented Computation of Sandwich
#' Estimators. Journal of Statistical Software, 16(9), 1--16.
#' @details For more information see \code{\link[sandwich]{estfun}} from package \pkg{sandwich}.
#' @method estfun gmnl
#' @export estfun.gmnl
estfun.gmnl <- function(x, ... ){
return(x$logLik$gradientObs )
}
#' @rdname gmnl
#' @export nObs.gmnl
nObs.gmnl <- function(x, ... ){
return(x$logLik$nobs)
}
#' Get the Conditional Individual Coefficients
#'
#' This a helper function to obtain the individuals' conditional estimate of the either random parameters or willingness-to-pay.
#' @param x an object of class \code{gmnl}.
#' @param par a string giving the name of the variable with a random parameter.
#' @param effect a string indicating what should be computed: the conditional expectation of the individual coefficients "\code{ce}", or the conditional expectation of the willingness-to-pay "\code{wtp}".
#' @param wrt a string indicating with respect to which variable the willingness-to-pay should be computed.
#' @param ... further arguments. Ignorred.
#'
#' @return A named list where "\code{mean}" contains the individuals' conditional mean for the random parameter or willingness-to-pay, and where "\code{sd.est}" contains standard errors.
#' @export
#' @author Mauricio Sarrias.
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of multinomial Logit models with individual parameters.
#' @import stats
#' @examples
#' \dontrun{
#' ## Data
#' data("TravelMode", package = "AER")
#' library(mlogit)
#' TM <- mlogit.data(TravelMode, choice = "choice", shape = "long",
#' alt.levels = c("air", "train", "bus", "car"), chid.var = "individual")
#'
#' ## MIXL model with observed heterogeneity
#' mixl.hier <- gmnl(choice ~ vcost + gcost + travel + wait | 1 | 0 | income + size - 1,
#' data = TM,
#' model = "mixl",
#' ranp = c(travel = "t", wait = "n"),
#' mvar = list(travel = c("income","size"), wait = c("income")),
#' R = 30,
#' haltons = list("primes"= c(2, 17), "drop" = rep(19, 2)))
#'
#' ## Get the individuals' conditional mean and their standard errors for lwage
#' bi.travel <- effect.gmnl(mixl.hier, par = "travel", effect = "ce")
#' summary(bi.travel$mean)
#' summary(bi.travel$sd.est)
#'
#' ## Get the individuals' conditional WTP of travel with respect to gcost
#' wtp.travel <- effect.gmnl(mixl.hier, par = "travel", effect = "wtp", wrt = "gcost")
#' summary(wtp.travel$mean)
#' summary(wtp.travel$sd.est)
#' }
effect.gmnl <- function(x, par = NULL, effect = c("ce", "wtp"), wrt = NULL, ... ){
if (!inherits(x, "gmnl")) stop("not a \"gmnl\" object")
model <- x$model
if (model == "mnl") stop("This function is valid only for models with individual heterogeneity")
type <- match.arg(effect)
ranp <- x$ranp
#if (model != "lc" && !is.null(par) && !(par %in% names(ranp))) stop("This parameter is not random: ", par)
#if (model != "lc" || model!= "smnl") if (!(par %in% names(ranp))) stop("This parameter is not random: ", par)
if (type == "wtp" & is.null(wrt)) stop("you need to specify wrt")
bi <- x$bi
Qir <- x$Qir
if (model == "mixl" || model == "gmnl" || model == "smnl") {
N <- nrow(Qir)
K <- dim(bi)[[3]]
var_coefn <- dimnames(bi)[[3]]
mean <- mean.sq <- matrix(NA, N, K)
if (type == "wtp") {
if (model != "smnl") {
is.ran <- any(names(ranp) %in% wrt)
gamma <- if (is.ran) bi[, , wrt] else coef(x)[wrt]
} else gamma <- bi[, , wrt]
for (j in 1:K) {
mean[, j] <- rowSums((bi[, , j] / gamma) * Qir)
mean.sq[, j] <- rowSums(((bi[, , j] / gamma) ^ 2) * Qir)
}
} else {
for (j in 1:K) {
mean[, j] <- rowSums(bi[, , j] * Qir)
mean.sq[, j] <- rowSums(bi[, , j] ^ 2 * Qir)
}
}
}
if (model == "lc") {
N <- nrow(Qir)
K <- ncol(bi)
var_coefn <- colnames(bi)
mean <- mean.sq <- matrix(NA, N, K)
if (type == "wtp") {
gamma <- bi[, wrt]
for (j in 1:K) {
mean[, j] <- rowSums(repRows(bi[, j] / gamma, N) * Qir)
mean.sq[, j] <- rowSums(repRows((bi[, j] / gamma) ^ 2, N) * Qir)
}
} else {
for (j in 1:K) {
mean[, j] <- rowSums(repRows(bi[, j], N) * Qir)
mean.sq[, j] <- rowSums(repRows(bi[, j] ^ 2, N) * Qir)
}
}
}
if (model == "mm") {
wnq <- Qir$wnq
Ln <- Qir$Ln
Pnrq <- Qir$Pnrq
N <- length(Ln)
K <- dim(bi)[[4]]
mean <- mean.sq <- matrix(NA, N, K)
var_coefn <- dimnames(bi)[[4]]
if (type == "wtp") {
gamma <- bi[,,,wrt]
for (j in 1:K) {
mean[, j] <- rowSums(wnq * apply((bi[,,,j] / gamma) * Pnrq, c(1, 3), mean) / Ln)
mean.sq[, j] <- rowSums(wnq * apply((bi[,,,j] / gamma) ^ 2 * Pnrq, c(1, 3), mean) / Ln)
}
} else {
for (j in 1:K) {
mean[, j] <- rowSums(wnq * apply(bi[,,,j] * Pnrq, c(1, 3), mean) / Ln)
mean.sq[, j] <- rowSums(wnq * apply(bi[,,,j] ^ 2 * Pnrq, c(1, 3), mean) / Ln)
}
}
}
sd.est <- suppressWarnings(sqrt(mean.sq - mean ^ 2))
colnames(mean) <- colnames(sd.est) <- var_coefn
if (!is.null(par)) {
mean <- mean[, par]
sd.est <- sd.est[, par]
}
effe <- list(
mean = mean,
sd.est = sd.est)
return(effe)
}
#' Plot of the Distribution of the Conditional Expectation of Random Parameters
#'
#' Methods for \code{gmnl} objects which provide a plot of the distribution of the conditional expectation of the random parameters or the distribution of the conditional willigness-to-pay.
#'
#'
#' @param x an object of class \code{gmnl}.
#' @param par a string giving the name of the variable with random parameter.
#' @param type a string indicating the type of distribution: it can be a \code{histogram} or a \code{density} of the conditional expectation of the random coefficients or WTP.
#' @param ind a boolean. If \code{TRUE}, a 95\% interval of conditional distribution for each individual is plotted. As default, the conditional expectation of \code{par} for the first 10 individual is plotted.
#' @param id only relevant if \code{ind} is not \code{NULL}. This is a vector indicating the individuals for whom the user want to plot the conditional coefficients.
#' @param effect a string indicating whether the conditional expectation, "\code{ce}", or the WTP, "\code{wtp}" should be plotted.
#' @param wrt a string indicating with respect to which variable the WTP should be computed if \code{effect = "wtp"}.
#' @param adjust bandwidth for the kernel density.
#' @param main an overall title for the plot.
#' @param xlab a title for the x axis.
#' @param ylab a title for the y axis.
#' @param col color for the graph.
#' @param breaks number of breaks for the histrogram if \code{type = "histogram"}.
#' @param ... further arguments to be passed to \code{plot} or \code{plotCI}.
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of different multinomial models with individual heterogeneity and \code{\link[gmnl]{effect.gmnl}}.
#' @importFrom plotrix plotCI
#' @method plot gmnl
#' @author Mauricio Sarrias
#' @export
#' @import graphics
#' @import stats
#' @examples
#' \dontrun{
#' ## Examples using the Electricity data set from the mlogit package
#' library(mlogit)
#' data("Electricity", package = "mlogit")
#' Electr <- mlogit.data(Electricity, id.var = "id", choice = "choice",
#' varying = 3:26, shape = "wide", sep = "")
#'
#' ## Estimate a MIXL model with correlated random parameters
#' Elec.cor <- gmnl(choice ~ pf + cl + loc + wk + tod + seas| 0, data = Electr,
#' subset = 1:3000,
#' model = 'mixl',
#' R = 10,
#' panel = TRUE,
#' ranp = c(cl = "n", loc = "n", wk = "n", tod = "n", seas = "n"),
#' correlation = TRUE)
#'
#' ## Plot the density of the conditional expectation distribution of loc
#' plot(Elec.cor, par = "loc", effect = "ce", type = "density", col = "grey")
#'
#' ## Plot the conditional expectation of loc for each individual
#' plot(Elec.cor, par = "loc", effect = "ce", ind = TRUE, id = 1:30)
#'
#' ## Plot the WTP for cl
#' plot(Elec.cor, par = "loc", effect = "wtp", wrt = "pf")
#'}
plot.gmnl <- function(x, par = NULL, effect = c("ce", "wtp"), wrt = NULL,
type = c("density", "histogram"), adjust = 1,
main = NULL, col = "indianred1", breaks = 10, ylab = NULL,
xlab = NULL, ind = FALSE, id = NULL, ...){
model <- x$model
if (model == "mnl") stop("The plot is valid only for models with individual heterogeneity")
if (is.null(par)) stop("Must specified the name of the parameter")
type <- match.arg(type)
effect <- match.arg(effect)
xlab <- switch(effect,
"wtp" = expression(E(hat(wtp[i]))),
"ce" = expression(E(hat(beta[i]))))
if (!ind) {
if (is.null(main)) main <- paste("Conditional Distribution for", par)
if (is.null(ylab)) {
ylab <- switch(type,
"density" = "Density",
"histogram" = "Frequency")
}
rpar <- effect.gmnl(x, par, effect = effect, wrt = wrt)$mean
if (type == "density") {
pdens <- density(rpar, adjust = adjust)
plot(pdens, ylab = ylab, xlab = xlab, main = main, col = col)
has.pos <- any(pdens$x > 0)
if (has.pos) {
x1 <- min(which(pdens$x >= 0))
x2 <- max(which(pdens$x < max(pdens$x)))
with(pdens, polygon(x = c(x[c(x1, x1:x2, x2)]), y = c(0, y[x1:x2], 0), col = col, border = NA))
}
} else {
minb <- round(min(rpar), 2)
maxb <- round(max(rpar), 2)
hist(rpar, xlab = xlab, main = main, col = col, breaks = breaks,
xaxs = "i", yaxs = "i", las = 1, xaxt = 'n', ylab = ylab)
axis(1, at = seq(minb, maxb, (maxb - minb) * .05))
}
} else {
if (is.null(main)) main <- paste("95% Probability Intervals for ", par)
if (is.null(id)) id <- seq(1, 10, 1)
if (is.null(ylab)) ylab <- "Individuals"
f.bran <- effect.gmnl(x, par, effect = effect, wrt = wrt)$mean
f.sran <- effect.gmnl(x, par, effect = effect, wrt = wrt)$sd.est
lower <- f.bran - qnorm(0.975) * f.sran
upper <- f.bran + qnorm(0.975) * f.sran
plotrix::plotCI(as.numeric(id), f.bran[id], ui = upper[id], li = lower[id],
xlab = ylab, ylab = xlab,
lty = 2, main = main,
pch = 21, col = col)
}
}
#' Functions for Correlated Random Parameters
#'
#' These are a set of functions that help to extract the variance-covariance matrix, the correlation matrix, and the standard error of the random parameters for models of class \code{gmnl}.
#'
#' @param x an object of class \code{gmnl} where \code{ranp} is not \code{NULL}.
#' @param Q this argument is only valid if the "\code{mm}" (MM-MNL) model is estimated. It indicates the class for which the variance-covariance matrix is computed.
#' @param sd if \code{TRUE}, then the standard deviations of the random parameters along with their standard errors are computed.
#' @param digits the number of digits.
#'
#' @return \code{cov.gmnl} returns a matrix with the variance of the random parameters if the model is fitted with random coefficients. If the model is fitted with \code{correlation = TRUE}, then the variance-covariance matrix is returned.
#'
#'
#' If \code{correlation = TRUE} in the fitted model, then \code{se.cov.gmnl} returns a coefficient matrix for the elements of the variance-covariance matrix or the standard deviations if \code{sd = TRUE}.
#'
#'
#' @details The variance-covariance matrix is computed using the Cholesky decomposition \eqn{LL'=\Sigma}.
#'
#'
#' \code{se.cov.gmnl} function is a wrapper for the \code{\link[msm]{deltamethod}} function of the \pkg{msm} package.
#' @author Mauricio Sarrias \email{msarrias86@@gmail.com}
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
#' @seealso \code{\link[gmnl]{gmnl}} for the estimation of different multinomial models with individual heterogeneity.
#' @examples
#' \dontrun{
#' ## Examples using Electricity data set from mlogit package
#' library(mlogit)
#' data("Electricity", package = "mlogit")
#' Electr <- mlogit.data(Electricity, id.var = "id", choice = "choice",
#' varying = 3:26, shape = "wide", sep = "")
#'
#' ## Estimate a MIXL model with correlated random parameters
#' Elec.cor <- gmnl(choice ~ pf + cl + loc + wk + tod + seas| 0, data = Electr,
#' subset = 1:3000,
#' model = 'mixl',
#' R = 10,
#' panel = TRUE,
#' ranp = c(cl = "n", loc = "n", wk = "n", tod = "n", seas = "n"),
#' correlation = TRUE)
#'
#' ## Use functions for correlated random parameters
#' cov.gmnl(Elec.cor)
#' se.cov.gmnl(Elec.cor)
#' se.cov.gmnl(Elec.cor, sd = TRUE)
#' cor.gmnl(Elec.cor)
#' }
#' @export
cov.gmnl <- function(x, Q = NULL){
if (!inherits(x, "gmnl")) stop("not a \"gmnl\" object")
if (is.null(x$ranp)) stop('cov.gmnl only relevant for random coefficient model')
model <- x$model
if (!is.null(Q) & model != "mm") stop("Q is only relevant for MM-MNL model")
if (model == "mm") {
if (is.null(Q)) stop("MM-MNL model requires Q")
if (Q > x$Q) stop("Q is greater than the number of classes in the fitted model")
}
beta.hat <- x$coefficients
K <- length(x$ranp)
nr <- names(x$ranp)
if (x$correlation) {
names.stds <- c()
if (model == "mm") {
for (i in 1:K) names.stds <- c(names.stds, paste('class', Q, 'sd', nr[i], nr[i:K], sep = '.'))
} else {
for (i in 1:K) names.stds <- c(names.stds, paste('sd', nr[i], nr[i:K], sep = '.'))
}
v <- beta.hat[names.stds]
V <- tcrossprod(makeL(v))
colnames(V) <- rownames(V) <- nr
} else{
names.stds <- if (model != "mm") paste("sd", nr, sep = ".") else paste("class", Q, "sd", nr, sep = ".")
sv <- beta.hat[names.stds]
V <- matrix(0, K, K)
diag(V) <- sv ^ 2
colnames(V) <- rownames(V) <- nr
}
V
}
#' @rdname cov.gmnl
#' @export
cor.gmnl <- function(x, Q = NULL){
if (!x$correlation) stop('cor.gmnl only relevant for correlated random coefficient')
V <- cov.gmnl(x, Q = Q)
nr <- names(x$ranp)
D <- diag(sqrt(diag(V)))
Rho <- solve(D) %*% V %*% solve(D)
colnames(Rho) <- rownames(Rho) <- nr
Rho
}
#' @rdname cov.gmnl
#' @importFrom msm deltamethod
#' @import stats
#' @export
se.cov.gmnl <- function(x, sd = FALSE, Q = NULL, digits = max(3, getOption("digits") - 2)){
if (!inherits(x, "gmnl")) stop("not a \"gmnl\" object")
if (!x$correlation) stop('se.cov.gmnl only relevant for correlated random coefficient')
model <- x$model
if (!is.null(Q) & model != "mm") stop("Q is only relevant for MM-MNL model")
if (model == "mm") {
if (is.null(Q)) stop("MM-MNL model requires Q")
if (Q > x$Q) stop("Q is greater than the number of classes in the fitted model")
}
beta.hat <- x$coefficients
Ka <- length(x$ranp)
nr <- names(x$ranp)
names.stds <- c()
if (model == "mm") {
for (i in 1:Ka) names.stds <- c(names.stds, paste('class', Q, 'sd', nr[i], nr[i:Ka], sep = '.'))
} else {
for (i in 1:Ka) names.stds <- c(names.stds, paste('sd', nr[i], nr[i:Ka], sep = '.'))
}
stds.hat <- beta.hat[names.stds]
sel.vcov <- vcov(x)[names.stds, names.stds]
form <- c()
if (sd) {
for (i in 1:Ka) {
k <- i
if (i == 1) {
form <- paste("~ sqrt(", c(form, paste(paste("x", i, sep = ""), paste("x", k, sep = ""), sep = "*")), ")")
} else {
temp <- paste(paste("x", i, sep = ""), paste("x", k, sep = ""), sep = "*")
j <- 2
while(j <= i) {
temp <- paste(temp, make.add(row = j, col = k, Ka = Ka)[1], sep = "+")
j <- j + 1
}
form <- c(form, paste("~ sqrt(", temp, ")"))
}
}
b <- sqrt(diag(cov.gmnl(x, Q)))
names(b) <- colnames(cov.gmnl(x, Q))
} else {
for (i in 1:Ka) {
if (i == 1) {
form <- paste("~", c(form, paste(paste("x", i:Ka, sep = ""), paste("x", i, sep = ""), sep = "*")))
} else {
temp <- paste(paste("x", i:Ka, sep = ""), paste("x", i, sep = ""), sep = "*")
j <- 2
while(j <= i) {
temp <- paste(temp, make.add(row = j, col = i, Ka = Ka), sep = "+")
j <- j + 1
}
form <- c(form, paste("~", temp))
}
}
names.vcov <- c()
for (i in 1:Ka) names.vcov <- c(names.vcov, paste('v', nr[i], nr[i:Ka], sep = '.'))
b <- drop(cov.gmnl(x, Q)[lower.tri(cov.gmnl(x, Q), diag = TRUE)])
names(b) <- names.vcov
}
std.err <- c()
for (i in 1:length(form)) {
std.err <- c(std.err, msm::deltamethod(as.formula(form[i]), stds.hat, sel.vcov, ses = TRUE))
}
z <- b / std.err
p <- 2 * (1 - pnorm(abs(z)))
tableChol <- cbind(b, std.err, z, p)
if (!sd) cat(paste("\nElements of the variance-covariance matrix \n\n"))
else cat(paste("\nStandard deviations of the random parameters \n\n"))
#colnames(tableChol) <- c("Estimate", "Std. Error", "t-value", "Pr(>|t|)")
colnames(tableChol) <- c("Estimate", "Std. Error", "z-value", "Pr(>|z|)")
printCoefmat(tableChol, digits = digits)
}
#' Compute Willingness-to-pay
#'
#' Compute the willingness-to-pay.
#'
#' @param object an object of class \code{gmnl}.
#' @param wrt a string indicating the variable with respect to which the WTP is computed.
#' @param digits number of significant digits to be used for most numbers.
#' @return A coefficient matrix with the WTP point estimates and standard errors.
#' @export
#' @details For each coefficient, this function computes both the point estimate and standard error of WTP with respect to the variable specified in the argument \code{wrt}. Specifically, let \eqn{\beta_k} be the coefficient for variable \eqn{k}, then \deqn{WTP_{k}=-\beta_k/\beta_p}
#'
#'
#' where \eqn{\beta_p} is the coefficient for the variable specified with the argument \code{wrt}. Note that \code{wtp.gmnl} does not include the negative sign.
#'
#'
#' \code{wtp.gmnl} function is a wrapper for the \code{\link[msm]{deltamethod}} function of the \pkg{msm} package.
#' @seealso \code{\link[msm]{deltamethod}} for the estimation of the standard errors.
#' @author Mauricio Sarrias.
#' @examples
#'
#' ## Examples using the Electricity data set from the mlogit package
#' library(mlogit)
#' data("Electricity", package = "mlogit")
#' Electr <- mlogit.data(Electricity, id.var = "id", choice = "choice",
#' varying = 3:26, shape = "wide", sep = "")
#'
#' ## Estimate a conditional logit model
#' clogit <- gmnl(choice ~ pf + cl + loc + wk + tod + seas| 0,
#' data = Electr)
#' wtp.gmnl(clogit, wrt = "pf")
#' @import stats
#' @references
#' \itemize{
#' \item Greene, W. H. (2012). Econometric Analysis, Seventh Edition. Pearson Hall.
#' \item Train, K. (2009). Discrete Choice Methods with Simulation. Cambridge University Press.
#' }
wtp.gmnl <- function(object, wrt = NULL, digits = max(3, getOption("digits") - 2)){
if (is.null(wrt)) stop("WTP needs the variable in the denominator: wrt")
beta.hat <- coef(object)
posi <- match(wrt, names(beta.hat))
form <- c()
b <- c()
namesb <- names(beta.hat)[-c(posi)]
for (i in 1:length(beta.hat)) {
if (i != posi) {
b <- c(b, beta.hat[i]/ beta.hat[posi])
form <- c(form, paste("~", "x", i, "/", "x", posi, sep = ""))
}
}
names(b) <- namesb
std.err <- c()
for (i in 1:length(form)) {
std.err <- c(std.err, msm::deltamethod(as.formula(form[i]), beta.hat, vcov(object), ses = TRUE))
}
z <- b / std.err
p <- 2 * (1 - pnorm(abs(z)))
tablewtp <- cbind(b, std.err, z, p)
colnames(tablewtp) <- c("Estimate", "Std. Error", "t-value", "Pr(>|t|)")
cat(paste("\nWilligness-to-pay respect to: ", wrt, "\n\n"))
printCoefmat(tablewtp, digits = digits)
}
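## Editor's illustrative sketch (not part of the gmnl sources): a standalone
## delta-method standard error for a ratio of two coefficients, mirroring the
## computation inside wtp.gmnl above. All numbers are invented; the block is
## guarded so that sourcing this file has no side effects.
if (FALSE) {
  b <- c(0.8, -0.2)                                 # assumed coefficients (beta_k, beta_p)
  V <- matrix(c(0.010, 0.001, 0.001, 0.004), 2, 2)  # assumed covariance matrix
  est <- b[1] / b[2]                                # WTP-style ratio
  se  <- msm::deltamethod(~ x1 / x2, b, V, ses = TRUE)
  c(estimate = est, std.error = se)
}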
|
library(rgl)
### Name: writeWebGL
### Title: Write scene to HTML.
### Aliases: writeWebGL
### Keywords: graphics
### ** Examples
plot3d(rnorm(100), rnorm(100), rnorm(100), type = "s", col = "red")
# This writes a copy into temporary directory 'webGL', and then displays it
filename <- writeWebGL(dir = file.path(tempdir(), "webGL"),
width = 500, reuse = TRUE)
# Display the "reuse" attribute
attr(filename, "reuse")
# Display the scene in a browser
if (interactive())
browseURL(paste0("file://", filename))
|
/data/genthat_extracted_code/rgl/examples/writeWebGL.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 557 |
r
|
library(rgl)
### Name: writeWebGL
### Title: Write scene to HTML.
### Aliases: writeWebGL
### Keywords: graphics
### ** Examples
plot3d(rnorm(100), rnorm(100), rnorm(100), type = "s", col = "red")
# This writes a copy into temporary directory 'webGL', and then displays it
filename <- writeWebGL(dir = file.path(tempdir(), "webGL"),
width = 500, reuse = TRUE)
# Display the "reuse" attribute
attr(filename, "reuse")
# Display the scene in a browser
if (interactive())
browseURL(paste0("file://", filename))
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/pad.R
\name{pad}
\alias{pad}
\title{Pad numeric vars to strings of specified size}
\usage{
pad(x, mx = NULL, fill = 0)
}
\arguments{
\item{x}{Input object}
\item{mx}{How many places do you want padded?}
\item{fill}{What should it be padded with?}
}
\value{
value Non-string object
}
\description{
Pad numeric vars to strings of specified size
}
\examples{
pad(24,mx=4,fill=0)
}
\seealso{
\code{\link{is.character}} which this function wraps
}
\keyword{alpha}
\keyword{color}
|
/man/pad.Rd
|
no_license
|
bestdan/ULF
|
R
| false | false | 564 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/pad.R
\name{pad}
\alias{pad}
\title{Pad numeric vars to strings of specified size}
\usage{
pad(x, mx = NULL, fill = 0)
}
\arguments{
\item{x}{Input object}
\item{mx}{How many places do you want padded?}
\item{fill}{What should it be padded with?}
}
\value{
value Non-string object
}
\description{
Pad numeric vars to strings of specified size
}
\examples{
pad(24,mx=4,fill=0)
}
\seealso{
\code{\link{is.character}} which this function wraps
}
\keyword{alpha}
\keyword{color}
|
library("Rimlbuoy")
library("xts")
library("ggplot2")
source("~/MEGA/Arctus/project/workshop_presentation/read.extract.pix.file.R")
load("~/MEGA/data/BoueesIML/2015/L2/COPS.DB.IML4.V3.RData")
################read data
####################################
#read data for viking buoy
iml4="~/MEGA/Arctus/data/total/IML4_20160501_20161108_20170324_P3.3_RHOWN.csv"
iml4dat=read.csv(iml4,header = T,sep = ";")
datetime=strptime(iml4dat$TIME_IS,format = "%Y%m%dT%H%M%SZ")
#Read modis data
data2014.modis=read.pix_Extract("~/MEGA/Arctus/data/extracted/pixEx_modis_2014_Level 2_measurements.txt")
modis.day.f=as.factor(data2014.modis$data[,8])
modis.datetime.f=strptime(paste(data2014.modis$data[,8],data2014.modis$data[,9]),format = "%Y-%m-%d %H:%M")
## needs to be completed
|
/matchup_COPS.R
|
no_license
|
zygomare/L2_validation
|
R
| false | false | 771 |
r
|
library("Rimlbuoy")
library("xts")
library("ggplot2")
source("~/MEGA/Arctus/project/workshop_presentation/read.extract.pix.file.R")
load("~/MEGA/data/BoueesIML/2015/L2/COPS.DB.IML4.V3.RData")
################read data
####################################
#read data for viking buoy
iml4="~/MEGA/Arctus/data/total/IML4_20160501_20161108_20170324_P3.3_RHOWN.csv"
iml4dat=read.csv(iml4,header = T,sep = ";")
datetime=strptime(iml4dat$TIME_IS,format = "%Y%m%dT%H%M%SZ")
#Read modis data
data2014.modis=read.pix_Extract("~/MEGA/Arctus/data/extracted/pixEx_modis_2014_Level 2_measurements.txt")
modis.day.f=as.factor(data2014.modis$data[,8])
modis.datetime.f=strptime(paste(data2014.modis$data[,8],data2014.modis$data[,9]),format = "%Y-%m-%d %H:%M")
## needs to be completed
|
\name{ISV.Equality}
\alias{ISV.Equality}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Test for Equality of Intra-Subject Variabilities
}
\description{
H0: within-subject variance of treatment T is equal to within-subject variance of treatment R
Ha: not equal
The test determines whether two drug products have the same intra-subject variability.
}
\usage{
ISV.Equality(alpha, beta, sigma1, sigma2, m)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{alpha}{
significance level
}
\item{beta}{
power = 1-beta
}
\item{sigma1}{
within-subject variance of treatment 1
}
\item{sigma2}{
within-subject variance of treatment 2
}
\item{m}{
for each subject, there are m replicates.
}
}
\references{
Chow SC, Shao J, Wang H. Sample Size Calculation in Clinical Research. New York: Marcel Dekker, 2003
}
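\examples{
## Editor's hypothetical call, for illustration only: the design values below
## are assumed rather than taken from the reference, and the returned number
## is interpreted as the required sample size (the package's purpose).
ISV.Equality(alpha = 0.05, beta = 0.20, sigma1 = 0.30, sigma2 = 0.45, m = 2)
}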
|
/man/ISV.Equality.Rd
|
no_license
|
cran/TrialSize
|
R
| false | false | 907 |
rd
|
\name{ISV.Equality}
\alias{ISV.Equality}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Test for Equality of Intra-Subject Variabilities
}
\description{
H0: within-subject variance of treatment T is equal to within-subject variance of treatment R
Ha: not equal
The test determines whether two drug products have the same intra-subject variability.
}
\usage{
ISV.Equality(alpha, beta, sigma1, sigma2, m)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{alpha}{
significance level
}
\item{beta}{
power = 1-beta
}
\item{sigma1}{
within-subject variance of treatment 1
}
\item{sigma2}{
within-subject variance of treatment 2
}
\item{m}{
for each subject, there are m replicates.
}
}
\references{
Chow SC, Shao J, Wang H. Sample Size Calculation in Clinical Research. New York: Marcel Dekker, 2003
}
|
\name{speedglm-package}
\alias{speedglm-package}
\docType{package}
\title{
Fitting Linear and Generalized Linear Models to Large Data Sets.
}
\description{
Fits LMs and GLMs to large data sets. For data loaded in R memory the fitting is usually
fast, especially if R is linked against an optimized BLAS. For data sets of size greater
of R memory, the fitting is made by an updating algorithm.}
\details{
\tabular{ll}{
Package: \tab speedglm\cr
Type: \tab Package\cr
Version: \tab 0.2\cr
Date: \tab 2013-07-23\cr
Depends: \tab Matrix\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
}
}
\author{
Marco Enea
Maintainer: Marco Enea <marco.enea@unipa.it>
}
\keyword{ models}
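\examples{
## Editor's illustrative sketch, not from the package authors: the main
## fitting functions are assumed to be speedlm() and speedglm().
\dontrun{
n <- 1e5
x <- rnorm(n)
y <- 1 + 2 * x + rnorm(n)
fit <- speedlm(y ~ x, data = data.frame(y = y, x = x))
summary(fit)
}
}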
|
/man/speedglm-package.rd
|
no_license
|
francesconero/speedglm
|
R
| false | false | 717 |
rd
|
\name{speedglm-package}
\alias{speedglm-package}
\docType{package}
\title{
Fitting Linear and Generalized Linear Models to Large Data Sets.
}
\description{
Fits LMs and GLMs to large data sets. For data loaded in R memory the fitting is usually
fast, especially if R is linked against an optimized BLAS. For data sets of size greater
of R memory, the fitting is made by an updating algorithm.}
\details{
\tabular{ll}{
Package: \tab speedglm\cr
Type: \tab Package\cr
Version: \tab 0.2\cr
Date: \tab 2013-07-23\cr
Depends: \tab Matrix\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
}
}
\author{
Marco Enea
Maintainer: Marco Enea <marco.enea@unipa.it>
}
\keyword{ models}
|
library(xpose4)
### Name: reset.graph.par
### Title: Resets Xpose variable definitions to factory settings
### Aliases: reset.graph.par
### Keywords: methods
### ** Examples
## Not run:
##D ## xpdb5 is an Xpose data object
##D ## We expect to find the required NONMEM run and table files for run
##D ## 5 in the current working directory
##D xpdb5 <- xpose.data(5)
##D
##D ## Import graphics preferences you saved earlier using export.graph.par
##D xpdb5 <- import.graph.par(xpdb5)
##D
##D ## Reset to default values
##D xpdb5 <- reset.graph.par(xpdb5)
##D
##D ## Change WRES definition
##D xpdb5 <- change.wres(xpdb5)
## End(Not run)
|
/data/genthat_extracted_code/xpose4/examples/reset.graph.par.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 648 |
r
|
library(xpose4)
### Name: reset.graph.par
### Title: Resets Xpose variable definitions to factory settings
### Aliases: reset.graph.par
### Keywords: methods
### ** Examples
## Not run:
##D ## xpdb5 is an Xpose data object
##D ## We expect to find the required NONMEM run and table files for run
##D ## 5 in the current working directory
##D xpdb5 <- xpose.data(5)
##D
##D ## Import graphics preferences you saved earlier using export.graph.par
##D xpdb5 <- import.graph.par(xpdb5)
##D
##D ## Reset to default values
##D xpdb5 <- reset.graph.par(xpdb5)
##D
##D ## Change WRES definition
##D xpdb5 <- change.wres(xpdb5)
## End(Not run)
|
\name{ear2bey}
\alias{ear2bey}
\title{bond-equivalent yield (BEY), 2 x the semiannual discount rate}
\usage{
ear2bey(ear)
}
\arguments{
\item{ear}{effective annual rate}
}
\description{
bond-equivalent yield (BEY), 2 x the semiannual discount
rate
}
\examples{
ear2bey(ear=0.08)
}
\seealso{
\code{\link{ear}}
}
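\details{
Editor's note (an assumption, not stated in this file): the conversion is
taken to be \code{bey = 2 * ((1 + ear)^0.5 - 1)}, i.e. twice the semiannual
rate implied by the effective annual rate, so \code{ear2bey(ear = 0.08)} is
roughly 0.0785.
}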
|
/man/ear2bey.Rd
|
no_license
|
asheshwor/FinCal
|
R
| false | false | 314 |
rd
|
\name{ear2bey}
\alias{ear2bey}
\title{bond-equivalent yield (BEY), 2 x the semiannual discount rate}
\usage{
ear2bey(ear)
}
\arguments{
\item{ear}{effective annual rate}
}
\description{
bond-equivalent yield (BEY), 2 x the semiannual discount
rate
}
\examples{
ear2bey(ear=0.08)
}
\seealso{
\code{\link{ear}}
}
|
\name{parviol}
\Rdversion{1.1}
\alias{parviol}
\title{Parviol}
\description{
Parviol combines parallel coordinates and violin plot
}
\usage{
parviol(df, violinplot=TRUE, main=NULL, sub=NULL)
}
\arguments{
\item{df}{data frame.}
\item{violinplot}{if \code{TRUE} draws violin plots on parallel axis.}
\item{main}{main title for the plot.}
\item{sub}{sub title for the plot.}
}
\details{
Parviol draws parallel coordinates and violin plot for every variable on parallel axis.
}
\author{Jaroslav Myslivec <jaroslav.myslivec@upce.cz>}
\examples{
data(iris)
parviol(iris)
}
\keyword{hplot}
|
/man/parviol.Rd
|
no_license
|
cran/parviol
|
R
| false | false | 623 |
rd
|
\name{parviol}
\Rdversion{1.1}
\alias{parviol}
\title{Parviol}
\description{
Parviol combines parallel coordinates and violin plot
}
\usage{
parviol(df, violinplot=TRUE, main=NULL, sub=NULL)
}
\arguments{
\item{df}{data frame.}
\item{violinplot}{if \code{TRUE} draws violin plots on parallel axis.}
\item{main}{main title for the plot.}
\item{sub}{sub title for the plot.}
}
\details{
Parviol draws parallel coordinates and violin plot for every variable on parallel axis.
}
\author{Jaroslav Myslivec <jaroslav.myslivec@upce.cz>}
\examples{
data(iris)
parviol(iris)
}
\keyword{hplot}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correctionsataglance-data.R
\name{parseCorrGrades}
\alias{parseCorrGrades}
\title{Parse CORR Grades}
\usage{
parseCorrGrades(timeSeries, timezone)
}
\arguments{
\item{timeSeries}{The time series to get grades for}
\item{timezone}{The timezone to parse data into}
}
\description{
Retrieves and formats grades for the CORR report
}
|
/man/parseCorrGrades.Rd
|
permissive
|
USGS-R/repgen
|
R
| false | true | 409 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correctionsataglance-data.R
\name{parseCorrGrades}
\alias{parseCorrGrades}
\title{Parse CORR Grades}
\usage{
parseCorrGrades(timeSeries, timezone)
}
\arguments{
\item{timeSeries}{The time series to get grades for}
\item{timezone}{The timezone to parse data into}
}
\description{
Retrieves and formats grades for the CORR report
}
|
##
# The following bug is associated with JIRA PUB-838
# 'Inaccurate error message: h2o.performance()'
# Testing h2o.performance with rogue label vector and original dataframe
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
test <- function(conn) {
print("Reading in original prostate data.")
prostate.hex = h2o.importFile(conn, locate("smalldata/prostate/prostate.csv.zip"), key="prostate.hex", header=TRUE)
print("Run test/train split at 20/80.")
prostate.hex$split <- ifelse(h2o.runif(prostate.hex)>0.8, yes=1, no=0)
prostate.train <- h2o.assign(prostate.hex[prostate.hex$split == 0, c(1:9)], "prostate.train")
prostate.test <- h2o.assign(prostate.hex[prostate.hex$split == 1, c(1:9)], "prostate.test")
test.labels = h2o.assign(prostate.test[,2], "test.labels")
print("Set variables to build models")
myX = c(3:9)
myY = 2
print("Creating model")
system.time(h2o.glm.model <- h2o.glm(x=myX, y=myY, data=prostate.train, key="h2o.glm.prostate", family="binomial", alpha=1, higher_accuracy=F, lambda_search=F, nfolds=0, variable_importances=FALSE, use_all_factor_levels=FALSE))
print("Predict on test data")
prediction <- h2o.predict(h2o.glm.model, prostate.test)
print("Check performance of model")
h2o.performance(prediction$'1', prostate.test$'CAPSULE') # works
h2o.performance(prediction$'1', test.labels) # checking performance with separate vector containing labels
testEnd()
}
doTest("Testing h2o.performance with rogue label vector and original dataframe ", test)
|
/h2o-r/tests/testdir_jira/runit_NOPASS_pub_838_h2o_perf_message.R
|
permissive
|
JMR-b/h2o-dev
|
R
| false | false | 1,575 |
r
|
##
# The following bug is associated with JIRA PUB-838
# 'Inaccurate error message: h2o.performance()'
# Testing h2o.performance with rogue label vector and original dataframe
##
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../h2o-runit.R')
test <- function(conn) {
print("Reading in original prostate data.")
prostate.hex = h2o.importFile(conn, locate("smalldata/prostate/prostate.csv.zip"), key="prostate.hex", header=TRUE)
print("Run test/train split at 20/80.")
prostate.hex$split <- ifelse(h2o.runif(prostate.hex)>0.8, yes=1, no=0)
prostate.train <- h2o.assign(prostate.hex[prostate.hex$split == 0, c(1:9)], "prostate.train")
prostate.test <- h2o.assign(prostate.hex[prostate.hex$split == 1, c(1:9)], "prostate.test")
test.labels = h2o.assign(prostate.test[,2], "test.labels")
print("Set variables to build models")
myX = c(3:9)
myY = 2
print("Creating model")
system.time(h2o.glm.model <- h2o.glm(x=myX, y=myY, data=prostate.train, key="h2o.glm.prostate", family="binomial", alpha=1, higher_accuracy=F, lambda_search=F, nfolds=0, variable_importances=FALSE, use_all_factor_levels=FALSE))
print("Predict on test data")
prediction <- h2o.predict(h2o.glm.model, prostate.test)
print("Check performance of model")
h2o.performance(prediction$'1', prostate.test$'CAPSULE') # works
h2o.performance(prediction$'1', test.labels) # checking performance with separate vector containing labels
testEnd()
}
doTest("Testing h2o.performance with rogue label vector and original dataframe ", test)
|
#Exploratory analysis
load("~/Documents/Caltex/Analysis workspace.Rdata")
setwd('/Users/Sophie/Documents/Caltex')
detach(analysis.data)
attach(analysis.data)
#Time series plot
plot(Calendar.day[Fuel.Grade == 'E10'], quantity[Fuel.Grade == 'E10'], type = 'l',
col = 'gray', xlab = 'Sale date', ylab = 'Fuel quantity (L)',
main = 'E10 fuel quantity sales by sale date')
lines(Calendar.day[Fuel.Grade == 'E10'],
smooth.spline(quantity[Fuel.Grade == 'E10'], spar = 0.4)$y,
col='blue')
abline(v = as.Date('2014-02-14'), col = 'red')
par(new=T)
plot(Calendar.day[Fuel.Grade == 'E10'],
-AWE.step[Fuel.Grade == 'E10'],
col='green',
type = 'l')
#Box plots by day of week, split pre and post 2014-02-14
boxplot(quantity[Fuel.Grade == 'E10'] ~ dow[Fuel.Grade == 'E10'] +
level.shift[Fuel.Grade == 'E10'])
#Flag Saturday and Sundays
boxplot(quantity[Fuel.Grade == 'E10'] ~ Sat[Fuel.Grade == 'E10'] +
Sun[Fuel.Grade == 'E10'] + level.shift[Fuel.Grade == 'E10'])
#Box plots by month, split pre and post 2014-02-14
boxplot(quantity[Fuel.Grade == 'E10'] ~ month[Fuel.Grade == 'E10'] +
level.shift[Fuel.Grade == 'E10'])
#Plot quantity vs price
selector = Fuel.Grade == 'E10' & level.shift == 0
library(hexbin)
bin<-hexbin(price.Caltex[selector], quantity[selector],
xbins=50, xlab = 'Price (cpl)',
ylab = 'Quantity (L)')
plot(bin, main="E10 Fuel purchased vs price",
colramp = colorRampPalette(c('gray','red')))
plot(price.Caltex[selector], quantity[selector])
#Average difference in price
plot(pricediff.pc.mkt[selector], quantity[selector])
plot(pricediff.cpl.mkt[selector], quantity[selector])
#BP
plot(pricediff.pc.BP[selector], quantity[selector])
plot(pricediff.cpl.BP[selector], quantity[selector])
#Coles
plot(pricediff.pc.Coles[selector], quantity[selector])
plot(pricediff.cpl.Coles[selector], quantity[selector])
#711
plot(pricediff.pc.711[selector], quantity[selector])
plot(pricediff.cpl.711[selector], quantity[selector])
selector = Fuel.Grade == 'E10'
init <- glm(quantity ~ month + dow + level.shift + price.Caltex + pricediff.cpl.mkt,
family = poisson(link='log'), data = analysis.data, subset = selector)
summary(init)
#Plot prices
plot(Calendar.day, price.Caltex, type = 'l', col = 'gray')
lines(Calendar.day, price.711, col = 2)
lines(Calendar.day, price.BP, col = 3)
lines(Calendar.day, price.Coles, col = 4)
plot(c(as.Date('2013-01-01'), as.Date('2015-06-30')), c(-8,8), type = 'n')
points(Calendar.day,pricediff.cpl.BP)
points(Calendar.day,pricediff.cpl.711, col = 'red')
points(Calendar.day,pricediff.cpl.Coles, col = 'blue')
analysis.data$cheapest <- as.factor(as.numeric(analysis.data$price.Caltex <=
apply(analysis.data[,c('price.BP', 'price.711', 'price.Coles')], 1, min)))
analysis.data$price.rank <- as.factor(
apply(analysis.data[,c('price.Caltex', 'price.BP', 'price.711', 'price.Coles')],
1, rank, ties.method = 'min')['price.Caltex',]
)
boxplot(quantity ~ price.rank + level.shift)
par(mfrow = c(2,2))
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 1], freq = F, add = T)
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 2], freq = F, add = T)
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 3], freq = F, add = T)
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 4], freq = F, add = T)
#Very similar
summary(analysis.data)
attach(analysis.data)
detach(analysis.data)
subber = Fuel.Grade == 'E10'
par(mfrow = c(2,1))
plot(price.Caltex[subber], profit.e10[subber])
plot(quantity[subber], profit.e10[subber])
par(mfrow = c(1,1))
plot(price.Caltex[subber], (fuel.revenue[subber] - profit.e10[subber])/
quantity[subber])
analysis.data$nam.pl[Fuel.Grade == 'E10'] <- ((analysis.data$profit.e10)
/ analysis.data$quantity)[Fuel.Grade == 'E10']
analysis.data$nam.pl[Fuel.Grade == 'Vtx 95'] <- ((analysis.data$profit.vtx)
/ analysis.data$quantity)[Fuel.Grade == 'Vtx 95']
subber = Fuel.Grade == 'Vtx 95'
par(mfrow = c(2,1))
plot(price.Caltex[subber], profit.vtx[subber])
plot(quantity[subber], profit.vtx[subber])
par(mfrow = c(1,1))
plot(price.Caltex[subber], (fuel.revenue[subber] - profit.vtx[subber])/
quantity[subber])
#Linear, can derive a margin per litre, two outlying points for Vortex
library(hexbin)
selector <- nam.pl < 1 & Fuel.Grade == 'E10'
bin<-hexbin(sqrt(nam.pl)[selector], quantity[selector],
xbins=50, xlab = 'sqrt NAM per Litre (cpL)',
ylab = 'Quantity (L)')
plot(bin)
basic <- glm(quantity ~ sqrt(nam.pl), family = poisson, data = analysis.data,
subset = nam.pl < 1 & Fuel.Grade == 'E10')
summary(basic)
anova(basic)
plot(basic)
|
/3_Exploratory analysis.r
|
no_license
|
S0phie/Example-data-analysis
|
R
| false | false | 4,790 |
r
|
#Exploratory analysis
load("~/Documents/Caltex/Analysis workspace.Rdata")
setwd('/Users/Sophie/Documents/Caltex')
detach(analysis.data)
attach(analysis.data)
#Time series plot
plot(Calendar.day[Fuel.Grade == 'E10'], quantity[Fuel.Grade == 'E10'], type = 'l',
col = 'gray', xlab = 'Sale date', ylab = 'Fuel quantity (L)',
main = 'E10 fuel quantity sales by sale date')
lines(Calendar.day[Fuel.Grade == 'E10'],
smooth.spline(quantity[Fuel.Grade == 'E10'], spar = 0.4)$y,
col='blue')
abline(v = as.Date('2014-02-14'), col = 'red')
par(new=T)
plot(Calendar.day[Fuel.Grade == 'E10'],
-AWE.step[Fuel.Grade == 'E10'],
col='green',
type = 'l')
#Box plots by day of week, split pre and post 2014-02-14
boxplot(quantity[Fuel.Grade == 'E10'] ~ dow[Fuel.Grade == 'E10'] +
level.shift[Fuel.Grade == 'E10'])
#Flag Saturday and Sundays
boxplot(quantity[Fuel.Grade == 'E10'] ~ Sat[Fuel.Grade == 'E10'] +
Sun[Fuel.Grade == 'E10'] + level.shift[Fuel.Grade == 'E10'])
#Box plots by month, split pre and post 2014-02-14
boxplot(quantity[Fuel.Grade == 'E10'] ~ month[Fuel.Grade == 'E10'] +
level.shift[Fuel.Grade == 'E10'])
#Plot quantity vs price
selector = Fuel.Grade == 'E10' & level.shift == 0
library(hexbin)
bin<-hexbin(price.Caltex[selector], quantity[selector],
xbins=50, xlab = 'Price (cpl)',
ylab = 'Quantity (L)')
plot(bin, main="E10 Fuel purchased vs price",
colramp = colorRampPalette(c('gray','red')))
plot(price.Caltex[selector], quantity[selector])
#Average difference in price
plot(pricediff.pc.mkt[selector], quantity[selector])
plot(pricediff.cpl.mkt[selector], quantity[selector])
#BP
plot(pricediff.pc.BP[selector], quantity[selector])
plot(pricediff.cpl.BP[selector], quantity[selector])
#Coles
plot(pricediff.pc.Coles[selector], quantity[selector])
plot(pricediff.cpl.Coles[selector], quantity[selector])
#711
plot(pricediff.pc.711[selector], quantity[selector])
plot(pricediff.cpl.711[selector], quantity[selector])
selector = Fuel.Grade == 'E10'
init <- glm(quantity ~ month + dow + level.shift + price.Caltex + pricediff.cpl.mkt,
family = poisson(link='log'), data = analysis.data, subset = selector)
summary(init)
#Plot prices
plot(Calendar.day, price.Caltex, type = 'l', col = 'gray')
lines(Calendar.day, price.711, col = 2)
lines(Calendar.day, price.BP, col = 3)
lines(Calendar.day, price.Coles, col = 4)
plot(c(as.Date('2013-01-01'), as.Date('2015-06-30')), c(-8,8), type = 'n')
points(Calendar.day,pricediff.cpl.BP)
points(Calendar.day,pricediff.cpl.711, col = 'red')
points(Calendar.day,pricediff.cpl.Coles, col = 'blue')
analysis.data$cheapest <- as.factor(as.numeric(analysis.data$price.Caltex <=
apply(analysis.data[,c('price.BP', 'price.711', 'price.Coles')], 1, min)))
analysis.data$price.rank <- as.factor(
apply(analysis.data[,c('price.Caltex', 'price.BP', 'price.711', 'price.Coles')],
1, rank, ties.method = 'min')['price.Caltex',]
)
boxplot(quantity ~ price.rank + level.shift)
par(mfrow = c(2,2))
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 1], freq = F, add = T)
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 2], freq = F, add = T)
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 3], freq = F, add = T)
plot(c(0,10000), c(0,0.0004), type = 'n')
hist(quantity[price.rank == 4], freq = F, add = T)
#Very similar
summary(analysis.data)
attach(analysis.data)
detach(analysis.data)
subber = Fuel.Grade == 'E10'
par(mfrow = c(2,1))
plot(price.Caltex[subber], profit.e10[subber])
plot(quantity[subber], profit.e10[subber])
par(mfrow = c(1,1))
plot(price.Caltex[subber], (fuel.revenue[subber] - profit.e10[subber])/
quantity[subber])
analysis.data$nam.pl[Fuel.Grade == 'E10'] <- ((analysis.data$profit.e10)
/ analysis.data$quantity)[Fuel.Grade == 'E10']
analysis.data$nam.pl[Fuel.Grade == 'Vtx 95'] <- ((analysis.data$profit.vtx)
/ analysis.data$quantity)[Fuel.Grade == 'Vtx 95']
subber = Fuel.Grade == 'Vtx 95'
par(mfrow = c(2,1))
plot(price.Caltex[subber], profit.vtx[subber])
plot(quantity[subber], profit.vtx[subber])
par(mfrow = c(1,1))
plot(price.Caltex[subber], (fuel.revenue[subber] - profit.vtx[subber])/
quantity[subber])
#Linear, can derive a margin per litre, two outlying points for Vortex
library(hexbin)
selector <- nam.pl < 1 & Fuel.Grade == 'E10'
bin<-hexbin(sqrt(nam.pl)[selector], quantity[selector],
xbins=50, xlab = 'sqrt NAM per Litre (cpL)',
ylab = 'Quantity (L)')
plot(bin)
basic <- glm(quantity ~ sqrt(nam.pl), family = poisson, data = analysis.data,
subset = nam.pl < 1 & Fuel.Grade == 'E10')
summary(basic)
anova(basic)
plot(basic)
|
.lang$init.R <- list(
plain = list(
error_rpgm.setLang = "(init.R) Aucune langue n'a pu être chargée."
),
gui.updateLang = list(
error = "(gui.updateLang) Il n'y a pas de liste nommée .lang$%s"
)
)
.lang$example <- list(
title = list(
value = "Gestion des langues dans une Application RPGM"
),
description = list(
value = "<p>Cette application est un exemple de gestion de différentes langues pour l'utilisateur finale. Comme ça marche ? Dans cette application, il y a un dossier <code>lang</code> dans lequel il y a : <ul><li>Un fichier <code>init.R</code></li><li>Plusieurs fichiers avec le nom de la langue, comme <code>English.R</code></li></ul>Dans <code>init.R</code>, la fonction <code>rpgm.setLang</code> charge la langue. Cela initialise une liste <code>.lang</code> et charge dans celle-ci la langue grâce au fichier .R correspondant. Dans <code>French.R</code> ou <code>English.R</code>, la langue choisie est chargée.</p><p>Cette application prend comme langue celle par défaut de votre système d'exploitation parmi <em>French</em> et <em>English</em>. Il y a une liste à choix ci-dessous qui permet de changer la langue à la volée (il n'est pas nécessaire d'ajouter cela à une application, la langue pouvait être sélectionné automatiquement ou sur une première gui).</p>"
),
lang = list(
labeltext = "Langue:",
helptext = "Quand vous sélectionnez la langue ici, il est défini pour toute l'application et la GUI actuelle est mise à jour automatiquement."
)
)
|
/lang/French.R
|
no_license
|
pgmsolutions/pgm-lang
|
R
| false | false | 1,581 |
r
|
.lang$init.R <- list(
plain = list(
error_rpgm.setLang = "(init.R) Aucune langue n'a pu être chargée."
),
gui.updateLang = list(
error = "(gui.updateLang) Il n'y a pas de liste nommée .lang$%s"
)
)
.lang$example <- list(
title = list(
value = "Gestion des langues dans une Application RPGM"
),
description = list(
value = "<p>Cette application est un exemple de gestion de différentes langues pour l'utilisateur finale. Comme ça marche ? Dans cette application, il y a un dossier <code>lang</code> dans lequel il y a : <ul><li>Un fichier <code>init.R</code></li><li>Plusieurs fichiers avec le nom de la langue, comme <code>English.R</code></li></ul>Dans <code>init.R</code>, la fonction <code>rpgm.setLang</code> charge la langue. Cela initialise une liste <code>.lang</code> et charge dans celle-ci la langue grâce au fichier .R correspondant. Dans <code>French.R</code> ou <code>English.R</code>, la langue choisie est chargée.</p><p>Cette application prend comme langue celle par défaut de votre système d'exploitation parmi <em>French</em> et <em>English</em>. Il y a une liste à choix ci-dessous qui permet de changer la langue à la volée (il n'est pas nécessaire d'ajouter cela à une application, la langue pouvait être sélectionné automatiquement ou sur une première gui).</p>"
),
lang = list(
labeltext = "Langue:",
helptext = "Quand vous sélectionnez la langue ici, il est défini pour toute l'application et la GUI actuelle est mise à jour automatiquement."
)
)
|
\name{hg18refGene}
\alias{hg18refGene}
\title{hg18's refGenes }
\description{
The human (hg18) reference genes from UCSC
}
\keyword{datasets}
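\examples{
## Editor's hypothetical usage sketch; assumes the dataset is exposed via
## data() by the r3Cseq package.
\dontrun{
data(hg18refGene)
head(hg18refGene)
}
}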
|
/man/hg18refGene.Rd
|
no_license
|
supatt-lab/r3Cseq
|
R
| false | false | 141 |
rd
|
\name{hg18refGene}
\alias{hg18refGene}
\title{hg18's refGenes }
\description{
The human (hg18) reference genes from UCSC
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{groupByPieceThemeAvg}
\alias{groupByPieceThemeAvg}
\title{Average pieces for each theme.}
\usage{
groupByPieceThemeAvg(dataset, minYear = min(dataset$year),
maxYear = max(dataset$year), minPrice = min(dataset$price),
maxPrice = max(dataset$price), minPieces = min(dataset$pieces),
maxPieces = max(dataset$pieces),
themes = sort(unique(dataset$theme)),
subthemes = sort(unique(dataset$subtheme)))
}
\arguments{
\item{dataset}{the dataset.}
\item{minYear}{the minimum year. Defaults to be minimum year in the dataset.}
\item{maxYear}{the maximum year. Defaults to be maximum year in the dataset.}
\item{minPrice}{the minimum price count. Defaults to be minimum price in the dataset.}
\item{maxPrice}{the maximum price count. Defaults to be maximum price in the dataset.}
\item{minPieces}{the minimum piece count. Defaults to be minimum piece count in the dataset.}
\item{maxPieces}{the maximum piece count. Defaults to be maximum piece count in the dataset.}
\item{themes}{the themes to include in the filtered dataset. Defaults to be all themes.}
\item{subthemes}{the subthemes to include in the filtered dataset. Defaults to be all subthemes.}
}
\value{
data.table 2 columns
}
\description{
Average pieces for each theme.
}
\examples{
dataset <- read.lego(system.file('extdata', 'brickset-mysets-owned.csv', package ='lego'))
groupByPieceThemeAvg(dataset)
groupByPieceThemeAvg(dataset, themes=c('Star Wars'))
groupByPieceThemeAvg(dataset, 2010, 2016, 5, 10, 100, 1000, c('Star Wars'), c('Episode I'))
groupByPieceThemeAvg(dataset, 2014, 2015, themes=c('Star Wars'), subthemes=c('Episode I'))
}
\keyword{lego}
|
/lego/man/groupByPieceThemeAvg.Rd
|
permissive
|
darrenredmond/advanced_r
|
R
| false | true | 1,721 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{groupByPieceThemeAvg}
\alias{groupByPieceThemeAvg}
\title{Average pieces for each theme.}
\usage{
groupByPieceThemeAvg(dataset, minYear = min(dataset$year),
maxYear = max(dataset$year), minPrice = min(dataset$price),
maxPrice = max(dataset$price), minPieces = min(dataset$pieces),
maxPieces = max(dataset$pieces),
themes = sort(unique(dataset$theme)),
subthemes = sort(unique(dataset$subtheme)))
}
\arguments{
\item{dataset}{the dataset.}
\item{minYear}{the minimum year. Defaults to be minimum year in the dataset.}
\item{maxYear}{the maximum year. Defaults to be maximum year in the dataset.}
\item{minPrice}{the minimum price count. Defaults to be minimum price in the dataset.}
\item{maxPrice}{the maximum price count. Defaults to be maximum price in the dataset.}
\item{minPieces}{the minimum piece count. Defaults to be minimum piece count in the dataset.}
\item{maxPieces}{the maximum piece count. Defaults to be maximum piece count in the dataset.}
\item{themes}{the themes to include in the filtered dataset. Defaults to be all themes.}
\item{subthemes}{the subthemes to include in the filtered dataset. Defaults to be all subthemes.}
}
\value{
data.table 2 columns
}
\description{
Average pieces for each theme.
}
\examples{
dataset <- read.lego(system.file('extdata', 'brickset-mysets-owned.csv', package ='lego'))
groupByPieceThemeAvg(dataset)
groupByPieceThemeAvg(dataset, themes=c('Star Wars'))
groupByPieceThemeAvg(dataset, 2010, 2016, 5, 10, 100, 1000, c('Star Wars'), c('Episode I'))
groupByPieceThemeAvg(dataset, 2014, 2015, themes=c('Star Wars'), subthemes=c('Episode I'))
}
\keyword{lego}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mapHydroShed1}
\alias{mapHydroShed1}
\title{HydroSHEDS level 1}
\format{
A SpatialPolygonsDataFrame
}
\source{
Lehner, B., Grill G. (2013): Global river hydrography and network routing:
baseline data and new approaches to study the world’s large river systems.
Hydrological Processes, 27(15): 2171–2186. \url{https://www.hydrosheds.org/page/hydrobasins}
}
\usage{
mapHydroShed1
}
\description{
HydroSHEDS level 1
}
\examples{
\dontrun{
library(sp); library(rmapdata)
sp::plot(mapHydroShed1)
head(mapHydroShed1@data)
}
}
\keyword{datasets}
|
/man/mapHydroShed1.Rd
|
no_license
|
JGCRI/rmapdata
|
R
| false | true | 652 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{mapHydroShed1}
\alias{mapHydroShed1}
\title{HydroSHEDS level 1}
\format{
A SpatialPolygonsDataFrame
}
\source{
Lehner, B., Grill G. (2013): Global river hydrography and network routing:
baseline data and new approaches to study the world’s large river systems.
Hydrological Processes, 27(15): 2171–2186. \url{https://www.hydrosheds.org/page/hydrobasins}
}
\usage{
mapHydroShed1
}
\description{
HydroSHEDS level 1
}
\examples{
\dontrun{
library(sp); library(rmapdata)
sp::plot(mapHydroShed1)
head(mapHydroShed1@data)
}
}
\keyword{datasets}
|
# Dependencies
list.of.packages <- c("scatterplot3d","plotly")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)
rm(list = c("list.of.packages","new.packages"))
# set the size of the sample we work with
Ssize <- 1200
RS <- sample(1:nrow(data),Ssize,replace = FALSE)
# sample of the data
RS_data <- data.frame(data[RS,])
rm(RS)
# plot the variables that are worth adjusting or extracting something from
cols<- c("vehicle_engine_volume","vehicle_weight","vehicle_age",
"insurer_age","vehicle_engine_power",
"insurer_number_of_previous_insured_months",
"insurer_number_of_previous_accidents")
par(mfrow = c(3,3))
for (i in cols){
plot(as.vector(RS_data[,i]),RS_data$insurance_median, type = "p",
na.rm = TRUE,xlab = i)
}
rm(cols)
# for readability, jitter age a little
ageR <- RS_data$insurer_age + rnorm(Ssize, 0,.5)
par(mfrow = c(1,1))
plot(ageR,RS_data$insurance_median, type = "p",xlab = "ageR")
rm(ageR)
# display the plots with the newly created groups marked off
ages <- c(0,24,26,30,35,40,45,50,60,70)
for (i in ages){
abline(v = i, col = "lightblue")
}
rm(ages)
plot(RS_data$vehicle_engine_volume,RS_data$insurance_median, type = "p")
vols <- c(0,1000, 1350, 1600, 1850, 2000, 2200, 2600)
for (i in vols){
abline(v = i, col = "lightblue")
}
plot(RS_data$vehicle_engine_power,RS_data$insurance_median, type = "p")
vols <- c(50,75,93,110,150,180,5000)
for (i in vols){
abline(v = i, col = "lightblue")
}
# the ZIP code shows us nothing, but let's look at a small plot anyway
psc <- substr(RS_data$insurer_ZIP_code,1,3)
psc2 <- substr(RS_data$insurer_ZIP_code,1,2)
plot(psc, RS_data$insurance_median)
plot(psc2, RS_data$insurance_median)
# 3D plots for bonus-malus
colsBM <- c("insurer_number_of_previous_insured_months",
"insurer_npaR",
"insurance_median")
# small jitter for better readability of the plot
RS_data$insurer_npaR <-
RS_data$insurer_number_of_previous_accidents+rnorm(Ssize,0,.2)
# apply nice colours
colors <- c("#999999", "#E69F00", "#56B4E9")
colors <- colors[as.numeric(RS_data$insurer_number_of_previous_accidents)+1]
# a list of vectors for viewing the 3D scatter from different angles
listicek <- list("a" = c(1,2,3),
"b" = c(2,1,3),
"c" = c(3,1,2),
"d" = c(3,2,1))
# all 4 together
par(mfrow = c(2,2))
for (i in listicek){
scatterplot3d(RS_data[,colsBM[i]], pch = 16, color=colors,
grid=TRUE, box=FALSE)
}
# one plot at a time - click through them
par(mfrow = c(1,1))
for (i in listicek){
scatterplot3d(RS_data[,colsBM[i]], pch = 16, color=colors,
grid=TRUE, box=FALSE)
}
# the bonus-malus measure that seemed meaningful to us
plot(RS_data$insurer_number_of_previous_insured_months-
36*RS_data$insurer_number_of_previous_accidents,
RS_data$insurance_median, xlab = "Prev. ins. mths - 36*accidents")
# not sure why plotly does not work in a loop... it is written out below
companies <- c("insurance_comp_1","insurance_comp_2",
"insurance_comp_3","insurance_comp_4",
"insurance_comp_5","insurance_comp_6",
"insurance_comp_7","insurance_median")
for (c in companies){
eval(parse( text =
sprintf("fig <- plot_ly(RS_data,
x = ~ insurer_number_of_previous_insured_months,
y = ~ insurer_number_of_previous_accidents,
z = ~ %s,
marker = list(size = 4),
color = ~insurer_number_of_previous_accidents)
add_markers(fig,
x = RS_data$insurer_number_of_previous_insured_months,
y = RS_data$insurer_number_of_previous_accidents,
z = RS_data$%s)
fig",c,c)))
fig
}
# written-out generation of the plotly 3D plot for all insurers - I cannot see anything useful in these
plotText <- "fig <- plot_ly(RS_data,
x = ~ insurer_number_of_previous_insured_months,
y = ~ insurer_number_of_previous_accidents,
z = ~ %s,
marker = list(size = 4),
color = ~insurer_number_of_previous_accidents)
add_markers(fig,
x = RS_data$insurer_number_of_previous_insured_months,
y = RS_data$insurer_number_of_previous_accidents,
z = RS_data$%s)
layout(fig, title = '%s')
fig"
eval(parse( text = sprintf(plotText,companies[1],companies[1],companies[1])))
eval(parse( text = sprintf(plotText,companies[2],companies[2],companies[2])))
eval(parse( text = sprintf(plotText,companies[3],companies[3],companies[3])))
eval(parse( text = sprintf(plotText,companies[4],companies[4],companies[4])))
eval(parse( text = sprintf(plotText,companies[5],companies[5],companies[5])))
eval(parse( text = sprintf(plotText,companies[6],companies[6],companies[6])))
eval(parse( text = sprintf(plotText,companies[7],companies[7],companies[7])))
eval(parse( text = sprintf(plotText,companies[8],companies[8],companies[8])))
# remove unnecessary variables from memory
rm(c,colors,colsBM,companies,fig,i,listicek,plotText,psc,psc2,Ssize,vols)
|
/Vytvorenie_premennych.R
|
no_license
|
LukasVeverka/DataX
|
R
| false | false | 5,504 |
r
|
# Dependencies
list.of.packages <- c("scatterplot3d","plotly")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)
rm(list = c("list.of.packages","new.packages"))
# set the size of the sample we work with
Ssize <- 1200
RS <- sample(1:nrow(data),Ssize,replace = FALSE)
# sample of the data
RS_data <- data.frame(data[RS,])
rm(RS)
# plot the variables that are worth adjusting or extracting something from
cols<- c("vehicle_engine_volume","vehicle_weight","vehicle_age",
"insurer_age","vehicle_engine_power",
"insurer_number_of_previous_insured_months",
"insurer_number_of_previous_accidents")
par(mfrow = c(3,3))
for (i in cols){
plot(as.vector(RS_data[,i]),RS_data$insurance_median, type = "p",
na.rm = TRUE,xlab = i)
}
rm(cols)
# for readability, jitter age a little
ageR <- RS_data$insurer_age + rnorm(Ssize, 0,.5)
par(mfrow = c(1,1))
plot(ageR,RS_data$insurance_median, type = "p",xlab = "ageR")
rm(ageR)
# display the plots with the newly created groups marked off
ages <- c(0,24,26,30,35,40,45,50,60,70)
for (i in ages){
abline(v = i, col = "lightblue")
}
rm(ages)
plot(RS_data$vehicle_engine_volume,RS_data$insurance_median, type = "p")
vols <- c(0,1000, 1350, 1600, 1850, 2000, 2200, 2600)
for (i in vols){
abline(v = i, col = "lightblue")
}
plot(RS_data$vehicle_engine_power,RS_data$insurance_median, type = "p")
vols <- c(50,75,93,110,150,180,5000)
for (i in vols){
abline(v = i, col = "lightblue")
}
# the ZIP code shows us nothing, but let's look at a small plot anyway
psc <- substr(RS_data$insurer_ZIP_code,1,3)
psc2 <- substr(RS_data$insurer_ZIP_code,1,2)
plot(psc, RS_data$insurance_median)
plot(psc2, RS_data$insurance_median)
# 3D plots for bonus-malus
colsBM <- c("insurer_number_of_previous_insured_months",
"insurer_npaR",
"insurance_median")
# small jitter for better readability of the plot
RS_data$insurer_npaR <-
RS_data$insurer_number_of_previous_accidents+rnorm(Ssize,0,.2)
# apply nice colours
colors <- c("#999999", "#E69F00", "#56B4E9")
colors <- colors[as.numeric(RS_data$insurer_number_of_previous_accidents)+1]
# a list of vectors for viewing the 3D scatter from different angles
listicek <- list("a" = c(1,2,3),
"b" = c(2,1,3),
"c" = c(3,1,2),
"d" = c(3,2,1))
# all 4 together
par(mfrow = c(2,2))
for (i in listicek){
scatterplot3d(RS_data[,colsBM[i]], pch = 16, color=colors,
grid=TRUE, box=FALSE)
}
# one plot at a time - click through them
par(mfrow = c(1,1))
for (i in listicek){
scatterplot3d(RS_data[,colsBM[i]], pch = 16, color=colors,
grid=TRUE, box=FALSE)
}
# the bonus-malus measure that seemed meaningful to us
plot(RS_data$insurer_number_of_previous_insured_months-
36*RS_data$insurer_number_of_previous_accidents,
RS_data$insurance_median, xlab = "Prev. ins. mths - 36*accidents")
# not sure why plotly does not work in a loop... it is written out below
companies <- c("insurance_comp_1","insurance_comp_2",
"insurance_comp_3","insurance_comp_4",
"insurance_comp_5","insurance_comp_6",
"insurance_comp_7","insurance_median")
for (c in companies){
eval(parse( text =
sprintf("fig <- plot_ly(RS_data,
x = ~ insurer_number_of_previous_insured_months,
y = ~ insurer_number_of_previous_accidents,
z = ~ %s,
marker = list(size = 4),
color = ~insurer_number_of_previous_accidents)
add_markers(fig,
x = RS_data$insurer_number_of_previous_insured_months,
y = RS_data$insurer_number_of_previous_accidents,
z = RS_data$%s)
fig",c,c)))
fig
}
# written-out generation of the plotly 3D plot for all insurers - I cannot see anything useful in these
plotText <- "fig <- plot_ly(RS_data,
x = ~ insurer_number_of_previous_insured_months,
y = ~ insurer_number_of_previous_accidents,
z = ~ %s,
marker = list(size = 4),
color = ~insurer_number_of_previous_accidents)
add_markers(fig,
x = RS_data$insurer_number_of_previous_insured_months,
y = RS_data$insurer_number_of_previous_accidents,
z = RS_data$%s)
layout(fig, title = '%s')
fig"
eval(parse( text = sprintf(plotText,companies[1],companies[1],companies[1])))
eval(parse( text = sprintf(plotText,companies[2],companies[2],companies[2])))
eval(parse( text = sprintf(plotText,companies[3],companies[3],companies[3])))
eval(parse( text = sprintf(plotText,companies[4],companies[4],companies[4])))
eval(parse( text = sprintf(plotText,companies[5],companies[5],companies[5])))
eval(parse( text = sprintf(plotText,companies[6],companies[6],companies[6])))
eval(parse( text = sprintf(plotText,companies[7],companies[7],companies[7])))
eval(parse( text = sprintf(plotText,companies[8],companies[8],companies[8])))
# remove unnecessary variables from memory
rm(c,colors,colsBM,companies,fig,i,listicek,plotText,psc,psc2,Ssize,vols)
|
# Daniela Victoria Cansino Rosales
# Student ID (Matrícula): 1821849
# 27.02.2020
# Import red cedar (Cedro Rojo) data -----------------------------------------
setwd("C:/Tarea/108-Estadistica/Clases")
CR <- read.csv("cedro_rojo.csv", header = TRUE)
summary(CR)
# One sample ------------------------------------------------------------------
# The theoretical mean of the Diameter variable established by CONAFOR
# for red cedar is equal to 13
# Compare it with the observed mean of the diameter variable for the
# seedlings produced in the nursery, 12.524
# "mu" must be set to the theoretical value
# the chosen alpha value is 0.05
t.test(CR$diametro, mu=13)
# the alternative hypothesis is accepted
t.test(CR$diametro, mu=12.7)
# the null hypothesis is accepted
t.test(CR$altura, mu=20)
# the alternative hypothesis is accepted
t.test(CR$altura, mu=19)
# the null hypothesis is accepted
|
/Clases/Script_4.R
|
no_license
|
1821849/108-Estadistica
|
R
| false | false | 840 |
r
|
# Daniela Victoria Cansino Rosales
# Student ID (Matrícula): 1821849
# 27.02.2020
# Import red cedar (Cedro Rojo) data -----------------------------------------
setwd("C:/Tarea/108-Estadistica/Clases")
CR <- read.csv("cedro_rojo.csv", header = TRUE)
summary(CR)
# One sample ------------------------------------------------------------------
# The theoretical mean of the Diameter variable established by CONAFOR
# for red cedar is equal to 13
# Compare it with the observed mean of the diameter variable for the
# seedlings produced in the nursery, 12.524
# "mu" must be set to the theoretical value
# the chosen alpha value is 0.05
t.test(CR$diametro, mu=13)
# the alternative hypothesis is accepted
t.test(CR$diametro, mu=12.7)
# the null hypothesis is accepted
t.test(CR$altura, mu=20)
# the alternative hypothesis is accepted
t.test(CR$altura, mu=19)
# the null hypothesis is accepted
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/initializeKARL.R
\name{initializeKARL}
\alias{initializeKARL}
\title{initializeKARL}
\usage{
initializeKARL(rank = "all",
silva.url = "http://www.arb-silva.de/fileadmin/silva_databases/current/Exports/SILVA_123_SSURef_Nr99_tax_silva.fasta.gz",
min = 2)
}
\arguments{
\item{rank}{takes 'domain', 'phylum', 'class', 'order', 'family', 'genus' or 'all' (default: 'all').}
\item{min}{takes the minimum number of genomes per taxon to build a model (default: 2).}
\item{data}{default is 'alldata' object.}
}
\description{
This function generates and stores models and databases for prediction.
}
\examples{
initializeKARL(rank='all',min=2)
}
\keyword{models}
\keyword{prediction}
|
/man/initializeKARL.Rd
|
no_license
|
fernandoh76/KARL
|
R
| false | true | 759 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/initializeKARL.R
\name{initializeKARL}
\alias{initializeKARL}
\title{initializeKARL}
\usage{
initializeKARL(rank = "all",
silva.url = "http://www.arb-silva.de/fileadmin/silva_databases/current/Exports/SILVA_123_SSURef_Nr99_tax_silva.fasta.gz",
min = 2)
}
\arguments{
\item{rank}{takes 'domain', 'phylum', 'class', 'order', 'family', 'genus' or 'all' (default: 'all').}
\item{min}{takes the minimum number of genomes per taxon to build a model (default: 2).}
\item{data}{default is 'alldata' object.}
}
\description{
This function generates and stores models and databases for prediction.
}
\examples{
initializeKARL(rank='all',min=2)
}
\keyword{models}
\keyword{prediction}
|
getwd()
USDA = read.csv("USDA.csv")
summary(USDA)
str(USDA)
max(USDA$Sodium)
max(USDA$Sodium, na.rm=TRUE)
which.max(USDA$Sodium, na.rm=TRUE)
which.max(USDA$Sodium)
max(USDA$Sodium)
names(USDA)
USDA$Description[265]
HighSodium = subset(USDA, Sodium > 10000)
nrow(HighSodium)
HighSodium$Description
match("CAVIAR", USDA$Description)
USDA$Sodium[4154]
USDA$Sodium[match("CAVIAR", USDA$Description)]
str(USDA$Sodium)
summary(USDA$Sodium)
sd(USDA$Sodium)
sd(USDA$Sodium,na.rm())
sd(USDA$Sodium,na.rm()=TRUE)
sd(USDA$Sodium,na.rm=TRUE)
plot(USDA$Protein, USDA$TotalFat)
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat")
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat", main= "Protien Vs Fat", col="red")
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat", main= "Protien Vs Fat", col="red")
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat", main= "Protien Vs Fat", col="red")
hist(USDA$VitaminC)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency")
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100))
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=100)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100))
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency")
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=100)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=2000)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", breaks=2000)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=2000)
boxplot(USDA$Sugar, main="Boxplot of Sugar Levels")
boxplot(USDA$Sugar, main="Boxplot of Sugar Levels", ylab = "Sugar in gms")
USDA$Sodium[1] > mean(USDA$Sodium, na.rm=TRUE)
USDA$Sodium[2] > mean(USDA$Sodium, na.rm=TRUE)
USDA$Sodium[50] > mean(USDA$Sodium, na.rm=TRUE)
HighSodium = USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE)
str(HighSodium)
summary(HighSodium)
HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
str(HighSodium)
summary(HighSodium)
USDA$HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
str(USDA)
USDA$HighProtein = as.numeric(USDA$Protein > mean(USDA$Protein, na.rm=TRUE))
USDA$HighTotalFat = as.numeric(USDA$TotalFat > mean(USDA$TotalFat, na.rm=TRUE))
USDA$HighCarbohydrate = as.numeric(USDA$Carbohydrate > mean(USDA$Carbohydrate, na.rm=TRUE))
str(USDA)
table(USDA$HighSodium)
table(USDA$HighSodium, USDA$HighTotalFat)
tapply(USDA$Iron, USDA$HighProtein, mean, na.rm=TRUE)
tapply(USDA$VitaminC, USDA$HighCarbohydrate, mean, na.rm=TRUE)
tapply(USDA$Protien, USDA$HighCarbohydrate, mean, na.rm=TRUE)
tapply(USDA$Protein, USDA$HighCarbohydrate, mean, na.rm=TRUE)
tapply(USDA$VitaminC, USDA$HighCarbohydrate, summary, na.rm=TRUE)
?read.table
?read.csv
savehistory("~/Projects/edx/AnalyticsEdge_MITx15_071x/lec1/lab2.R")
|
/2015/mitx_ana_edge_15_071x/lec1-intro/recitation.R
|
no_license
|
bicepjai/myclasses
|
R
| false | false | 3,330 |
r
|
getwd()
USDA = read.csv("USDA.csv")
summary(USDA)
str(USDA)
max(USDA$Sodium)
max(USDA$Sodium, na.rm=TRUE)
which.max(USDA$Sodium, na.rm=TRUE)
which.max(USDA$Sodium)
max(USDA$Sodium)
names(USDA)
USDA$Description[265]
HighSodium = subset(USDA, Sodium > 10000)
nrow(HighSodium)
HighSodium$Description
match("CAVIAR", USDA$Description)
USDA$Sodium[4154]
USDA$Sodium[match("CAVIAR", USDA$Description)]
str(USDA$Sodium)
summary(USDA$Sodium)
sd(USDA$Sodium)
sd(USDA$Sodium,na.rm())
sd(USDA$Sodium,na.rm()=TRUE)
sd(USDA$Sodium,na.rm=TRUE)
plot(USDA$Protein, USDA$TotalFat)
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat")
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat", main= "Protien Vs Fat", col="red")
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat", main= "Protien Vs Fat", col="red")
plot(USDA$Protein, USDA$TotalFat, xlab="Protein", ylab="TotalFat", main= "Protien Vs Fat", col="red")
hist(USDA$VitaminC)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency")
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100))
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=100)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100))
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency")
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=100)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=2000)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", breaks=2000)
hist(USDA$VitaminC, xlab="Vitamin C (mg)", ylab = "Frequency", main = "Histogram of Vitamin C", xlim = c(0,100), breaks=2000)
boxplot(USDA$Sugar, main="Boxplot of Sugar Levels")
boxplot(USDA$Sugar, main="Boxplot of Sugar Levels", ylab = "Sugar in gms")
USDA$Sodium[1] > mean(USDA$Sodium, na.rm=TRUE)
USDA$Sodium[2] > mean(USDA$Sodium, na.rm=TRUE)
USDA$Sodium[50] > mean(USDA$Sodium, na.rm=TRUE)
HighSodium = USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE)
str(HighSodium)
summary(HighSodium)
HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
str(HighSodium)
summary(HighSodium)
USDA$HighSodium = as.numeric(USDA$Sodium > mean(USDA$Sodium, na.rm=TRUE))
str(USDA)
USDA$HighProtein = as.numeric(USDA$Protein > mean(USDA$Protein, na.rm=TRUE))
USDA$HighTotalFat = as.numeric(USDA$TotalFat > mean(USDA$TotalFat, na.rm=TRUE))
USDA$HighCarbohydrate = as.numeric(USDA$Carbohydrate > mean(USDA$Carbohydrate, na.rm=TRUE))
str(USDA)
table(USDA$HighSodium)
table(USDA$HighSodium, USDA$HighTotalFat)
tapply(USDA$Iron, USDA$HighProtein, mean, na.rm=TRUE)
tapply(USDA$VitaminC, USDA$HighCarbohydrate, mean, na.rm=TRUE)
tapply(USDA$Protien, USDA$HighCarbohydrate, mean, na.rm=TRUE)
tapply(USDA$Protein, USDA$HighCarbohydrate, mean, na.rm=TRUE)
tapply(USDA$VitaminC, USDA$HighCarbohydrate, summary, na.rm=TRUE)
?read.table
?read.csv
savehistory("~/Projects/edx/AnalyticsEdge_MITx15_071x/lec1/lab2.R")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matchNodes.R
\name{matchNodes}
\alias{matchNodes}
\title{matchNodes()}
\usage{
matchNodes(list_1, list_2)
}
\arguments{
\item{list_1}{A list object from createAncestry()}
\item{list_2}{Another list object from createAncestry()}
}
\description{
Outputs a matrix showing which nodes match on species
}
\keyword{ancestry}
|
/man/matchNodes.Rd
|
no_license
|
palautatan/csnap
|
R
| false | true | 404 |
rd
|
# Attempting to replicate the ACSI index using sentiment analysis on Twitter data
# Using the Bing lexicon
# Loading necessary libraries
library(readr)
library(dplyr)
library(tidyr)
library(tidytext)
library(stringr)
library(ggplot2)
# Reading tweets from tweets csv. Also eliminating retweets by choosing distinct text entries.
# Removing all retweets would have also removed tweets whose original tweet has not
# been captured
path <- file.path("C:", "Users", "anton", "Dropbox", "Twitter Data", "airlines.csv", fsep = "/")
tweetsText <- read_csv(path, col_names = TRUE)
# Comment out the following line if you want to include retweets in analysis
tweetsText <- distinct(tweetsText,text,.keep_all = TRUE) # To be used in order to exclude retweets
# Removing tweets from airline accounts as they do not reflect consumer sentiment
tweetsText$screen_name <- tolower(tweetsText$screen_name)
tweetsText <- tweetsText[!(tweetsText$screen_name %in% c("united", "alaskaair", "allegiant", "americanair",
"delta", "flyfrontier", "hawaiianair", "jetblue", "southwestair", "spiritairlines")),]
# Perform unnest tokens, adding the tweet ID on a separate column to keep track for further grouping
# Sentiment analysis using bing lexicon
tweetsSentimentsBing <- tweetsText %>%
mutate(.,tweetNumber = row_number()) %>%
unnest_tokens(., word, text) %>%
filter(!word %in% stop_words$word)%>%
group_by(airline) %>%
inner_join(get_sentiments("bing")) %>%
mutate(.,total_words = n()) %>%
ungroup()
# Creating a chart to identify if any of the highest contributing words are out of context
tweetsSentimentsBing %>%
count(airline,word,sentiment,total_words) %>%
mutate(.,percent = n/total_words) %>%
group_by(airline, sentiment == "positive") %>%
top_n(n=10,wt=percent) %>%
ungroup() %>%
mutate(word = reorder(word,percent)) %>%
ggplot(aes(x=word,y=percent,fill = sentiment)) +
geom_col() +
coord_flip() +
facet_wrap(~airline, scales = "free")
# Removing words falsely identified as sentiment words, such as the word "trump"
tweetsSentimentsBing <- tweetsSentimentsBing[!(tweetsSentimentsBing$word %in% c("trump")),]
# Create polarity matrix
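# polarity per tweet = (# positive Bing words) - (# negative Bing words);
# tweets containing no Bing sentiment words do not appear in pol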
positiveSentiments <- tweetsSentimentsBing %>%
filter(sentiment == "positive") %>%
count(.,airline, tweetNumber)
negativeSentiments <- tweetsSentimentsBing %>%
filter(sentiment == "negative") %>%
count(.,airline, tweetNumber)
pol <- full_join(positiveSentiments, negativeSentiments, by=c("tweetNumber","airline"), suffix = c(".positive",".negative"))
pol[is.na(pol)] <- 0
pol <- mutate(pol, polarity = n.positive - n.negative)
polarity_hist <- ggplot(pol, aes(x=polarity)) + geom_histogram(binwidth = 1, color="black",fill="white") +
  facet_wrap(facets = "airline", scales = "free")
polarity_hist
ggsave(file.path("~", "Github","ieseDataSciTwitterProject", "airlinePolarityBing.pdf", fsep = "/"), plot = polarity_hist)
# As we can see the airlines with the most positive comments, normalised to volume, are
# Positive: AlaskaAir, Delta, JetBlue, SouthWest, Allegiant
# Negative: Spirit, Fly Frontier
# Average polarities to rank airlines
airlinePolarity <- pol %>%
group_by(airline) %>%
summarise(avgPol = mean(polarity)) %>%
mutate(rankPol = rank(desc(avgPol))) %>%
ungroup()
# A more extensive method of ranking polarities using thresholds but really not needed
#airlinePolarity <- pol %>%
# group_by(airline) %>%
# summarise(threshold1 = sum(polarity>=1)/sum(polarity<=-1),
# threshold2 = sum(polarity>=2)/sum(polarity<=-2),
# threshold3 = sum(polarity>=3)/sum(polarity<=-3)) %>%
# mutate(rankThr1 = rank(desc(threshold1)),
# rankThr2 = rank(desc(threshold2)),
# rankThr3 = rank(desc(threshold3)))
#airlinePolarity$avgRank <- 0
#for(i in 1:nrow(airlinePolarity)){
# airlinePolarity$avgRank[i] <- round(mean(c(as.numeric(airlinePolarity[i,"rankThr1"]),
# as.numeric(airlinePolarity[i,"rankThr2"]),
# as.numeric(airlinePolarity[i,"rankThr3"]))))
#}
# compare against ACSI
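# note: acsiRank is assumed to be listed in the same order as the rows of
# airlinePolarity (group_by()/summarise() sorts airlines alphabetically)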
acsiRank <- c(1,6,5,4,8,3,2,9,7)
airlinePolarity <- mutate(airlinePolarity, acsi_rank = acsiRank, rank_diff = abs(rankPol-acsi_rank)) %>%
mutate(airline = reorder(airline,acsi_rank))
rank_plot <- ggplot(airlinePolarity,aes(x=airline, group=1)) +
  geom_line(aes(y=desc(rankPol),color="blue")) +
  geom_line(aes(y=desc(acsi_rank),color="red")) +
  scale_color_discrete(name = "Rank", labels = c("OurRank", "ACSI"))
rank_plot
ggsave(file.path("~", "Github","ieseDataSciTwitterProject", "airlinePolarityRankBing.pdf", fsep = "/"), plot = rank_plot)
mean(airlinePolarity$rank_diff) # 0.88 mean rank diff
sd(airlinePolarity$rank_diff) # 1.16 standard deviation
# write to csv
write_csv(airlinePolarity, file.path("~", "Github","ieseDataSciTwitterProject", "airlinePolarityBing.csv", fsep = "/"))
|
/airlineTweetSentimentAnalysisBing.r
|
no_license
|
atofarides/ieseDataSciTwitterProject
|
R
| false | false | 5,130 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_edge_link.R
\name{geom_edge_link}
\alias{geom_edge_link}
\alias{geom_edge_link2}
\alias{geom_edge_link0}
\title{Draw edges as straight lines between nodes}
\usage{
geom_edge_link(mapping = NULL, data = get_edges("short"),
position = "identity", arrow = NULL, n = 100, lineend = "butt",
linejoin = "round", linemitre = 1, label_colour = "black",
label_alpha = 1, label_parse = FALSE, check_overlap = FALSE,
angle_calc = "rot", force_flip = TRUE, label_dodge = NULL,
label_push = NULL, show.legend = NA, ...)
geom_edge_link2(mapping = NULL, data = get_edges("long"),
position = "identity", arrow = NULL, n = 100, lineend = "butt",
linejoin = "round", linemitre = 1, label_colour = "black",
label_alpha = 1, label_parse = FALSE, check_overlap = FALSE,
angle_calc = "rot", force_flip = TRUE, label_dodge = NULL,
label_push = NULL, show.legend = NA, ...)
geom_edge_link0(mapping = NULL, data = get_edges(), position = "identity",
arrow = NULL, lineend = "butt", show.legend = NA, ...)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{ggplot2::aes()}}
or \code{\link[ggplot2:aes_]{ggplot2::aes_()}}. By default x, y, xend, yend, group and
circular are mapped to x, y, xend, yend, edge.id and circular in the edge
data.}
\item{data}{The return of a call to \code{get_edges()} or a data.frame
giving edges in the correct format (see details for guidance on the format).
See \code{\link[=get_edges]{get_edges()}} for more details on edge extraction.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{arrow}{Arrow specification, as created by \code{\link[grid:arrow]{grid::arrow()}}}
\item{n}{The number of points to create along the path.}
\item{lineend}{Line end style (round, butt, square)}
\item{linejoin}{Line join style (round, mitre, bevel)}
\item{linemitre}{Line mitre limit (number greater than 1)}
\item{label_colour}{The colour of the edge label. If \code{NA} it will use
the colour of the edge.}
\item{label_alpha}{The opacity of the edge label. If \code{NA} it will use
the opacity of the edge.}
\item{label_parse}{If \code{TRUE}, the labels will be parsed into expressions
and displayed as described in \code{\link[grDevices:plotmath]{grDevices::plotmath()}}.}
\item{check_overlap}{If \code{TRUE}, text that overlaps previous text in the
same layer will not be plotted.}
\item{angle_calc}{Either 'none', 'along', or 'across'. If 'none' the label will
use the angle aesthetic of the geom. If 'along' The label will be written
along the edge direction. If 'across' the label will be written across the
edge direction.}
\item{force_flip}{Logical. If \code{angle_calc} is either 'along' or 'across'
should the label be flipped if it is on its head. Defaults to \code{TRUE}.}
\item{label_dodge}{A \code{\link[grid:unit]{grid::unit()}} giving a fixed vertical shift
to add to the label in case of \code{angle_calc} is either 'along' or 'across'}
\item{label_push}{A \code{\link[grid:unit]{grid::unit()}} giving a fixed horizontal shift
to add to the label in case of \code{angle_calc} is either 'along' or 'across'}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{...}{other arguments passed on to \code{\link[=layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{color = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
}
\description{
This geom draws edges in the simplest way - as straight lines between the
start and end nodes. Not much more to say about that...
}
\section{Edge variants}{
Many geom_edge_* layers come in 3 flavors depending on the level of control
needed over the drawing. The default (no numeric postfix) generates a number
of points (\code{n}) along the edge and draws it as a path. Each point along
the line has a numeric value associated with it giving the position along the
path, and it is therefore possible to show the direction of the edge by
mapping to this e.g. \code{colour = ..index..}. The version postfixed with a
"2" uses the "long" edge format (see \code{\link[=get_edges]{get_edges()}}) and makes it
possible to interpolate node parameter between the start and end node along
the edge. It is considerably less performant so should only be used if this
is needed. The version postfixed with a "0" draws the edge in the most
performant way, often directly using an appropriate grob from the grid
package, but does not allow for gradients along the edge.
Often it is beneficial to stop the drawing of the edge before it reaches the
node, for instance in cases where an arrow should be drawn and the arrowhead
shouldn't lie on top of or below the node point. geom_edge_* and geom_edge_*2
supports this through the start_cap and end_cap aesthetics that takes a
\code{\link[=geometry]{geometry()}} specification and dynamically caps the termini of the
edges based on the given specifications. This means that if
\code{end_cap = circle(1, 'cm')} the edges will end at a distance of 1cm even
during resizing of the plot window.
All \code{geom_edge_*} and \code{geom_edge_*2} have the ability to draw a
label along the edge. The reason this is not a separate geom is that in order
for the label to know the location of the edge it needs to know the edge type
etc. Labels are drawn by providing a label aesthetic. The label_pos can be
used to specify where along the edge it should be drawn by supplying a number
between 0 and 1. The label_size aesthetic can be used to control the size of
the label. Often it is needed to have the label written along the direction
of the edge, but since the actual angle is dependent on the plot dimensions
this cannot be calculated beforehand. Using the angle_calc argument allows
you to specify whether to use the supplied angle aesthetic or whether to draw
the label along or across the edge.
}
\section{Edge aesthetic name expansion}{
In order to avoid excessive typing edge aesthetic names are
automatically expanded. Because of this it is not necessary to write
\code{edge_colour} within the \code{aes()} call as \code{colour} will
automatically be renamed appropriately.
}
\section{Aesthetics}{
\code{geom_edge_link} and \code{geom_edge_link0} understand the following
aesthetics. Bold aesthetics are automatically set, but can be overridden.
\itemize{
\item \strong{x}
\item \strong{y}
\item \strong{xend}
\item \strong{yend}
\item edge_colour
\item edge_width
\item edge_linetype
\item edge_alpha
\item filter
}
\code{geom_edge_link2} understands the following aesthetics. Bold aesthetics are
automatically set, but can be overridden.
\itemize{
\item \strong{x}
\item \strong{y}
\item \strong{group}
\item edge_colour
\item edge_width
\item edge_linetype
\item edge_alpha
\item filter
}
\code{geom_edge_link} and \code{geom_edge_link2} furthermore take the following
aesthetics.
\itemize{
\item start_cap
\item end_cap
\item label
\item label_pos
\item label_size
\item angle
\item hjust
\item vjust
\item family
\item fontface
\item lineheight
}
}
\section{Computed variables}{
\describe{
\item{index}{The position along the path (not computed for the *0 version)}
}
}
\examples{
require(tidygraph)
gr <- create_notable('bull') \%>\%
mutate(class = sample(letters[1:3], n(), replace = TRUE)) \%>\%
activate(edges) \%>\%
mutate(class = sample(letters[1:3], n(), replace = TRUE))
ggraph(gr, 'igraph', algorithm = 'nicely') +
geom_edge_link(aes(alpha = ..index..))
ggraph(gr, 'igraph', algorithm = 'nicely') +
geom_edge_link2(aes(colour = node.class))
ggraph(gr, 'igraph', algorithm = 'nicely') +
geom_edge_link0(aes(colour = class))
}
\seealso{
Other geom_edge_*: \code{\link{geom_edge_arc}},
\code{\link{geom_edge_density}},
\code{\link{geom_edge_diagonal}},
\code{\link{geom_edge_elbow}},
\code{\link{geom_edge_fan}},
\code{\link{geom_edge_hive}},
\code{\link{geom_edge_loop}},
\code{\link{geom_edge_point}}
}
\author{
Thomas Lin Pedersen
}
\concept{geom_edge_*}
|
/man/geom_edge_link.Rd
|
permissive
|
malcolmbarrett/ggraph
|
R
| false | true | 8,334 |
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\docType{methods}
\name{generateCircos}
\alias{generateCircos}
\title{generate circos input file}
\arguments{
\item{mat}{matrix}
\item{sorted.origins}{labels}
\item{SCTGs_}{SCTG numbers}
\item{origins}{labels}
\item{VWCTable}{matrix}
}
\description{
generate circos input file
}
|
/man/generateCircos.Rd
|
no_license
|
XiaowenLin/uscropnetwork
|
R
| false | false | 337 |
rd
|
#' Save MLflow Model Flavor
#'
#' Saves model in MLflow's flavor, to be used by package authors
#' to extend the supported MLflow models.
#'
#' @param x The serving function or model that will perform a prediction.
#' @param path Destination path where this MLflow compatible model
#' will be saved.
#'
#' @return This function must return a list of flavors that conform to
#' the MLmodel specification.
#'
#' @export
mlflow_save_flavor <- function(x, path = "model") {
UseMethod("mlflow_save_flavor")
}
#' Load MLflow Model Flavor
#'
#' Loads an MLflow model flavor, to be used by package authors
#' to extend the supported MLflow models.
#'
#' @param flavor_path The path to the MLflow model wrapped in the correct
#' class.
#'
#' @export
mlflow_load_flavor <- function(flavor_path) {
UseMethod("mlflow_load_flavor")
}
#' Predict over MLflow Model Flavor
#'
#' Performs prediction over a model loaded using
#' \code{mlflow_load_model()}, to be used by package authors
#' to extend the supported MLflow models.
#'
#' @param model The loaded MLflow model flavor.
#' @param data A data frame to perform scoring.
#'
#' @export
mlflow_predict_flavor <- function(model, data) {
UseMethod("mlflow_predict_flavor")
}
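
# Illustrative sketch (not part of this file's API): how a package author might
# implement these generics for a hypothetical "mymodel" flavor. The class name,
# file names, and the use of saveRDS()/readRDS()/predict() are assumptions made
# for the example only.
mlflow_save_flavor.mymodel <- function(x, path = "model") {
  # persist the model object and describe the flavor for the MLmodel file
  saveRDS(x, file.path(path, "mymodel.rds"))
  list(mymodel = list(version = "0.1.0", data = "mymodel.rds"))
}

mlflow_load_flavor.mymodel <- function(flavor_path) {
  # read the serialized model back from the flavor directory
  readRDS(file.path(flavor_path, "mymodel.rds"))
}

mlflow_predict_flavor.mymodel <- function(model, data) {
  # delegate scoring to the model's own predict() method
  predict(model, data)
}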
|
/mlflow/R/mlflow/R/flavor.R
|
permissive
|
kevinykuo/mlflow
|
R
| false | false | 1,225 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TwoSampleTest.HD-package.r
\docType{package}
\name{TwoSampleTest.HD-package}
\alias{TwoSampleTest.HD-package}
\alias{_PACKAGE}
\alias{TwoSampleTest.HDpackage}
\title{A two-sample test for the equality of distributions for high-dimensional data}
\value{
\itemize{
\item{‘TwoSampleTest.HD’}
}
}
\description{
This package implements four different tests proposed in Cousido-Rocha et al. (2018). These methods
test the (global) null hypothesis of equality of the univariate marginals of the p-variate distributions in
the two populations. In other words, the null hypothesis is an intersection of the p null hypotheses
corresponding to p different two-sample problems. These methods are particularly well suited to the low
sample size, high dimensional setting (n << p). The sample size can be as small as 2. The test accounts
for the possibility that the p variables in each data set can be weakly dependent. Three of the methods
arise from different approaches to estimate the variance of the same statistic. This statistic averages p
individual statistics based on comparing the empirical characteristic functions computed from the two
samples. The last method is an alternative global test whose statistic averages the p-values derived from
applying permutation tests to the individual statistics mentioned above. When the global null hypothesis
is rejected such permutation p-values can also be used to identify which variables contribute to this
significance. The standardized version of each test statistic and its p-value are computed among other
things.
}
\details{
Package ‘TwoSampleTest.HD’
Documentation for package ‘TwoSampleTest.HD’ version 1.2
\itemize{
\item{Package: ‘TwoSampleTest.HD’}
\item{Version: 1.2}
\item{Maintainer: Marta Cousido Rocha \email{martacousido@uvigo.es}}
\item{License: GPL-2}
}
}
\section{Acknowledgements}{
This work has received financial support of the Call 2015 Grants for PhD contracts
for training of doctors of the Ministry of Economy and Competitiveness,
co-financed by the European Social Fund (Ref. BES-2015-074958).
The authors acknowledge support from MTM2014-55966-P project,
Ministry of Economy and Competitiveness, and MTM2017-89422-P project,
Ministry of Economy, Industry and Competitiveness, State Research Agency,
and Regional Development Fund, UE. The authors also acknowledge the financial
support provided by the SiDOR research group through the grant Competitive Reference Group,
2016-2019 (ED431C 2016/040), funded by the ``Consellería de Cultura,
Educación e Ordenación Universitaria. Xunta de Galicia''.
}
\references{
Cousido-Rocha, M., de Uña-Álvarez J., and Hart, J. (2018). A two-sample test for the equality of distributions for high-dimensional data. Preprint.
}
\author{
\itemize{
\item{Cousido Rocha, Marta.}
\item{Soage González, José Carlos.}
\item{de Uña-Álvarez, Jacobo.}
\item{D. Hart, Jeffrey.}
}
}
|
/man/TwoSampleTest.HD-package.Rd
|
no_license
|
cran/TwoSampleTest.HD
|
R
| false | true | 2,985 |
rd
|
get_queries_parameter_documentation <- function() {
return(c(
"a list of queries, each provided as a list of parameters. The queries are",
"executed by the [geocode] function in the order provided.",
"(ex. `list(list(method = 'osm'), list(method = 'census'), ...)`)"
))
}
get_global_params_parameter_documentation <- function() {
return(c(
"a list of parameters to be used for all queries",
"(ex. `list(address = 'address', full_results = TRUE)`)"
))
}
#' Combine multiple geocoding queries
#'
#' @description Passes address inputs in character vector form to the
#' [geocode_combine] function for geocoding.
#'
#' Note that address inputs must be specified for queries either with the `queries` parameter (for each query)
#' or the `global_params` parameter (for all queries). For example `global_params = list(address = 'address')`
#' passes addresses provided in the `address` parameter to all queries.
#'
#' @param queries `r get_queries_parameter_documentation()`
#' @param global_params `r get_global_params_parameter_documentation()`
#' @inheritParams geo
#' @param ... arguments passed to the [geocode_combine] function
#' @inherit geo return
#' @examples
#' \donttest{
#'
#' options(tidygeocoder.progress_bar = FALSE)
#' example_addresses <- c("100 Main St New York, NY", "Paris", "Not a Real Address")
#'
#' geo_combine(
#' queries = list(
#' list(method = 'census'),
#' list(method = 'osm')
#' ),
#' address = example_addresses,
#' global_params = list(address = 'address')
#' )
#'
#' geo_combine(
#' queries = list(
#' list(method = 'arcgis'),
#' list(method = 'census', mode = 'single'),
#' list(method = 'census', mode = 'batch')
#' ),
#' global_params = list(address = 'address'),
#' address = example_addresses,
#' cascade = FALSE,
#' return_list = TRUE
#' )
#'
#' geo_combine(
#' queries = list(
#' list(method = 'arcgis', address = 'city'),
#' list(method = 'osm', city = 'city', country = 'country')
#' ),
#' city = c('Tokyo', 'New York'),
#' country = c('Japan', 'United States'),
#' cascade = FALSE
#' )
#' }
#' @seealso [geocode_combine] [geo] [geocode]
#' @export
geo_combine <- function(queries, global_params = list(), address = NULL,
street = NULL, city = NULL, county = NULL, state = NULL, postalcode = NULL,
country = NULL, lat = lat, long = long, ...) {
# NSE - converts lat and long parameters to character values
lat <- rm_quote(deparse(substitute(lat)))
long <- rm_quote(deparse(substitute(long)))
# Check address arguments -------------------------------------
address_pack <- package_addresses(address, street, city, county,
state, postalcode, country)
# prepare data for geocode_combine() function -----------------
input_df <- tibble::tibble(
address = address,
street = street, city = city, county = county, state = state, postalcode = postalcode, country = country
)
# pass arguments to geocode_combine() as a list. lat and long arguments did not work when passed directly
arguments_to_pass <- c(list(
.tbl = input_df, queries = queries, global_params = global_params, lat = lat, long = long),
list(...)
)
return(
do.call(geocode_combine, arguments_to_pass)
)
}
#' Combine multiple geocoding queries
#'
#' @description Executes multiple geocoding queries on a dataframe input and combines
#' the results. To use a character vector input instead, see the [geo_combine] function.
#' Queries are executed by the [geocode] function. See example usage
#' in `vignette("tidygeocoder")`.
#'
#' Query results are by default labelled to show which query produced each result. Labels are either
#' placed in a `query` column (if `return_list = FALSE`) or used as the names of the returned list
#' (if `return_list = TRUE`). By default the `method` parameter value of each query is used as a query label.
#' If the same `method` is used in multiple queries then a number is added according
#' to the order of the queries (ie. `osm1`, `osm2`, ...). To provide your own custom query labels
#' use the `query_names` parameter.
#'
#' @param queries `r get_queries_parameter_documentation()`
#' @param global_params `r get_global_params_parameter_documentation()`
#' @param return_list if TRUE then results from each service will be returned as separate
#' dataframes. If FALSE (default) then all results will be combined into a single dataframe.
#' @param cascade if TRUE (default) then only addresses that are not found by a geocoding
#' service will be attempted by subsequent queries. If FALSE then all queries will
#' attempt to geocode all addresses.
#' @param query_names optional vector with one label for each query provided
#' (ex. `c('geocodio batch', 'geocodio single')`).
#' @inheritParams geocode
#' @inherit geo return
#' @examples
#' \donttest{
#'
#' library(dplyr, warn.conflicts = FALSE)
#'
#' sample_addresses %>%
#' geocode_combine(
#' queries = list(list(method = 'census'), list(method = 'osm')),
#' global_params = list(address = 'addr'), cascade = TRUE)
#'
#' more_addresses <- tibble::tribble(
#' ~street_address, ~city, ~state, ~zip_cd,
#' "624 W DAVIS ST #1D", "BURLINGTON", "NC", 27215,
#' "201 E CENTER ST #268", "MEBANE", "NC", 27302,
#' "100 Wall Street", "New York", "NY", 10005,
#' "Bucharest", NA, NA, NA
#' )
#'
#' more_addresses %>%
#' geocode_combine(
#' queries = list(
#' list(method = 'census', mode = 'batch'),
#' list(method = 'census', mode = 'single'),
#' list(method = 'osm')
#' ),
#' global_params = list(street = 'street_address',
#' city = 'city', state = 'state', postalcode = 'zip_cd'),
#' query_names = c('census batch', 'census single', 'osm')
#' )
#'
#' more_addresses %>%
#' geocode_combine(
#' queries = list(
#' list(method = 'census', mode = 'batch', street = 'street_address',
#' city = 'city', state = 'state', postalcode = 'zip_cd'),
#' list(method = 'arcgis', address = 'street_address')
#' ),
#' cascade = FALSE,
#' return_list = TRUE
#' )
#' }
#' @seealso [geo_combine] [geo] [geocode]
#' @export
geocode_combine <-
function(
.tbl,
queries,
global_params = list(),
return_list = FALSE,
cascade = TRUE,
query_names = NULL,
lat = 'lat',
long = 'long'
) {
# NSE - converts lat and long parameters to character values
lat <- rm_quote(deparse(substitute(lat)))
long <- rm_quote(deparse(substitute(long)))
stopifnot(
is.data.frame(.tbl),
is.list(queries),
# either queries is an empty list or contains other lists
length(queries) == 0 || all(sapply(queries, is.list)),
is.list(global_params), is.logical(return_list), is.logical(cascade),
is.null(query_names) || is.character(query_names)
)
# combine all parameter lists into a list of lists
all_param_lists <- queries
all_param_lists[[length(queries) + 1]] <- global_params
# get all parameter names
all_param_names <- unlist(sapply(all_param_lists, names))
# which columns are used to store addresses
used_address_colnames <- unique(unlist(sapply(all_param_lists,
function(x) unlist(x[names(x) %in% pkg.globals$address_arg_names], use.names = FALSE))))
if (cascade == TRUE) {
for (query in all_param_lists) {
if (!is.null(query[["return_input"]]) && query[["return_input"]] == FALSE) {
stop('return_input must be set to TRUE for geocode_combine()', call. = FALSE)
}
}
}
# add global arguments to each query
queries_prepped <- lapply(queries, function(x) {
c(
list(.tbl = .tbl, lat = lat, long = long),
global_params,
x)})
# Set default query names and check user input query names
if (is.null(query_names)) {
# default query names to the method arguments and fill in 'osm' if the method argument isn't provided
query_names <- unlist(lapply(queries_prepped, function(q) if (!is.null(q[['method']])) q[['method']] else 'osm'))
# number duplicate query names if necessary (to prevent duplicates)
# ie. 'osm1', 'osm2', etc.
for (name in unique(query_names)) {
# if the given name occurs more than once in query_names then iterate through and add numbers
if ((sum(query_names == name)) > 1) {
i <- 1
dup_num <- 1
for (i in 1:length(query_names)) {
if (query_names[[i]] == name) {
query_names[[i]] <- paste0(query_names[[i]], as.character(dup_num), collapse = '')
dup_num <- dup_num + 1
}
}
}
}
} else {
if (length(query_names) != length(queries)) {
stop('query_names parameter must contain one name per query provided. See ?geocode_combine')
}
if (any(duplicated(query_names)) == TRUE) {
stop('query_names values should be unique. See ?geocode_combine')
}
if (any(trimws(query_names) == '')) {
stop('query_names values should not be blank. See ?geocode_combine')
}
}
# Sanity check all queries (ie. make sure no queries have a mistyped argument, etc.)
for (query in queries_prepped) {
query[['no_query']] <- TRUE
tryCatch(
expr = {
suppressMessages(do.call(geocode, query))
},
error = function(e) {
message('The following error was produced:\n')
message(e)
message('\n\nBy these query parameters:\n')
for (name in names(query)) {
# don't display .tbl parameter for now
if (name != '.tbl') message(paste0(name, ' = ', query[[name]]))
}
},
finally = {
message('')
})}
# iterate through the queries (list of lists) and execute each query
# aggregate results in list object
all_results <- list()
not_found <- tibble::tibble()
for (i in 1:length(queries_prepped)) {
query <- queries_prepped[[i]]
if (cascade == TRUE) {
# adjust the input dataframe based on which addresses were not found in prior query
if (nrow(not_found) != 0) {
query[['.tbl']] <- not_found
} else if (i != 1) {
break # break loop - all addresses are geocoded
}
}
# use geocode() to execute the query
result <- do.call(geocode, query)
na_indices <- is.na(result[[lat]]) | is.na(result[[long]])
# which addresses were not found
not_found <- result[na_indices, intersect(colnames(result), colnames(.tbl))]
found <- result[!is.na(result[[lat]]) & !is.na(result[[long]]), ]
# aggregate results. if using cascade then separate the not-found addresses
all_results <- if (cascade == TRUE) c(all_results, list(found)) else c(all_results, list(result))
}
names(all_results) <- query_names[1:length(all_results)]
  # if cascade is used and some addresses were not found by any method,
  # separate those addresses into their own list item
if (cascade == TRUE) {
if(nrow(not_found) != 0) all_results[[length(all_results) + 1]] <- not_found
}
# bind all results into one dataframe if return_list == FALSE
# otherwise return list
if (return_list == TRUE) {
return(all_results)
} else {
# label the dataframes contained in the all_results list with a 'query' column
all_results_labeled <- mapply(function(x, y) dplyr::bind_cols(x, tibble::tibble(query = y)),
all_results, names(all_results), SIMPLIFY = FALSE)
# put all results into a single dataframe
bound_data <- dplyr::bind_rows(all_results_labeled)
# remove .id column if it is present
bound_data <- bound_data[!names(bound_data) %in% '.id']
# reorder the dataset to match the order it was received in before returning it
proper_order <- unique(.tbl[used_address_colnames])
proper_order[['.id']] <- 1:nrow(proper_order)
# join to get our .id column so we can sort the output dataset
bound_data_joined <- dplyr::left_join(bound_data, proper_order, by = used_address_colnames)
# sort the dataset
bound_data_joined <- bound_data_joined[order(bound_data_joined[['.id']]), ]
# remove .id column before returning the dataset
return(bound_data_joined[!names(bound_data_joined) %in% '.id'])
}
}
|
/R/geocode_combine.R
|
permissive
|
cran/tidygeocoder
|
R
| false | false | 12,539 |
r
|
get_queries_parameter_documentation <- function() {
return(c(
"a list of queries, each provided as a list of parameters. The queries are",
"executed by the [geocode] function in the order provided.",
"(ex. `list(list(method = 'osm'), list(method = 'census'), ...)`)"
))
}
get_global_params_parameter_documentation <- function() {
return(c(
"a list of parameters to be used for all queries",
"(ex. `list(address = 'address', full_results = TRUE)`)"
))
}
#' Combine multiple geocoding queries
#'
#' @description Passes address inputs in character vector form to the
#' [geocode_combine] function for geocoding.
#'
#' Note that address inputs must be specified for queries either with the `queries` parameter (for each query)
#' or the `global_params` parameter (for all queries). For example `global_params = list(address = 'address')`
#' passes addresses provided in the `address` parameter to all queries.
#'
#' @param queries `r get_queries_parameter_documentation()`
#' @param global_params `r get_global_params_parameter_documentation()`
#' @inheritParams geo
#' @param ... arguments passed to the [geocode_combine] function
#' @inherit geo return
#' @examples
#' \donttest{
#'
#' options(tidygeocoder.progress_bar = FALSE)
#' example_addresses <- c("100 Main St New York, NY", "Paris", "Not a Real Address")
#'
#' geo_combine(
#' queries = list(
#' list(method = 'census'),
#' list(method = 'osm')
#' ),
#' address = example_addresses,
#' global_params = list(address = 'address')
#' )
#'
#' geo_combine(
#' queries = list(
#' list(method = 'arcgis'),
#' list(method = 'census', mode = 'single'),
#' list(method = 'census', mode = 'batch')
#' ),
#' global_params = list(address = 'address'),
#' address = example_addresses,
#' cascade = FALSE,
#' return_list = TRUE
#' )
#'
#' geo_combine(
#' queries = list(
#' list(method = 'arcgis', address = 'city'),
#' list(method = 'osm', city = 'city', country = 'country')
#' ),
#' city = c('Tokyo', 'New York'),
#' country = c('Japan', 'United States'),
#' cascade = FALSE
#' )
#' }
#' @seealso [geocode_combine] [geo] [geocode]
#' @export
geo_combine <- function(queries, global_params = list(), address = NULL,
street = NULL, city = NULL, county = NULL, state = NULL, postalcode = NULL,
country = NULL, lat = lat, long = long, ...) {
# NSE - converts lat and long parameters to character values
lat <- rm_quote(deparse(substitute(lat)))
long <- rm_quote(deparse(substitute(long)))
# Check address arguments -------------------------------------
address_pack <- package_addresses(address, street, city, county,
state, postalcode, country)
# prepare data for geocode_combine() function -----------------
input_df <- tibble::tibble(
address = address,
street = street, city = city, county = county, state = state, postalcode = postalcode, country = country
)
# pass arguments to geocode_combine() as a list. lat and long arguments did not work when passed directly
arguments_to_pass <- c(list(
.tbl = input_df, queries = queries, global_params = global_params, lat = lat, long = long),
list(...)
)
return(
do.call(geocode_combine, arguments_to_pass)
)
}
#' Combine multiple geocoding queries
#'
#' @description Executes multiple geocoding queries on a dataframe input and combines
#' the results. To use a character vector input instead, see the [geo_combine] function.
#' Queries are executed by the [geocode] function. See example usage
#' in `vignette("tidygeocoder")`.
#'
#' Query results are by default labelled to show which query produced each result. Labels are either
#' placed in a `query` column (if `return_list = FALSE`) or used as the names of the returned list
#' (if `return_list = TRUE`). By default the `method` parameter value of each query is used as a query label.
#' If the same `method` is used in multiple queries then a number is added according
#' to the order of the queries (ie. `osm1`, `osm2`, ...). To provide your own custom query labels
#' use the `query_names` parameter.
#'
#' @param queries `r get_queries_parameter_documentation()`
#' @param global_params `r get_global_params_parameter_documentation()`
#' @param return_list if TRUE then results from each service will be returned as separate
#' dataframes. If FALSE (default) then all results will be combined into a single dataframe.
#' @param cascade if TRUE (default) then only addresses that are not found by a geocoding
#' service will be attempted by subsequent queries. If FALSE then all queries will
#' attempt to geocode all addresses.
#' @param query_names optional vector with one label for each query provided
#' (ex. `c('geocodio batch', 'geocodio single')`).
#' @inheritParams geocode
#' @inherit geo return
#' @examples
#' \donttest{
#'
#' library(dplyr, warn.conflicts = FALSE)
#'
#' sample_addresses %>%
#' geocode_combine(
#' queries = list(list(method = 'census'), list(method = 'osm')),
#' global_params = list(address = 'addr'), cascade = TRUE)
#'
#' more_addresses <- tibble::tribble(
#' ~street_address, ~city, ~state, ~zip_cd,
#' "624 W DAVIS ST #1D", "BURLINGTON", "NC", 27215,
#' "201 E CENTER ST #268", "MEBANE", "NC", 27302,
#' "100 Wall Street", "New York", "NY", 10005,
#' "Bucharest", NA, NA, NA
#' )
#'
#' more_addresses %>%
#' geocode_combine(
#' queries = list(
#' list(method = 'census', mode = 'batch'),
#' list(method = 'census', mode = 'single'),
#' list(method = 'osm')
#' ),
#' global_params = list(street = 'street_address',
#' city = 'city', state = 'state', postalcode = 'zip_cd'),
#' query_names = c('census batch', 'census single', 'osm')
#' )
#'
#' more_addresses %>%
#' geocode_combine(
#' queries = list(
#' list(method = 'census', mode = 'batch', street = 'street_address',
#' city = 'city', state = 'state', postalcode = 'zip_cd'),
#' list(method = 'arcgis', address = 'street_address')
#' ),
#' cascade = FALSE,
#' return_list = TRUE
#' )
#' }
#' @seealso [geo_combine] [geo] [geocode]
#' @export
geocode_combine <-
function(
.tbl,
queries,
global_params = list(),
return_list = FALSE,
cascade = TRUE,
query_names = NULL,
lat = 'lat',
long = 'long'
) {
# NSE - converts lat and long parameters to character values
lat <- rm_quote(deparse(substitute(lat)))
long <- rm_quote(deparse(substitute(long)))
stopifnot(
is.data.frame(.tbl),
is.list(queries),
# either queries is an empty list or contains other lists
length(queries) == 0 || all(sapply(queries, is.list)),
is.list(global_params), is.logical(return_list), is.logical(cascade),
is.null(query_names) || is.character(query_names)
)
# combine all parameter lists into a list of lists
all_param_lists <- queries
all_param_lists[[length(queries) + 1]] <- global_params
# get all parameter names
all_param_names <- unlist(sapply(all_param_lists, names))
# which columns are used to store addresses
used_address_colnames <- unique(unlist(sapply(all_param_lists,
function(x) unlist(x[names(x) %in% pkg.globals$address_arg_names], use.names = FALSE))))
if (cascade == TRUE) {
for (query in all_param_lists) {
if (!is.null(query[["return_input"]]) && query[["return_input"]] == FALSE) {
stop('return_input must be set to TRUE for geocode_combine()', call. = FALSE)
}
}
}
# add global arguments to each query
queries_prepped <- lapply(queries, function(x) {
c(
list(.tbl = .tbl, lat = lat, long = long),
global_params,
x)})
# Set default query names and check user input query names
if (is.null(query_names)) {
# default query names to the method arguments and fill in 'osm' if the method argument isn't provided
query_names <- unlist(lapply(queries_prepped, function(q) if (!is.null(q[['method']])) q[['method']] else 'osm'))
# number duplicate query names if necessary (to prevent duplicates)
# ie. 'osm1', 'osm2', etc.
for (name in unique(query_names)) {
# if the given name occurs more than once in query_names then iterate through and add numbers
if ((sum(query_names == name)) > 1) {
i <- 1
dup_num <- 1
for (i in 1:length(query_names)) {
if (query_names[[i]] == name) {
query_names[[i]] <- paste0(query_names[[i]], as.character(dup_num), collapse = '')
dup_num <- dup_num + 1
}
}
}
}
} else {
if (length(query_names) != length(queries)) {
stop('query_names parameter must contain one name per query provided. See ?geocode_combine')
}
if (any(duplicated(query_names)) == TRUE) {
stop('query_names values should be unique. See ?geocode_combine')
}
if (any(trimws(query_names) == '')) {
stop('query_names values should not be blank. See ?geocode_combine')
}
}
# Sanity check all queries (ie. make sure no queries have a mistyped argument, etc.)
for (query in queries_prepped) {
query[['no_query']] <- TRUE
tryCatch(
expr = {
suppressMessages(do.call(geocode, query))
},
error = function(e) {
message('The following error was produced:\n')
message(e)
message('\n\nBy these query parameters:\n')
for (name in names(query)) {
# don't display .tbl parameter for now
if (name != '.tbl') message(paste0(name, ' = ', query[[name]]))
}
},
finally = {
message('')
})}
# iterate through the queries (list of lists) and execute each query
# aggregate results in list object
all_results <- list()
not_found <- tibble::tibble()
for (i in 1:length(queries_prepped)) {
query <- queries_prepped[[i]]
if (cascade == TRUE) {
# adjust the input dataframe based on which addresses were not found in prior query
if (nrow(not_found) != 0) {
query[['.tbl']] <- not_found
} else if (i != 1) {
break # break loop - all addresses are geocoded
}
}
# use geocode() to execute the query
result <- do.call(geocode, query)
na_indices <- is.na(result[[lat]]) | is.na(result[[long]])
# which addresses were not found
not_found <- result[na_indices, intersect(colnames(result), colnames(.tbl))]
found <- result[!is.na(result[[lat]]) & !is.na(result[[long]]), ]
# aggregate results. if using cascade then separate the not-found addresses
all_results <- if (cascade == TRUE) c(all_results, list(found)) else c(all_results, list(result))
}
names(all_results) <- query_names[1:length(all_results)]
# if there are addresses that no method found then in cascade then
# separate them into their own list item
if (cascade == TRUE) {
if(nrow(not_found) != 0) all_results[[length(all_results) + 1]] <- not_found
}
# bind all results into one dataframe if return_list == FALSE
# otherwise return list
if (return_list == TRUE) {
return(all_results)
} else {
# label the dataframes contained in the all_results list with a 'query' column
all_results_labeled <- mapply(function(x, y) dplyr::bind_cols(x, tibble::tibble(query = y)),
all_results, names(all_results), SIMPLIFY = FALSE)
# put all results into a single dataframe
bound_data <- dplyr::bind_rows(all_results_labeled)
# remove .id column if it is present
bound_data <- bound_data[!names(bound_data) %in% '.id']
# reorder the dataset to match the order it was received in before returning it
proper_order <- unique(.tbl[used_address_colnames])
proper_order[['.id']] <- 1:nrow(proper_order)
# join to get our .id column so we can sort the output dataset
bound_data_joined <- dplyr::left_join(bound_data, proper_order, by = used_address_colnames)
# sort the dataset
bound_data_joined <- bound_data_joined[order(bound_data_joined[['.id']]), ]
# remove .id column before returning the dataset
return(bound_data_joined[!names(bound_data_joined) %in% '.id'])
}
}
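# -----------------------------------------------------------------------------
# Illustrative usage sketch (kept as comments): assumes the 'census' and 'osm'
# geocoding services are reachable and that the input tibble has an 'address'
# column; actual results depend on those services.
#
#   library(tibble)
#   addresses <- tibble(address = c(
#     '1600 Pennsylvania Ave NW, Washington, DC',
#     '11 Wall St, New York, NY'
#   ))
#
#   # Try the US Census geocoder first; with cascade = TRUE, any rows it cannot
#   # geocode are passed on to OSM (Nominatim).
#   geocode_combine(
#     addresses,
#     queries = list(list(method = 'census'), list(method = 'osm')),
#     global_params = list(address = 'address'),
#     cascade = TRUE
#   )
# -----------------------------------------------------------------------------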
|
# function test cases
stopifnot(!four.in.a.row(player="X", v=c(rep("X", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="O", v=c(rep("X", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="X", v=c(rep("X", 3), rep("O", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="O", v=c(rep("X", 3), rep("O", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="X", v=c("O", rep("X", 3), "O"), debug=TRUE))
stopifnot(!four.in.a.row(player="O", v=c("O", rep("X", 3), "O"), debug=TRUE))
stopifnot(four.in.a.row(player="X", v=c(rep("X", 4)), debug=TRUE))
stopifnot(four.in.a.row(player="O", v=c(rep("O", 4)), debug=TRUE))
stopifnot(four.in.a.row(player="X", v=c("O", rep("X", 4), "O"), debug=TRUE))
stopifnot(four.in.a.row(player="O", v=c("X", rep("O", 4), "X"), debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot(!won(player="X", board=board, r=1, c=1, debug=TRUE))
stopifnot(!won(player="O", board=board, r=1, c=1, debug=TRUE))
stopifnot(!won(player="X", board=board, r=2, c=3, debug=TRUE))
stopifnot(!won(player="O", board=board, r=2, c=3, debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","X",
"X","X","X","X","O","E","X"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot( won(player="X", board=board, r=6, c=1, debug=TRUE))
stopifnot(!won(player="O", board=board, r=6, c=1, debug=TRUE))
stopifnot(!won(player="X", board=board, r=1, c=7, debug=TRUE))
stopifnot( won(player="O", board=board, r=1, c=7, debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","E",
"E","E","X","O","E","E","E",
"E","E","O","X","O","E","E",
"E","E","X","X","X","O","E",
"E","E","O","X","O","X","O",
"E","E","X","O","X","X","O"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot( won(player="X", board=board, r=2, c=3, debug=TRUE))
stopifnot(!won(player="O", board=board, r=2, c=3, debug=TRUE))
stopifnot(!won(player="X", board=board, r=2, c=4, debug=TRUE))
stopifnot( won(player="O", board=board, r=2, c=4, debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","E",
"E","E","E","X","O","E","E",
"E","E","X","O","X","E","E",
"E","X","O","X","O","E","E",
"X","O","O","O","X","E","E",
"X","O","X","X","O","E","E"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot( won(player="X", board=board, r=5, c=1, debug=TRUE))
stopifnot(!won(player="O", board=board, r=5, c=1, debug=TRUE))
stopifnot( won(player="X", board=board, r=4, c=2, debug=TRUE))
stopifnot(!won(player="O", board=board, r=4, c=2, debug=TRUE))
stopifnot(!won(player="X", board=board, r=2, c=5, debug=TRUE))
stopifnot( won(player="O", board=board, r=2, c=5, debug=TRUE))
stopifnot(4 == largest.empty.row(board=board, col=1, debug=TRUE))
stopifnot(3 == largest.empty.row(board=board, col=2, debug=TRUE))
stopifnot(2 == largest.empty.row(board=board, col=3, debug=TRUE))
stopifnot(1 == largest.empty.row(board=board, col=4, debug=TRUE))
stopifnot(1 == largest.empty.row(board=board, col=5, debug=TRUE))
stopifnot(6 == largest.empty.row(board=board, col=6, debug=TRUE))
stopifnot(6 == largest.empty.row(board=board, col=7, debug=TRUE))
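# ------------------------------------------------------------------------------
# Illustrative sketch (kept as a comment; not the graded solution): the tests
# above assume four.in.a.row(), won(), and largest.empty.row() are defined
# elsewhere (e.g. in hw3.R). One plausible four.in.a.row() based on run-length
# encoding would be:
#
#   four.in.a.row <- function(player, v, debug = FALSE) {
#     if (debug) cat("four.in.a.row(player=", player, ")\n")
#     runs <- rle(v == player)              # runs of consecutive matches
#     any(runs$lengths[runs$values] >= 4)   # TRUE if any run has length >= 4
#   }
# ------------------------------------------------------------------------------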
|
/Section2/hw3/hw3test.R
|
no_license
|
davidfastovich/STAT327
|
R
| false | false | 3,699 |
r
|
# function test cases
stopifnot(!four.in.a.row(player="X", v=c(rep("X", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="O", v=c(rep("X", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="X", v=c(rep("X", 3), rep("O", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="O", v=c(rep("X", 3), rep("O", 3)), debug=TRUE))
stopifnot(!four.in.a.row(player="X", v=c("O", rep("X", 3), "O"), debug=TRUE))
stopifnot(!four.in.a.row(player="O", v=c("O", rep("X", 3), "O"), debug=TRUE))
stopifnot(four.in.a.row(player="X", v=c(rep("X", 4)), debug=TRUE))
stopifnot(four.in.a.row(player="O", v=c(rep("O", 4)), debug=TRUE))
stopifnot(four.in.a.row(player="X", v=c("O", rep("X", 4), "O"), debug=TRUE))
stopifnot(four.in.a.row(player="O", v=c("X", rep("O", 4), "X"), debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E",
"E","E","E","E","E","E","E"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot(!won(player="X", board=board, r=1, c=1, debug=TRUE))
stopifnot(!won(player="O", board=board, r=1, c=1, debug=TRUE))
stopifnot(!won(player="X", board=board, r=2, c=3, debug=TRUE))
stopifnot(!won(player="O", board=board, r=2, c=3, debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","O",
"E","E","E","E","E","E","X",
"X","X","X","X","O","E","X"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot( won(player="X", board=board, r=6, c=1, debug=TRUE))
stopifnot(!won(player="O", board=board, r=6, c=1, debug=TRUE))
stopifnot(!won(player="X", board=board, r=1, c=7, debug=TRUE))
stopifnot( won(player="O", board=board, r=1, c=7, debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","E",
"E","E","X","O","E","E","E",
"E","E","O","X","O","E","E",
"E","E","X","X","X","O","E",
"E","E","O","X","O","X","O",
"E","E","X","O","X","X","O"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot( won(player="X", board=board, r=2, c=3, debug=TRUE))
stopifnot(!won(player="O", board=board, r=2, c=3, debug=TRUE))
stopifnot(!won(player="X", board=board, r=2, c=4, debug=TRUE))
stopifnot( won(player="O", board=board, r=2, c=4, debug=TRUE))
board = matrix(data=c(
"E","E","E","E","E","E","E",
"E","E","E","X","O","E","E",
"E","E","X","O","X","E","E",
"E","X","O","X","O","E","E",
"X","O","O","O","X","E","E",
"X","O","X","X","O","E","E"
), nrow=6, ncol=7, byrow=TRUE)
stopifnot( won(player="X", board=board, r=5, c=1, debug=TRUE))
stopifnot(!won(player="O", board=board, r=5, c=1, debug=TRUE))
stopifnot( won(player="X", board=board, r=4, c=2, debug=TRUE))
stopifnot(!won(player="O", board=board, r=4, c=2, debug=TRUE))
stopifnot(!won(player="X", board=board, r=2, c=5, debug=TRUE))
stopifnot( won(player="O", board=board, r=2, c=5, debug=TRUE))
stopifnot(4 == largest.empty.row(board=board, col=1, debug=TRUE))
stopifnot(3 == largest.empty.row(board=board, col=2, debug=TRUE))
stopifnot(2 == largest.empty.row(board=board, col=3, debug=TRUE))
stopifnot(1 == largest.empty.row(board=board, col=4, debug=TRUE))
stopifnot(1 == largest.empty.row(board=board, col=5, debug=TRUE))
stopifnot(6 == largest.empty.row(board=board, col=6, debug=TRUE))
stopifnot(6 == largest.empty.row(board=board, col=7, debug=TRUE))
|
library(lattice);
# load data
FILE.british.doctors <- "../../data/table-09-1_British-doctors-smoking-and-coronary-death.csv";
DF.british.doctors <- read.table(
file = FILE.british.doctors,
sep = "\t",
header = TRUE
);
DF.british.doctors;
pdf("example-9-2-1_british-doctors_data.pdf");
print(dotplot(
	x = I(100000 * deaths / person.years) ~ age,
	groups = smoking,
	data = DF.british.doctors
));
dev.off();
DF.british.doctors;
# augment auxiliary columns:
DF.british.doctors[,'agecat'] <- rep(0,nrow(DF.british.doctors));
DF.british.doctors[DF.british.doctors[,'age']=='35_to_44','agecat'] <- 1;
DF.british.doctors[DF.british.doctors[,'age']=='45_to_54','agecat'] <- 2;
DF.british.doctors[DF.british.doctors[,'age']=='55_to_64','agecat'] <- 3;
DF.british.doctors[DF.british.doctors[,'age']=='65_to_74','agecat'] <- 4;
DF.british.doctors[DF.british.doctors[,'age']=='75_to_84','agecat'] <- 5;
DF.british.doctors['agesq'] <- DF.british.doctors[,'agecat']^2;
DF.british.doctors[,'smkage'] <- DF.british.doctors[,'agecat'];
DF.british.doctors[DF.british.doctors[,'smoking']=='non.smoker','smkage'] <- 0;
DF.british.doctors;
### Saturated model (i.e. each data point gets its own fixed-effect parameter) ####################
DF.british.doctors.2 <- DF.british.doctors;
DF.british.doctors.2[,'agecat'] <- factor(DF.british.doctors.2[,'agecat']);
DF.british.doctors.2[,'smoking'] <- factor(DF.british.doctors.2[,'smoking']);
DF.british.doctors.2;
GLM.british.doctors.2 <- glm(
formula = deaths ~ offset(log(person.years)) + agecat * smoking,
data = DF.british.doctors.2,
family = poisson
);
summary(GLM.british.doctors.2);
# expected values (computed from maximum likelihood estimates of model coefficients),
# (observation-wise) likelihood, Pearson residuals, and deviance residuals:
X <- model.matrix(GLM.british.doctors.2);
beta <- coefficients(GLM.british.doctors.2);
X;
beta;
cbind( X %*% beta, DF.british.doctors[,'person.years'] * exp(X %*% beta));
observed.values <- DF.british.doctors[,'deaths'];
expected.values <- fitted.values(GLM.british.doctors.2);
DF.residuals.2 <- cbind(
observed = observed.values,
expected = expected.values,
by.hand = DF.british.doctors[,'person.years'] * exp(X %*% beta),
likelihood = dpois(x=observed.values,lambda=expected.values),
Pearson.residual = (observed.values - expected.values) / sqrt(expected.values),
deviance.residual = rep(0,length(observed.values))
# deviance.residual = sign(observed.values - expected.values) * sqrt( 2 * (observed.values * log(observed.values/expected.values) - (observed.values - expected.values)) )
);
DF.residuals.2;
# maximum likelihood
prod(DF.residuals.2[,'likelihood']);
log.max.likelihood.2 <- log( prod(DF.residuals.2[,'likelihood']) );
log.max.likelihood.2;
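# Cross-check (illustrative): the log-likelihood assembled by hand above should
# agree with R's built-in logLik() applied to the fitted glm object.
log.max.likelihood.2;
as.numeric(logLik(GLM.british.doctors.2));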
### Model of interest (in this case, the model given by Eqn (9.9)) #################################
# GLM model using R built-in functions:
GLM.british.doctors.1 <- glm(
formula = deaths ~ offset(log(person.years)) + agecat + agesq + smoking + smkage,
data = DF.british.doctors,
family = poisson
);
summary(GLM.british.doctors.1);
# expected values (computed from maximum likelihood estimates of model coefficients),
# (observation-wise) likelihood, Pearson residuals, and deviance residuals:
X <- model.matrix(GLM.british.doctors.1);
beta <- coefficients(GLM.british.doctors.1);
X;
beta;
cbind( X %*% beta, DF.british.doctors[,'person.years'] * exp(X %*% beta));
observed.values <- DF.british.doctors[,'deaths'];
expected.values <- fitted.values(GLM.british.doctors.1);
DF.residuals.1 <- cbind(
observed = observed.values,
expected = expected.values,
by.hand = DF.british.doctors[,'person.years'] * exp(X %*% beta),
likelihood = dpois(x=observed.values,lambda=expected.values),
Pearson.residual = (observed.values - expected.values) / sqrt(expected.values),
deviance.residual = sign(observed.values - expected.values) * sqrt( 2 * (observed.values * log(observed.values/expected.values) - (observed.values - expected.values)) )
);
DF.residuals.1;
# maximum likelihood
prod(DF.residuals.1[,'likelihood']);
log.max.likelihood.1 <- log( prod(DF.residuals.1[,'likelihood']) );
log.max.likelihood.1;
### Minimal model (i.e. intercept-only model) ######################################################
GLM.british.doctors.0 <- glm(
formula = deaths ~ offset(log(person.years)),
data = DF.british.doctors,
family = poisson
);
summary(GLM.british.doctors.0);
observed.values <- DF.british.doctors[,'deaths'];
expected.values <- fitted.values(GLM.british.doctors.0);
DF.residuals.0 <- cbind(
observed = observed.values,
expected = expected.values,
likelihood = dpois(x=observed.values,lambda=expected.values),
Pearson.residual = (observed.values - expected.values) / sqrt(expected.values),
deviance.residual = sign(observed.values - expected.values) * sqrt( 2 * (observed.values * log(observed.values/expected.values) - (observed.values - expected.values)) )
);
DF.residuals.0;
# maximum likelihood
prod(DF.residuals.0[,'likelihood']);
log.max.likelihood.0 <- log( prod(DF.residuals.0[,'likelihood']) );
log.max.likelihood.0;
####################################################################################################
####################################################################################################
# Pearson chi-squared statistic of Saturated Model
sum(DF.residuals.2[, 'Pearson.residual']^2);
# pseudo-R-squared of Saturated Model
(log.max.likelihood.0 - log.max.likelihood.2) / log.max.likelihood.0;
# likelihood ratio chi-squared statistic of Saturated Model --- method 1
# likelihood ratio chi-squared statistic(model of interest)
# = 2 * [ log(max. likelihood of model of interest) - log(max. likelihood of intercept-only model) ]
2 * (log.max.likelihood.2 - log.max.likelihood.0);
# likelihood ratio chi-squared statistic of Saturated Model --- method 2
GLM.british.doctors.2$null.deviance;
GLM.british.doctors.2$deviance;
likelihood.ratio.chi.squared.statistic <- GLM.british.doctors.2$null.deviance - GLM.british.doctors.2$deviance;
likelihood.ratio.chi.squared.statistic;
# deviance of Saturated Model --- method 1
sum(DF.residuals.2[,'deviance.residual']^2);
# deviance of Saturated Model --- method 2
# Recall that:
#    deviance(model of interest) := 2 * [ log(max. likelihood of saturated model) - log(max. likelihood of model of interest) ]
2 * (log.max.likelihood.2 - log.max.likelihood.2);
####################################################################################################
####################################################################################################
# Pearson chi-squared statistic of Model given by Eqn (9.9)
sum(DF.residuals.1[, 'Pearson.residual']^2);
# pseudo-R-squared of Model given by Eqn (9.9)
(log.max.likelihood.0 - log.max.likelihood.1) / log.max.likelihood.0;
# likelihood ratio chi-squared statistic of Model given by Eqn (9.9) --- method 1
# likelihood ratio chi-squared statistic(model of interest)
# = 2 * [ log(max. likelihood of model of interest) - log(max. likelihood of intercept-only model) ]
2 * (log.max.likelihood.1 - log.max.likelihood.0);
# likelihood ratio chi-squared statistic of Model given by Eqn (9.9) --- method 2
GLM.british.doctors.1$null.deviance;
GLM.british.doctors.1$deviance;
likelihood.ratio.chi.squared.statistic <- GLM.british.doctors.1$null.deviance - GLM.british.doctors.1$deviance;
likelihood.ratio.chi.squared.statistic;
# deviance of Model given by Eqn (9.9) --- method 1
sum(DF.residuals.1[,'deviance.residual']^2);
# deviance of Model given by Eqn (9.9) --- method 2
# Recall that:
#    deviance(model of interest) := 2 * [ log(max. likelihood of saturated model) - log(max. likelihood of model of interest) ]
2 * (log.max.likelihood.2 - log.max.likelihood.1);
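# Illustrative check: the same deviance is available from deviance(), and a
# goodness-of-fit p-value follows from comparing it against a chi-squared
# distribution on the residual degrees of freedom.
deviance(GLM.british.doctors.1);
df.residual(GLM.british.doctors.1);
pchisq(deviance(GLM.british.doctors.1), df = df.residual(GLM.british.doctors.1), lower.tail = FALSE);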
####################################################################################################
####################################################################################################
# Pearson chi-squared statistic of Intercept-only Model
sum(DF.residuals.0[, 'Pearson.residual']^2);
# pseudo-R-squared of Intercept-only Model
(log.max.likelihood.0 - log.max.likelihood.0) / log.max.likelihood.0;
# likelihood ratio chi-squared statistic of Intercept-only Model --- method 1
# likelihood ratio chi-squared statistic(model of interest)
# = 2 * [ log(max. likelihood of model of interest) - log(max. likelihood of intercept-only model) ]
2 * (log.max.likelihood.0 - log.max.likelihood.0);
# likelihood ratio chi-squared statistic of Intercept-only Model --- method 2
GLM.british.doctors.0$null.deviance;
GLM.british.doctors.0$deviance;
likelihood.ratio.chi.squared.statistic <- GLM.british.doctors.0$null.deviance - GLM.british.doctors.0$deviance;
likelihood.ratio.chi.squared.statistic;
# deviance of Intercept-only Model (i.e. minimal model) --- method 1
sum(DF.residuals.0[,'deviance.residual']^2);
# deviance of Intercept-only Model (i.e. minimal model) --- method 2
# Recall that:
#    deviance(model of interest) := 2 * [ log(max. likelihood of saturated model) - log(max. likelihood of model of interest) ]
log.max.likelihood.2;
log.max.likelihood.0;
2 * (log.max.likelihood.2 - log.max.likelihood.0);
####################################################################################################
####################################################################################################
anova(GLM.british.doctors.0,GLM.british.doctors.1);
anova(GLM.british.doctors.0,GLM.british.doctors.2);
anova(GLM.british.doctors.1,GLM.british.doctors.2);
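# Illustrative side-by-side comparison of the three fitted models; AIC() and
# logLik() are generic and accept glm objects directly.
AIC(GLM.british.doctors.0, GLM.british.doctors.1, GLM.british.doctors.2);
sapply(
  list(minimal = GLM.british.doctors.0, eqn.9.9 = GLM.british.doctors.1, saturated = GLM.british.doctors.2),
  function(m) c(logLik = as.numeric(logLik(m)), deviance = deviance(m))
);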
print("ZZZ");
|
/exercises/statistics/generalized-linear-models/dobson-barnett/chap09/examples/example-9-2-1.R
|
no_license
|
paradisepilot/statistics
|
R
| false | false | 9,714 |
r
|
library(lattice);
# load data
FILE.british.doctors <- "../../data/table-09-1_British-doctors-smoking-and-coronary-death.csv";
DF.british.doctors <- read.table(
file = FILE.british.doctors,
sep = "\t",
header = TRUE
);
DF.british.doctors;
pdf("example-9-2-1_british-doctors_data.pdf");
print(dotplot(
	x = I(100000 * deaths / person.years) ~ age,
	groups = smoking,
	data = DF.british.doctors
));
dev.off();
DF.british.doctors;
# augment auxiliary columns:
DF.british.doctors[,'agecat'] <- rep(0,nrow(DF.british.doctors));
DF.british.doctors[DF.british.doctors[,'age']=='35_to_44','agecat'] <- 1;
DF.british.doctors[DF.british.doctors[,'age']=='45_to_54','agecat'] <- 2;
DF.british.doctors[DF.british.doctors[,'age']=='55_to_64','agecat'] <- 3;
DF.british.doctors[DF.british.doctors[,'age']=='65_to_74','agecat'] <- 4;
DF.british.doctors[DF.british.doctors[,'age']=='75_to_84','agecat'] <- 5;
DF.british.doctors['agesq'] <- DF.british.doctors[,'agecat']^2;
DF.british.doctors[,'smkage'] <- DF.british.doctors[,'agecat'];
DF.british.doctors[DF.british.doctors[,'smoking']=='non.smoker','smkage'] <- 0;
DF.british.doctors;
### Saturated model (i.e. each data point gets its own fixed-effect parameter) ####################
DF.british.doctors.2 <- DF.british.doctors;
DF.british.doctors.2[,'agecat'] <- factor(DF.british.doctors.2[,'agecat']);
DF.british.doctors.2[,'smoking'] <- factor(DF.british.doctors.2[,'smoking']);
DF.british.doctors.2;
GLM.british.doctors.2 <- glm(
formula = deaths ~ offset(log(person.years)) + agecat * smoking,
data = DF.british.doctors.2,
family = poisson
);
summary(GLM.british.doctors.2);
# expected values (computed from maximum likelihood estimates of model coefficients),
# (observation-wise) likelihood, Pearson residuals, and deviance residuals:
X <- model.matrix(GLM.british.doctors.2);
beta <- coefficients(GLM.british.doctors.2);
X;
beta;
cbind( X %*% beta, DF.british.doctors[,'person.years'] * exp(X %*% beta));
observed.values <- DF.british.doctors[,'deaths'];
expected.values <- fitted.values(GLM.british.doctors.2);
DF.residuals.2 <- cbind(
observed = observed.values,
expected = expected.values,
by.hand = DF.british.doctors[,'person.years'] * exp(X %*% beta),
likelihood = dpois(x=observed.values,lambda=expected.values),
Pearson.residual = (observed.values - expected.values) / sqrt(expected.values),
deviance.residual = rep(0,length(observed.values))
# deviance.residual = sign(observed.values - expected.values) * sqrt( 2 * (observed.values * log(observed.values/expected.values) - (observed.values - expected.values)) )
);
DF.residuals.2;
# maximum likelihood
prod(DF.residuals.2[,'likelihood']);
log.max.likelihood.2 <- log( prod(DF.residuals.2[,'likelihood']) );
log.max.likelihood.2;
### Model of interest (in this case, the model given by Eqn (9.9)) #################################
# GLM model using R built-in functions:
GLM.british.doctors.1 <- glm(
formula = deaths ~ offset(log(person.years)) + agecat + agesq + smoking + smkage,
data = DF.british.doctors,
family = poisson
);
summary(GLM.british.doctors.1);
# expected values (computed from maximum likelihood estimates of model coefficients),
# (observation-wise) likelihood, Pearson residuals, and deviance residuals:
X <- model.matrix(GLM.british.doctors.1);
beta <- coefficients(GLM.british.doctors.1);
X;
beta;
cbind( X %*% beta, DF.british.doctors[,'person.years'] * exp(X %*% beta));
observed.values <- DF.british.doctors[,'deaths'];
expected.values <- fitted.values(GLM.british.doctors.1);
DF.residuals.1 <- cbind(
observed = observed.values,
expected = expected.values,
by.hand = DF.british.doctors[,'person.years'] * exp(X %*% beta),
likelihood = dpois(x=observed.values,lambda=expected.values),
Pearson.residual = (observed.values - expected.values) / sqrt(expected.values),
deviance.residual = sign(observed.values - expected.values) * sqrt( 2 * (observed.values * log(observed.values/expected.values) - (observed.values - expected.values)) )
);
DF.residuals.1;
# maximum likelihood
prod(DF.residuals.1[,'likelihood']);
log.max.likelihood.1 <- log( prod(DF.residuals.1[,'likelihood']) );
log.max.likelihood.1;
### Minimal model (i.e. intercept-only model) ######################################################
GLM.british.doctors.0 <- glm(
formula = deaths ~ offset(log(person.years)),
data = DF.british.doctors,
family = poisson
);
summary(GLM.british.doctors.0);
observed.values <- DF.british.doctors[,'deaths'];
expected.values <- fitted.values(GLM.british.doctors.0);
DF.residuals.0 <- cbind(
observed = observed.values,
expected = expected.values,
likelihood = dpois(x=observed.values,lambda=expected.values),
Pearson.residual = (observed.values - expected.values) / sqrt(expected.values),
deviance.residual = sign(observed.values - expected.values) * sqrt( 2 * (observed.values * log(observed.values/expected.values) - (observed.values - expected.values)) )
);
DF.residuals.0;
# maximum likelihood
prod(DF.residuals.0[,'likelihood']);
log.max.likelihood.0 <- log( prod(DF.residuals.0[,'likelihood']) );
log.max.likelihood.0;
####################################################################################################
####################################################################################################
# Pearson chi-squared statistic of Saturated Model
sum(DF.residuals.2[, 'Pearson.residual']^2);
# pseudo-R-squared of Saturated Model
(log.max.likelihood.0 - log.max.likelihood.2) / log.max.likelihood.0;
# likelihood ratio chi-squared statistic of Saturated Model --- method 1
# likelihood ratio chi-squared statistic(model of interest)
# = 2 * [ log(max. likelihood of model of interest) - log(max. likelihood of intercept-only model) ]
2 * (log.max.likelihood.2 - log.max.likelihood.0);
# likelihood ratio chi-squared statistic of Saturated Model --- method 2
GLM.british.doctors.2$null.deviance;
GLM.british.doctors.2$deviance;
likelihood.ratio.chi.squared.statistic <- GLM.british.doctors.2$null.deviance - GLM.british.doctors.2$deviance;
likelihood.ratio.chi.squared.statistic;
# deviance of Saturated Model --- method 1
sum(DF.residuals.2[,'deviance.residual']^2);
# deviance of Saturated Model --- method 2
# Recall that:
#    deviance(model of interest) := 2 * [ log(max. likelihood of saturated model) - log(max. likelihood of model of interest) ]
2 * (log.max.likelihood.2 - log.max.likelihood.2);
####################################################################################################
####################################################################################################
# Pearson chi-squared statistic of Model given by Eqn (9.9)
sum(DF.residuals.1[, 'Pearson.residual']^2);
# pseudo-R-squared of Model given by Eqn (9.9)
(log.max.likelihood.0 - log.max.likelihood.1) / log.max.likelihood.0;
# likelihood ratio chi-squared statistic of Model given by Eqn (9.9) --- method 1
# likelihood ratio chi-squared statistic(model of interest)
# = 2 * [ log(max. likelihood of model of interest) - log(max. likelihood of intercept-only model) ]
2 * (log.max.likelihood.1 - log.max.likelihood.0);
# likelihood ratio chi-squared statistic of Model given by Eqn (9.9) --- method 2
GLM.british.doctors.1$null.deviance;
GLM.british.doctors.1$deviance;
likelihood.ratio.chi.squared.statistic <- GLM.british.doctors.1$null.deviance - GLM.british.doctors.1$deviance;
likelihood.ratio.chi.squared.statistic;
# deviance of Model given by Eqn (9.9) --- method 1
sum(DF.residuals.1[,'deviance.residual']^2);
# deviance of Model given by Eqn (9.9) --- method 2
# Recall that:
#    deviance(model of interest) := 2 * [ log(max. likelihood of saturated model) - log(max. likelihood of model of interest) ]
2 * (log.max.likelihood.2 - log.max.likelihood.1);
####################################################################################################
####################################################################################################
# Pearson chi-squared statistic of Intercept-only Model
sum(DF.residuals.0[, 'Pearson.residual']^2);
# pseudo-R-squared of Intercept-only Model
(log.max.likelihood.0 - log.max.likelihood.0) / log.max.likelihood.0;
# likelihood ratio chi-squared statistic of Intercept-only Model --- method 1
# likelihood ratio chi-squared statistic(model of interest)
# = 2 * [ log(max. likelihood of model of interest) - log(max. likelihood of intercept-only model) ]
2 * (log.max.likelihood.0 - log.max.likelihood.0);
# likelihood ratio chi-squared statistic of Intercept-only Model --- method 2
GLM.british.doctors.0$null.deviance;
GLM.british.doctors.0$deviance;
likelihood.ratio.chi.squared.statistic <- GLM.british.doctors.0$null.deviance - GLM.british.doctors.0$deviance;
likelihood.ratio.chi.squared.statistic;
# deviance of Intercept-only Model (i.e. minimal model) --- method 1
sum(DF.residuals.0[,'deviance.residual']^2);
# deviance of Intercept-only Model (i.e. minimal model) --- method 2
# Recall that:
#    deviance(model of interest) := 2 * [ log(max. likelihood of saturated model) - log(max. likelihood of model of interest) ]
log.max.likelihood.2;
log.max.likelihood.0;
2 * (log.max.likelihood.2 - log.max.likelihood.0);
####################################################################################################
####################################################################################################
anova(GLM.british.doctors.0,GLM.british.doctors.1);
anova(GLM.british.doctors.0,GLM.british.doctors.2);
anova(GLM.british.doctors.1,GLM.british.doctors.2);
print("ZZZ");
|
# Plot a point using the ggplot2 library
# Choose x and y coordinates
x <- 4
y <- 9
# Create a data frame holding the point
D <- data.frame(x, y)
# Import the ggplot2 library
library(ggplot2)
# Plot the point
ggplot() + geom_point(data = D, aes(x = x, y = y), size = 10, color = "blue")
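# To save the plot to disk (assumes write access to the working directory),
# ggsave() writes the most recently displayed ggplot, e.g.:
# ggsave("point.png", width = 4, height = 4)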
|
/plot_a_point.R
|
no_license
|
Praneet460/Get-Started-With-R
|
R
| false | false | 263 |
r
|
# Plot a point using the ggplot2 library
# Choose x and y coordinates
x <- 4
y <- 9
# Create a data frame holding the point
D <- data.frame(x, y)
# Import the ggplot2 library
library(ggplot2)
# Plot the point
ggplot() + geom_point(data = D, aes(x = x, y = y), size = 10, color = "blue")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/settings.R
\name{settings}
\alias{settings}
\title{MachineShop Settings}
\usage{
settings(...)
}
\arguments{
\item{...}{character names of settings to view, \code{name = value} pairs
giving the values of settings to change, a vector of these, \code{"reset"}
to restore all package defaults, or no arguments to view all settings.
Partial matching of setting names is supported.}
}
\value{
The setting value if only one is specified to view. Otherwise, a
list of the values of specified settings as they existed prior to any
requested changes. Such a list can be passed as an argument to
\code{settings} to restore their values.
}
\description{
Allow the user to view or change global settings which affect default
behaviors of functions in the \pkg{MachineShop} package.
}
\section{Settings}{
\describe{
\item{\code{\link[=controls]{control}}}{function, function name, or object
defining a default resampling method [default: \code{"CVControl"}].}
\item{\code{cutoff}}{numeric (0, 1) threshold above which binary factor
probabilities are classified as events and below which survival
probabilities are classified [default: 0.5].}
\item{\code{distr.SurvMeans}}{character string specifying distributional
approximations to estimated survival curves for predicting survival
means. Choices are \code{"empirical"} for the Kaplan-Meier estimator,
\code{"exponential"}, \code{"rayleigh"}, or \code{"weibull"} (default).}
\item{\code{distr.SurvProbs}}{character string specifying distributional
approximations to estimated survival curves for predicting survival
events/probabilities. Choices are \code{"empirical"} (default) for the
Kaplan-Meier estimator, \code{"exponential"}, \code{"rayleigh"}, or
\code{"weibull"}.}
\item{\code{grid}}{\code{size} argument to \code{\link{TuningGrid}}
indicating the number of parameter-specific values to generate
automatically for \link[=TunedModel]{tuning} of models that have
pre-defined grids or a \code{\link{TuningGrid}} function, function name,
or object [default: 3].}
\item{\code{method.EmpiricalSurv}}{character string specifying the
empirical method of estimating baseline survival curves for Cox
proportional hazards-based models. Choices are \code{"breslow"} or
\code{"efron"} (default).}
\item{\code{metrics.ConfusionMatrix}}{function, function name, or vector of
these with which to calculate \link{performance} \link{metrics} for
confusion matrices [default: \code{c(Accuracy = "accuracy", Kappa =
"kappa2", `Weighted Kappa` = "weighted_kappa2", Sensitivity =
"sensitivity", Specificity = "specificity")}].}
\item{\code{metrics.factor}}{function, function name, or vector of these
with which to calculate \link{performance} \link{metrics} for factor
responses [default: \code{c(Brier = "brier", Accuracy = "accuracy",
Kappa = "kappa2", `Weighted Kappa` = "weighted_kappa2", `ROC AUC` =
"roc_auc", Sensitivity = "sensitivity", Specificity = "specificity")}].}
\item{\code{metrics.matrix}}{function, function name, or vector of these
with which to calculate \link{performance} \link{metrics} for matrix
responses [default: \code{c(RMSE = "rmse", R2 = "r2", MAE = "mae")}].}
\item{\code{metrics.numeric}}{function, function name, or vector of these
with which to calculate \link{performance} \link{metrics} for numeric
responses [default: \code{c(RMSE = "rmse", R2 = "r2", MAE = "mae")}].}
\item{\code{metrics.Surv}}{function, function name, or vector of these with
which to calculate \link{performance} \link{metrics} for survival
responses [default: \code{c(`C-Index` = "cindex", Brier = "brier",
`ROC AUC` = "roc_auc", Accuracy = "accuracy")}].}
\item{\code{print_max}}{number of models or data rows to show with print
methods or \code{Inf} to show all [default: 10].}
\item{\code{require}}{names of installed packages to load during parallel
execution of resampling algorithms [default: \code{"MachineShop"}].}
\item{\code{reset}}{character names of settings to reset to their default
values.}
\item{\code{RHS.formula}}{non-modifiable character vector of operators and
functions allowed in traditional formula specifications.}
\item{\code{stat.Curve}}{function or character string naming a function
to compute one \link{summary} statistic at each cutoff value of resampled
metrics in performance curves, or \code{NULL} for resample-specific
metrics [default: \code{"base::mean"}].}
\item{\code{stat.Resample}}{function or character string naming a function
to compute one summary statistic to control the ordering of models in
\link[=plot]{plots} [default: \code{"base::mean"}].}
\item{\code{stat.TrainingParams}}{function or character string naming a function
to compute one summary statistic on resampled performance metrics for
input \link[=SelectedInput]{selection} or \link[=TunedInput]{tuning} or
for model \link[=SelectedModel]{selection} or \link[=TunedModel]{tuning}
[default: \code{"base::mean"}].}
\item{\code{stats.PartialDependence}}{function, function name, or vector of
these with which to compute \link[=dependence]{partial dependence}
summary statistics [default: \code{c(Mean = "base::mean")}].}
\item{\code{stats.Resample}}{function, function name, or vector of these
with which to compute \link{summary} statistics on resampled performance
metrics [default: \code{c(Mean = "base::mean", Median = "stats::median",
SD = "stats::sd", Min = "base::min", Max = "base::max")}].}
}
}
\examples{
## View all current settings
settings()
## Change settings
presets <- settings(control = "BootControl", grid = 10)
## View one setting
settings("control")
## View multiple settings
settings("control", "grid")
## Restore the previous settings
settings(presets)
}
|
/man/settings.Rd
|
no_license
|
cran/MachineShop
|
R
| false | true | 6,065 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/settings.R
\name{settings}
\alias{settings}
\title{MachineShop Settings}
\usage{
settings(...)
}
\arguments{
\item{...}{character names of settings to view, \code{name = value} pairs
giving the values of settings to change, a vector of these, \code{"reset"}
to restore all package defaults, or no arguments to view all settings.
Partial matching of setting names is supported.}
}
\value{
The setting value if only one is specified to view. Otherwise, a
list of the values of specified settings as they existed prior to any
requested changes. Such a list can be passed as an argument to
\code{settings} to restore their values.
}
\description{
Allow the user to view or change global settings which affect default
behaviors of functions in the \pkg{MachineShop} package.
}
\section{Settings}{
\describe{
\item{\code{\link[=controls]{control}}}{function, function name, or object
defining a default resampling method [default: \code{"CVControl"}].}
\item{\code{cutoff}}{numeric (0, 1) threshold above which binary factor
probabilities are classified as events and below which survival
probabilities are classified [default: 0.5].}
\item{\code{distr.SurvMeans}}{character string specifying distributional
approximations to estimated survival curves for predicting survival
means. Choices are \code{"empirical"} for the Kaplan-Meier estimator,
\code{"exponential"}, \code{"rayleigh"}, or \code{"weibull"} (default).}
\item{\code{distr.SurvProbs}}{character string specifying distributional
approximations to estimated survival curves for predicting survival
events/probabilities. Choices are \code{"empirical"} (default) for the
Kaplan-Meier estimator, \code{"exponential"}, \code{"rayleigh"}, or
\code{"weibull"}.}
\item{\code{grid}}{\code{size} argument to \code{\link{TuningGrid}}
indicating the number of parameter-specific values to generate
automatically for \link[=TunedModel]{tuning} of models that have
pre-defined grids or a \code{\link{TuningGrid}} function, function name,
or object [default: 3].}
\item{\code{method.EmpiricalSurv}}{character string specifying the
empirical method of estimating baseline survival curves for Cox
proportional hazards-based models. Choices are \code{"breslow"} or
\code{"efron"} (default).}
\item{\code{metrics.ConfusionMatrix}}{function, function name, or vector of
these with which to calculate \link{performance} \link{metrics} for
confusion matrices [default: \code{c(Accuracy = "accuracy", Kappa =
"kappa2", `Weighted Kappa` = "weighted_kappa2", Sensitivity =
"sensitivity", Specificity = "specificity")}].}
\item{\code{metrics.factor}}{function, function name, or vector of these
with which to calculate \link{performance} \link{metrics} for factor
responses [default: \code{c(Brier = "brier", Accuracy = "accuracy",
Kappa = "kappa2", `Weighted Kappa` = "weighted_kappa2", `ROC AUC` =
"roc_auc", Sensitivity = "sensitivity", Specificity = "specificity")}].}
\item{\code{metrics.matrix}}{function, function name, or vector of these
with which to calculate \link{performance} \link{metrics} for matrix
responses [default: \code{c(RMSE = "rmse", R2 = "r2", MAE = "mae")}].}
\item{\code{metrics.numeric}}{function, function name, or vector of these
with which to calculate \link{performance} \link{metrics} for numeric
responses [default: \code{c(RMSE = "rmse", R2 = "r2", MAE = "mae")}].}
\item{\code{metrics.Surv}}{function, function name, or vector of these with
which to calculate \link{performance} \link{metrics} for survival
responses [default: \code{c(`C-Index` = "cindex", Brier = "brier",
`ROC AUC` = "roc_auc", Accuracy = "accuracy")}].}
\item{\code{print_max}}{number of models or data rows to show with print
methods or \code{Inf} to show all [default: 10].}
\item{\code{require}}{names of installed packages to load during parallel
execution of resampling algorithms [default: \code{"MachineShop"}].}
\item{\code{reset}}{character names of settings to reset to their default
values.}
\item{\code{RHS.formula}}{non-modifiable character vector of operators and
functions allowed in traditional formula specifications.}
\item{\code{stat.Curve}}{function or character string naming a function
to compute one \link{summary} statistic at each cutoff value of resampled
metrics in performance curves, or \code{NULL} for resample-specific
metrics [default: \code{"base::mean"}].}
\item{\code{stat.Resample}}{function or character string naming a function
to compute one summary statistic to control the ordering of models in
\link[=plot]{plots} [default: \code{"base::mean"}].}
\item{\code{stat.TrainingParams}}{function or character string naming a function
to compute one summary statistic on resampled performance metrics for
input \link[=SelectedInput]{selection} or \link[=TunedInput]{tuning} or
for model \link[=SelectedModel]{selection} or \link[=TunedModel]{tuning}
[default: \code{"base::mean"}].}
\item{\code{stats.PartialDependence}}{function, function name, or vector of
these with which to compute \link[=dependence]{partial dependence}
summary statistics [default: \code{c(Mean = "base::mean")}].}
\item{\code{stats.Resample}}{function, function name, or vector of these
with which to compute \link{summary} statistics on resampled performance
metrics [default: \code{c(Mean = "base::mean", Median = "stats::median",
SD = "stats::sd", Min = "base::min", Max = "base::max")}].}
}
}
\examples{
## View all current settings
settings()
## Change settings
presets <- settings(control = "BootControl", grid = 10)
## View one setting
settings("control")
## View multiple settings
settings("control", "grid")
## Restore the previous settings
settings(presets)
}
|