% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxiliary.R
\name{aux.preprocess}
\alias{aux.preprocess}
\title{Preprocessing the data}
\usage{
aux.preprocess(data, type = c("center", "scale", "cscale", "decorrelate",
"whiten"))
}
\arguments{
\item{data}{an \eqn{(n\times p)} matrix or data frame whose rows are observations
and columns represent independent variables.}
\item{type}{one of \code{"center"}, \code{"scale"}, \code{"cscale"}, \code{"decorrelate"} or \code{"whiten"}.}
}
\value{
a named list containing:
\describe{
\item{pX}{an \eqn{(n\times p)} matrix after preprocessing in accordance with the \code{type} parameter.}
\item{info}{a list containing \itemize{
\item \code{type:} name of the preprocessing procedure.
\item \code{mean:} a mean vector of length \eqn{p}.
\item \code{multiplier:} a \eqn{(p\times p)} matrix, or 1 for \code{"center"}.}}
}
}
\description{
\code{aux.preprocess} can perform one of the following operations: \code{"center"}, \code{"scale"},
\code{"cscale"}, \code{"decorrelate"} and \code{"whiten"}. See the Details section for more information.
}
\details{
The following preprocessing operations are supported:
\describe{
\item{\code{"center"}}{subtracts the mean of each column so that every variable has mean \eqn{0}.}
\item{\code{"scale"}}{scales each column so that every variable has variance \eqn{1}.}
\item{\code{"cscale"}}{combines \code{"center"} and \code{"scale"}.}
\item{\code{"decorrelate"}}{applies \code{"center"} and transforms the data so that its covariance matrix has nonzero entries only on the diagonal.}
\item{\code{"whiten"}}{applies \code{"decorrelate"} and additionally scales the data so that all diagonal elements of the covariance matrix equal \eqn{1}.}
}
}
\examples{
\dontrun{
## Generate data
X = aux.gensamples()
## 5 types of preprocessing
X_center = aux.preprocess(X)
X_scale = aux.preprocess(X,type="scale")
X_cscale = aux.preprocess(X,type="cscale")
X_decorr = aux.preprocess(X,type="decorrelate")
X_whiten = aux.preprocess(X,type="whiten")
## Check with Covariance matrix
par(mfrow=c(2,3))
image(cov(X)[,3:1], zlim=c(-5,5)); title("original covariance")
image(cov(X_center$pX)[,3:1],zlim=c(-5,5)); title("opt::center")
image(cov(X_scale$pX)[,3:1], zlim=c(-5,5)); title("opt::scale")
image(cov(X_cscale$pX)[,3:1],zlim=c(-5,5)); title("opt::cscale")
image(cov(X_decorr$pX)[,3:1],zlim=c(-5,5)); title("opt::decorrelate")
image(cov(X_whiten$pX)[,3:1],zlim=c(-5,5)); title("opt::whiten")
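## Illustrative check (a sketch, assuming X generated above is a numeric
## matrix): after "whiten" the covariance should be close to the identity.
round(cov(X_whiten$pX), 3)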
}
}
\author{
Kisung You
}
| path: /man/aux_preprocess.Rd | license: no_license | repo: rcannood/Rdimtools | language: R | is_vendor: false | is_generated: true | length_bytes: 2,382 | extension: rd |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(rvest)
library(ggplot2)
library(scales)
library(chron)
library(timeDate)
source("loadAndPrepare.R")
source("regressionAnalysis.R")
shinyServer(function(input, output) {
analysisData <- reactive({
err.handler <- function(err){
stop("Unable to retrieve data or data do not have enough samples")
}
result<-searchOnNumbers(input$searchterm)
movieId<-numbersMovieId(result)
df <- tryCatch(loadFromNumbers(movieId), error=err.handler)
c(movieId, mrAnalysis(df), srAnalysis(df), df)
})
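# Note: based on how it is indexed below, the flat list returned by
# analysisData() holds the movie id in [[1]], the multiple-regression outputs
# from mrAnalysis() in [[2]]-[[8]] (plots, the revenue data frame,
# autocorrelation, ANOVA table and model summary) and the simple-regression
# outputs from srAnalysis() in [[11]]-[[16]]; the exact layout is determined
# by the helpers sourced from regressionAnalysis.R.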
# Render movie image
# output$sideImage <- renderImage({
# movieId = analysisData()[[1]]
# return(list(
# src = paste("http://www.the-numbers.com/images/movies/opusdata/", movieId, ".jpg", sep=''),
# filetype = "image/jpeg",
# alt = ""
# ))
# })
output$sideImage<-renderText({
movieId = analysisData()[[1]]
err.handler.img<-function(err) {return("https://upload.wikimedia.org/wikipedia/en/d/dc/Academy_Award_trophy.jpg")}
src = tryCatch(paste("http://www.the-numbers.com/images/movies/opusdata/", movieId, ".jpg", sep=''), error=err.handler.img)
c('<br/><img width=75% src="',src,'">')
})
output$movieName <- renderText({
data<-analysisData()
movieDisplayText(data[[1]])
})
#
# Summary Tab output
#
output$summaryPlot <- renderPlot({
data<-analysisData()
data[[2]]
})
output$summaryTable <- renderTable({
data<-analysisData()[[5]]
data$PredictedRevenues<-paste("$",format(round(data$PredictedRevenues), big.mark=","),sep="")
data<-data[22:28, c("Day", "Weekday", "PredictedRevenues")] #Show only future predictions
}, digits=0, bordered = TRUE, spacing = 'xs')
output$keyStats <- renderTable({
data<-analysisData()[[8]]
autocor<-analysisData()[[6]]
stat_names<-c("R Squared", "Adj R Squared", "Std Error", "Autocorrelation")
stat_values<-c(data$r.squared, data$adj.r.squared, data$sigma, autocor)
data<-data.frame(stat_names, stat_values)
}, include.colnames=FALSE, bordered=TRUE, spacing = 'xs')
output$dataTable <- renderTable({
data<-analysisData()[[5]]
data$Date<-format(data$Date,'%Y-%m-%d')
data$Revenues<-paste("$",format(round(data$Revenues), big.mark=","),sep="")
data$PredictedRevenues<-paste("$",format(round(data$PredictedRevenues), big.mark=","),sep="")
data # Show complete data including sample and predictions
}, digits=0, bordered=TRUE, spacing = 'xs')
#
# Multiple Regression Tab output
#
# Key Statistics
output$keyStatsAnalysis <- renderTable({
data<-analysisData()[[8]]
autocor<-analysisData()[[6]]
stat_names<-c("R Squared", "Adj R Squared", "Std Error", "Autocorrelation")
stat_values<-c(data$r.squared, data$adj.r.squared, data$sigma, autocor)
data<-data.frame(stat_names, stat_values)
}, include.colnames=FALSE, bordered = TRUE, spacing = 'xs')
# Analysis of Variance
output$anova <- renderTable({
d<-as.data.frame(analysisData()[[7]])
d
}, bordered = TRUE, spacing = 'xs')
# Coefficients
output$coeff <- renderTable({
# Get matrix of coefficients and convert to a data frame
d<-as.data.frame(analysisData()[[8]]$coefficients)
colnames(d)<-c("Estimate", "Std Error", "t value", "Pr(>|t|)")
d
}, bordered = TRUE,colnames = TRUE, rownames = TRUE, spacing = 'xs')
# Residual Plot
output$resPlot <- renderPlot({
data<-analysisData()
data[[4]]
})
# Histogram of Residuals
output$histPlot <- renderPlot({
data<-analysisData()
data[[3]]
})
#
# Simple Regression
#
# Revenue Trend plot
output$trendPlot <- renderPlot({
data<-analysisData()
data[[11]]
})
# Key Statistics for simple regression
output$keyStatsSimple <- renderTable({
data<-analysisData()[[16]]
autocor<-analysisData()[[15]]
stat_names<-c("R Squared", "Adj R Squared", "Std Error", "Autocorrelation")
stat_values<-c(data$r.squared, data$adj.r.squared, data$sigma, autocor)
data<-data.frame(stat_names, stat_values)
}, include.colnames=FALSE, bordered = TRUE, spacing = 'xs')
# Residual Plot simple regression
output$resPlotSimple <- renderPlot({
data<-analysisData()
data[[13]]
})
# Histogram of Residuals - simple regression
output$histPlotSimple <- renderPlot({
data<-analysisData()
data[[12]]
})
# Generate report
output$report <- downloadHandler(
# For PDF output, change this to "report.pdf"
filename = "report.html",
content = function(file) {
# Copy the report file to a temporary directory before processing it, in
# case we don't have write permissions to the current working dir (which
# can happen when deployed).
tempReport <- file.path(tempdir(), "report.Rmd")
file.copy("report.Rmd", tempReport, overwrite = TRUE)
# Set up parameters to pass to Rmd document
params <- list(dat = analysisData())
# Knit the document, passing in the `params` list, and eval it in a
# child of the global environment (this isolates the code in the document
# from the code in this app).
rmarkdown::render(tempReport, output_file = file,
params = params,
envir = new.env(parent = globalenv())
)
}
)
})
| path: /server.R | license: no_license | repo: apparaokalimireddy/movie-revenue-predictor | language: R | is_vendor: false | is_generated: false | length_bytes: 5,478 | extension: r |
vessel_registry <- function(con, standardize = FALSE) {
q <-
tbl_mar(con, "kvoti.skipaskra_siglo")
if( standardize ) {
q <-
q %>%
dplyr::select(vid = skipnr,
name = nafnskips,
uid = umdnr,
cs = kallmerki,
imo = imonr,
vclass = notkunarteg,
homeharbour = heimahofn,
propeller_diameter = thvermskrufu,
engine_kw = vel_kw,
power_index = aflvisir,
length_registered = skradlengd,
width = skradbreidd,
depth = skraddypt,
length = mestalengd,
brl = bruttoruml, # need a proper acronym
grt = bruttotonn) %>%
# Need to double check ghost ships
dplyr::mutate(length_registered = length_registered / 100,
# units of cm to meters
width = width / 100,
depth = depth / 100,
length = dplyr::case_when(vid == 9928 ~ 5,
TRUE ~ length / 100),
# "correct" brl for Asgrimur Halldorsson
brl = dplyr::case_when(vid == 2780 ~ brl / 100000,
vid == 9928 ~ 2,
TRUE ~ brl / 100),
grt = dplyr::case_when(vid == 9928 ~ 2,
TRUE ~ grt / 100),
# Blidfari has abnormal engine_kw, divided by 100
engine_kw = dplyr::case_when(vid == 2069 ~ engine_kw / 10000,
TRUE ~ engine_kw / 100),
name = str_trim(name),
homeharbour = str_trim(homeharbour),
vessel_length_class = dplyr::case_when(length < 8 ~ "<8",
length >= 8 & length < 10 ~ "08-10",
length >= 10 & length < 12 ~ "10-12",
length >= 12 & length < 15 ~ "12-15",
length >= 15 ~ ">=15",
TRUE ~ NA_character_),
name = str_trim(name),
name = ifelse(name == "", NA_character_, name),
uid = str_trim(uid),
uid = ifelse(uid == "", NA_character_, uid),
# NOTE: Below does not get rid of the period
uid = str_replace(uid, "\\.", ""),
uid = dplyr::case_when(uid == "IS" ~ "ÍS",
uid == "OF" ~ "ÓF",
uid == "KÓ" ~ "KO",
uid == "ZZ0" ~ "ZZ", # Not valid but is also in skipaskra fiskistofu
TRUE ~ uid),
uid = dplyr::case_when(nchar(uid) > 2 ~ paste0(str_sub(uid, 1, 2), "-", str_sub(uid, 3)),
TRUE ~ uid),
cs = str_trim(cs),
cs = ifelse(cs == "", NA_character_, cs),
cs = ifelse(cs == "", NA_character_, cs),
cs = ifelse(nchar(cs) == 4 & str_sub(cs, 1, 2) == "TF",
cs,
NA_character_),
imo = ifelse(imo == 0, NA_integer_, imo),
vclass = as.integer(vclass))
}
return(q)
}
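# Illustration (a plain-R sketch, independent of the database) of the uid
# normalisation above: a raw registration id such as "RE29" gets a hyphen
# inserted after its two-letter prefix.
# paste0(stringr::str_sub("RE29", 1, 2), "-", stringr::str_sub("RE29", 3))
# #> [1] "RE-29"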
vessel_history <- function(con) {
tbl_mar(con, "kvoti.skipasaga") %>%
dplyr::filter(skip_nr > 1) %>%
dplyr::mutate(einknr = dplyr::case_when(nchar(einknr) == 1 ~ paste0("00", einknr),
nchar(einknr) == 2 ~ paste0("0", einknr),
TRUE ~ as.character(einknr)),
einkst = paste0(einkst, einknr)) %>%
dplyr::rename(vid = skip_nr, hist = saga_nr, t1 = i_gildi, t2 = ur_gildi,
uid = einkst, code = flokkur) %>%
dplyr::left_join(tbl_mar(con, "kvoti.utg_fl") %>%
select(code = flokkur, flokkur = heiti)) %>%
dplyr::select(-c(einknr, snn:sbn)) %>%
dplyr::arrange(vid, hist) %>%
dplyr::select(vid:code, flokkur, dplyr::everything())
}
# pth <- "https://www.pfs.is/library/Skrar/Tidnir-og-taekni/Numeramal/MMSI/NUMER%20Query270619.xlsx"
# download.file(pth, destfile = "data-raw/ss-270619_mmsi.xlsx")
# v_mmsi <-
# readxl::read_excel("data-raw/ss-270619_mmsi.xlsx") %>%
# janitor::clean_names() %>%
# dplyr::select(SKNR = sknr,
# NAME = skip,
# CS = kallm,
# MMSI = mmsi_nr,
# STDC = standard_c) %>%
# dplyr::mutate(VID = case_when(str_sub(MMSI, 1, 3) == "251" ~ as.integer(SKNR),
# TRUE ~ NA_integer_),
# VID2 = case_when(str_sub(MMSI, 1, 3) != "251" ~ as.integer(SKNR),
# TRUE ~ NA_integer_)) %>%
# dplyr::select(SKNR, VID, VID2, NAME, MMSI, CS) %>%
# dplyr::arrange(VID)
# dbWriteTable(con, name = "VESSEL_MMSI_20190627", value = v_mmsi, overwrite = TRUE)
vessel_mmsi <- function(con) {
tbl_mar(con, "ops$einarhj.VESSEL_MMSI_20190627")
}
# ------------------------------------------------------------------------------
# MMSI country code
# library(rvest)
# library(countrycode)
# library(tidyverse)
# url <- "https://en.wikipedia.org/wiki/Maritime_identification_digits"
# mid <-
# url %>%
# read_html() %>%
# #html_nodes(xpath='//*[@id="mw-content-text"]/table[1]') %>%
# html_table()
# mid <-
# mid[[1]] %>%
# as_tibble() %>%
# rename(country = Country) %>%
# separate(col = "Codes", into = paste0("c", 1:20)) %>%
# gather(dummy, mid, -country) %>%
# drop_na() %>%
# select(-dummy)
#
# mid <-
# mid %>%
# mutate(iso2 = countrycode(country, "country.name", "iso2c")) %>%
# mutate(iso2 = case_when(mid == "303" ~ "US",
# mid == "608" ~ "GB",
# mid == "204" ~ "PT",
# mid == "306" ~ "NL",
# mid == "618" ~ "FR",
# mid == "635" ~ "FR",
# mid == "255" ~ "PT",
# mid == "661" ~ "RW",
# mid == "607" ~ "FR",
# TRUE ~ iso2))
# mid <-
# mid %>%
# select(MID = mid, ISO2 = iso2, COUNTRY = country)
# dbWriteTable(con, name = "VESSEL_MID", value = mid, overwrite = TRUE)
vessel_mid <- function(con) {
tbl_mar(con, "ops$einarhj.VESSEL_MID")
}
# ------------------------------------------------------------------------------
# Vessel call signs - ITU prefixes
# library(rvest)
# library(countrycode)
# library(tidyverse)
#url <- "https://en.wikipedia.org/wiki/ITU_prefix#Allocation_table"
# need to check Swaziland and Fiji
# x <-
# rio::import("../ITU_prefix.csv", setclass = "tibble") %>%
# janitor::clean_names() %>%
# rename(cs = call_sign_series, cntr = allocated_to) %>%
# mutate(cntr = str_replace(cntr, "\\[Note 1\\]", ""),
# cntr = str_replace(cntr, "\\[Note 2\\]", ""),
# cntr = str_replace(cntr, "\\[Note 4\\]", ""),
# cntr = ifelse(str_starts(cntr, "France"), "France", cntr),
# cntr = ifelse(str_starts(cntr, "United Kingdom"), "United Kingdom", cntr),
# cntr = ifelse(str_starts(cntr, "Canada"), "Canada", cntr),
# cntr = ifelse(str_starts(cntr, "Hong Kong"), "Hong Kong", cntr),
# cntr = ifelse(str_starts(cntr, "Macao"), "Macao", cntr),
# cntr = ifelse(str_starts(cntr, "Netherlands"), "Netherlands", cntr),
# cntr = ifelse(str_detect(cntr, "Bosnia and Herzegovina"), "Bosnia and Herzegovina", cntr)) %>%
# filter(!cntr %in% c("", "Republic of China (Taiwan)",
# "Liechtenstein (uses prefixes allocated to Switzerland)",
# "Swaziland", "Fiji")) %>%
# add_row(cs = "BM-BQ", cntr = "Republic of China (Taiwan)") %>%
# add_row(cs = "BU-BX", cntr = "Republic of China (Taiwan)") %>%
# add_row(cs = c("HB0", "HB3Y", "HBL"), cntr = rep("Liechtenstein", 3)) %>%
# separate(cs, c("from", "to", "to2"), remove = FALSE) %>%
# mutate(to = ifelse(!is.na(to), to, from),
# first = str_sub(from, 1, 1),
# t1 = str_sub(from, 2, 2),
# t2 = str_sub(to, 2, 2))
#
# # Poor man's loop
# n <- length(c(1:9, LETTERS))
# ltrs <- 1:n
# names(ltrs) <- c(1:9, LETTERS)
# # NOTE: Needs further work
# res <- list()
# for(i in 1:nrow(x)) {
# print(i)
# if(x$cs[[i]] %>% nchar() == 1) {
# res[[i]] <- tibble(cs = x$cs[[i]], cntr = x$cntr[[i]])
# } else {
# res[[i]] <-
# tibble(cs = paste0(x$first[[i]], names(ltrs[ltrs[[x$t1[[i]]]]:ltrs[[x$t2[[i]]]]])),
# cntr = x$cntr[[i]])
# }
# }
# ITU_prefix <-
# bind_rows(res) %>%
# mutate(iso2 = countrycode(cntr, "country.name", "iso2c"),
# iso3 = countrycode(cntr, "country.name", "iso3c")) %>%
# rename(CS_PREFIX = cs, COUNTRY = cntr, ISO2 = iso2)
#
# dbWriteTable(con, name = "VESSEL_CS_ITU_PREFIX", value = ITU_prefix, overwrite = TRUE)
vessel_csprefix <- function(con) {
tbl_mar(con, "ops$einarhj.VESSEL_CS_ITU_PREFIX")
}
# Umdæmisbókstafir (registration district letters) of Icelandic vessels
# ust <-
# tribble(~UST, ~STADUR,
# "AK", "Akranes",
# "NS", "Norður-Múlasýsla og Seyðisfjörður",
# "ÁR", "Árnessýsla",
# "ÓF", "Ólafsfjörður",
# "BA", "Barðastrandarsýsla",
# "RE", "Reykjavík",
# "DA", "Dalasýsla",
# "SF", "Austur-Skaftafellssýsla",
# "EA", "Eyjafjarðarsýsla og Akureyri",
# "SH", "Snæfellsness-og Hnappadalssýsla",
# "GK", "Gullbringusýsla",
# "SI", "Siglufjörður",
# "HF", "Kjósarsýsla og Hafnarfjörður",
# "SK", "Skagafjarðarsýsla og Sauðárkrókur",
# "HU", "Húnavatnssýsla",
# "ST", "Strandasýsla",
# "ÍS", "Ísafjarðarsýsla",
# "SU", "Suður-Múlasýsla",
# "KE", "Keflavík",
# "VE", "Vestmannaeyjar",
# "KO", "Kópavogur",
# "VS", "Vestur-Skaftafellssýsla",
# "MB", "Mýra-og Borgarfjarðarsýsla",
# "ÞH", "Þingeyjarsýslur",
# "NK", "Neskaupstaður")
# dbWriteTable(con, name = "VESSEL_UMDAEMISBOKSTAFIR", value = ust, overwrite = TRUE)
vessel_ust <- function(con) {
tbl_mar(con, "ops$einarhj.VESSEL_UMDAEMISBOKSTAFIR")
}
# ------------------------------------------------------------------------------
# Vessel table -----------------------------------------------------------------
# NOTE: A static file; the date denotes the time of the last upload
# Official registry
# library(tidyverse)
# library(mar)
# con <- connect_mar()
#
# # Official registry ------------------------------------------------------------
# kvoti.skipaskra_siglo <-
# mar:::vessel_registry(con, TRUE) %>%
# collect(n = Inf) %>%
# filter(vid > 1) %>%
# mutate(uid = str_replace(uid, "\\.", ""),
# source = "registry",
# iso2 = "IS")
# # Additional vessels kept by fiskistofa ----------------------------------------
# vlookup <- function(this, df, key, value) {
# m <- match(this, df[[key]])
# df[[value]][m]
# }
# orri.skipaskra <-
# lesa_skipaskra(con) %>%
# collect(n = Inf) %>%
# filter(!skip_nr %in% kvoti.skipaskra_siglo$vid) %>%
# mutate(name = str_trim(heiti),
# einknr = ifelse(einknr %in% c(0, 999), NA_real_, einknr),
# einknr = str_pad(einknr, width = 3, side = "left", pad = "0"),
# einkst = ifelse(einkst %in% c("??", "X"), NA_character_, einkst),
# uid = ifelse(!is.na(einknr), paste0(einkst, "-", einknr), einkst)) %>%
# select(vid = skip_nr,
# name = heiti,
# uid,
# brl,
# length = lengd,
# fclass = flokkur) %>%
# mutate(length = ifelse(length <= 1, NA_real_, length),
# brl = ifelse(brl == 0 |
# (brl < 1.0001 & is.na(length)) |
# (brl < 1.0001 & length > 15),
# NA_real_,
# brl),
# source = "skipaskra") %>%
# separate(name, c("dummy", "cs"), sep = "\\(", remove = FALSE) %>%
# select(-dummy) %>%
# mutate(cs = str_replace(cs, "\\)", ""),
# cs = str_trim(cs),
# cs = ifelse(vid >= 5000, "TF", cs),
# cs_prefix = str_sub(cs, 1, 2)) %>%
# left_join(mar:::vessel_csprefix(con) %>%
# select(cs_prefix, iso2) %>%
# collect(n = Inf)) %>%
# mutate(iso2 = case_when(!is.na(iso2) ~ str_sub(uid, 1, 2),
# TRUE ~ iso2)) %>%
# select(-c(cs_prefix))
# # orri.skipaskra %>%
# # filter(!is.na(cs)) %>%
# # group_by(cs) %>%
# # mutate(n.cs = n()) %>%
# # ungroup() %>%
# # arrange(-n.cs, cs) %>%
# # select(cs, n.cs, vid, name, uid) %>%
# # left_join(tbl_mar(con, "kvoti.skipasaga") %>% select(vid = skip_nr, saga_nr:ur_gildi, heiti) %>%
# # collect(n = Inf))
# ## Some vessels picked up from the MMSI registry -------------------------------
# einarhj.VESSEL_MMSI_20190627 <-
# mar:::vessel_mmsi(con) %>%
# filter(!is.na(vid)) %>%
# collect(n = Inf) %>%
# filter(!vid %in% c(kvoti.skipaskra_siglo$vid, orri.skipaskra$vid)) %>%
# select(vid, name, cs) %>%
# separate(name, c("name", "uid"), sep = " ") %>%
# mutate(uid = str_sub(uid, 1, 2),
# source = "mmsi",
# iso2 = "IS")
# ## Join the stuff --------------------------------------------------------------
# vessels <-
# bind_rows(kvoti.skipaskra_siglo,
# orri.skipaskra,
# einarhj.VESSEL_MMSI_20190627) %>%
# arrange(vid) %>%
# mutate(cntr = ifelse(nchar(uid) == 2, uid, NA_character_)) # %>%
# # NOTE: Below gives the wrong match on cs
# # 3899 Kaldbakur flutningaskip (TFBC) TFBC NA IS 101227 1395 TFBC 261044 1
# # mutate(cs = ifelse(vid == 3899, NA_character_, cs))
# vessels %>% write_rds("data/vessels.rds")
# VS <- vessels %>% select_all(toupper)
# dbWriteTable(con, name = "VESSELS", value = vessels, overwrite = TRUE)
vessel_vessels <- function(con) {
tbl_mar(con, "ops$einarhj.VESSELS")
}
# Vessel class - get the proper one from siglo ---------------------------------
# vclass <-
# tribble(~code, ~flokkur, ~class,
# 0L, "unspecified", "unspecified",
# 33L, "FISKISKIP", "fishing",
# 35L, "SKUTTOGARI", "fishing",
# 36L, "NÓTAVEIÐI, SKUTTOGARI", "fishing",
# 37L, "HVALVEIÐISKIP", "whaler",
# 38L, "unspecified", "unspecified",
# 39L, "vöruflutningaskip", "cargo",
# 40L, "unspecified", "unspecified",
# 41L, "FARÞEGASKIP", "passenger",
# 42L, "VARÐSKIP", "coast guard",
# 43L, "SKÓLASKIP", "school ship",
# 44L, "RANNSÓKNARSKIP", "research",
# 45L, "SJÓMÆLINGASKIP", "research",
# 46L, "BJÖRGUNARSKIP", "sar",
# 48L, "OLÍUFLUTNINGASKIP", "tanker",
# 49L, "olíuskip", "tanker",
# 50L, "DRÁTTARSKIP", "tug boat",
# 51L, "unspecified", "unspecified",
# 53L, "LÓÐSSKIP", "pilot vessel",
# 54L, "VINNUSKIP", "utility vessel",
# 55L, "DÝPK. OG SANDSKIP", "hopper dredger",
# 56L, "DÝPKUNARSKIP", "dredger",
# 57L, "PRAMMI", "barge",
# 58L, "FLOTBRYGGJA", "flotbryggja",
# 59L, "FLOTKVÍ", "flotkví",
# 60L, "SEGLSKIP", "sailing vessel",
# 61L, "VÍKINGASKIP", "longboat",
# 62L, "SKEMMTISKIP", "passenger?",
# 63L, "AFSKRÁÐUR", "Decomissioned",
# 64L, "FISKI, FARÞEGASKIP", "turist fisher",
# 65L, "HAFNSÖGU, DRÁTTARSKIP", "pilot/tugboat",
# 66L, "ÞANGSKURÐARPRAMMI", "kelp vessel",
# 67L, "unspecified", "unspecified",
# 68L, "FRÍSTUNDAFISKISKIP", "pleasure vessel",
# 69L, "EFTIRLITS- OG BJÖRGUNARSKIP", "unspecified",
# 70L, "unspecified", "unspecified",
# 73L, "FARÞEGABÁTUR", "passenger",
# 74L, "FISKI, FARÞEGABÁTUR", "turist fisher",
# 75L, "SJÓKVÍA VINNUSKIP", "utility vessel",
# NA_integer_, NA_character_, NA_character_) %>%
# select_all(toupper)
# vclass %>% count(CODE) %>% filter(n > 1)
# vclass %>% count(FLOKKUR) %>% filter(n > 1)
# vclass %>% count(CLASS) %>% filter(n > 1)
# dbWriteTable(con, name = "VESSEL_CLASS", value = vclass, overwrite = TRUE)
vessel_class <- function(con) {
tbl_mar(con, "ops$einarhj.VESSEL_CLASS")
}
## Siglingamálastofnun (Icelandic Maritime Administration) - skipaflokkur (vessel class)
| path: /R/vessel.R | license: no_license | repo: vonStadarhraun/mar | language: R | is_vendor: false | is_generated: false | length_bytes: 17,054 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/na_example.R
\docType{data}
\name{na_example}
\alias{na_example}
\title{Count data with some missing values}
\format{An object of class \code{"integer"}.}
\usage{
data(na_example)
}
\description{
This dataset was randomly generated.
}
\examples{
data(na_example)
print(sum(is.na(na_example)))
}
\keyword{datasets}
| path: /man/na_example.Rd | license: no_license | repo: Elena8719/dslabs | language: R | is_vendor: false | is_generated: true | length_bytes: 392 | extension: rd |
library("dplyr")
library("devtools")
library("psychonetrics")
#' Read the data, subset of summary statistics reported by:
#' Duncan, S. C., & Duncan, T. E. (1996). A multivariate latent
#' growth curve analysis of adolescent substance use. Structural
#' Equation Modeling: A Multidisciplinary Journal, 3(4), 323-347.
#' Sample size: 321
covMat <- as.matrix(read.csv("covmat_univariate.csv"))
rownames(covMat) <- colnames(covMat)
means <- read.csv("means_univariate.csv")[,1]
#' I have implemented a wrapper called 'latentgrowth' that automates the model specification
#' This requires a Design matrix. This matrix controls the design of the latent growth model.
#' It must contain character strings with the names of your variables.
#' The rows indicate variables and the columns indicate time points.
#' E.g., here the row indicates "alcohol", the columns indicate time points
#' 1, 2, 3 and 4, and the unique values indicate the names of the variables
#' indicating alcohol at time 1, 2, 3 and 4:
vars <- matrix(colnames(covMat), nrow = 1, ncol = 4, byrow = TRUE)
rownames(vars) <- "alc"
#' If you have raw data you can ignore the arguments covs, means and nobs and use
#' the argument "data" instead. We can construct the model (note that I rescale the
#' covariance matrix here manually to the maximum likelihood estimate):
mod <- latentgrowth(vars, covs = (321-1)/321 * covMat, means = means, nobs = 321)
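#' With raw data the equivalent call would look something like the line below
#' (illustrative only; `alc_data` is a hypothetical data frame holding the four
#' columns named in `vars`):
# mod <- latentgrowth(vars, data = alc_data)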
#' Run model:
mod <- mod %>% runmodel
#' Look at fit:
mod
mod %>% fit
#' Look at parameters:
mod %>% parameters
#' Latent correlations:
cov2cor(getmatrix(mod,"sigma_zeta"))
#' We could also specify the model manually via lvm:
mod_lvm <- lvm(covs = (321-1)/321 * covMat, means = means, nobs = 321,
lambda = matrix(1,4,2))
#' Fix intercept loadings:
mod_lvm <- mod_lvm %>% fixpar("lambda",row=1:4,col=1,value = 1)
#' Fix slope loadings:
mod_lvm <- mod_lvm %>%
fixpar("lambda",row=1,col=2,value = 1) %>%
fixpar("lambda",row=2,col=2,value = 2) %>%
fixpar("lambda",row=3,col=2,value = 3) %>%
fixpar("lambda",row=4,col=2,value = 4)
#' Fix intercepts:
mod_lvm <- mod_lvm %>% fixpar("nu", row = 1:4, col = 1, value = 1)
#' Free latent means:
mod_lvm <- mod_lvm %>% freepar("nu_eta", row = 1:2, col = 1)
#' Run the model:
mod_lvm <- mod_lvm %>% runmodel
#' This model is the same!
compare(mod, mod_lvm)
| path: /Latent_growth_examples/psychonetrics/LGC_psychonetrics_univariate.R | license: no_license | repo: SachaEpskamp/SEM-code-examples | language: R | is_vendor: false | is_generated: false | length_bytes: 2,332 | extension: r |
library("dplyr")
library("devtools")
library("psychonetrics")
#' Read the data, subset of summary statistics reported by:
#' Duncan, S. C., & Duncan, T. E. (1996). A multivariate latent
#' growth curve analysis of adolescent substance use. Structural
#' Equation Modeling: A Multidisciplinary Journal, 3(4), 323-347.
#' Sample size: 321
covMat <- as.matrix(read.csv("covmat_univariate.csv"))
rownames(covMat) <- colnames(covMat)
means <- read.csv("means_univariate.csv")[,1]
#' I have implemented a wrapper called 'latentgrowth' that automates the model specification
#' This requires a Design matrix. This matrix controls the design of the latent growth model.
#' It must contain character strings with the names of your variables.
#' The rows indicate variables and the columns indicare time points.
#' E.g., here the row indicates "alcohol", the columns indicate time points
#' 1, 2, 3 and 4, and the unique values indicate the names of the variables
#' indicating alcohol at time 1, 2, 3 and 4:
vars <- matrix(colnames(covMat), nrow = 1, ncol = 4, byrow = TRUE)
rownames(vars) <- "alc"
#' If you have raw data you can ignore the argument covs, means and nobs and use
#' the argument "data" instead". We can construct the model (note I rescale the cov matrix
#' here manually to max likelihood estimate):
mod <- latentgrowth(vars, covs = (321-1)/321 * covMat, means = means, nobs = 321)
#' Run model:
mod <- mod %>% runmodel
#' Look at fit:
mod
mod %>% fit
#' Look at parameters:
mod %>% parameters
#' Latent correlations:
cov2cor(getmatrix(mod,"sigma_zeta"))
#' We could also specify the model manually via lvm:
mod_lvm <- lvm(covs = (321-1)/321 * covMat, means = means, nobs = 321,
lambda = matrix(1,4,2))
#' Fix intercept loadings:
mod_lvm <- mod_lvm %>% fixpar("lambda",row=1:4,col=1,value = 1)
#' Fix slope loadings:
mod_lvm <- mod_lvm %>%
fixpar("lambda",row=1,col=2,value = 1) %>%
fixpar("lambda",row=2,col=2,value = 2) %>%
fixpar("lambda",row=3,col=2,value = 3) %>%
fixpar("lambda",row=4,col=2,value = 4)
#' Fix intercepts:
mod_lvm <- mod_lvm %>% fixpar("nu", row = 1:4, col = 1, value = 1)
#' Free latent means:
mod_lvm <- mod_lvm %>% freepar("nu_eta", row = 1:2, col = 1)
#' Run the model:
mod_lvm <- mod_lvm %>% runmodel
#' This model is the same!
compare(mod, mod_lvm)
|
#' Space-filling parameter grids
#'
#' Experimental designs for computer experiments are used to construct parameter
#' grids that try to cover the parameter space such that any portion of the
#' space has an observed combination that is not too far from it.
#'
#' The types of designs supported here are latin hypercube designs and designs
#' that attempt to maximize the determinant of the spatial correlation matrix
#' between coordinates. Both designs use random sampling of points in the
#' parameter space.
#'
#' @inheritParams grid_random
#' @param size A single integer for the total number of parameter value
#' combinations returned.
#' @param variogram_range A numeric value greater than zero. Larger values
#' reduce the likelihood of empty regions in the parameter space.
#' @param iter An integer for the maximum number of iterations used to find
#' a good design.
#' @references Sacks, Jerome, Welch, William J., Mitchell, Toby J., and Wynn, Henry P.
#' (1989). Design and analysis of computer experiments. With comments and a
#' rejoinder by the authors. Statistical Science, 4. 10.1214/ss/1177012413.
#'
#' Santner, Thomas, Williams, Brian, and Notz, William. (2003). The Design and
#' Analysis of Computer Experiments. Springer.
#'
#' Dupuy, D., Helbert, C., and Franco, J. (2015). DiceDesign and DiceEval: Two R
#' packages for design and analysis of computer experiments. Journal of
#' Statistical Software, 65(11).
#' @examples
#' grid_max_entropy(
#' hidden_units(),
#' penalty(),
#' epochs(),
#' activation(),
#' learn_rate(c(0, 1), trans = scales::log_trans()),
#' size = 10,
#' original = FALSE)
#'
#' grid_latin_hypercube(penalty(), mixture(), original = TRUE)
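#'
#' # Both functions also accept a `parameters` object or a list of `param`
#' # objects (see the corresponding methods below), e.g.:
#' grid_latin_hypercube(list(penalty(), mixture()), size = 5)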
#' @export
grid_max_entropy <- function(x, ..., size = 3, original = TRUE,
variogram_range = 0.5, iter = 1000) {
dots <- list(...)
if (any(names(dots) == "levels")) {
rlang::warn("`levels` is not an argument to `grid_max_entropy()`. Did you mean `size`?")
}
UseMethod("grid_max_entropy")
}
#' @export
#' @rdname grid_max_entropy
grid_max_entropy.parameters <- function(x, ..., size = 3, original = TRUE,
variogram_range = 0.5, iter = 1000) {
# test for NA and finalized
# test for empty ...
params <- x$object
names(params) <- x$id
grd <- make_max_entropy_grid(!!!params, size = size, original = original,
variogram_range = variogram_range, iter = iter)
names(grd) <- x$id
grd
}
#' @export
#' @rdname grid_max_entropy
grid_max_entropy.list <- function(x, ..., size = 3, original = TRUE,
variogram_range = 0.5, iter = 1000) {
y <- parameters(x)
params <- y$object
names(params) <- y$id
grd <- make_max_entropy_grid(!!!params, size = size, original = original,
variogram_range = variogram_range, iter = iter)
names(grd) <- y$id
grd
}
#' @export
#' @rdname grid_max_entropy
grid_max_entropy.param <- function(x, ..., size = 3, original = TRUE,
variogram_range = 0.5, iter = 1000) {
y <- parameters(list(x, ...))
params <- y$object
names(params) <- y$id
grd <- make_max_entropy_grid(!!!params, size = size, original = original,
variogram_range = variogram_range, iter = iter)
names(grd) <- y$id
grd
}
#' @export
#' @rdname grid_max_entropy
grid_max_entropy.workflow <- function(x, ..., size = 3, original = TRUE,
variogram_range = 0.5, iter = 1000) {
grid_max_entropy.parameters(parameters(x), ..., size = size, original = original,
variogram_range = variogram_range, iter = iter)
}
make_max_entropy_grid <- function(..., size = 3, original = TRUE,
variogram_range = 0.5, iter = 1000) {
validate_params(...)
param_quos <- quos(...)
params <- map(param_quos, eval_tidy)
param_names <- names(param_quos)
param_labs <- map_chr(params, function(x) x$label)
names(param_labs) <- param_names
# ----------------------------------------------------------------------------
rngs <- map(params, range_get, original = FALSE)
sfd <-
DiceDesign::dmaxDesign(
n = size,
dimension = length(params),
range = variogram_range,
niter_max = iter,
seed = sample.int(10^5, 1)
)
colnames(sfd$design) <- param_names
sf_grid <- as_tibble(sfd$design)
# Get back to parameter units
sf_grid <- map2_dfc(params, sf_grid, encode_unit, direction = "backward",
original = original)
colnames(sf_grid) <- param_names
sf_grid
}
#' @export
#' @rdname grid_max_entropy
grid_latin_hypercube <- function(x, ..., size = 3, original = TRUE) {
dots <- list(...)
if (any(names(dots) == "levels")) {
rlang::warn("`levels` is not an argument to `grid_latin_hypercube()`. Did you mean `size`?")
}
UseMethod("grid_latin_hypercube")
}
#' @export
#' @rdname grid_max_entropy
grid_latin_hypercube.parameters <- function(x, ..., size = 3, original = TRUE) {
# test for NA and finalized
# test for empty ...
params <- x$object
names(params) <- x$id
grd <- make_latin_hypercube_grid(!!!params, size = size, original = original)
names(grd) <- x$id
grd
}
#' @export
#' @rdname grid_max_entropy
grid_latin_hypercube.list <- function(x, ..., size = 3, original = TRUE) {
y <- parameters(x)
params <- y$object
names(params) <- y$id
grd <- make_latin_hypercube_grid(!!!params, size = size, original = original)
names(grd) <- y$id
grd
}
#' @export
#' @rdname grid_max_entropy
grid_latin_hypercube.param <- function(x, ..., size = 3, original = TRUE) {
y <- parameters(list(x, ...))
params <- y$object
names(params) <- y$id
grd <- make_latin_hypercube_grid(!!!params, size = size, original = original)
names(grd) <- y$id
grd
}
#' @export
#' @rdname grid_max_entropy
grid_latin_hypercube.workflow <- function(x, ..., size = 3, original = TRUE) {
grid_latin_hypercube.parameters(parameters(x), ..., size = size, original = original)
}
make_latin_hypercube_grid <- function(..., size = 3, original = TRUE) {
validate_params(...)
param_quos <- quos(...)
params <- map(param_quos, eval_tidy)
param_labs <- map_chr(params, function(x) x$label)
param_names <- names(param_quos)
names(param_labs) <- param_names
# ----------------------------------------------------------------------------
sfd <-
DiceDesign::lhsDesign(
n = size,
dimension = length(params),
seed = sample.int(10^5, 1)
)
colnames(sfd$design) <- param_names
sf_grid <- as_tibble(sfd$design)
# Get back to parameter units
sf_grid <- map2_dfc(params, sf_grid, encode_unit, direction = "backward",
original = original)
colnames(sf_grid) <- param_names
sf_grid
}
| path: /R/space_filling.R | license: no_license | repo: hplieninger/dials | language: R | is_vendor: false | is_generated: false | length_bytes: 6,884 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRanges_helpers.R
\name{asTX}
\alias{asTX}
\title{Map genomic to transcript coordinates by reference}
\usage{
asTX(grl, reference)
}
\arguments{
\item{grl}{a \code{\link[GenomicRanges]{GRangesList}} of ranges within
the reference; grl must have a column called names that gives
the grouping for the result}
\item{reference}{a \code{\link[GenomicRanges]{GRangesList}} of ranges
that include, and are larger than or equal to, grl,
e.g. cds could be grl and gene could be the reference
@export}
}
\description{
Map genomic to transcript coordinates by reference
}
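\examples{
\dontrun{
## Hypothetical sketch: `cds` and `tx` are assumed GRangesList objects
## (not package data); names(cds) gives the grouping of the result.
cdsOnTx <- asTX(grl = cds, reference = tx)
}
}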
|
/man/asTX.Rd
|
permissive
|
katchyz/ORFik
|
R
| false | true | 580 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRanges_helpers.R
\name{asTX}
\alias{asTX}
\title{Map genomic to transcript coordinates by reference}
\usage{
asTX(grl, reference)
}
\arguments{
\item{grl}{a \code{\link[GenomicRanges]{GRangesList}} of ranges within
the reference; grl must have a column called names that gives
the grouping for the result}
\item{reference}{a \code{\link[GenomicRanges]{GRangesList}} of ranges
that include, and are larger than or equal to, grl,
e.g. cds could be grl and gene could be the reference
@export}
}
\description{
Map genomic to transcript coordinates by reference
}
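\examples{
\dontrun{
## Hypothetical sketch: `cds` and `tx` are assumed GRangesList objects
## (not package data); names(cds) gives the grouping of the result.
cdsOnTx <- asTX(grl = cds, reference = tx)
}
}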
|
#----------------------------------------------------------------------------------
# performs k-fold cross validation
#----------------------------------------------------------------------------------
# input : tr.v (training set where tau-category == v (v in 1:6))
# output : results.par (loss, best tau and tuning parameters) for each fold and parameter combination
#----------------------------------------------------------------------------------
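#----------------------------------------------------------------------------------
# hypothetical sketch of the objects the parent script is assumed to provide
# (names and values below are illustrative assumptions, not part of this script):
# k <- 5                                        # number of folds
# tau_candidates <- seq(0.05, 0.95, by = 0.05)  # decision thresholds to evaluate
# parameters <- expand.grid(nrounds = c(100, 200), max_depth = c(3, 6), eta = 0.1,
#                           gamma = 0, colsample_bytree = 0.8, min_child_weight = 1)
# tr.v, real_price and helper.loss() must also be defined upstream
#----------------------------------------------------------------------------------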
library(caret)
library(pROC)
library(data.table)
library(mlr)
# k-fold cross validation
k = k # is chosen in the parent script
sample.idx = sample(nrow(tr.v))
train.rnd = tr.v[sample.idx,] # randomised tr.v (same rows but random order)
folds = cut(1:nrow(train.rnd), breaks = k, labels = FALSE)
vec = list()
results.par = list()
for (n in 1:nrow(parameters)){
vec.1 = results.par
for (i in 1:k){
set.seed(1234)
# Split data into training and validation
idx.val = which(folds == i, arr.ind = TRUE)
cv.train = train.rnd[-idx.val,]
cv.train = cv.train[order(cv.train$order_item_id),]
cv.val = train.rnd[idx.val,]
cv.val = cv.val[order(cv.val$order_item_id),]
cv.train$return = as.factor(cv.train$return)
task = makeClassifTask(data = cv.train, target = "return", positive = "1")
xgb.learner = makeLearner("classif.xgboost", predict.type = "prob",
par.vals = list("verbose" = 1))
#set tuning parameters
par.vals = list("nrounds" = parameters$nrounds[n],
"max_depth" = parameters$max_depth[n],
"eta" = parameters$eta[n],
"gamma" = parameters$gamma[n],
"colsample_bytree" = parameters$colsample_bytree[n],
"min_child_weight" = parameters$min_child_weight[n])
xgb.learner = setHyperPars(xgb.learner, par.vals = par.vals, "verbose" = 0)
# train xgboost and make prediction
xgb = mlr::train(xgb.learner, task = task)
yhat = predict(xgb, newdata = cv.val)
yhat.val = yhat[2]$data$prob.1
loss = helper.loss(tau_candidates = tau_candidates,
truevals = cv.val$return,
predictedvals = yhat.val,
itemprice = real_price$item_price[cv.val$order_item_id])
res = list("loss" = max(loss),
"tau" = tau_candidates[which.max(loss)],
"parameters" = parameters[n,])
vec[[i]] = res
}
results.par = cbind(vec.1, vec)
}
rm(sample.idx, train.rnd, folds, vec)
|
/xgboost/00-1-kfold_cv.R
|
no_license
|
fractaldust/SPL_DFK
|
R
| false | false | 2,589 |
r
|
#----------------------------------------------------------------------------------
# performs k-fold cross validation
#----------------------------------------------------------------------------------
# input : tr.v (training set where tau-category == v (v in 1:6))
# output : results.par (loss, best tau and tuning parameters) for each fold and parameter combination
#----------------------------------------------------------------------------------
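#----------------------------------------------------------------------------------
# hypothetical sketch of the objects the parent script is assumed to provide
# (names and values below are illustrative assumptions, not part of this script):
# k <- 5                                        # number of folds
# tau_candidates <- seq(0.05, 0.95, by = 0.05)  # decision thresholds to evaluate
# parameters <- expand.grid(nrounds = c(100, 200), max_depth = c(3, 6), eta = 0.1,
#                           gamma = 0, colsample_bytree = 0.8, min_child_weight = 1)
# tr.v, real_price and helper.loss() must also be defined upstream
#----------------------------------------------------------------------------------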
library(caret)
library(pROC)
library(data.table)
library(mlr)
# k-fold cross validation
k = k # is chosen in the parent script
sample.idx = sample(nrow(tr.v))
train.rnd = tr.v[sample.idx,] # randomised tr.v (same rows but random order)
folds = cut(1:nrow(train.rnd), breaks = k, labels = FALSE)
vec = list()
results.par = list()
for (n in 1:nrow(parameters)){
vec.1 = results.par
for (i in 1:k){
set.seed(1234)
# Split data into training and validation
idx.val = which(folds == i, arr.ind = TRUE)
cv.train = train.rnd[-idx.val,]
cv.train = cv.train[order(cv.train$order_item_id),]
cv.val = train.rnd[idx.val,]
cv.val = cv.val[order(cv.val$order_item_id),]
cv.train$return = as.factor(cv.train$return)
task = makeClassifTask(data = cv.train, target = "return", positive = "1")
xgb.learner = makeLearner("classif.xgboost", predict.type = "prob",
par.vals = list("verbose" = 1))
#set tuning parameters
par.vals = list("nrounds" = parameters$nrounds[n],
"max_depth" = parameters$max_depth[n],
"eta" = parameters$eta[n],
"gamma" = parameters$gamma[n],
"colsample_bytree" = parameters$colsample_bytree[n],
"min_child_weight" = parameters$min_child_weight[n])
xgb.learner = setHyperPars(xgb.learner, par.vals = par.vals, "verbose" = 0)
# train xgboost and make prediction
xgb = mlr::train(xgb.learner, task = task)
yhat = predict(xgb, newdata = cv.val)
yhat.val = yhat[2]$data$prob.1
loss = helper.loss(tau_candidates = tau_candidates,
truevals = cv.val$return,
predictedvals = yhat.val,
itemprice = real_price$item_price[cv.val$order_item_id])
res = list("loss" = max(loss),
"tau" = tau_candidates[which.max(loss)],
"parameters" = parameters[n,])
vec[[i]] = res
}
results.par = cbind(vec.1, vec)
}
rm(sample.idx, train.rnd, folds, vec)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info_tables.R
\name{info_paletteer}
\alias{info_paletteer}
\title{View a table with info on color palettes}
\usage{
info_paletteer(color_pkgs = NULL)
}
\arguments{
\item{color_pkgs}{\emph{Filter to specific color packages}
\verb{vector<character>} // \emph{default:} \code{NULL} (\code{optional})
A vector of color packages that determines which sets of palettes should be
displayed in the information table. If this is \code{NULL} (the default) then
all of the discrete palettes from all of the color packages represented in
\strong{paletteer} will be displayed.}
}
\value{
An object of class \code{gt_tbl}.
}
\description{
While the \code{\link[=data_color]{data_color()}} function allows us to flexibly color data cells in
our \strong{gt} table, the harder part of this process is discovering and
choosing color palettes that are suitable for the table output. We can make
this process much easier in two ways: (1) by using the \strong{paletteer}
package, which makes a wide range of palettes from various R packages readily
available, and (2) calling the \code{info_paletteer()} function to give us an
information table that serves as a quick reference for all of the discrete
color palettes available in \strong{paletteer}.
}
\details{
The palettes displayed are organized by package and by palette name. These
values are required when obtaining a palette (as a vector of hexadecimal
colors) from the \code{paletteer::paletteer_d()} function. Once we are
familiar with the names of the color palette packages (e.g.,
\strong{RColorBrewer}, \strong{ggthemes}, \strong{wesanderson}), we can narrow down
the content of this information table by supplying a vector of such package
names to \code{color_pkgs}.
Colors from the following color packages (all supported by \strong{paletteer})
are shown by default with \code{info_paletteer()}:
\itemize{
\item \strong{awtools}, 5 palettes
\item \strong{dichromat}, 17 palettes
\item \strong{dutchmasters}, 6 palettes
\item \strong{ggpomological}, 2 palettes
\item \strong{ggsci}, 42 palettes
\item \strong{ggthemes}, 31 palettes
\item \strong{ghibli}, 27 palettes
\item \strong{grDevices}, 1 palette
\item \strong{jcolors}, 13 palettes
\item \strong{LaCroixColoR}, 21 palettes
\item \strong{NineteenEightyR}, 12 palettes
\item \strong{nord}, 16 palettes
\item \strong{ochRe}, 16 palettes
\item \strong{palettetown}, 389 palettes
\item \strong{pals}, 8 palettes
\item \strong{Polychrome}, 7 palettes
\item \strong{quickpalette}, 17 palettes
\item \strong{rcartocolor}, 34 palettes
\item \strong{RColorBrewer}, 35 palettes
\item \strong{Redmonder}, 41 palettes
\item \strong{wesanderson}, 19 palettes
\item \strong{yarrr}, 21 palettes
}
}
\section{Examples}{
Get a table of info on just the \code{"ggthemes"} color palette (easily accessible
from the \strong{paletteer} package).
\if{html}{\out{<div class="sourceCode r">}}\preformatted{info_paletteer(color_pkgs = "ggthemes")
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_info_paletteer_1.png" alt="This image of a table was generated from the first code example in the `info_paletteer()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
11-5
}
\section{Function Introduced}{
\code{v0.2.0.5} (March 31, 2020)
}
\seealso{
Other information functions:
\code{\link{info_currencies}()},
\code{\link{info_date_style}()},
\code{\link{info_flags}()},
\code{\link{info_google_fonts}()},
\code{\link{info_icons}()},
\code{\link{info_locales}()},
\code{\link{info_time_style}()}
}
\concept{information functions}
|
/man/info_paletteer.Rd
|
permissive
|
rstudio/gt
|
R
| false | true | 3,674 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/info_tables.R
\name{info_paletteer}
\alias{info_paletteer}
\title{View a table with info on color palettes}
\usage{
info_paletteer(color_pkgs = NULL)
}
\arguments{
\item{color_pkgs}{\emph{Filter to specific color packages}
\verb{vector<character>} // \emph{default:} \code{NULL} (\code{optional})
A vector of color packages that determines which sets of palettes should be
displayed in the information table. If this is \code{NULL} (the default) then
all of the discrete palettes from all of the color packages represented in
\strong{paletteer} will be displayed.}
}
\value{
An object of class \code{gt_tbl}.
}
\description{
While the \code{\link[=data_color]{data_color()}} function allows us to flexibly color data cells in
our \strong{gt} table, the harder part of this process is discovering and
choosing color palettes that are suitable for the table output. We can make
this process much easier in two ways: (1) by using the \strong{paletteer}
package, which makes a wide range of palettes from various R packages readily
available, and (2) calling the \code{info_paletteer()} function to give us an
information table that serves as a quick reference for all of the discrete
color palettes available in \strong{paletteer}.
}
\details{
The palettes displayed are organized by package and by palette name. These
values are required when obtaining a palette (as a vector of hexadecimal
colors) from the \code{paletteer::paletteer_d()} function. Once we are
familiar with the names of the color palette packages (e.g.,
\strong{RColorBrewer}, \strong{ggthemes}, \strong{wesanderson}), we can narrow down
the content of this information table by supplying a vector of such package
names to \code{color_pkgs}.
Colors from the following color packages (all supported by \strong{paletteer})
are shown by default with \code{info_paletteer()}:
\itemize{
\item \strong{awtools}, 5 palettes
\item \strong{dichromat}, 17 palettes
\item \strong{dutchmasters}, 6 palettes
\item \strong{ggpomological}, 2 palettes
\item \strong{ggsci}, 42 palettes
\item \strong{ggthemes}, 31 palettes
\item \strong{ghibli}, 27 palettes
\item \strong{grDevices}, 1 palette
\item \strong{jcolors}, 13 palettes
\item \strong{LaCroixColoR}, 21 palettes
\item \strong{NineteenEightyR}, 12 palettes
\item \strong{nord}, 16 palettes
\item \strong{ochRe}, 16 palettes
\item \strong{palettetown}, 389 palettes
\item \strong{pals}, 8 palettes
\item \strong{Polychrome}, 7 palettes
\item \strong{quickpalette}, 17 palettes
\item \strong{rcartocolor}, 34 palettes
\item \strong{RColorBrewer}, 35 palettes
\item \strong{Redmonder}, 41 palettes
\item \strong{wesanderson}, 19 palettes
\item \strong{yarrr}, 21 palettes
}
}
\section{Examples}{
Get a table of info on just the \code{"ggthemes"} color palette (easily accessible
from the \strong{paletteer} package).
\if{html}{\out{<div class="sourceCode r">}}\preformatted{info_paletteer(color_pkgs = "ggthemes")
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_info_paletteer_1.png" alt="This image of a table was generated from the first code example in the `info_paletteer()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
11-5
}
\section{Function Introduced}{
\code{v0.2.0.5} (March 31, 2020)
}
\seealso{
Other information functions:
\code{\link{info_currencies}()},
\code{\link{info_date_style}()},
\code{\link{info_flags}()},
\code{\link{info_google_fonts}()},
\code{\link{info_icons}()},
\code{\link{info_locales}()},
\code{\link{info_time_style}()}
}
\concept{information functions}
|
#' @title Rainfall thresholds for landslide initiation (frequentist method)
#'
#' @description This function estimates rainfall thresholds for possible landslide initiation from
#' rain event pairs of cumulated event rainfall (or intensity) and duration, following a frequentist
#' approach: a model is fitted to the (optionally log10-transformed) event data by least squares ("LS")
#' or non-linear least squares ("NLS"), the probability density function of the fit residuals is
#' modelled with a Gaussian kernel, and the threshold is the fitted curve shifted to the intercept
#' corresponding to the selected exceedance probability level. Bootstrapping optionally provides minimum and maximum estimates.
#'
#' @param Re vector containing the rain event variable, e.g. cumulated event rainfall (in mm) or intensity (mm/h)
#' @param D vector containing the duration of the rainfall events
#' @param method method to compute the threshold. Either "LS" for the least squares method, or "NLS" for the non-linear least squares method. Default: "NLS"
#' @param prob.threshold exceedance probability level. Default: 0.05 (5 [percent] )
#' @param log10.transform log-transformation of the input vectors Re and D. Default: FALSE
#' @param bootstrapping If TRUE bootstrapping is performed. Default: TRUE
#' @param R the number of bootstrap replicates, see boot::boot() for more information. Default: 1000
#' @param seed replicable bootstrapping. Default: 123
#' @param use.integralError for estimating x of prob.threshold, the entire function is integrated first to estimate the bias: (1 - INTEGRAL)/2. Default: TRUE
#' @param ... more options passed to the boot::boot() function, i.e. parallel for parallel processing
#'
#' @return list containing the threshold intercept(s) alpha (median and, when bootstrapping is enabled, minimum and maximum estimates), the corresponding slope(s) gamma, and the fitted threshold function.
#'
#'
#' @note
#' \itemize{
#' \item Brunetti, M. T., Peruccacci, S., Rossi, M., Luciani, S., Valigi, D., & Guzzetti, F. (2010). Rainfall thresholds for the possible occurrence of landslides in Italy. Natural Hazards and Earth System Sciences, 10(3), 447.
#' \item Peruccacci, S., Brunetti, M. T., Luciani, S., Vennari, C., & Guzzetti, F. (2012). Lithological and seasonal control on rainfall thresholds for the possible initiation of landslides in central Italy. Geomorphology, 139, 79-90.
#' \item Rossi, M., Luciani, S., Valigi, D., Kirschbaum, D., Brunetti, M. T., Peruccacci, S., & Guzzetti, F. (2017). Statistical approaches for the definition of landslide rainfall thresholds and their uncertainty using rain gauge and satellite data. Geomorphology, 285, 16-27.
#' \item Guzzetti, F., Peruccacci, S., Rossi, M., & Stark, C. P. (2007). Rainfall thresholds for the initiation of landslides in central and southern Europe. Meteorology and atmospheric physics, 98(3-4), 239-267.
#' }
#'
#' @keywords rainfall thresholds, rainfall event, landslide, automatic approach
#'
#'
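#' @examples
#' \dontrun{
#' ## Minimal synthetic sketch (made-up rain event data, for illustration only):
#' set.seed(1)
#' D <- runif(50, min = 1, max = 120)            # event duration (h)
#' Re <- 2 * D^0.45 * exp(rnorm(50, sd = 0.3))   # cumulated event rainfall (mm)
#' getRainThreshFreqM(Re = Re, D = D, method = "NLS", prob.threshold = 0.05,
#'                    log10.transform = TRUE, bootstrapping = TRUE, R = 100)
#' }
#'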
#' @export
getRainThreshFreqM <- function(Re, D, method = "NLS", prob.threshold = 0.05, log10.transform = FALSE,
bootstrapping = TRUE, R = 1000, seed = 123, use.integralError = TRUE, ...){
## log transformation of input vectors
if(log10.transform)
{
Re <- log(x = Re, base = 10) # cumulative precipitation of rain event
D <- log(x = D, base = 10) # duration of rain event
}
if(method == "LS"){
## Least Squares Method (LS)
# ReD.LS <- stats::lsfit(y = Re, x = D) # delivers same result as lm()
if(bootstrapping){
# ... create bootstrap function
boot.LS <- function(formula, data, indices){
# subset data
data <- data[indices,]
# boot model
LM.boot <- stats::lm(formula, data)
return(coef(LM.boot))
}
# set seed for replication
set.seed(seed)
# bootstrapping with R replications
ReD.LS.boot <- boot::boot(data = data.frame(Re = Re, D = D), statistic = boot.LS,
R = R, formula = Re ~ D)
# boot::boot.ci(ReD.LS.boot, conf= c(.05, 0.5, .95))
# ... according to Rossi et al. (2017: 20)
# compute quantiles: 5th for min, 50 as median for best fit, 95th for max
ReD.LS.boot.conf <- apply(X = ReD.LS.boot$t, MARGIN = 2, FUN = quantile, probs = c(.05, .50, .95), na.rm=TRUE)
# ReD.LS.boot.stat <- apply(X = ReD.LS.boot$t, MARGIN = 2, FUN = median, na.rm=TRUE)
# ... intercept alpha from model
# T50 <- ReD.LS.boot.stat[1]
T5 <- ReD.LS.boot.conf[1, 1]
T50 <- ReD.LS.boot.conf[2, 1]
T95 <- ReD.LS.boot.conf[3, 1]
# ... slope gamma from model
# gamma <- ReD.LS.boot.stat[2]
gamma <- c(ReD.LS.boot.conf[2, 2], ReD.LS.boot.conf[1, 2], ReD.LS.boot.conf[3, 2])
names(gamma) <- c("gamma_Tx_median", "gamma_Tx_min", "gamma_Tx_max")
# Getting residuals of fit
# ... defining linear function
linFunct <- function(x, intercept, gamma){return((intercept + gamma * x))}
## ... fitting median model and get residuals and error estimates
# ReD.Model.fit <- linFunct(x = D, intercept = ReD.LS.boot.stat[1], gamma = ReD.LS.boot.stat[2])
ReD.Model.fit <- linFunct(x = D, intercept = ReD.LS.boot.conf[2, 1], gamma = ReD.LS.boot.conf[2, 2])
ReD.Model.Res <- Re - ReD.Model.fit
# ... computing error estimates using http://pages.mtu.edu/~fmorriso/cm3215/UncertaintySlopeInterceptOfLeastSquaresFit.pdf
# n <- length(D)
# SSE <- sum(ReD.Model.Res^2, na.rm = TRUE) # Error sum of squares
# SST <- sum((Re - mean(Re))^2, na.rm = TRUE) # Total sum of squares
# SSR <- SST - SSE # Regression sum of squares
# Sxx <- sum((D - mean(D))^2, na.rm = TRUE)
# Sxy <- sum((D - mean(D)) * (Re - mean(Re)), na.rm = TRUE)
# SDyx <- sqrt(SSE/(n-2)) # Standard Deviation of y(x)
# SDm <- sqrt(SDyx^2/Sxx) # Standard Deviation of Slope
# SDb <- sqrt(SDyx^2 * (1/n + ((mean(D, na.rm = TRUE)^2)/Sxx))) # Standard Deviation of Intercept
# ... get 5th and 95th percentile of gamma (slope) and intercept
# ... gamma (slope)
# ReD.Model.gamma.quant <- ReD.LS.boot.stat[2] + qt(c((.05/2), .95+(0.05/2)), df = (n-2)) * SDm
# ... intercept
# ReD.Model.interc.quant <- ReD.LS.boot.stat[1] + qt(c((.05/2), .95+(0.05/2)), df = (n-2)) * SDb
# T5 <- ReD.Model.interc.quant[1]
# T95 <- ReD.Model.interc.quant[2]
## ... fitting min and max model and get residuals
# min
ReD.Model.fit.min <- linFunct(x = D, intercept = ReD.LS.boot.conf[1, 1], gamma = ReD.LS.boot.conf[1, 2])
# ReD.Model.fit.min <- linFunct(x = D, intercept = ReD.Model.interc.quant[1], gamma = ReD.Model.gamma.quant[1])
ReD.Model.Res.min <- Re - ReD.Model.fit.min
# max
ReD.Model.fit.max <- linFunct(x = D, intercept = ReD.LS.boot.conf[3, 1], gamma = ReD.LS.boot.conf[3, 2])
# ReD.Model.fit.max <- linFunct(x = D, intercept = ReD.Model.interc.quant[2], gamma = ReD.Model.gamma.quant[2])
ReD.Model.Res.max <- Re - ReD.Model.fit.max
# plot(Re ~ D)
# abline(lm(Re ~ D))
# abline(a = ReD.LS.boot.stat[1], b = ReD.LS.boot.stat[2], col = "red")
# abline(a = ReD.LS.boot.stat[1] - ReD.Model.interc.quant[1], b = ReD.LS.boot.stat[2], col = "orange")
} else {
ReD.Model <- stats::lm(Re ~ D)
ReD.Model.fit <- ReD.Model$fitted.values
ReD.Model.Res <- ReD.Model$residuals
# ReD.LS <- stats::lsfit(y = Re, x = D)
# intercept alpha from model
T50 <- coef(ReD.Model)[[1]]
# slope gamma from model
gamma <- coef(ReD.Model)[[2]]
names(gamma) <- "gamma"
}# end of if bootstrapping
} else if(method == "NLS") { # end of if method == "LS"
## Nonlinear Least Squares Method (NLS)
# ... optimizer function
opt.NLS <- function(par, x, y)
{
t <- par[1]
alpha <- par[2]
gamma <- par[3]
y.fit <- t + alpha * (x^gamma)
sum((y - y.fit)^2)
}
# ... optimize parameter
par.opt.NLS <- optim(x = D, y = Re, par = c(mean(D), sd(D), 1),
fn = opt.NLS, method = "Nelder-Mead", #, "BFGS",
control = list(pgtol = 1e-9, maxit = 10000))$par
if(bootstrapping)
{
# ... create bootstrap function
boot.NLS <- function(formula, data, start, control, indices)
{
# subset data
data <- data[indices,]
# boot model
NLS.boot <- minpack.lm::nlsLM(formula = formula, data = data,
start = start, control = control)
return(coef(NLS.boot))
} # end of boot.NLS
# set seed for replication
set.seed(seed)
# bootstrapping with R replications
ReD.NLS.boot <- boot::boot(data = data.frame(y = Re, x = D), statistic = boot.NLS,
R = R, formula = y ~ t + a * (x^gamma), control = list(maxiter = 500),
start = list(t = par.opt.NLS[1], a = par.opt.NLS[2], gamma = par.opt.NLS[3]))
# ... according to Rossi et al. (2017: 20)
# compute quantiles: 5th for min, 50 as median for best fit, 95th for max
ReD.NLS.boot.conf <- apply(X = ReD.NLS.boot$t, MARGIN = 2, FUN = quantile, probs = c(.05, .50, .95), na.rm=TRUE)
T5 <- ReD.NLS.boot.conf[1, 1]
T50 <- ReD.NLS.boot.conf[2, 1]
T95 <- ReD.NLS.boot.conf[3, 1]
gamma <- c(ReD.NLS.boot.conf[1, 2], ReD.NLS.boot.conf[2, 2], ReD.NLS.boot.conf[3, 2])
names(gamma) <- c("gamma_Tx_min", "gamma_Tx_median", "gamma_Tx_max")
# Getting residuals of fit
# ... defining linear function
powerLawFunct <- function(x, t, a, gamma){return((t + a * (x^gamma)))}
## ... fitting median model and get residuals
ReD.Model.fit <- powerLawFunct(x = D, t = ReD.NLS.boot.conf[2, 1], a = ReD.NLS.boot.conf[2, 2], gamma = ReD.NLS.boot.conf[2, 3])
ReD.Model.Res <- Re - ReD.Model.fit
## ... fitting min and max model and get residuals
# min
ReD.Model.fit.min <- powerLawFunct(x = D, t = ReD.NLS.boot.conf[1, 1], a = ReD.NLS.boot.conf[1, 2], gamma = ReD.NLS.boot.conf[1, 3])
ReD.Model.Res.min <- Re - ReD.Model.fit.min
# max
ReD.Model.fit.max <- powerLawFunct(x = D, t = ReD.NLS.boot.conf[3, 1], a = ReD.NLS.boot.conf[3, 2], gamma = ReD.NLS.boot.conf[3, 3])
ReD.Model.Res.max <- Re - ReD.Model.fit.max
} else{
# get model (the Levenberg-Marquardt nlsLM() fit would overwrite a plain nls() fit, so only the former is kept)
ReD.NLS <- minpack.lm::nlsLM(formula = y ~ t + a * (x^gamma), data = data.frame(x = D, y = Re),
start = list(t = par.opt.NLS[1], a = par.opt.NLS[2], gamma = par.opt.NLS[3]),
control = minpack.lm::nls.lm.control(maxiter = 500))
ReD.NLS.fit <- predict(ReD.NLS)
# fitted values and residuals are needed later for the residual PDF
ReD.Model.fit <- ReD.NLS.fit
ReD.Model.Res <- Re - ReD.Model.fit
# ReD.Model.fit <- powerLawFunct(x = D, t = coef(ReD.NLS)[1], a = coef(ReD.NLS)[2], gamma = coef(ReD.NLS)[3])
}
} else { # end of if method == "NLS"
stop('Selected method is not supported. Please use "NLS" or "LS"!')
} # end of if method
# PLOT NLS
# plot(y = Re, x = D)
# a<-coef(ReD.NLS)[1]
# b<-coef(ReD.NLS)[2]
# k<-coef(ReD.NLS)[3]
# x <- seq(from = 0, to = 5, by = 0.001)
# lines(x = x, a+b*x^k,col='red')
# ggplot2::ggplot(data.frame(D = D, ReD.NLS.fit = ReD.NLS.fit),
# ggplot2::aes(D,ReD.NLS.fit)) + ggplot2::geom_point() + ggplot2::geom_smooth()
# IS THAT NECESSARY?
## standardize residuals
# ReD.Model.SDRes <- ReD.Model$residuals
# ReD.Model.SDRes <- scale(ReD.Model$residuals)[, 1]
## check normality of residuals (limit: large sample sizes easily produce significant results from small deviations from normality)
# if(shapiro.test(ReD.Model.Res)$p.value <= 0.005){warning("Residuals are highly significantly not normal distributed")}
# if(bootstrapping && shapiro.test(ReD.Model.Res.min)$p.value <= 0.005){warning("Residuals are highly significantly not normal distributed")}
# if(bootstrapping &&shapiro.test(ReD.Model.Res.max)$p.value <= 0.005){warning("Residuals are highly significantly not normal distributed")}
## Probability Density Function (PDF) via Kernel Density Estimation (KDE) with a Gaussian kernel
ReD.PDF <- stats::density(x = ReD.Model.Res, kernel = "gaussian")
ReD.PDF.df <- data.frame(x = ReD.PDF$x, y = ReD.PDF$y, type = "PDF")
if(bootstrapping)
{
# min
ReD.PDF.min <- stats::density(x = ReD.Model.Res.min, kernel = "gaussian")
ReD.PDF.df.min <- data.frame(x = ReD.PDF.min$x, y = ReD.PDF.min$y, type = "PDF")
# plot(ReD.PDF.min)
# max
ReD.PDF.max <- stats::density(x = ReD.Model.Res.max, kernel = "gaussian")
ReD.PDF.df.max <- data.frame(x = ReD.PDF.max$x, y = ReD.PDF.max$y, type = "PDF")
# plot(ReD.PDF.max)
}
## Modelling PDF using a Gaussian Function
GaussianFunction <- function(ReD.PDF)
{
# ... using Nonlinear regression model (NLS)
# # https://stats.stackexchange.com/questions/220109/fit-a-gaussian-to-data-with-r-with-optim-and-nls
# ... optimizer function
opt.Gauss <- function(par, x, y)
{
m <- par[1]
sd <- par[2]
k <- par[3]
rhat <- k * exp(-0.5 * ((x - m)/sd)^2)
sum((y - rhat)^2)
}
# ... optimize parameter
par.opt.Gauss <- stats::optim(x = ReD.PDF$x, y = ReD.PDF$y, par = c(mean(ReD.PDF$x), sd(ReD.PDF$x), 1), fn = opt.Gauss,
method = "BFGS")
# ... compute NLS using a Gaussian function and fitted values
ReD.PDF.NLS <- stats::nls(y ~ k*exp(-1/2*(x-mu)^2/sigma^2),
start = c(mu = par.opt.Gauss$par[1], sigma = par.opt.Gauss$par[2], k = par.opt.Gauss$par[3]),
data = data.frame(x = ReD.PDF$x, y = ReD.PDF$y), control = list(maxiter = 10000, reltol=1e-9))
# ... return model and fitted values
return(list(stats::predict(ReD.PDF.NLS), ReD.PDF.NLS))
} # end of Gaussian Function
ReD.PDF.result <- GaussianFunction(ReD.PDF = ReD.PDF)
ReD.PDF.fit <- ReD.PDF.result[[1]]
ReD.PDF.NLS <- ReD.PDF.result[[2]]
if(bootstrapping)
{
ReD.PDF.result.min <- GaussianFunction(ReD.PDF = ReD.PDF.min)
ReD.PDF.fit.min <- ReD.PDF.result.min[[1]]
ReD.PDF.NLS.min <- ReD.PDF.result.min[[2]]
ReD.PDF.result.max <- GaussianFunction(ReD.PDF = ReD.PDF.max)
ReD.PDF.fit.max <- ReD.PDF.result.max[[1]]
ReD.PDF.NLS.max <- ReD.PDF.result.max[[2]]
}
# plotting density curves
# ReD.PDF.fit.df <- data.frame(x = ReD.PDF$x, y = ReD.PDF.fit, type = "Gaussian fit")
# ReD.PDF.df <- rbind(ReD.PDF.df, ReD.PDF.fit.df)
#
# ggExample <- ggplot2::ggplot(data = ReD.PDF.df, ggplot2::aes(x = x, y = y, color = type, linetype = type)) +
# ggplot2::geom_line(size = 0.9) +
# ggplot2::scale_color_manual(values = c("royalblue4", "black")) +
# ggplot2::scale_linetype_manual(values = c(2, 1))
#
# ggExample
## Extracting the x percent exceedance probability
# https://stackoverflow.com/questions/44313022/how-to-compute-confidence-interval-of-the-fitted-value-via-nls
# https://stackoverflow.com/questions/37455512/predict-x-values-from-simple-fitting-and-annoting-it-in-the-plot?rq=1
# https://stackoverflow.com/questions/43322568/predict-x-value-from-y-value-with-a-fitted-model
# integrate.xy is not exactly 1 for the whole function. Therefore, the error is estimated first and divided by 2 for both sides
if(use.integralError)
{
integral.error <- (1 - sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, a = min(ReD.PDF$x), b = max(ReD.PDF$x)))/2
} else{
integral.error <- 0
}
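# Conceptual sketch (hypothetical numbers): the helper below searches for the upper
# integration bound b such that the area under the fitted residual PDF up to b equals
# the requested probability, e.g. for a 5 percent exceedance level
# sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, b = b) would be about 0.05.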
integrFunctToThresh <- function(ReD.PDF, ReD.PDF.fit, thresh, lower, upper, par, integral.error)
{
# ... optimizer function for integration
opt.Integral <- function(par, x, y, thresh)
{
b <- par[1]
abs(sfsmisc::integrate.xy(x = x, fx = y, b = b) - thresh)
}
# ... find x parameter to area threshold of integral
par.opt <- optim(thresh = (thresh - integral.error), x = ReD.PDF$x, y = ReD.PDF.fit,
lower = lower, upper = upper,
par = par, # start value
fn = opt.Integral, method = "L-BFGS-B",
control = list(pgtol = 1e-9, maxit = 10000, ndeps = 1e-9))$par
# sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, b = par.opt.Mean)
# predict(object = ReD.PDF.NLS, newdata = data.frame(x = par.opt.Mean))
# return the x value that fulfills the threshold
return(par.opt)
}
fit.Gauss.50 <- integrFunctToThresh(ReD.PDF = ReD.PDF, ReD.PDF.fit = ReD.PDF.fit, thresh = 0.5, integral.error = integral.error,
lower = (mean(ReD.PDF$x) - sd(ReD.PDF$x)), upper = (mean(ReD.PDF$x) + sd(ReD.PDF$x)), par = mean(ReD.PDF$x))
fit.Gauss.x <- integrFunctToThresh(ReD.PDF = ReD.PDF, ReD.PDF.fit = ReD.PDF.fit, thresh = prob.threshold, integral.error = integral.error,
lower = (min(ReD.PDF$x) + 0.0001), upper = mean(ReD.PDF$x), par = mean(c(min(ReD.PDF$x), mean(ReD.PDF$x)))) # if b becomes the min value, then in sfsmisc::integrate.xy a = b fails!
if(bootstrapping)
{
# min
fit.Gauss.50.min <- integrFunctToThresh(ReD.PDF = ReD.PDF.min, ReD.PDF.fit = ReD.PDF.fit.min, thresh = 0.5, integral.error = integral.error,
lower = (mean(ReD.PDF.min$x) - sd(ReD.PDF.min$x)), upper = (mean(ReD.PDF.min$x) + sd(ReD.PDF.min$x)), par = mean(ReD.PDF.min$x))
fit.Gauss.x.min <- integrFunctToThresh(ReD.PDF = ReD.PDF.min, ReD.PDF.fit = ReD.PDF.fit.min, thresh = prob.threshold, integral.error = integral.error,
lower = (min(ReD.PDF.min$x) + 0.0001), upper = mean(ReD.PDF.min$x), par = mean(c(min(ReD.PDF.min$x), mean(ReD.PDF.min$x))))
# max
fit.Gauss.50.max <- integrFunctToThresh(ReD.PDF = ReD.PDF.max, ReD.PDF.fit = ReD.PDF.fit.max, thresh = 0.5, integral.error = integral.error,
lower = (mean(ReD.PDF.max$x) - sd(ReD.PDF.max$x)), upper = (mean(ReD.PDF.max$x) + sd(ReD.PDF.max$x)), par = mean(ReD.PDF.max$x))
fit.Gauss.x.max <- integrFunctToThresh(ReD.PDF = ReD.PDF.max, ReD.PDF.fit = ReD.PDF.fit.max, thresh = prob.threshold, integral.error = integral.error,
lower = (min(ReD.PDF.max$x) + 0.0001), upper = mean(ReD.PDF.max$x), par = mean(c(min(ReD.PDF.max$x), mean(ReD.PDF.max$x))))
}
# par.opt.Mean <- optim(thresh = (0.5 - integral.error), x = ReD.PDF$x, y = ReD.PDF.fit,
# lower = (mean(ReD.PDF$x) - sd(ReD.PDF$x)),
# upper = (mean(ReD.PDF$x) + sd(ReD.PDF$x)),
# par = mean(ReD.PDF$x), # start value
# fn = opt.Integral, method = "L-BFGS-B",
# control = list(pgtol = 1e-9, maxit = 10000, ndeps = 1e-9))$par
# ... ... (2) integration for probability level threshold
# fit.Gauss.x <- optim(thresh = (prob.threshold - integral.error), x = ReD.PDF$x, y = ReD.PDF.fit,
# lower = (min(ReD.PDF$x) + 0.0000001), # if b becomes the min value, then in sfsmisc::integrate.xy a = b fails!
# upper = mean(ReD.PDF$x),
# par = mean(min(ReD.PDF$x), (mean(ReD.PDF$x) - sd(ReD.PDF$x))), # start value
# fn = opt.Integral, method = "L-BFGS-B",
# control = list(pgtol = 1e-9, maxit = 10000, ndeps = 1e-9))$par
# sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, a = min(ReD.PDF$x), b = par.opt.probLevel)
# ggExample + ggplot2::geom_segment(inherit.aes = FALSE, color = "red", size = 0.8,
# ggplot2::aes(x = fit.Gauss.x , xend = fit.Gauss.x,
# y = 0, yend = predict(object = ReD.PDF.NLS, newdata = data.frame(x = fit.Gauss.x))))
## get intercept of the probability level
# Tx is the curve parallel to the best-fit line T50 (same slope gamma), with intercept alpha.x = alpha.50 - (fit.Gauss.50 - fit.Gauss.x)
Tx <- T50 - (fit.Gauss.50 - fit.Gauss.x)
# alpha <- exp(Tx) # re-transform log alpha to alpha ?
if(bootstrapping)
{
Tx.min <- T5 - (fit.Gauss.50.min - fit.Gauss.x.min)
Tx.max <- T95 - (fit.Gauss.50.max - fit.Gauss.x.max)
}
### LS
## create data frame for ggplot2
# df.fit.LS <- data.frame(x = D, y = Re, y_fit = ReD.Model.fit,
# y_fit_min = ReD.Model.fit.min, y_fit_max = ReD.Model.fit.max,
# y_fit_Lx = linFunct(x = D, intercept = Tx, gamma = gamma),
# y_fit_Lx_min = linFunct(x = D, intercept = Tx.min, gamma = ReD.Model.gamma.quant[1]),
# y_fit_Lx_max = linFunct(x = D, intercept = Tx.max, gamma = ReD.Model.gamma.quant[2]))
#
#
# ggplot2::ggplot(data = df.fit.LS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# # ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit)) + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "green") + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue") + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red") + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# # ... plot best fit with 5-95th quantile
# ggplot2::ggplot(data = df.fit.LS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_min), col = "blue") + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit), col = "black") +
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_max), col = "red") +
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ### NLS
# ## create data frame for ggplot2
# df.fit.NLS <- data.frame(x = D, y = Re, y_fit = ReD.Model.fit,
# y_fit_min = ReD.Model.fit.min, y_fit_max = ReD.Model.fit.max,
# y_fit_Lx = powerLawFunct(x = D, t = Tx, a = ReD.NLS.boot.conf[2, 2], gamma = ReD.NLS.boot.conf[2, 3]),
# y_fit_Lx_min = powerLawFunct(x = D, t = Tx.min, a = ReD.NLS.boot.conf[1, 2], gamma = ReD.NLS.boot.conf[1, 3]),
# y_fit_Lx_max = powerLawFunct(x = D, t = Tx.max, a = ReD.NLS.boot.conf[3, 2], gamma = ReD.NLS.boot.conf[3, 3]))
#
#
# ggplot2::ggplot(data = df.fit.NLS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_min), col = "orange", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_max), col = "yellow", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit), col = "black", method = "loess", size = 0.7)+ # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "green", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ggplot2::ggplot(data = df.fit.NLS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit), col = "black", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_min), col = "orange", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_max), col = "yellow", method = "loess", size = 0.7) +
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ggplot2::ggplot(data = df.fit.NLS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "green", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ## combine both LS and NSL
# ggplot2::ggplot(data = df.fit.NLS) +
# # NLS
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "yellow", method = "loess", size = 0.8) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# # LS
# ggplot2::geom_line(data = df.fit.LS, mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "orange") + # add fit of regression line (median fit)
# ggplot2::geom_line(data = df.fit.LS, mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "red") + # add fit of regression line (median fit)
# ggplot2::geom_line(data = df.fit.LS, mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red") + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
if(log10.transform)
{
alpha.Tx <- 10^Tx
names(alpha.Tx) <- "alpha_Tx"
alpha.res <- alpha.Tx
if(bootstrapping)
{
alpha.Tx.min <- 10^Tx.min
names(alpha.Tx.min) <- "alpha_Tx_min"
alpha.Tx.max <- 10^Tx.max
names(alpha.Tx.max) <- "alpha_Tx_max"
alpha.res <- c(alpha.res, alpha.Tx.min, alpha.Tx.max)
}
} else {# end of log10 alpha check
alpha.Tx <- Tx
names(alpha.Tx) <- "alpha_Tx"
alpha.res <- alpha.Tx
if(bootstrapping)
{
alpha.Tx.min <- Tx.min
names(alpha.Tx.min) <- "alpha_Tx_min"
alpha.Tx.max <- Tx.max
names(alpha.Tx.max) <- "alpha_Tx_max"
alpha.res <- c(alpha.res, alpha.Tx.min, alpha.Tx.max)
}
}
if(method == "LS")
{
funct <- linFunct
}
if(method == "NLS")
{
funct <- powerLawFunct
}
return(list(alpha.res, gamma, funct))
} # end of function getRainThreshFreqM
|
/R/getRainThreshFreqM.R
|
no_license
|
yxw027/Lslide
|
R
| false | false | 27,377 |
r
|
#' @title Rainfall thresholds for landslide initiation (frequentist method)
#'
#' @description This function estimates rainfall thresholds for possible landslide initiation from
#' rain event pairs of cumulated event rainfall (or intensity) and duration, following a frequentist
#' approach: a model is fitted to the (optionally log10-transformed) event data by least squares ("LS")
#' or non-linear least squares ("NLS"), the probability density function of the fit residuals is
#' modelled with a Gaussian kernel, and the threshold is the fitted curve shifted to the intercept
#' corresponding to the selected exceedance probability level. Bootstrapping optionally provides minimum and maximum estimates.
#'
#' @param Re vector containing the rain event variable, e.g. cumulated event rainfall (in mm) or intensity (mm/h)
#' @param D vector containing the duration of the rainfall events
#' @param method method to compute the threshold. Either "LS" for the least squares method, or "NLS" for the non-linear least squares method. Default: "NLS"
#' @param prob.threshold exceedance probability level. Default: 0.05 (5 [percent] )
#' @param log10.transform log-transformation of the input vectors Re and D. Default: FALSE
#' @param bootstrapping If TRUE bootstrapping is performed. Default: TRUE
#' @param R the number of bootstrap replicates, see boot::boot() for more information. Default: 1000
#' @param seed replicable bootstrapping. Default: 123
#' @param use.integralError for estimating x of prob.threshold, the entire function is integrated first to estimate the bias: (1 - INTEGRAL)/2. Default: TRUE
#' @param ... more options passed to the boot::boot() function, i.e. parallel for parallel processing
#'
#' @return list containing the threshold intercept(s) alpha (median and, when bootstrapping is enabled, minimum and maximum estimates), the corresponding slope(s) gamma, and the fitted threshold function.
#'
#'
#' @note
#' \itemize{
#' \item Brunetti, M. T., Peruccacci, S., Rossi, M., Luciani, S., Valigi, D., & Guzzetti, F. (2010). Rainfall thresholds for the possible occurrence of landslides in Italy. Natural Hazards and Earth System Sciences, 10(3), 447.
#' \item Peruccacci, S., Brunetti, M. T., Luciani, S., Vennari, C., & Guzzetti, F. (2012). Lithological and seasonal control on rainfall thresholds for the possible initiation of landslides in central Italy. Geomorphology, 139, 79-90.
#' \item Rossi, M., Luciani, S., Valigi, D., Kirschbaum, D., Brunetti, M. T., Peruccacci, S., & Guzzetti, F. (2017). Statistical approaches for the definition of landslide rainfall thresholds and their uncertainty using rain gauge and satellite data. Geomorphology, 285, 16-27.
#' \item Guzzetti, F., Peruccacci, S., Rossi, M., & Stark, C. P. (2007). Rainfall thresholds for the initiation of landslides in central and southern Europe. Meteorology and atmospheric physics, 98(3-4), 239-267.
#' }
#'
#' @keywords rainfall thresholds, rainfall event, landslide, automatic approach
#'
#'
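#' @examples
#' \dontrun{
#' ## Minimal synthetic sketch (made-up rain event data, for illustration only):
#' set.seed(1)
#' D <- runif(50, min = 1, max = 120)            # event duration (h)
#' Re <- 2 * D^0.45 * exp(rnorm(50, sd = 0.3))   # cumulated event rainfall (mm)
#' getRainThreshFreqM(Re = Re, D = D, method = "NLS", prob.threshold = 0.05,
#'                    log10.transform = TRUE, bootstrapping = TRUE, R = 100)
#' }
#'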
#' @export
getRainThreshFreqM <- function(Re, D, method = "NLS", prob.threshold = 0.05, log10.transform = FALSE,
bootstrapping = TRUE, R = 1000, seed = 123, use.integralError = TRUE, ...){
## log transformation of input vectors
if(log10.transform)
{
Re <- log(x = Re, base = 10) # cumulative precipitation of rain event
D <- log(x = D, base = 10) # duration of rain event
}
if(method == "LS"){
## Least Squares Method (LS)
# ReD.LS <- stats::lsfit(y = Re, x = D) # delivers same result as lm()
if(bootstrapping){
# ... create bootstrap function
boot.LS <- function(formula, data, indices){
# subset data
data <- data[indices,]
# boot model
LM.boot <- stats::lm(formula, data)
return(coef(LM.boot))
}
# set seed for replication
set.seed(seed)
# bootstrapping with R replications
ReD.LS.boot <- boot::boot(data = data.frame(Re = Re, D = D), statistic = boot.LS,
R = R, formula = Re ~ D)
# boot::boot.ci(ReD.LS.boot, conf= c(.05, 0.5, .95))
# ... according to Rossi et al. (2017: 20)
# compute quantiles: 5th for min, 50 as median for best fit, 95th for max
ReD.LS.boot.conf <- apply(X = ReD.LS.boot$t, MARGIN = 2, FUN = quantile, probs = c(.05, .50, .95), na.rm=TRUE)
# ReD.LS.boot.stat <- apply(X = ReD.LS.boot$t, MARGIN = 2, FUN = median, na.rm=TRUE)
# ... intercept alpha from model
# T50 <- ReD.LS.boot.stat[1]
T5 <- ReD.LS.boot.conf[1, 1]
T50 <- ReD.LS.boot.conf[2, 1]
T95 <- ReD.LS.boot.conf[3, 1]
# ... slope gamma from model
# gamma <- ReD.LS.boot.stat[2]
gamma <- c(ReD.LS.boot.conf[2, 2], ReD.LS.boot.conf[1, 2], ReD.LS.boot.conf[3, 2])
names(gamma) <- c("gamma_Tx_median", "gamma_Tx_min", "gamma_Tx_max")
# Getting residuals of fit
# ... defining linear function
linFunct <- function(x, intercept, gamma){return((intercept + gamma * x))}
## ... fitting median model and get residuals and error estimates
# ReD.Model.fit <- linFunct(x = D, intercept = ReD.LS.boot.stat[1], gamma = ReD.LS.boot.stat[2])
ReD.Model.fit <- linFunct(x = D, intercept = ReD.LS.boot.conf[2, 1], gamma = ReD.LS.boot.conf[2, 2])
ReD.Model.Res <- Re - ReD.Model.fit
# ... computing error estimates using http://pages.mtu.edu/~fmorriso/cm3215/UncertaintySlopeInterceptOfLeastSquaresFit.pdf
# n <- length(D)
# SSE <- sum(ReD.Model.Res^2, na.rm = TRUE) # Error sum of squares
# SST <- sum((Re - mean(Re))^2, na.rm = TRUE) # Total sum of squares
# SSR <- SST - SSE # Regression sum of squares
# Sxx <- sum((D - mean(D))^2, na.rm = TRUE)
# Sxy <- sum((D - mean(D)) * (Re - mean(Re)), na.rm = TRUE)
# SDyx <- sqrt(SSE/(n-2)) # Standard Deviation of y(x)
# SDm <- sqrt(SDyx^2/Sxx) # Standard Deviation of Slope
# SDb <- sqrt(SDyx^2 * (1/n + ((mean(D, na.rm = TRUE)^2)/Sxx))) # Standard Deviation of Intercept
# ... get 5th and 95th percentile of gamma (slope) and intercept
# ... gamma (slope)
# ReD.Model.gamma.quant <- ReD.LS.boot.stat[2] + qt(c((.05/2), .95+(0.05/2)), df = (n-2)) * SDm
# ... intercept
# ReD.Model.interc.quant <- ReD.LS.boot.stat[1] + qt(c((.05/2), .95+(0.05/2)), df = (n-2)) * SDb
# T5 <- ReD.Model.interc.quant[1]
# T95 <- ReD.Model.interc.quant[2]
## ... fitting min and max model and get residuals
# min
ReD.Model.fit.min <- linFunct(x = D, intercept = ReD.LS.boot.conf[1, 1], gamma = ReD.LS.boot.conf[1, 2])
# ReD.Model.fit.min <- linFunct(x = D, intercept = ReD.Model.interc.quant[1], gamma = ReD.Model.gamma.quant[1])
ReD.Model.Res.min <- Re - ReD.Model.fit.min
# max
ReD.Model.fit.max <- linFunct(x = D, intercept = ReD.LS.boot.conf[3, 1], gamma = ReD.LS.boot.conf[3, 2])
# ReD.Model.fit.max <- linFunct(x = D, intercept = ReD.Model.interc.quant[2], gamma = ReD.Model.gamma.quant[2])
ReD.Model.Res.max <- Re - ReD.Model.fit.max
# plot(Re ~ D)
# abline(lm(Re ~ D))
# abline(a = ReD.LS.boot.stat[1], b = ReD.LS.boot.stat[2], col = "red")
# abline(a = ReD.LS.boot.stat[1] - ReD.Model.interc.quant[1], b = ReD.LS.boot.stat[2], col = "orange")
} else {
ReD.Model <- stats::lm(Re ~ D)
ReD.Model.fit <- ReD.Model$fitted.values
ReD.Model.Res <- ReD.Model$residuals
# ReD.LS <- stats::lsfit(y = Re, x = D)
# intercept alpha from model
T50 <- coef(ReD.Model)[[1]]
# slope gamma from model
gamma <- coef(ReD.Model)[[2]]
names(gamma) <- "gamma"
}# end of if bootstrapping
} else if(method == "NLS") { # end of if method == "LS"
## Nonlinear Least Squares Method (NLS)
# ... optimizer function
opt.NLS <- function(par, x, y)
{
t <- par[1]
alpha <- par[2]
gamma <- par[3]
y.fit <- t + alpha * (x^gamma)
sum((y - y.fit)^2)
}
# ... optimize parameter
par.opt.NLS <- optim(x = D, y = Re, par = c(mean(D), sd(D), 1),
fn = opt.NLS, method = "Nelder-Mead", #, "BFGS",
control = list(pgtol = 1e-9, maxit = 10000))$par
if(bootstrapping)
{
# ... create bootstrap function
boot.NLS <- function(formula, data, start, control, indices)
{
# subset data
data <- data[indices,]
# boot model
NLS.boot <- minpack.lm::nlsLM(formula = formula, data = data,
start = start, control = control)
return(coef(NLS.boot))
} # end of boot.NLS
# set seed for replication
set.seed(seed)
# bootstrapping with R replications
ReD.NLS.boot <- boot::boot(data = data.frame(y = Re, x = D), statistic = boot.NLS,
R = R, formula = y ~ t + a * (x^gamma), control = list(maxiter = 500),
start = list(t = par.opt.NLS[1], a = par.opt.NLS[2], gamma = par.opt.NLS[3]))
# ... according to Rossi et al. (2017: 20)
# compute quantiles: 5th for min, 50 as median for best fit, 95th for max
ReD.NLS.boot.conf <- apply(X = ReD.NLS.boot$t, MARGIN = 2, FUN = quantile, probs = c(.05, .50, .95), na.rm=TRUE)
T5 <- ReD.NLS.boot.conf[1, 1]
T50 <- ReD.NLS.boot.conf[2, 1]
T95 <- ReD.NLS.boot.conf[3, 1]
gamma <- c(ReD.NLS.boot.conf[1, 2], ReD.NLS.boot.conf[2, 2], ReD.NLS.boot.conf[3, 2])
names(gamma) <- c("gamma_Tx_min", "gamma_Tx_median", "gamma_Tx_max")
# Getting residuals of fit
# ... defining linear function
powerLawFunct <- function(x, t, a, gamma){return((t + a * (x^gamma)))}
## ... fitting median model and get residuals
ReD.Model.fit <- powerLawFunct(x = D, t = ReD.NLS.boot.conf[2, 1], a = ReD.NLS.boot.conf[2, 2], gamma = ReD.NLS.boot.conf[2, 3])
ReD.Model.Res <- Re - ReD.Model.fit
## ... fitting min and max model and get residuals
# min
ReD.Model.fit.min <- powerLawFunct(x = D, t = ReD.NLS.boot.conf[1, 1], a = ReD.NLS.boot.conf[1, 2], gamma = ReD.NLS.boot.conf[1, 3])
ReD.Model.Res.min <- Re - ReD.Model.fit.min
# max
ReD.Model.fit.max <- powerLawFunct(x = D, t = ReD.NLS.boot.conf[3, 1], a = ReD.NLS.boot.conf[3, 2], gamma = ReD.NLS.boot.conf[3, 3])
ReD.Model.Res.max <- Re - ReD.Model.fit.max
} else{
# get model (the Levenberg-Marquardt nlsLM() fit would overwrite a plain nls() fit, so only the former is kept)
ReD.NLS <- minpack.lm::nlsLM(formula = y ~ t + a * (x^gamma), data = data.frame(x = D, y = Re),
start = list(t = par.opt.NLS[1], a = par.opt.NLS[2], gamma = par.opt.NLS[3]),
control = minpack.lm::nls.lm.control(maxiter = 500))
ReD.NLS.fit <- predict(ReD.NLS)
# fitted values and residuals are needed later for the residual PDF
ReD.Model.fit <- ReD.NLS.fit
ReD.Model.Res <- Re - ReD.Model.fit
# ReD.Model.fit <- powerLawFunct(x = D, t = coef(ReD.NLS)[1], a = coef(ReD.NLS)[2], gamma = coef(ReD.NLS)[3])
}
} else { # end of if method == "NLS"
stop('Selected method is not supported. Please use "NLS" or "LS"!')
} # end of if method
# PLOT NLS
# plot(y = Re, x = D)
# a<-coef(ReD.NLS)[1]
# b<-coef(ReD.NLS)[2]
# k<-coef(ReD.NLS)[3]
# x <- seq(from = 0, to = 5, by = 0.001)
# lines(x = x, a+b*x^k,col='red')
# ggplot2::ggplot(data.frame(D = D, ReD.NLS.fit = ReD.NLS.fit),
# ggplot2::aes(D,ReD.NLS.fit)) + ggplot2::geom_point() + ggplot2::geom_smooth()
# IS THAT NECESSARY?
## standardize residuals
# ReD.Model.SDRes <- ReD.Model$residuals
# ReD.Model.SDRes <- scale(ReD.Model$residuals)[, 1]
## check normality of residuals (limit: large sample sizes easily produce significant results from small deviations from normality)
# if(shapiro.test(ReD.Model.Res)$p.value <= 0.005){warning("Residuals are highly significantly not normal distributed")}
# if(bootstrapping && shapiro.test(ReD.Model.Res.min)$p.value <= 0.005){warning("Residuals are highly significantly not normal distributed")}
# if(bootstrapping &&shapiro.test(ReD.Model.Res.max)$p.value <= 0.005){warning("Residuals are highly significantly not normal distributed")}
## Probability Density Function (PDF) via Kernel Density Estimation (KDE) with a Gaussian kernel
ReD.PDF <- stats::density(x = ReD.Model.Res, kernel = "gaussian")
ReD.PDF.df <- data.frame(x = ReD.PDF$x, y = ReD.PDF$y, type = "PDF")
if(bootstrapping)
{
# min
ReD.PDF.min <- stats::density(x = ReD.Model.Res.min, kernel = "gaussian")
ReD.PDF.df.min <- data.frame(x = ReD.PDF.min$x, y = ReD.PDF.min$y, type = "PDF")
# plot(ReD.PDF.min)
# max
ReD.PDF.max <- stats::density(x = ReD.Model.Res.max, kernel = "gaussian")
ReD.PDF.df.max <- data.frame(x = ReD.PDF.max$x, y = ReD.PDF.max$y, type = "PDF")
# plot(ReD.PDF.max)
}
## Modelling PDF using a Gaussian Function
GaussianFunction <- function(ReD.PDF)
{
# ... using Nonlinear regression model (NLS)
# # https://stats.stackexchange.com/questions/220109/fit-a-gaussian-to-data-with-r-with-optim-and-nls
# ... optimizer function
opt.Gauss <- function(par, x, y)
{
m <- par[1]
sd <- par[2]
k <- par[3]
rhat <- k * exp(-0.5 * ((x - m)/sd)^2)
sum((y - rhat)^2)
}
# ... optimize parameter
par.opt.Gauss <- stats::optim(x = ReD.PDF$x, y = ReD.PDF$y, par = c(mean(ReD.PDF$x), sd(ReD.PDF$x), 1), fn = opt.Gauss,
method = "BFGS")
# ... compute NLS using a Gaussian function and fitted values
ReD.PDF.NLS <- stats::nls(y ~ k*exp(-1/2*(x-mu)^2/sigma^2),
start = c(mu = par.opt.Gauss$par[1], sigma = par.opt.Gauss$par[2], k = par.opt.Gauss$par[3]),
data = data.frame(x = ReD.PDF$x, y = ReD.PDF$y), control = list(maxiter = 10000, reltol=1e-9))
# ... return model and fitted values
return(list(stats::predict(ReD.PDF.NLS), ReD.PDF.NLS))
} # end of Gaussian Function
ReD.PDF.result <- GaussianFunction(ReD.PDF = ReD.PDF)
ReD.PDF.fit <- ReD.PDF.result[[1]]
ReD.PDF.NLS <- ReD.PDF.result[[2]]
if(bootstrapping)
{
ReD.PDF.result.min <- GaussianFunction(ReD.PDF = ReD.PDF.min)
ReD.PDF.fit.min <- ReD.PDF.result.min[[1]]
ReD.PDF.NLS.min <- ReD.PDF.result.min[[2]]
ReD.PDF.result.max <- GaussianFunction(ReD.PDF = ReD.PDF.max)
ReD.PDF.fit.max <- ReD.PDF.result.max[[1]]
ReD.PDF.NLS.max <- ReD.PDF.result.max[[2]]
}
# plotting density curves
# ReD.PDF.fit.df <- data.frame(x = ReD.PDF$x, y = ReD.PDF.fit, type = "Gaussian fit")
# ReD.PDF.df <- rbind(ReD.PDF.df, ReD.PDF.fit.df)
#
# ggExample <- ggplot2::ggplot(data = ReD.PDF.df, ggplot2::aes(x = x, y = y, color = type, linetype = type)) +
# ggplot2::geom_line(size = 0.9) +
# ggplot2::scale_color_manual(values = c("royalblue4", "black")) +
# ggplot2::scale_linetype_manual(values = c(2, 1))
#
# ggExample
## Extracting the x percent exceedance probability
# https://stackoverflow.com/questions/44313022/how-to-compute-confidence-interval-of-the-fitted-value-via-nls
# https://stackoverflow.com/questions/37455512/predict-x-values-from-simple-fitting-and-annoting-it-in-the-plot?rq=1
# https://stackoverflow.com/questions/43322568/predict-x-value-from-y-value-with-a-fitted-model
# integrate.xy is not exactly 1 for the whole function. Therefore, the error is estimated first and divided by 2 for both sides
if(use.integralError)
{
integral.error <- (1 - sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, a = min(ReD.PDF$x), b = max(ReD.PDF$x)))/2
} else{
integral.error <- 0
}
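# Conceptual sketch (hypothetical numbers): the helper below searches for the upper
# integration bound b such that the area under the fitted residual PDF up to b equals
# the requested probability, e.g. for a 5 percent exceedance level
# sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, b = b) would be about 0.05.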
integrFunctToThresh <- function(ReD.PDF, ReD.PDF.fit, thresh, lower, upper, par, integral.error)
{
# ... optimizer function for integration
opt.Integral <- function(par, x, y, thresh)
{
b <- par[1]
abs(sfsmisc::integrate.xy(x = x, fx = y, b = b) - thresh)
}
# ... find x parameter to area threshold of integral
par.opt <- optim(thresh = (thresh - integral.error), x = ReD.PDF$x, y = ReD.PDF.fit,
lower = lower, upper = upper,
par = par, # start value
fn = opt.Integral, method = "L-BFGS-B",
control = list(pgtol = 1e-9, maxit = 10000, ndeps = 1e-9))$par
# sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, b = par.opt.Mean)
# predict(object = ReD.PDF.NLS, newdata = data.frame(x = par.opt.Mean))
# return the x value that fulfills the threshold
return(par.opt)
}
fit.Gauss.50 <- integrFunctToThresh(ReD.PDF = ReD.PDF, ReD.PDF.fit = ReD.PDF.fit, thresh = 0.5, integral.error = integral.error,
lower = (mean(ReD.PDF$x) - sd(ReD.PDF$x)), upper = (mean(ReD.PDF$x) + sd(ReD.PDF$x)), par = mean(ReD.PDF$x))
fit.Gauss.x <- integrFunctToThresh(ReD.PDF = ReD.PDF, ReD.PDF.fit = ReD.PDF.fit, thresh = prob.threshold, integral.error = integral.error,
lower = (min(ReD.PDF$x) + 0.0001), upper = mean(ReD.PDF$x), par = mean(c(min(ReD.PDF$x), mean(ReD.PDF$x)))) # if b becomes the min value, then in sfsmisc::integrate.xy a = b fails!
if(bootstrapping)
{
# min
fit.Gauss.50.min <- integrFunctToThresh(ReD.PDF = ReD.PDF.min, ReD.PDF.fit = ReD.PDF.fit.min, thresh = 0.5, integral.error = integral.error,
lower = (mean(ReD.PDF.min$x) - sd(ReD.PDF.min$x)), upper = (mean(ReD.PDF.min$x) + sd(ReD.PDF.min$x)), par = mean(ReD.PDF.min$x))
fit.Gauss.x.min <- integrFunctToThresh(ReD.PDF = ReD.PDF.min, ReD.PDF.fit = ReD.PDF.fit.min, thresh = prob.threshold, integral.error = integral.error,
lower = (min(ReD.PDF.min$x) + 0.0001), upper = mean(ReD.PDF.min$x), par = mean(c(min(ReD.PDF.min$x), mean(ReD.PDF.min$x))))
# max
fit.Gauss.50.max <- integrFunctToThresh(ReD.PDF = ReD.PDF.max, ReD.PDF.fit = ReD.PDF.fit.max, thresh = 0.5, integral.error = integral.error,
lower = (mean(ReD.PDF.max$x) - sd(ReD.PDF.max$x)), upper = (mean(ReD.PDF.max$x) + sd(ReD.PDF.max$x)), par = mean(ReD.PDF.max$x))
fit.Gauss.x.max <- integrFunctToThresh(ReD.PDF = ReD.PDF.max, ReD.PDF.fit = ReD.PDF.fit.max, thresh = prob.threshold, integral.error = integral.error,
lower = (min(ReD.PDF.max$x) + 0.0001), upper = mean(ReD.PDF.max$x), par = mean(c(min(ReD.PDF.max$x), mean(ReD.PDF.max$x))))
}
# par.opt.Mean <- optim(thresh = (0.5 - integral.error), x = ReD.PDF$x, y = ReD.PDF.fit,
# lower = (mean(ReD.PDF$x) - sd(ReD.PDF$x)),
# upper = (mean(ReD.PDF$x) + sd(ReD.PDF$x)),
# par = mean(ReD.PDF$x), # start value
# fn = opt.Integral, method = "L-BFGS-B",
# control = list(pgtol = 1e-9, maxit = 10000, ndeps = 1e-9))$par
# ... ... (2) integration for probability level threshold
# fit.Gauss.x <- optim(thresh = (prob.threshold - integral.error), x = ReD.PDF$x, y = ReD.PDF.fit,
# lower = (min(ReD.PDF$x) + 0.0000001), # if b becomes the min value, then in sfsmisc::integrate.xy a = b fails!
# upper = mean(ReD.PDF$x),
# par = mean(min(ReD.PDF$x), (mean(ReD.PDF$x) - sd(ReD.PDF$x))), # start value
# fn = opt.Integral, method = "L-BFGS-B",
# control = list(pgtol = 1e-9, maxit = 10000, ndeps = 1e-9))$par
# sfsmisc::integrate.xy(x = ReD.PDF$x, fx = ReD.PDF.fit, a = min(ReD.PDF$x), b = par.opt.probLevel)
# ggExample + ggplot2::geom_segment(inherit.aes = FALSE, color = "red", size = 0.8,
# ggplot2::aes(x = fit.Gauss.x , xend = fit.Gauss.x,
# y = 0, yend = predict(object = ReD.PDF.NLS, newdata = data.frame(x = fit.Gauss.x))))
## get intercept of the probability level
# Tx is the curve parallel to the best-fit line T50 (slope = gamma), with intercept alpha.x = alpha.50 - alpha.opt
Tx <- T50 - (fit.Gauss.50 - fit.Gauss.x)
# alpha <- exp(Tx) # re-transform log alpha to alpha ?
if(bootstrapping)
{
Tx.min <- T5 - (fit.Gauss.50.min - fit.Gauss.x.min)
Tx.max <- T95 - (fit.Gauss.50.max - fit.Gauss.x.max)
}
### LS
## create data frame for ggplot2
# df.fit.LS <- data.frame(x = D, y = Re, y_fit = ReD.Model.fit,
# y_fit_min = ReD.Model.fit.min, y_fit_max = ReD.Model.fit.max,
# y_fit_Lx = linFunct(x = D, intercept = Tx, gamma = gamma),
# y_fit_Lx_min = linFunct(x = D, intercept = Tx.min, gamma = ReD.Model.gamma.quant[1]),
# y_fit_Lx_max = linFunct(x = D, intercept = Tx.max, gamma = ReD.Model.gamma.quant[2]))
#
#
# ggplot2::ggplot(data = df.fit.LS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# # ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit)) + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "green") + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue") + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red") + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# # ... plot best fit with 5-95th quantile
# ggplot2::ggplot(data = df.fit.LS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_min), col = "blue") + # add fit of regression line (median fit)
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit), col = "black") +
# ggplot2::geom_line(mapping = ggplot2::aes(x = x, y = y_fit_max), col = "red") +
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ### NLS
# ## create data frame for ggplot2
# df.fit.NLS <- data.frame(x = D, y = Re, y_fit = ReD.Model.fit,
# y_fit_min = ReD.Model.fit.min, y_fit_max = ReD.Model.fit.max,
# y_fit_Lx = powerLawFunct(x = D, t = Tx, a = ReD.NLS.boot.conf[2, 2], gamma = ReD.NLS.boot.conf[2, 3]),
# y_fit_Lx_min = powerLawFunct(x = D, t = Tx.min, a = ReD.NLS.boot.conf[1, 2], gamma = ReD.NLS.boot.conf[1, 3]),
# y_fit_Lx_max = powerLawFunct(x = D, t = Tx.max, a = ReD.NLS.boot.conf[3, 2], gamma = ReD.NLS.boot.conf[3, 3]))
#
#
# ggplot2::ggplot(data = df.fit.NLS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_min), col = "orange", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_max), col = "yellow", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit), col = "black", method = "loess", size = 0.7)+ # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "green", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ggplot2::ggplot(data = df.fit.NLS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit), col = "black", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_min), col = "orange", method = "loess", size = 0.7) +
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_max), col = "yellow", method = "loess", size = 0.7) +
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ggplot2::ggplot(data = df.fit.NLS) +
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "green", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
#
#
# ## combine both LS and NSL
# ggplot2::ggplot(data = df.fit.NLS) +
# # NLS
# ggplot2::geom_point(mapping = ggplot2::aes(x = x, y = y)) + # add Re-D point pairs
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "yellow", method = "loess", size = 0.8) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# ggplot2::geom_smooth(mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "blue", method = "loess", size = 0.7) + # add fit of regression line (median fit)
# # LS
# ggplot2::geom_line(data = df.fit.LS, mapping = ggplot2::aes(x = x, y = y_fit_Lx), col = "orange") + # add fit of regression line (median fit)
# ggplot2::geom_line(data = df.fit.LS, mapping = ggplot2::aes(x = x, y = y_fit_Lx_min), col = "red") + # add fit of regression line (median fit)
# ggplot2::geom_line(data = df.fit.LS, mapping = ggplot2::aes(x = x, y = y_fit_Lx_max), col = "red") + # add fit of regression line (median fit)
# ggplot2::xlim(0, 6) + ggplot2::ylim(-1, 6)
if(log10.transform)
{
alpha.Tx <- 10^Tx
names(alpha.Tx) <- "alpha_Tx"
alpha.res <- alpha.Tx
if(bootstrapping)
{
alpha.Tx.min <- 10^Tx.min
names(alpha.Tx.min) <- "alpha_Tx_min"
alpha.Tx.max <- 10^Tx.max
names(alpha.Tx.max) <- "alpha_Tx_max"
alpha.res <- c(alpha.res, alpha.Tx.min, alpha.Tx.max)
}
} else {# end of log10 alpha check
alpha.Tx <- Tx
names(alpha.Tx) <- "alpha_Tx"
alpha.res <- alpha.Tx
if(bootstrapping)
{
alpha.Tx.min <- Tx.min
names(alpha.Tx.min) <- "alpha_Tx_min"
alpha.Tx.max <- Tx.max
names(alpha.Tx.max) <- "alpha_Tx_max"
alpha.res <- c(alpha.res, alpha.Tx.min, alpha.Tx.max)
}
}
if(method == "LS")
{
funct <- linFunct
}
if(method == "NLS")
{
funct <- powerLawFunct
}
return(list(alpha.res, gamma, funct))
} # end of function getRainThreshFreqM
|
complete_data <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F)
data_plot1 <- subset(complete_data, Date %in% c("1/2/2007","2/2/2007"))
data_plot1$Date <- as.Date(data_plot1$Date, format="%d/%m/%Y")
datetime <- paste(as.Date(data_plot1$Date), data_plot1$Time)
data_plot1$Datetime <- as.POSIXct(datetime)
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,0.005,0))
with(data_plot1, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Rective Power (kilowatts)",xlab="")
})
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
/Plot4.R
|
no_license
|
Oscar017/ExData_Plotting1
|
R
| false | false | 1,156 |
r
|
##' Calculate residual matrix.
##'
##' Run lm(Y~X) with the specified method through Rcpp and return the
##' residual matrix.
##'
##' @param X A model matrix
##' @param Y The response matrix
##' @param method 'llt' for the LLT Cholesky, 'qr' for the
##' column-pivoted QR decomposition, 'svd' for the Jacobi singular
##' value decomposition (SVD)
##' @return The residual matrix.
##' @export
lm.resid <- function(X, Y, method=c("llt", "qr", "svd")){
stopifnot(is.matrix(X))
stopifnot(is.matrix(Y))
stopifnot(nrow(X)==nrow(Y))
method <- match.arg(method)
if(method=="llt"){
res <- lm_resid_llt(X, Y)
}else if(method=="qr"){
res <- lm_resid_qr(X, Y)
}else if(method=="svd"){
res <- lm_resid_svd(X, Y)
}
dimnames(res) <- dimnames(Y)
return(res)
}
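## Usage sketch (illustrative; assumes the package providing the compiled
## lm_resid_* routines is installed and loaded):
## X <- cbind(1, matrix(rnorm(100 * 3), 100, 3))  # model matrix with intercept
## Y <- matrix(rnorm(100 * 5), 100, 5)            # five response traits
## R <- lm.resid(X, Y, method = "qr")
## all.equal(R, residuals(lm(Y ~ X - 1)), check.attributes = FALSE)  # should be TRUE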
|
/R/lm.resid.R
|
no_license
|
jianan/qtlpvl
|
R
| false | false | 769 |
r
|
startTime <- Sys.time()
cat(paste0("> Rscript AUC_coexprDist_paralogs_boxplot.R\n"))
# Rscript AUC_coexprDist_paralogs_boxplot.R ENCSR079VIJ_G401_40kb TCGAkich_norm_kich
options(scipen=100)
suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
suppressPackageStartupMessages(library(ggplot2, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
registerDoMC(40)
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
registerDoMC(ifelse(SSHFS, 2, 40))
hicds = "ENCSR079VIJ_G401_40kb"
exprds= "TCGAkich_norm_kich"
plotType <- "svg"
myHeightGG <- myWidthGG <- 7
plotType <- "png"
myHeightGG <- myWidthGG <- 7
args <- commandArgs(trailingOnly = TRUE)
stopifnot(length(args) == 2)
hicds <- args[1]
exprds <- args[2]
outFolder <- file.path("AUC_COEXPRDIST_PARALOGS_BOXPLOT", hicds, exprds)
dir.create(outFolder, recursive = TRUE)
inFolder <- file.path("AUC_COEXPRDIST_WITHPARALOGS_SORTNODUP")
maxDist <- 500*1000
### HARD CODED
# require(ggplot2)
allData_dt <- get(load(file.path(inFolder, hicds, exprds, "allData_dt.Rdata")))
allData_dt <- allData_dt[allData_dt$dist <= maxDist,]
allData_dt$sameTAD <- ifelse(allData_dt$sameTAD == 1, "sameTAD",
ifelse(allData_dt$sameTAD == 0, "diffTAD", NA))
stopifnot(!is.na(allData_dt$sameTAD))
allData_dt$paralogs <- ifelse(allData_dt$paralogs == 1, "paralogs",
ifelse(allData_dt$paralogs == 0, "not para.", NA))
stopifnot(!is.na(allData_dt$paralogs))
dist_histBreaks_vect <- seq(0, maxDist, length.out=10+1)
dist_histBreaks_vect_labs <- paste0("]", dist_histBreaks_vect[1:(length(dist_histBreaks_vect)-1)]/1000, ", ",dist_histBreaks_vect[2:length(dist_histBreaks_vect)]/1000, "]")
dist_histBreaks_vect_labs[1] <- sub("]", "[", dist_histBreaks_vect_labs[1])
allData_dt$dist_cat <- foreach(i = 1:nrow(allData_dt), .combine='c' ) %dopar% { which(hist(allData_dt$dist[i], breaks=dist_histBreaks_vect, plot=FALSE)$counts == 1) }
outFile <- file.path(outFolder, "allData_dt.Rdata")
save(allData_dt, file=outFile, version=2)
cat(paste0("... written: ", outFile, "\n"))
allData_dt$cond <- interaction(allData_dt$sameTAD, allData_dt$paralogs)
allData_dt$cond <- factor(allData_dt$cond, levels = c( "sameTAD.paralogs", "sameTAD.not para.", "diffTAD.paralogs", "diffTAD.not para."))
stopifnot(!is.na(allData_dt$cond))
allData_dt$dist_cat <- factor(allData_dt$dist_cat, levels=as.character(sort(unique(allData_dt$dist_cat))))
subTit <- paste0("max dist. = ", maxDist/1000, " kb")
my_ylab <- "Gene1-gene2 expr. corr."
my_xlab <- " Gene1-gene2 dist. range [kb]"
p_coexpr_boxplot <- ggplot(allData_dt, aes(x=dist_cat, y = coexpr, fill = cond, color = cond)) +
geom_boxplot(notch = TRUE, outlier.shape=NA)+
ggtitle(paste0(hicds, " - ", exprds), subtitle = paste0(subTit))+
scale_x_discrete(name=my_xlab, labels =dist_histBreaks_vect_labs )+
scale_y_continuous(name=paste0(my_ylab),
breaks = scales::pretty_breaks(n = 20))+
labs(fill = paste0(""), color=paste0("")) +
theme(
plot.title = element_text(hjust = 0.5, face = "bold", size=16),
plot.subtitle = element_text(hjust = 0.5, face = "italic", size = 14),
panel.grid = element_blank(),
panel.grid.major.y = element_line(colour = "grey"),
panel.grid.minor.y = element_line(colour = "grey"),
axis.line.x= element_line(size = .2, color = "black"),
axis.line.y = element_line(size = .2, color = "black"),
axis.text.y = element_text(color="black", hjust=1,vjust = 0.5, size=12),
axis.text.x =element_text(color="black", hjust=0.5,vjust = 0.5, size=10, face="bold"),
# axis.ticks.x = element_blank(),
axis.title.y = element_text(color="black", size=14),
axis.title.x = element_text(color="black", size=14),
panel.border = element_blank(),
panel.background = element_rect(fill = "transparent"),
legend.background = element_rect(),
legend.text = element_text(size=12),
legend.key = element_blank(),
legend.key.size = unit(1.2, 'cm'),
legend.title = element_text(face="bold", size=12)
)
outFile <- file.path(outFolder, paste0(hicds, "_", exprds, "_coexpr_TAD_paralogs_boxplot.", plotType))
ggsave(plot = p_coexpr_boxplot, filename = outFile, height=myHeightGG, width = myWidthGG*1.5)
cat(paste0("... written: ", outFile, "\n"))
p_coexpr_boxplot_jitter <- p_coexpr_boxplot +
geom_point( position=position_jitterdodge(), stroke=0.8, shape=21, alpha=0.8)
outFile <- file.path(outFolder, paste0(hicds, "_", exprds, "_coexpr_TAD_paralogs_boxplot_jitter.", plotType))
ggsave(plot = p_coexpr_boxplot_jitter, filename = outFile, height=myHeightGG, width = myWidthGG*1.5)
cat(paste0("... written: ", outFile, "\n"))
outFile <- file.path(outFolder, paste0(hicds, "_", exprds, "_coexpr_TAD_paralogs_countPairs_dt.txt"))
write.table(table(allData_dt$cond, allData_dt$dist_cat), file =outFile, col.names=T, row.names = T, quote=F, sep="\t")
cat(paste0("... written: ", outFile, "\n"))
count_dt <- as.data.frame(table(allData_dt$cond, allData_dt$dist_cat))
count_dt$Freq_log10 <- log10(count_dt$Freq)
my_ylab <- "# gene pairs"
p_count_barplot <- ggplot(count_dt, aes(x=Var2, y = Freq, fill = Var1, color = Var1)) +
geom_bar(position="dodge", stat="identity") +
ggtitle(paste0(hicds, " - ", exprds), subtitle = paste0(subTit))+
scale_x_discrete(name=my_xlab, labels =dist_histBreaks_vect_labs )+
scale_y_continuous(name=paste0(my_ylab),
breaks = scales::pretty_breaks(n = 20))+
labs(fill = paste0(""), color=paste0("")) +
theme(
plot.title = element_text(hjust = 0.5, face = "bold", size=16),
plot.subtitle = element_text(hjust = 0.5, face = "italic", size = 14),
panel.grid = element_blank(),
panel.grid.major.y = element_line(colour = "grey"),
panel.grid.minor.y = element_line(colour = "grey"),
axis.line.x= element_line(size = .2, color = "black"),
axis.line.y = element_line(size = .2, color = "black"),
axis.text.y = element_text(color="black", hjust=1,vjust = 0.5, size=12),
axis.text.x =element_text(color="black", hjust=0.5,vjust = 0.5, size=10, face="bold"),
# axis.ticks.x = element_blank(),
axis.title.y = element_text(color="black", size=14),
axis.title.x = element_text(color="black", size=14),
panel.border = element_blank(),
panel.background = element_rect(fill = "transparent"),
legend.background = element_rect(),
legend.text = element_text(size=12),
legend.key = element_blank(),
legend.key.size = unit(1, 'cm'),
legend.title = element_text(face="bold", size=12)
)
outFile <- file.path(outFolder, paste0(hicds, "_", exprds, "_TAD_paralogs_countPairs_barplot.", plotType))
ggsave(plot = p_count_barplot, filename = outFile, height=myHeightGG, width = myWidthGG*1.5)
cat(paste0("... written: ", outFile, "\n"))
######################################################################################
cat("*** DONE\n")
cat(paste0(startTime, "\n", Sys.time(), "\n"))
# foo_breaks <- c(0,10,20,30,40,50)
# hist(5, breaks=foo_breaks)$breaks
# # c(0,10,20,30,40,50)
# hist(5, breaks=foo_breaks)$counts
# # 1 0 0 0 0
# which(hist(5, breaks=foo_breaks)$counts == 1)
# # 1
# which(hist(0, breaks=foo_breaks)$counts == 1)
# # 1
# which(hist(10, breaks=foo_breaks)$counts == 1)
# # 1
# which(hist(50, breaks=foo_breaks)$counts == 1)
# # 5
# which(hist(39, breaks=foo_breaks)$counts == 1)
# # 4
# which(hist(40, breaks=foo_breaks)$counts == 1)
# # 4
# which(hist(41, breaks=foo_breaks)$counts == 1)
# # 5
# which(hist(11, breaks=foo_breaks)$counts == 1)
# # 2
# which(hist(55, breaks=foo_breaks)$counts == 1)
# # ERROR
# which(hist(-2, breaks=foo_breaks)$counts == 1)
# # ERROR
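# Side note (illustrative sketch, not used above): the per-row hist() trick in the
# foreach loop bins each distance into dist_histBreaks_vect; a vectorised
# alternative would be findInterval(), e.g.
# findInterval(allData_dt$dist, dist_histBreaks_vect, left.open = TRUE, rightmost.closed = TRUE)
# (values exactly on the lower boundary 0 would need the same special-casing as
# hist()'s include.lowest = TRUE)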
|
/AUC_coexprDist_paralogs_boxplot.R
|
no_license
|
marzuf/v2_Yuanlong_Cancer_HiC_data_TAD_DA
|
R
| false | false | 7,864 |
r
|
# Set the folder 'Tarea_Nro4' as the working directory
setwd("~/Tarea_Nro4")
#dv: int -> int
# Def dv: function to generate the check digit for a valid RUT
# Example: dv(18749743) -> 4
dv <- function(rut){
rut = as.character(rut)
x = as.numeric(rev(strsplit(rut,NULL)[[1]]))
Multiplo = rep(2:7,length.out=length(x))
y = sum(x*Multiplo)
z = 11 - y + floor(y/11)*11
key = c(1:11)
val = c(1:9,"k",0)
dv = val[match(z, key)]
return(dv)
}
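# Example usage (per the comment above); note dv() returns the digit as character:
# dv(18749743)                           # "4"
# paste0(18749743, "-", dv(18749743))    # "18749743-4"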
|
/funcionesR.R
|
permissive
|
cherieyan/Tarea_Nro4
|
R
| false | false | 479 |
r
|
######### b)
library(MASS)
data <- c(0.17,0.22,0.37,0.38,0.5,0.58,0.72,0.85,0.9,0.93,1,1,1,1,1,1,1,1,1,1) # initial data given
dist <- function(data,alpha,beta){ # Conditional distribution of alpha to be used in MH step
(1/(gamma(alpha))^20)*(beta^((20*alpha)))*(prod(data^(alpha-1)))*alpha*exp(-alpha)
}
gibbs<- function(start.alpha,start.beta,n.sims){
n <- length(data)
y.mis <- integer(0)
res <- matrix(NA,nrow=n.sims,ncol=2) #Matrix of alpha,beta samples
res[1,] <- c(start.alpha,start.beta)
for (i in 2:n.sims){
for(j in 1:10){
      while(TRUE){ #Simulates gamma random variable conditioned on its value being greater than 1
y.mis[j]<-rgamma(1,res[i-1,1],res[i-1,2])
if(y.mis[j]>1) break
}
}
data <- replace(data,11:20,y.mis) # augments the data with missing values
x <- sum(data)
res[i,2]<- rgamma(1,shape=n*res[i-1,1]+2,rate= x+1) #Gibbs step for beta using conditional distribution of beta
can <- rnorm(1,res[i-1,1],1) #Generates a proposal from normal distribution for MH step
    acc <- min(1,dist(data,can,res[i,2])/dist(data,res[i-1,1],res[i,2])) #Acceptance probability using the dist function defined above
u<-runif(1)
if (u<acc){
res[i,1]<-can
}else{
res[i,1]<-res[i-1,1]
}
print(i) #Just to check progress - slow algorithm (can be improved?)
Sys.sleep(0.01)
flush.console()
}
ans <- list(sample=res,mis=y.mis)
}
samp <- gibbs(2,1,10000)$sample
plot(samp[,1],samp[,2])
mu1 <- mean(samp[,1]) # mean of alpha
mu2 <- mean(samp[,2]) # mean of beta
sigma <- var(samp) # variance- covariance matrix of alpha and beta
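# (Sketch, not in the original script:) posterior summaries beyond the point
# estimates, e.g. equal-tailed 95% credible intervals from the Gibbs output
# (optionally after dropping a burn-in such as samp[-(1:1000), ]):
# apply(samp, 2, quantile, probs = c(0.025, 0.975))   # columns: alpha, beta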
######### f)
post <- function(data,alpha,beta){ # posterior we are trying to sample from
(1/(gamma(alpha))^20)*(beta^((20*alpha)+1))*(prod(data^(alpha-1)))*alpha*exp(-beta*(sum(data)+1)-alpha)
}
con <- matrix(NA,10000,2) # Setting up matrix of alpha and beta values
con[1,] <- c(mu1,mu2) # Taking initial step to be estimates of alpha and beta
for(i in 2:10000){ #independence sampler
for(j in 1:10){
    while(TRUE){ #Simulates gamma random variable conditioned on its value being greater than 1
y.mis[j] <- rgamma(1,con[i-1,1],con[i-1,2])
if(y.mis[j]>1) break
}
}
data <- replace(data,11:20,y.mis)
cand <- mvrnorm(1,c(con[i-1,1],con[i-1,2]),sigma) #proposal from bivariate normal with mean vector and variance matrix taken from previous estimate
accu <- min(1,post(data,cand[1],cand[2])/post(data,con[i-1,1],con[i-1,2])) # acceptance probability
if(is.nan(accu)==TRUE){ # adjusting for possible NaN's produced when value is near 0
accu=0
}
u <- runif(1)
if(u<accu){
con[i,] <- cand
}else{
con[i,] <- con[i-1,]
}
print(i) #Just to check progress - slow algorithm (can be improved?)
Sys.sleep(0.01)
flush.console()
}
plot(con[,1],con[,2])
|
/mcmc.R
|
no_license
|
GitJoycee/MCMC-coursework-questions
|
R
| false | false | 2,993 |
r
|
---
title: "AdvancedR_JHU01"
author: "Yayehirad A Melsew"
date: "29/05/2021"
output: html_document
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
```{r}
# Install swirl
install.packages("swirl")
packageVersion("swirl")
library(swirl)
#Install the R Programming Environment course
install_course("Advanced R Programming")
swirl()
```
Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot.
|
/Week1_AdvancedR.R
|
no_license
|
Yayehirad/Advanced-R-Programming
|
R
| false | false | 485 |
r
|
#' ---
#' title: "Regression and Other Stories: Health Expenditure"
#' author: "Andrew Gelman, Jennifer Hill, Aki Vehtari"
#' date: "`r format(Sys.Date())`"
#' output:
#' html_document:
#' theme: readable
#' toc: true
#' toc_depth: 2
#' toc_float: true
#' code_download: true
#' ---
#' Health Expenditure - Discovery through graphs of data and
#' models. See Chapter 2 in Regression and Other Stories.
#'
#' -------------
#'
#+ setup, include=FALSE
knitr::opts_chunk$set(message=FALSE, error=FALSE, warning=FALSE, comment=NA)
# switch this to TRUE to save figures in separate files
savefigs <- FALSE
#' #### Load packages
library("rprojroot")
root<-has_dirname("ROS-Examples")$make_fix_file()
#' #### Load data
read.page <- function (datapage){
variables.keep <- paste ("X", 1960:2007, sep="")
data <- read.csv (datapage, skip=3)
data <- data[1:30,] # file has only 30 rows of data
countries <- as.character (data[,"X"])
numbers <- data[,variables.keep]
n <- length(countries)
recent.data <- rep(NA, n)
for (i in 1:n) {
y <- as.numeric(numbers[i,])
ok <- !is.na(y)
if (sum(ok)>0) {
years.keep <- (1:length(y))[ok]
recent.data[i] <- y[max(years.keep)]
}
else {
recent.data[i] <- NA
}
}
return(list (countries=countries, recent.data=recent.data))
}
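#' (Aside, illustrative only; not used below.) The loop above keeps, for each
#' country, the most recent non-missing year; a vectorised sketch of the same idea,
#' assuming a matrix `numbers` as built inside read.page(), would be:
# last_obs <- apply(numbers, 1, function(y) {
#   y <- as.numeric(y)
#   if (all(is.na(y))) NA else y[max(which(!is.na(y)))]
# })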
expend <- read.page(root("HealthExpenditure/data","healthexpenditure.csv"))
life <- read.page(root("HealthExpenditure/data","lifeexpectancy.csv"))
doctor <- read.page(root("HealthExpenditure/data","doctorvisits.csv"))
# shorten some country names
countries <- expend$countries
countries[countries=="Czech Republic"] <- "Czech"
countries[countries=="New Zealand"] <- "N.Zealand"
countries[countries=="Slovak Republic"] <- "Slovakia"
countries[countries=="United Kingdom"] <- "UK"
countries[countries=="United States"] <- "USA"
# specific colors
color <- ifelse (countries %in% c("USA","Mexico"), "red","black")
#
expend <- expend$recent.data
life <- life$recent.data
doctor <- doctor$recent.data
#' #### Scatterplot
#+ eval=FALSE, include=FALSE
png(root("HealthExpenditure/figs","healthscatter.png"), height=600, width=700)
#+
par(mgp=c(1.7,.5,0), tck=-.01, mar=c(3,3,.1,.1))
plot(expend, life, xlim=c(0,1.05*max(expend)), xaxs="i",
type="n", xlab="Health care spending (PPP US$)",
ylab="Life expectancy (years)")
#symbols(expend, life, circles=sqrt(doctor), inches=.8, add=TRUE, fg="gray80")
text(expend, life, countries, col=color)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
#' #### Plot scatterplot, excluding some countries
removec <- countries %in% c("Netherlands", "Belgium", "Germany",
"Ireland", "Iceland", "Greece", "Italy", "Sweden", "UK")
#+ eval=FALSE, include=FALSE
png(root("HealthExpenditure/figs","healthscatter2.png"), height=600, width=700)
#+
par(mgp=c(2.5,.7,0), tck=-.01, mar=c(4,4,.1,.1))
plot(expend[!removec], life[!removec], xlim=c(0,1.05*max(expend)),
xaxs="i", type="n", xlab="Health care spending (PPP US$)",
ylab="Life expectancy (years)", cex.axis=1.3, cex.lab=1.3, las=1, xaxt="n", bty="l")
#symbols(expend[!removec], life[!removec], circles=sqrt(doctor[!removec]), inches=.8, add=TRUE, fg="gray80")
axis(1, seq(0,8000,2000), cex.axis=1.3, cex.lab=1.3)
text(expend[!removec], life[!removec], countries[!removec],
col=color[!removec], cex=1.3)
for (x in seq(2000,6000,2000)) abline(v=x, col="gray", lwd=.5)
for (y in seq(74,82,2)) abline(y,0,col="gray", lwd=.5)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off ()
#+ eval=FALSE, include=FALSE
if (savefigs) pdf(root("HealthExpenditure/figs","healthscatter3.pdf"), height=4, width=5.5)
#+
par(mgp=c(1.7,.5, 0), tck=-.01, mar=c(3,3,.1,.1))
plot(expend[!removec], life[!removec], xlim=c(0,1.05*max(expend)),
xaxs="i", type="n", xlab="Health care spending (PPP US$)",
ylab="Life expectancy (years)", bty="l", xaxt="n")
axis(1, seq(0,6000,2000))
#symbols (expend[!removec], life[!removec], circles=sqrt(doctor[!removec]), inches=.8, add=TRUE, fg="gray80")
text(expend[!removec], life[!removec], countries[!removec], cex=.9)
#+ eval=FALSE, include=FALSE
if (savefigs) dev.off()
|
/HealthExpenditure/healthexpenditure.R
|
no_license
|
dkillian/RAOS-Examples
|
R
| false | false | 4,161 |
r
|
.getVp <- function(.fit, .obj, .lsp, .lpi)
{
.Vp <- if(inherits(.obj$family, "general.family")){
.fit$gcv.ubre <- as.numeric(.fit$REML)
.fit$outer.info <- NULL
.fit$sp <- exp(.lsp)
.fit$scale.estimated <- FALSE
.fit$scale <- 1
.fit$method <- "REML"
.Vp <- mgcv:::gam.fit5.post.proc(.fit,.obj$Sl,.obj$L,.obj$lsp0,.obj$S,.obj$off)$Vb
.Vp <- .Vp[.lpi[[1]], .lpi[[1]]]
} else {
.Vp <- .fit$Vb
}
return( .Vp )
}
|
/R/I_getVp.R
|
no_license
|
davidruegamer/qgam
|
R
| false | false | 454 |
r
|
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(
message = FALSE,
digits = 3,
collapse = TRUE,
comment = "#>"
)
options(digits = 3)
library(recipes)
## ----iris-base-rec-------------------------------------------------------
library(recipes)
iris_rec <- recipe( ~ ., data = iris)
summary(iris_rec)
## ----iris-ref-cell-------------------------------------------------------
ref_cell <- iris_rec %>%
step_dummy(Species) %>%
prep(training = iris, retain = TRUE)
summary(ref_cell)
# Get a row for each factor level
rows <- c(1, 51, 101)
juice(ref_cell, starts_with("Species"))[rows,]
## ----defaults------------------------------------------------------------
param <- getOption("contrasts")
param
## ----iris-helmert--------------------------------------------------------
# change it:
new_cont <- param
new_cont["unordered"] <- "contr.helmert"
options(contrasts = new_cont)
# now make dummy variables with new parameterization
helmert <- iris_rec %>%
step_dummy(Species) %>%
prep(training = iris, retain = TRUE)
summary(helmert)
juice(helmert, starts_with("Species"))[rows,]
# Yuk; go back to the original method
options(contrasts = param)
## ----iris-2int-----------------------------------------------------------
iris_int <- iris_rec %>%
step_interact( ~ Sepal.Width:Sepal.Length) %>%
prep(training = iris, retain = TRUE)
summary(iris_int)
## ----mm-int--------------------------------------------------------------
model.matrix(~ Species*Sepal.Length, data = iris)[rows,]
## ----nope, eval = FALSE--------------------------------------------------
# # Must I do this?
# iris_rec %>%
# step_interact( ~ Species_versicolor:Sepal.Length +
# Species_virginica:Sepal.Length)
## ----iris-sel------------------------------------------------------------
iris_int <- iris_rec %>%
step_dummy(Species) %>%
step_interact( ~ starts_with("Species"):Sepal.Length) %>%
prep(training = iris, retain = TRUE)
summary(iris_int)
## ----sel-input, eval = FALSE---------------------------------------------
# starts_with("Species")
## ----sel-output, eval = FALSE--------------------------------------------
# (Species_versicolor + Species_virginica)
## ----int-form------------------------------------------------------------
iris_int
## ----iris-dont-----------------------------------------------------------
iris_int <- iris_rec %>%
step_interact( ~ Species:Sepal.Length) %>%
prep(training = iris, retain = TRUE)
summary(iris_int)
|
/R-Portable-Mac/library/recipes/doc/Dummies.R
|
permissive
|
sdownin/sequencer
|
R
| false | false | 2,545 |
r
|
\name{ptc}
\alias{ptc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Prepares and fits a PTC Cox model
}
\description{
Calls treatinit() to prepare dataset
Calls addtc() to create TC intervals
Calls ptcfitter() to fit PTC model
}
\usage{
ptc(dataset, ncov, cov_names, maxfollow, nmaxint, interval_width, min_future_events)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
data.frame organized as expected by tc()
}
\item{ncov}{
number of baseline covariates (including treatment) to be included in model
}
\item{cov_names}{
vector of baseline covariate names (including treatment)
}
\item{maxfollow}{
maximum followup for any subject in dataset
}
\item{nmaxint}{
maximum number of TC intervals allowed
}
\item{interval_width}{
width of the TC intervals
}
\item{min_future_events}{
minimum number of events expected of future starters(stoppers) of treatment for determining upper bound on starting(stopping) TC intervals
}
}
\value{
\item{fit_ptc }{fit of PTC model}
\item{nstartint }{number of TC starting intervals}
\item{startint }{vector containing the TC starting interval endpoints}
\item{nstopint }{number of TC stopping intervals}
\item{stopint }{vector containing the TC stopping interval endpoints}
\item{cov_names1 }{vector containing the covariate names of the model}
\item{nperson }{number of subjects in dataset}
\item{numevents }{number of events in dataset}
\item{medianfollowup }{median followup for subjects in dataset}
}
\references{
Troendle, JF, Leifer, E, Zhang Z, Yang, S, and Tewes H (2017) How to Control for Unmeasured Confounding in an Observational Time-To-Event Study With Exposure Incidence Information: the Treatment Choice Cox Model. Statistics in Medicine 36: 3654-3669.
}
\author{
James F. Troendle
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/ptc.Rd
|
no_license
|
cran/tccox
|
R
| false | false | 2,352 |
rd
|
#inputs and output have suffix .t1
tstat <- function(xbar1, xbar2, s, n)
{
(xbar1 - xbar2) / sqrt(2*s^2/n)
}
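# Quick sanity check (illustrative; matches the slider defaults below):
# tstat(xbar1 = 5, xbar2 = 3, s = 3, n = 30)   # ~ 2.58
# With equal n and s this is the pooled two-sample t statistic (t.test with var.equal = TRUE).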
tstat_ui <- function(){
fluidPage(
fluidRow(wellPanel(
h2(strong('The two-sample t test statistic')),
      p('Investigate how it depends on the summary statistics,
        which can be changed with the sliders in the left panel.')
)),
fluidRow(
column(3, wellPanel(
h3(strong('Summary statistics')),
p('For simplicity, we will assume the standard deviations
and sample sizes of both groups are equal, i.e.',
HTML('s<sub>1</sub> = s<sub>2</sub> and
n<sub>1</sub> = n<sub>2</sub>.')),
sliderInput('xbar1.t1', label=HTML('Sample 1 mean (x̄<sub>1</sub>)'), min=0,
max=10, value=5, step=.1),
sliderInput('xbar2.t1', label=HTML('Sample 2 mean (x̄<sub>2</sub>)'), min=0,
max=10, value=3, step=.1),
hr(),
sliderInput(inputId='s.t1', label=
HTML('Sample std devs (s<sub>1</sub> = s<sub>2</sub>)'),
min=1, max=10, value=3, step=.1),
hr(),
sliderInput(inputId='n.t1', label=
HTML('Sample sizes (n<sub>1</sub> = n<sub>2</sub>)'),
min=5, max=200, value=30, step=5)
)),
column(4, wellPanel(
h3(strong('Test statistic')),
plotOutput(outputId='tplot.t1', height="140px"),
hr(),
h3(strong('Test statistic formula')),
p(style="text-align:center", tags$img(width='150px', src='t_formula.PNG'))
)),
column(4, wellPanel(
h3(strong('Analysis')),
h4(strong('Two parts of formula:')),
tags$ul(
tags$li(strong('Numerator:'), 'difference in means'),
verbatimTextOutput('diff.t1'),
tags$ul(
tags$li('Which mean is greater determines sign of the test statistic'),
tags$li('Distance between means determines size of test statistic')
),
tags$li(strong('Denominator:'), 'standard error'),
verbatimTextOutput('se.t1'),
tags$ul(
tags$li('Inverse relationship with test statistic'),
tags$li('Larger standard deviations -> larger standard error'),
tags$li('Larger sample sizes -> smaller standard error')
)
)
)
))
)
}
tstat_serv <- function(input, output){
output$tplot.t1 <- renderPlot({
par(mar=c(2.5, .5, 2, .5), cex.main=2, cex.lab=1.7, cex.axis=1.5)
t.obs <- tstat(input$xbar1.t1, input$xbar2.t1, input$s.t1, input$n.t1)
plot(x=NULL, y=NULL, xlim=c(-10, 10), ylim=c(-1, 1), yaxt='n', xlab='',
main=substitute('t = '*t.obs, list(t.obs=round(t.obs, 2))))
abline(v=seq(-10, 10, by=1), col=grey(.8))
abline(v=0)
col <- ifelse(t.obs > 0, 4, 2)
if(t.obs!=0) arrows(0, 0, t.obs, 0, length=.1, lwd=3, col=col)
if(t.obs==0) points(0, 0 , pch=16)
if(t.obs < -11) text(-11, .5, 'Off-screen', pos=4)
if(t.obs > 11) text(11, .5, 'Off-screen', pos=2)
})
output$diff.t1 <- renderText(input$xbar1.t1 - input$xbar2.t1)
output$se.t1 <- renderText(
round(sqrt(2*input$s.t1^2/input$n.t1), 3)
)
}
|
/introstats/t_dist/two_sample_apps/tstat_functions.R
|
no_license
|
dtkaplan/adriand_shiny
|
R
| false | false | 3,187 |
r
|
# check + load for package availability in R
packages <- c("shiny", "shinythemes", "DT", "data.table")
pkgs_check <- function(pkg){
new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
if (length(new.pkg))
install.packages(new.pkg, dependencies = TRUE)
sapply(pkg, require, character.only = TRUE)
}
pkgs_check(packages)
# increase file upload size for shiny::fileInput()
options(shiny.maxRequestSize=30*1024^2)
|
/Cool_shiny_practices/point1.R
|
no_license
|
chaudharyparth/R
|
R
| false | false | 450 |
r
|
if (!file.exists("household_power_consumption.txt")){
message("No data in the working directory")
} else {
classes <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = classes, nrows = 2075259, comment.char = "")
data <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
#Sys.setlocale("LC_TIME", "C")
datetime <- strptime(with(data, paste(Date, Time)), format = "%d/%m/%Y %H:%M:%S")
png("plot3.png", bg = "transparent")
plot(datetime, data$Sub_metering_1, type = "n", xlab = "", ylab = "")
points(datetime, data$Sub_metering_1, type = "l", col = "black")
points(datetime, data$Sub_metering_2, type = "l", col = "red")
points(datetime, data$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1, 1, 1), col = c("black", "red", "blue"))
title(ylab = "Energy sub metering")
dev.off()
}
|
/plot3.R
|
no_license
|
Nucleusis/ExData_Plotting1
|
R
| false | false | 1,103 |
r
|
if (!file.exists("household_power_consumption.txt")){
message("No data in the working directory")
} else {
classes <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", colClasses = classes, nrows = 2075259, comment.char = "")
data <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
#Sys.setlocale("LC_TIME", "C")
datetime <- strptime(with(data, paste(Date, Time)), format = "%d/%m/%Y %H:%M:%S")
png("plot3.png", bg = "transparent")
plot(datetime, data$Sub_metering_1, type = "n", xlab = "", ylab = "")
points(datetime, data$Sub_metering_1, type = "l", col = "black")
points(datetime, data$Sub_metering_2, type = "l", col = "red")
points(datetime, data$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1, 1, 1), col = c("black", "red", "blue"))
title(ylab = "Energy sub metering")
dev.off()
}
|
# 2022 Wyoming House Republican Primary results by county
# Comparing counties
library(tidyverse)
library(tidycensus)
#vars <- load_variables(2020, "acs5/subject", cache = TRUE)
#View(vars)  # run after uncommenting the load_variables() call above
#----- Census educational attainment data ---
edu <- get_acs(geography = "county",
state = "WY",
variables = c(bachelor_up_25_up = "S1501_C01_015", # bachelor or higher age 25 or higher
hs_up_25_up = "S1501_C01_014",
pop_25_up = "S1501_C01_006", # population age 25 or higher
),
survey = "acs5",
year = 2020,
output = "wide",
cache_table = TRUE)
edu <- edu %>%
mutate(pct_bach_up = bachelor_up_25_upE * 100 / pop_25_upE,
county = str_remove(NAME, " County, Wyoming"))
edu
#--- WY 2022 Republican House primary election results by county
# https://sos.wyo.gov/Elections/Docs/2022/Results/Primary/2022_Statewide_Candidates_Summary.pdf
wy <- tibble::tribble(
~county, ~Robyn.M..Belinskey, ~Anthony.Bouchard, ~Liz.Cheney, ~Harriet.Hageman, ~Denton.Knapp, ~`Write-Ins`, ~Overvotes, ~Undervotes,
"Albany", 40L, 109L, 4218L, 3967L, 65L, 9L, 13L, 32L,
"Big Horn", 78L, 123L, 732L, 3123L, 48L, 5L, 13L, 28L,
"Campbell", 71L, 346L, 1633L, 9164L, 789L, 5L, 65L, 76L,
"Carbon", 38L, 173L, 819L, 2852L, 49L, 6L, 23L, 33L,
"Converse", 45L, 164L, 822L, 3829L, 45L, 1L, 23L, 23L,
"Crook", 31L, 98L, 403L, 2370L, 55L, 2L, 20L, 22L,
"Fremont", 46L, 260L, 3458L, 7380L, 77L, 9L, 23L, 41L,
"Goshen", 17L, 96L, 958L, 3356L, 29L, 1L, 3L, 12L,
"Hot Springs", 10L, 58L, 383L, 1440L, 15L, 1L, 7L, 9L,
"Johnson", 45L, 123L, 890L, 2738L, 38L, 3L, 16L, 28L,
"Laramie", 179L, 772L, 9757L, 14424L, 208L, 44L, 22L, 99L,
"Lincoln", 75L, 212L, 1221L, 4886L, 62L, 5L, 27L, 32L,
"Natrona", 113L, 393L, 6511L, 14200L, 157L, 13L, 46L, 56L,
"Niobrara", 9L, 25L, 113L, 908L, 10L, 0L, 4L, 6L,
"Park", 71L, 197L, 2821L, 8672L, 101L, 16L, 34L, 39L,
"Platte", 19L, 104L, 535L, 2777L, 25L, 3L, 9L, 14L,
"Sheridan", 118L, 228L, 3057L, 7366L, 113L, 13L, 42L, 59L,
"Sublette", 18L, 95L, 849L, 2538L, 32L, 8L, 15L, 28L,
"Sweetwater", 132L, 449L, 2162L, 6722L, 176L, 12L, 51L, 103L,
"Teton", 15L, 34L, 5955L, 1928L, 13L, 4L, 9L, 20L,
"Uinta", 72L, 215L, 1156L, 4030L, 84L, 7L, 36L, 55L,
"Washakie", 30L, 114L, 531L, 2213L, 31L, 8L, 18L, 15L,
"Weston", 33L, 117L, 332L, 2142L, 36L, 0L, 16L, 15L
)
wy <- janitor::clean_names(wy)
wy <- wy %>%
mutate(tot_votes = rowSums(select(., -1)), # sum of row values
pct_cheney = liz_cheney * 100 / tot_votes,
pct_hageman = harriet_hageman * 100 / tot_votes,
winner = case_when (pct_cheney > pct_hageman ~ "Cheney",
pct_cheney < pct_hageman ~ "Hageman"))
joined <- wy %>%
inner_join(edu) %>%
select(county, tot_votes, pct_cheney, pct_hageman, winner, pct_bach_up, pop_25_up = pop_25_upE)
joined %>%
ggplot(aes(x = pct_bach_up, y = pct_cheney, label = county)) +
geom_point(aes(size = tot_votes), alpha = 0.5) +
geom_smooth(method = "lm") +
geom_text(check_overlap = TRUE, nudge_x = .8, nudge_y = -1.1, size = 3) +
labs(title = "2022 WY Republican Primary",
subtitle = "Cheney won Teton and Albany counties",
x = "% of county (age 25 or up) with a bachelor's degree or higher",
y = "% of county that voted for Liz Cheney",
size = "Total Votes",
caption = "Chart: @bdill\nEducation Data: US Census Burear ACS 5 year\nElection Data: sos.wyo.gov")
write_csv(joined, "C:/users/bdill/Desktop/2022_WY_Republican_primary_data.csv")
joined %>% arrange(desc(tot_votes))
#---
ggplot(joined, aes(x = pop_25_up, y = pct_bach_up)) +
geom_point()
#--- linear model
mod <- lm(pct_cheney ~ pct_bach_up, data = joined)
summary(mod)
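# Illustration (not in the original script): the slope and R^2 can be pulled
# from the fitted model above with base R accessors.
coef(mod)["pct_bach_up"]   # change in Cheney vote % per 1-point rise in bachelor's-degree %
summary(mod)$r.squared     # share of between-county variance explained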
#-----
wy_long <- pivot_longer(wy,cols = 2:9, names_to = "candidate", values_to = "votes")
# "liz_cheney", "harriet_hageman"
wy_long %>%
filter(candidate %in% c("liz_cheney", "harriet_hageman")) %>%
ggplot(aes(x = county, y = votes, group = candidate, color = candidate)) +
geom_bar(aes(fill = candidate),position = 'dodge',stat='identity') +
theme(axis.text.x = element_text(angle = 90, vjust = .2)) +
labs(title = "2022 WY Republican Primary",
subtitle = "Cheney won Teton and Albany counties",
caption = "Chart: @bdill\nElection Data: sos.wyo.gov")
|
/2022_WY_Republican_House_primary.R
|
no_license
|
wbdill/r-sandbox01
|
R
| false | false | 6,544 |
r
|
library(readxl)
library(stringr)
library(tidyverse)
#function 1
getMultipleReactionFormula <- function(description, reaction_ko, ko) {### description can be any character of the metabolite
index <- vector()
result <- vector()
tt <- vector()
for (i in 1:length(ko)){
if(length( which (reaction_ko %in% ko[i]))){
index <- which (reaction_ko %in% ko[i])
tt <- description[index]
result[i] <- paste0(tt, collapse = ";")
} else{
result[i] <- NA
}
}
return(result)
}
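# Illustration (not in the original script): a toy call to the lookup helper above.
# For each key in `ko`, the entries of `description` whose `reaction_ko` matches
# that key are collapsed with ";"; keys with no match give NA.
if (interactive()) {
  print(getMultipleReactionFormula(description = c("d1", "d2", "d3"),
                                   reaction_ko = c("K1", "K2", "K1"),
                                   ko = c("K1", "K3")))  # "d1;d3" NA
}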
#function 2 check whether the gene exists in the present model
geneExist <- function (original, newgenelist){ ##input: original is from yeast model v7.7;
index <- vector()
for (i in 1: length(newgenelist)){
if (length(which(newgenelist[i] %in% original))){
index[i] <- "YES"
} else {
index[i] <- "NO"
}
}
return(index)
}
#function 3 establish mapping between single gene and single rxn
splitAndCombine <- function(gene, rxn) { ##one rxn has several genes, this function was used to split the genes
gene <- str_split(gene,";")
tt<- length(gene)
gene0 <- list()
for (i in 1:tt){
gene0[[i]] <- paste(rxn[i], gene[[i]], sep = "@@@")
}
gene1 <- unique(unlist(gene0))
gene2 <- str_split(gene1, "@@@" )
rxnGene <- data.frame(v1=character(length(gene2)),stringsAsFactors = FALSE)
tt1 <- length(gene2)
for (j in 1:tt1){
rxnGene$v1[j] <- gene2[[j]][2]
rxnGene$v2[j] <- gene2[[j]][1]
}
return(rxnGene)
}
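# Illustration (not in the original script): splitAndCombine() expands ";"-separated
# gene strings into one row per gene-reaction pair (v1 = gene, v2 = rxn).
if (interactive()) {
  print(splitAndCombine(gene = c("g1;g2", "g2"), rxn = c("r1", "r2")))
  #   v1 v2
  # 1 g1 r1
  # 2 g2 r1
  # 3 g2 r2
}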
#data input
#Obtain the systematic name of the genes related to the reactions
gene_standard_name <- read_excel("input/gene_name.xlsx")
gene_standard_name <- select(gene_standard_name, `Accession-1`, `Common-Name`)
colnames(gene_standard_name)<- c("systematic_name","comman_name")
#input the genelist in present model
genelist_v7_7 <- read.csv("input/genelist in v7.7.csv", sep = ";", stringsAsFactors = FALSE)
#input the yeast metabolic model downloaded from biocyc
reaction <- read_excel("input/reaction.xlsx")
reaction$X__1 <- str_replace_all(reaction$X__1, "↔", "<=>")
reaction$X__1 <- str_replace_all(reaction$X__1, " → ", " => ") ## some metabolites have "→"
reaction$X__1 <- str_replace_all(reaction$X__1, "→", "->") ## some metabolites have "→"
reaction$X__1 <- str_replace_all(reaction$X__1, "α", "alpha")
reaction$X__1 <- str_replace_all(reaction$X__1, "β", "beta")
reaction$X__1 <- str_replace_all(reaction$X__1, "ω", "omega")
## Establish relations between genes and reactions one by one
index_r <- which(!is.na(reaction$X__1))
reaction1 <- reaction
for (i in 2:3036) {
if(is.na(reaction1$X__1[i])){
reaction1$X__1[i] <- reaction1$X__1[i-1]
} else{
reaction1$X__1[i] <- reaction1$X__1[i]
}
} ## in reaction1, for each gene, a reaction can be found
rxn_withGENE <- filter(select(reaction1, X__1, gene), !is.na(gene))
GR <- select(rxn_withGENE, gene, X__1)
GR$sytematic_name <- getMultipleReactionFormula(gene_standard_name$systematic_name,gene_standard_name$comman_name,GR$gene)
GR0 <- select(GR, X__1, sytematic_name)
GR_biocyc <- splitAndCombine(GR0$sytematic_name, GR0$X__1)
colnames(GR_biocyc) <- c("gene","reaction_biocyc")
GR_biocyc$sign <- geneExist(genelist_v7_7$geneNames, GR_biocyc$gene)
newGR_biocyc <- filter(GR_biocyc,sign =="NO")
write.table(newGR_biocyc, "output/newGR_biocyc.txt", row.names = FALSE, sep = "\t")
## gene annotation from biocyc
gene_ec_biocyc <- read.table("input/gene_ec_biocyc.txt", header = TRUE, sep = "\t", stringsAsFactors = FALSE) %>%
select(., Gene.Name, Reactions.of.gene) %>%
filter(., str_detect(Reactions.of.gene, "\\."))
gene_ec_biocyc$Reactions.of.gene <- str_replace_all(gene_ec_biocyc$Reactions.of.gene,"-RXN","")
gene_ec_biocyc$Reactions.of.gene <- str_replace_all(gene_ec_biocyc$Reactions.of.gene,"RXN0-","")
gene_ec_biocyc$Reactions.of.gene <- str_replace_all(gene_ec_biocyc$Reactions.of.gene,"RXN-","")
gene_ec_biocyc0 <- splitAndCombine(gene_ec_biocyc$Reactions.of.gene, gene_ec_biocyc$Gene.Name) %>%
filter(., str_detect(v1,"\\."))
## get annotation and subsystem for each gene
gene_pathway_biocyc <- read.table("input/All_genes_pathway_biocyc.txt", header = TRUE, sep="\t", stringsAsFactors = FALSE)
gene_pathway_biocyc$gene_standard_name <- getMultipleReactionFormula(gene_standard_name$systematic_name,gene_standard_name$comman_name,gene_pathway_biocyc$Gene.Name)
gene_pathway_biocyc$check <- gene_pathway_biocyc$gene_standard_name == gene_pathway_biocyc$Accession.1
write.table(gene_pathway_biocyc, "output/gene_pathway and annotation in biocyc.txt", row.names = FALSE, sep="\t") ## >>> for manual check
##>>>>>after manual check
gene_pathway_biocyc0 <- read_excel("input/gene_pathway and annotation in biocyc after manual check.xlsx",
sheet = "Sheet1")
gene_pathway_biocyc0$ec <- getMultipleReactionFormula(gene_ec_biocyc0$v1,gene_ec_biocyc0$v2,gene_pathway_biocyc0$Gene.Name)
gene_pathway_biocyc0$reaction_biocyc <- getMultipleReactionFormula(GR_biocyc$reaction_biocyc,GR_biocyc$gene,gene_pathway_biocyc0$gene_standard_name)
write.table(gene_pathway_biocyc0, "output/gene_pathway and annotation in biocyc0.txt", row.names = FALSE, sep="\t")
##get reactions according to EC
## get reactions from Rhea
#reaction_rhea<- read.csv("rhea reaction summary.csv", sep = ",", stringsAsFactors = FALSE)
#newGP$reaction_R <- getMultipleReactionFormula(reaction_rhea$formula,reaction_rhea$EC,newGP$ec)
#newGP$keggID_R <- getMultipleReactionFormula(reaction_rhea$keggID,reaction_rhea$EC,newGP$ec)
#newGP$masterID_R <- getMultipleReactionFormula(reaction_rhea$masterID,reaction_rhea$EC,newGP$ec)
##get reaction from Brenda
#reaction_brenda <- read.csv("Reactions_BKMS.csv", sep = ";", stringsAsFactors = FALSE)
#newGP$reaction_B <- getMultipleReactionFormula(reaction_brenda$Reaction,reaction_brenda$EC.Number,newGP$ec)
#newGP$keggID_B <- getMultipleReactionFormula(reaction_brenda$Reaction.ID.KEGG,reaction_brenda$EC.Number,newGP$ec)
#newGP$keggPathwayName_B <- getMultipleReactionFormula(reaction_brenda$KEGG.Pathway.Name,reaction_brenda$EC.Number,newGP$ec)
#newGP$brendaID_B <- getMultipleReactionFormula(reaction_brenda$Reaction.ID.BRENDA,reaction_brenda$EC.Number,newGP$ec)
#newGP0 <- filter(newGP, reaction_R !="NA" | reaction_B != "NA")
#write.table(newGP0, "newGP_biocyc.txt", row.names = FALSE, sep = "\t")
|
/Biocyc GPRs summary/newGPR based on biocyc.R
|
permissive
|
hongzhonglu/yeast-model-update
|
R
| false | false | 6,386 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/searcher_efficiency_functions.R
\name{pkmFail}
\alias{pkmFail}
\title{Check if a pk model is well-fit}
\usage{
pkmFail(pkmod)
}
\arguments{
\item{pkmod}{A \code{\link{pkm}} object to test}
}
\value{
logical value indicating a failed fit (TRUE) or a successful fit (FALSE)
}
\description{
Check that the argument is a well-fit pkm object
}
|
/man/pkmFail.Rd
|
permissive
|
ddalthorp/GenEst
|
R
| false | true | 424 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MariaDBResult.R
\docType{class}
\name{MariaDBResult-class}
\alias{MariaDBResult-class}
\alias{dbIsValid,MariaDBResult-method}
\title{Class MariaDBResult}
\usage{
\S4method{dbIsValid}{MariaDBResult}(dbObj)
}
\description{
MariaDB's query results class. This class encapsulates the result of an SQL
statement (either \code{select} or not).
}
\keyword{internal}
|
/man/MariaDBResult-class.Rd
|
no_license
|
noahwilliamsson/RMariaDB
|
R
| false | true | 440 |
rd
|
#' Extract multiple imputed datasets from an object of class testpack
#'
#' This function returns a dataset containing multiple imputed datasets stacked
#' onto each other (i.e., long format; optionally including the original,
#' incomplete data).\cr
#' These data can be automatically exported to SPSS (as a .txt file containing
#' the data and a .sps file containing syntax to generate a .sav file).
#' For the export function the
#' \href{https://CRAN.R-project.org/package=foreign}{\strong{foreign}} package
#' needs to be installed.
#'
#' @inheritParams sharedParams
#' @param m number of imputed datasets
#' @param include should the original, incomplete data be included? Default is
#' \code{TRUE}.
#' @param minspace minimum number of iterations between the iterations chosen
#'                 to provide imputed values (to prevent strong correlation
#'                 between imputed datasets when the MCMC chains are highly
#'                 autocorrelated).
#' @param seed optional seed value
#' @param export_to_SPSS logical; should the completed data be exported to SPSS?
#' @param resdir optional; directory for results. If unspecified and
#' \code{export_to_SPSS = TRUE} the current working directory is
#' used.
#' @param filename optional; file name (without ending). If unspecified and
#' \code{export_to_SPSS = TRUE} a name is generated
#' automatically.
#'
#' @return A \code{data.frame} in which the original data (if
#' \code{include = TRUE}) and the imputed datasets are stacked onto
#' each other.\cr
#' The variable \code{Imputation_} indexes the imputation, while
#' \code{.rownr} links the rows to the rows of the original data.
#' In cross-sectional datasets the
#' variable \code{.id} is added as subject identifier.
#'
#' @section Note:
#' In order to be able to extract (multiple) imputed datasets the imputed values
#' must have been monitored, i.e., \code{imps = TRUE} had to be specified in the
#' argument \code{monitor_params} in \code{\link[testpack:model_imp]{*_imp}}.
#'
#' @seealso \code{\link{plot_imp_distr}}
#'
#' @examples
#' # fit a model and monitor the imputed values with
#' # monitor_params = c(imps = TRUE)
#'
#' mod <- lm_imp(y ~ C1 + C2 + M2, data = wideDF,
#' monitor_params = c(imps = TRUE), n.iter = 100)
#'
#' # Example 1: without export to SPSS
#' MIs <- get_MIdat(mod, m = 3, seed = 123)
#'
#'
#' \dontrun{
#' # Example 2: with export for SPSS
#' # (here: to the temporary directory "temp_dir")
#'
#' temp_dir <- tempdir()
#' MIs <- get_MIdat(mod, m = 3, seed = 123, resdir = temp_dir,
#' filename = "example_imputation",
#' export_to_SPSS = TRUE)
#'
#' }
#'
#' @export
#'
get_MIdat <- function(object, m = 10, include = TRUE,
start = NULL, minspace = 50, seed = NULL,
export_to_SPSS = FALSE,
resdir = NULL, filename = NULL) {
if (!"foreign" %in% rownames(installed.packages()))
errormsg("This function requires the 'foreign' package to be installed.")
if (is.null(object$MCMC))
errormsg("The object does not contain any MCMC samples.")
oldseed <- .Random.seed
on.exit({
.Random.seed <<- oldseed
})
# set seed value if provided
if (!is.null(seed)) {
set_seed(seed)
}
# extract original data and add
# - column with row numbers (needed for plot_imp_distr())
# - an id variable if there is none
DF <- object$data
DF$.rownr <- seq_len(nrow(DF))
if (length(object$Mlist$groups) < 2) DF$.id <- seq_len(nrow(DF))
# extract variable levels
Mlvls <- object$Mlist$Mlvls
# names of variables that were imputed
vars <- intersect(names(object$models), names(DF)[colSums(is.na(DF)) > 0])
# get a summary of the relevant characteristics of the imputed variables
varinfo <- lapply(object$info_list[vars], function(x) {
data.frame(varname = x$varname,
modeltype = x$modeltype,
family = ifelse(!is.null(x$family), x$family, NA),
stringsAsFactors = FALSE)
})
if (is.null(start)) {
start <- start(object$MCMC)
} else {
start <- max(start, start(object$MCMC))
}
MCMC <- do.call(rbind, window(object$MCMC, start = start))
# randomly draw which iterations should be used as imputation
if (nrow(MCMC) / minspace < m)
errormsg("The total number of iterations (%s) is too small to select %s
iterations with spacing of >= %s.", nrow(MCMC), m, minspace)
cand_iters <- seq(from = sample.int(minspace, size = 1), to = nrow(MCMC),
by = minspace)
imp_iters <- sort(sample(cand_iters, size = m))
# reduce MCMC to the relevant rows
MCMC <- MCMC[imp_iters, , drop = FALSE]
# prepare a list of copies of the original data
df_list <- list()
for (i in 1:(m + 1)) {
df_list[[i]] <- cbind("Imputation_" = i - 1, DF)
}
for (i in vars) {
impval <- NULL
# identify the names of the columns in MCMC corresponding to variable i
pat <- paste0(Mlvls[i], "\\[[[:digit:]]*,",
match(i, colnames(object$data_list[[Mlvls[i]]])),
"\\]")
if (!any(grepl(pat, colnames(MCMC))))
errormsg("I cannot find imputed values for %s. Did you monitor them?",
dQuote(i))
impval <- MCMC[, grep(pat, colnames(MCMC), value = TRUE), drop = FALSE]
if (length(impval) > 0) {
rownrs <- gsub(",[[:digit:]]*\\]", "",
gsub("^[[:print:]]*\\[", "", colnames(impval)))
for (j in (1:m) + 1) {
iv <- impval[j - 1, na.omit(match(
object$Mlist$groups[[gsub("M_", "", Mlvls[i])]],
as.numeric(rownrs)
))]
if (is.factor(df_list[[j]][, i])) {
df_list[[j]][is.na(df_list[[j]][, i]), i] <-
factor(iv, labels = levels(df_list[[j]][, i]),
levels = seq_along(levels(df_list[[j]][, i])) -
as.numeric(length(levels(df_list[[j]][, i])) == 2)
)
} else {
df_list[[j]][is.na(df_list[[j]][, i]), i] <- iv
}
}
}
}
if (!include)
df_list <- df_list[-1]
# build dataset --------------------------------------------------------------
imp_df <- do.call(rbind, df_list)
if (is.null(resdir))
resdir <- getwd()
if (is.null(filename))
filename <- paste0("testpack-imputation_", Sys.Date())
if (export_to_SPSS == TRUE) {
foreign::write.foreign(imp_df,
file.path(resdir, paste0(filename, ".txt")),
file.path(resdir, paste0(filename, ".sps")),
package = "SPSS"
)
}
return(imp_df)
}
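# Illustration (not part of the package): a self-contained sketch of the
# iteration-selection step used above: candidate iterations are spaced
# `minspace` apart and m of them are sampled at random. Numbers are arbitrary.
if (interactive()) {
  n_iter <- 1000; minspace <- 50; m <- 5
  cand_iters <- seq(from = sample.int(minspace, size = 1), to = n_iter, by = minspace)
  print(sort(sample(cand_iters, size = m)))
}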
|
/R/get_MIdat.R
|
permissive
|
NErler/testpack
|
R
| false | false | 6,786 |
r
|
library(ISLR)
library(MASS)
summary(Weekly)
#LDA
trainCond = (Weekly$Year <= 2008)
l1 = lda(Direction ~ Lag2, data = Weekly, subset = trainCond)
l1
pred = predict(l1, Weekly[!trainCond, ])
class = pred$class
class
testDirection = Weekly$Direction[!trainCond]
table(class, testDirection)
mean(class == testDirection)
trainCond = (Weekly$Year <= 2008)
l1 = qda(Direction ~ Lag2, data = Weekly, subset = trainCond)
l1
pred = predict(l1, Weekly[!trainCond, ])
qdaClass = pred$class
testDirection = Weekly$Direction[!trainCond]
table(qdaClass, testDirection)
mean(qdaClass == testDirection)
#glm 0.625
#lda 0.625
#qda 0.5865385
#Ex2.
auto = read.csv("Auto.csv")
med = median(auto$mpg)
med
mpg01 = ifelse(auto$mpg > med, 1, 0)
mpg01
auto = data.frame(auto, mpg01)
pairs(auto)
#mpg, displacement, horsepower, weight, acceleration
lrnSet = sample(nrow(auto),200)
l1 = lda(mpg01 ~ displacement + horsepower + weight + acceleration,
data = auto, family = binomial, subset = lrnSet)
l1
pred = predict(l1, auto[-lrnSet, ])
ldaClass = pred$class
ldaClass
test = mpg01[-lrnSet]
k = table(ldaClass, test)
k
mean(ldaClass == test) #0.877193
k
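# note: rows of k are the predicted classes and columns are the true mpg01 values,
# so the two ratios below divide the top-left cell by each column's total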
k[1,1]/sum(k[,1])
k[1,1]/sum(k[,2])
q1 = qda(mpg01 ~ displacement + horsepower + weight + acceleration,
data = auto, family = binomial, subset = lrnSet)
q1
pred = predict(q1, auto[-lrnSet, ])
qdaClass = pred$class
test = mpg01[-lrnSet]
k=table(qdaClass, test)
mean(qdaClass == test) #0.8421053
k
k[1,1]/sum(k[,1])
k[1,1]/sum(k[,2])
|
/lab6/lab6.R
|
no_license
|
veronica948/r-project
|
R
| false | false | 1,484 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/orderCross.R
\name{orderCross}
\alias{orderCross}
\title{Order markers within linkage groups using simulated annealing}
\usage{
orderCross(
mpcrossLG,
cool = 0.5,
tmin = 0.1,
nReps = 1,
maxMove = 0,
effortMultiplier = 1,
randomStart = TRUE,
verbose = FALSE
)
}
\arguments{
\item{mpcrossLG}{An object of class \code{mpcrossLG}, containing genetic data and linkage groups.}
\item{cool}{Rate of cooling}
\item{tmin}{Minimum temperature}
\item{nReps}{Number of independent replications of the simulated annealing algorithm}
\item{maxMove}{Maximum number of positions by which to shift a single marker, as part of the simulated annealing. A value of zero indicates no limit.}
\item{effortMultiplier}{Multiplier for the amount of computational effort}
\item{randomStart}{If TRUE, start from the current ordering}
\item{verbose}{If TRUE, generate more detailed output}
}
\value{
An object of class \code{mpcrossLG}, identical to the input except with the markers rearranged.
}
\description{
This function orders markers within linkage groups using a simulated annealing heuristic. The underlying implementation is a C++ reimplementation of the fortran code \code{arsa.f} from the \code{seriation} package. The reimplementation allows for multithreading, and is therefore much faster. It also fixes a couple of bugs in the original code.
Parameters \code{cool} and \code{tmin} are standard simulated annealing parameters, and decreasing \code{cool} increases the amount of computation effort. Parameter \code{nReps} gives the number of independent replications of the simulated annealing algorithm to be used. The result of the best replication is then chosen.
Parameter \code{maxMove} gives the maximum number of positions by which to shift a marker, as part of a step within the simulated annealing algorithm. The computational effort of determining whether a proposed move of a particular marker should be accepted depends on the number of positions by which it is moved. So if the ordering is already approximately correct at the start of the algorithm, proposals that move markers by large distances are expensive, and also unnecessary. These types of proposed changes to the ordering can be avoided by setting \code{maxMove} to some positive value, maybe one tenth of the number of markers.
Parameter \code{effortMultiplier} simply increases or decreases the amount of computational effort. A value of 0.5 requires half as much effort, a value of 1.0 uses the default amount of effort, and a value of 2.0 requires twice as much computational effort.
Parameter \code{randomStart} controls the starting point of each replication of the algorithm. If this parameter is TRUE, then every replication starts from an independent random ordering. If this parameter is FALSE, then every replication starts from the marker ordering given in the input object.
}
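\examples{
# Illustrative sketch (not from the package's own examples): it assumes `lg` is
# an existing object of class mpcrossLG; parameter values follow the usage above.
\dontrun{
ordered <- orderCross(lg, cool = 0.5, tmin = 0.1, nReps = 2,
                      maxMove = 100, effortMultiplier = 1, randomStart = TRUE)
}
}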
|
/man/orderCross.Rd
|
no_license
|
rohan-shah/mpMap2
|
R
| false | true | 2,975 |
rd
|
plot2 <- function() {
#read the data. Every line of data is read in that matches only the 1st and 2nd Feb 2007 as per the assignment.
energyData <- read.table(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'),header=F, sep=';',dec=".")
col <-read.table('household_power_consumption.txt', header=TRUE,sep=";",nrows=1)
colnames(energyData) <- c(colnames(col))
dateTime <- strptime( paste(energyData$Date,energyData$Time), format="%d/%m/%Y %H:%M:%S")
png(file="plot2.png")
  with(energyData, plot(dateTime, Global_active_power, type="l",xlab="",ylab="Global Active Power (kilowatts)"))
dev.off()
}
|
/plot2.R
|
no_license
|
rkuttan/ExData_Plotting1
|
R
| false | false | 640 |
r
|
#############################################
## dataProcessPlots
#############################################
#' @export
#' @import ggplot2
#' @importFrom graphics axis image legend mtext par plot.new title plot
#' @importFrom grDevices dev.off hcl pdf
dataProcessPlots <- function(data=data,
type=type,
featureName="Transition",
ylimUp=FALSE,
ylimDown=FALSE,
scale=FALSE,
interval="CI",
x.axis.size=10,
y.axis.size=10,
text.size=4,
text.angle=0,
legend.size=7,
dot.size.profile=2,
dot.size.condition=3,
width=10,
height=10,
which.Protein="all",
originalPlot=TRUE,
summaryPlot=TRUE,
save_condition_plot_result=FALSE,
remove_uninformative_feature_outlier=FALSE,
address="") {
datafeature <- data$ProcessedData
datarun <- data$RunlevelData
datafeature$PROTEIN <- factor(datafeature$PROTEIN)
datarun$Protein <- factor(datarun$Protein)
if (!is.element("SUBJECT_NESTED", colnames(datafeature))) {
stop("Input for dataProcessPlots function should be processed by dataProcess function previously. Please use 'dataProcess' function first.")
}
if (length(setdiff(toupper(type), c(toupper("ProfilePlot"), toupper("QCPlot"), toupper("ConditionPlot")))) != 0) {
stop(paste0("Input for type=", type,
". However,'type' should be one of \"ProfilePlot\", \"QCPlot\",\"ConditionPlot\"."))
}
if (address == FALSE){ ## here I used == FALSE, instead of !address. Because address can be logical or characters.
if (which.Protein == 'all') {
      stop('** Cannot generate all plots on a screen. Please set one protein at a time.')
} else if (length(which.Protein) > 1) {
      stop('** Cannot generate multiple plots on a screen. Please set one protein at a time.')
}
}
## Profile plot ##
## ---------------
if (toupper(type) == "PROFILEPLOT") {
if(remove_uninformative_feature_outlier){
### v3.15.2 (2019/04/29) by Meena
if( any(is.element(colnames(datafeature), 'feature_quality')) ) {
datafeature[datafeature$feature_quality == 'Noninformative', 'ABUNDANCE'] <- NA
datafeature[datafeature$is_outlier, 'ABUNDANCE'] <- NA
message("** Filtered out uninformative feature and outliers in the profile plots.")
} else {
message("** To remove uninformative features or outliers, please use \"featureSubset == \"highQuality\" option in \"dataProcess\" function.")
}
### end : v3.15.2 (2019/04/29) by Meena
}
## choose Proteins or not
if (which.Protein != "all") {
## check which.Protein is name of Protein
if (is.character(which.Protein)) {
temp.name <- which.Protein
## message if name of Protein is wrong.
if (length(setdiff(temp.name,unique(datafeature$PROTEIN))) > 0) {
stop(paste0("Please check protein name. Data set does not have this protein. - ", toString(temp.name)))
}
}
## check which.Protein is order number of Protein
if (is.numeric(which.Protein)) {
temp.name <- levels(datafeature$PROTEIN)[which.Protein]
## message if name of Protein is wrong.
if (length(levels(datafeature$PROTEIN)) < max(which.Protein)) {
stop(paste0("Please check your selection of proteins. There are ",
length(levels(datafeature$PROTEIN))," proteins in this dataset."))
}
}
## use only assigned proteins
datafeature <- datafeature[which(datafeature$PROTEIN %in% temp.name), ]
datafeature$PROTEIN <- factor(datafeature$PROTEIN)
datarun <- datarun[which(datarun$Protein %in% temp.name), ]
datarun$PROTEIN <- factor(datarun$Protein)
}
## assign upper or lower limit
# MC, 2016/04/21, default upper limit is maximum log2(intensity) after normalization+3, then round-up
y.limup <- ceiling(max(datafeature$ABUNDANCE, na.rm=TRUE) + 3)
if (is.numeric(ylimUp)) {
y.limup <- ylimUp
}
y.limdown <- -1
if (is.numeric(ylimDown)) {
y.limdown <- ylimDown
}
datafeature <- datafeature[with(datafeature, order(GROUP_ORIGINAL, SUBJECT_ORIGINAL, LABEL)), ]
datafeature$RUN <- factor(datafeature$RUN, levels=unique(datafeature$RUN), labels=seq(1, length(unique(datafeature$RUN))))
datafeature$RUN <- as.numeric(datafeature$RUN)
tempGroupName <- unique(datafeature[, c("GROUP_ORIGINAL", "RUN")])
groupAxis <- as.numeric(xtabs(~GROUP_ORIGINAL, tempGroupName))
cumGroupAxis <- cumsum(groupAxis)
lineNameAxis <- cumGroupAxis[-nlevels(datafeature$GROUP_ORIGINAL)]
groupName <- data.frame(RUN=c(0, lineNameAxis) + groupAxis / 2 + 0.5,
ABUNDANCE=rep(y.limup-1, length(groupAxis)),
Name=levels(datafeature$GROUP_ORIGINAL))
if (length(unique(datafeature$LABEL)) == 2) {
datafeature$LABEL <- factor(datafeature$LABEL, labels=c("Reference", "Endogenous"))
} else {
if (unique(datafeature$LABEL) == "L") {
datafeature$LABEL <- factor(datafeature$LABEL, labels=c("Endogenous"))
}
if (unique(datafeature$LABEL) == "H") {
datafeature$LABEL <- factor(datafeature$LABEL, labels=c("Reference"))
}
}
## need to fill in incomplete rows for Runlevel data
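    ## (dcast to a wide Protein x RUN table and melt back, so that every
    ## Protein-RUN combination gets a row, with NA where a run has no summary value)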
haverun <- FALSE
if (sum(is.element(colnames(datarun), "RUN")) != 0) {
datamat <- dcast( Protein ~ RUN, data=datarun, value.var='LogIntensities', keep=TRUE)
datarun <- melt(datamat, id.vars=c('Protein'))
colnames(datarun)[colnames(datarun) %in% c("variable", "value")] <- c('RUN', 'ABUNDANCE')
haverun <- TRUE
}
## remove the column called 'SuggestToFilter' if there.
if (any(is.element(colnames(datafeature), "SuggestToFilter"))) {
datafeature$SuggestToFilter <- NULL
}
## remove the column called 'Fiter.Repro' if there.
if (any(is.element(colnames(datafeature), "Filter.Repro"))) {
datafeature$Filter.Repro <- NULL
}
## v3.15.2 updated by Meena
## remove the column called 'feature_quality' if there.
if (any(is.element(colnames(datafeature), "feature_quality"))) {
datafeature$feature_quality <- NULL
}
## remove the column called 'is_outlier' if there.
if (any(is.element(colnames(datafeature), "is_outlier"))) {
datafeature$is_outlier <- NULL
}
## end : v3.15.2 updated by Meena
## save the plots as pdf or not
## If there are the file with the same name, add next numbering at the end of file name
## y-axis labeling
temp <- datafeature[!is.na(datafeature[, "ABUNDANCE"]) & !is.na(datafeature[, "INTENSITY"]), ]
temp <- temp[1, ]
temptest <- abs(log2(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"]) < abs(log10(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"])
if (temptest) {
yaxis.name <- 'Log2-intensities'
} else {
yaxis.name <- 'Log10-intensities'
}
if (originalPlot) {
if (address != FALSE) {
allfiles <- list.files()
num <- 0
filenaming <- paste0(address, "ProfilePlot")
finalfile <- paste0(address, "ProfilePlot.pdf")
while (is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".pdf")
}
pdf(finalfile, width=width, height=height)
}
for (i in 1:nlevels(datafeature$PROTEIN)) {
sub <- datafeature[datafeature$PROTEIN == levels(datafeature$PROTEIN)[i], ]
sub$FEATURE <- factor(as.character(sub$FEATURE))
sub$SUBJECT <- factor(sub$SUBJECT)
sub$GROUP_ORIGINAL <- factor(sub$GROUP_ORIGINAL)
sub$SUBJECT_ORIGINAL <- factor(sub$SUBJECT_ORIGINAL)
sub$PEPTIDE <- factor(as.character(sub$PEPTIDE))
## if all measurements are NA,
if (nrow(sub) == sum(is.na(sub$ABUNDANCE))) {
message(paste0("Can't the Profile plot for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)),
") because all measurements are NAs."))
next()
}
## seq for peptide and transition
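      ## build colour/linetype indices: temp1 counts features per peptide; s repeats
      ## the peptide index across its features (colour groups), and ss numbers the
      ## features within each peptide (linetypes within a peptide)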
b <- unique(sub[, c("PEPTIDE", "FEATURE")])
      b <- b[with(b, order(PEPTIDE, FEATURE)), ] ## added because, if there are missing values, the ordering can differ.
temp1 <- xtabs(~b[, 1])
ss <- NULL
s <- NULL
for (j in 1:length(temp1)) {
temp3 <- rep(j, temp1[j])
s <- c(s, temp3)
temp2 <- seq(1, temp1[j])
ss <- c(ss, temp2)
}
## for annotation of condition
groupNametemp <- data.frame(groupName,
"FEATURE"=unique(sub$FEATURE)[1],
"PEPTIDE"=unique(sub$PEPTIDE)[1])
if (toupper(featureName) == "TRANSITION") {
if (any(is.element(colnames(sub), "censored"))) {
sub$censored <- factor(sub$censored, levels=c('FALSE', 'TRUE'))
## 1st plot for original plot
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
color='FEATURE', linetype='FEATURE'), data=sub) +
facet_grid(~LABEL) +
geom_line(size=0.5) +
geom_point(aes_string(x='RUN', y='ABUNDANCE', color='FEATURE', shape='censored'), data=sub,
size=dot.size.profile) +
scale_colour_manual(values=s) +
scale_linetype_manual(values=ss) +
scale_shape_manual(values=c(16, 1),
labels=c("Detected data", "Censored missing data")) +
scale_x_continuous('MS runs', breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis + 0.5, colour="grey", linetype="longdash") +
labs(title=unique(sub$PROTEIN)) +
geom_text(data=groupNametemp, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size, angle=text.angle, color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size)) +
guides(color=guide_legend(title=paste("# peptide:", nlevels(sub$PEPTIDE)),
title.theme = element_text(size=13, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch',
ncol=3),
linetype=guide_legend(title=paste("# peptide:", nlevels(sub$PEPTIDE)),
title.theme = element_text(size=13, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch',
ncol=3),
shape=guide_legend(title=NULL,
label.theme = element_text(size=11, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch'))
} else {
## 1st plot for original plot
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
color='FEATURE', linetype='FEATURE'), data=sub) +
facet_grid(~LABEL) +
geom_point(size=dot.size.profile) +
geom_line(size=0.5) +
scale_colour_manual(values=s) +
scale_linetype_manual(values=ss) +
scale_shape_manual(values=c(16)) +
scale_x_continuous('MS runs', breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis + 0.5, colour="grey", linetype="longdash") +
labs(title=unique(sub$PROTEIN)) +
geom_text(data=groupNametemp, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size))+
guides(color=guide_legend(title=paste("# peptide:", nlevels(sub$PEPTIDE)),
title.theme = element_text(size=13, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch',
ncol=3),
linetype=guide_legend(title=paste("# peptide:", nlevels(sub$PEPTIDE)),
title.theme = element_text(size=13, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch',
ncol=3))
}
print(ptemp)
message(paste("Drew the Profile plot for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)), ")"))
}
if (toupper(featureName) == "PEPTIDE") {
if ( any(is.element(colnames(sub), "censored")) ) {
sub$censored <- factor(sub$censored, levels=c('FALSE', 'TRUE'))
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
                                     color='PEPTIDE', linetype='FEATURE'), data=sub) +
facet_grid(~LABEL) +
geom_line(size=0.5) +
geom_point(aes_string(x='RUN', y='ABUNDANCE',
color='PEPTIDE', shape='censored'), data=sub,
size=dot.size.profile) +
scale_colour_manual(values=unique(s)) + ## unique(s) ??
scale_linetype_manual(values=ss, guide="none") +
scale_shape_manual(values=c(16, 1), labels=c("Detected data", "Censored missing data")) +
scale_x_continuous('MS runs', breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash") +
labs(title=unique(sub$PROTEIN)) +
geom_text(data=groupNametemp,
aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size)) +
guides(color=guide_legend(title=paste("# peptide:", nlevels(sub$PEPTIDE)),
title.theme = element_text(size=13, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch',
ncol=3),
shape=guide_legend(title=NULL,
label.theme = element_text(size=11, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch'))
} else {
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
color='PEPTIDE', linetype='FEATURE'), data=sub) +
facet_grid(~LABEL) +
geom_point(size=dot.size.profile) +
geom_line(size=0.5) +
scale_colour_manual(values=unique(s)) +
scale_linetype_manual(values=ss, guide="none") +
scale_shape_manual(values=c(16)) +
scale_x_continuous('MS runs', breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash") +
labs(title=unique(sub$PROTEIN)) +
geom_text(data=groupNametemp, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size)) +
guides(color=guide_legend(title=paste("# peptide:", nlevels(sub$PEPTIDE)),
title.theme = element_text(size=13, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch',
ncol=3))
}
print(ptemp)
message(paste("Drew the Profile plot for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)), ")"))
}
if (toupper(featureName) == "NA") {
if ( any(is.element(colnames(sub), "censored")) ) {
sub$censored <- factor(sub$censored, levels=c('FALSE', 'TRUE'))
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
color='PEPTIDE', linetype='FEATURE'), data=sub) +
facet_grid(~LABEL) +
geom_line(size=0.5) +
geom_point(aes_string(x='RUN', y='ABUNDANCE',
color='PEPTIDE', shape='censored'), data=sub,
size=dot.size.profile) +
scale_colour_manual(values=unique(s), guide="none") +
scale_linetype_manual(values=ss, guide="none") +
scale_shape_manual(values=c(16, 1), labels=c("Detected data", "Censored missing data")) +
scale_x_continuous('MS runs', breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash") +
labs(title=unique(sub$PROTEIN)) +
geom_text(data=groupNametemp, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size)) +
guides(shape=guide_legend(title=NULL,
label.theme = element_text(size=11, angle=0),
keywidth=0.1,
keyheight = 0.1,
default.unit = 'inch'))
} else {
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
color='PEPTIDE', linetype='FEATURE'), data=sub) +
facet_grid(~LABEL) +
geom_point(size=dot.size.profile) +
geom_line(size=0.5) +
scale_colour_manual(values=unique(s), guide="none") +
scale_linetype_manual(values=ss, guide="none") +
scale_shape_manual(values=c(16)) +
scale_x_continuous('MS runs', breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash") +
labs(title=unique(sub$PROTEIN)) +
geom_text(data=groupNametemp, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size))
}
print(ptemp)
message(paste("Drew the Profile plot for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)), ")"))
}
} # end-loop for each protein
if (address != FALSE) {
dev.off()
}
} # end original plot
  ## 2nd plot : profile plot with run-level summarization ##
## ---------------------------------------
if (summaryPlot) {
if (address != FALSE) {
allfiles <- list.files()
num <- 0
filenaming <- paste0(address, "ProfilePlot_wSummarization")
finalfile <- paste0(address, "ProfilePlot_wSummarization.pdf")
while (is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".pdf")
}
pdf(finalfile, width=width, height=height)
}
for (i in 1:nlevels(datafeature$PROTEIN)) {
sub <- datafeature[datafeature$PROTEIN == levels(datafeature$PROTEIN)[i], ]
sub$FEATURE <- factor(as.character(sub$FEATURE))
sub$SUBJECT <- factor(sub$SUBJECT)
sub$GROUP_ORIGINAL <- factor(sub$GROUP_ORIGINAL)
sub$SUBJECT_ORIGINAL <- factor(sub$SUBJECT_ORIGINAL)
sub$PEPTIDE <- factor(as.character(sub$PEPTIDE))
## if all measurements are NA,
if (nrow(sub) == sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't the Profile plot for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)),
") because all measurements are NAs."))
next()
}
## seq for peptide and transition
b <- unique(sub[, c("PEPTIDE", "FEATURE")])
b <- b[with(b, order(PEPTIDE, FEATURE)), ] ## add because if there are missing value, orders are different.
temp1 <- xtabs(~b[, 1])
ss <- NULL
s <- NULL
for(j in 1:length(temp1)) {
temp3 <- rep(j, temp1[j])
s <- c(s, temp3)
temp2 <- seq(1, temp1[j])
ss <- c(ss, temp2)
}
## for annotation of condition
groupNametemp <- data.frame(groupName, FEATURE=unique(sub$FEATURE)[1], analysis="Run summary")
if (haverun) {
subrun <- datarun[datarun$Protein == levels(datafeature$PROTEIN)[i], ]
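          ## build a pseudo feature-level row set ('Run summary') carrying the run-level abundances,
          ## so the summarized values can be overlaid on the feature-level profiles below.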
if (nrow(subrun) != 0) {
quantrun <- sub[1, ]
quantrun[, 2:ncol(quantrun)] <- NA
quantrun <- quantrun[rep(seq_len(nrow(subrun))), ]
quantrun$PROTEIN <- subrun$Protein
quantrun$PEPTIDE <- "Run summary"
quantrun$TRANSITION <- "Run summary"
quantrun$FEATURE <- "Run summary"
quantrun$LABEL <- "Endogenous"
quantrun$RUN <- subrun$RUN
quantrun$ABUNDANCE <- subrun$ABUNDANCE
quantrun$FRACTION <- 1
          } else { # no run-level summary for this protein (e.g. only one run measured), so create a placeholder row
quantrun <- datafeature[1, ]
quantrun[, 2:ncol(quantrun)] <- NA
quantrun$PROTEIN <- levels(datafeature$PROTEIN)[i]
quantrun$PEPTIDE <- "Run summary"
quantrun$TRANSITION <- "Run summary"
quantrun$FEATURE <- "Run summary"
quantrun$LABEL <- "Endogenous"
quantrun$RUN <- unique(datafeature$RUN)[1]
quantrun$ABUNDANCE <- NA
quantrun$FRACTION <- 1
}
if (any(is.element(colnames(sub), "censored"))) {
quantrun$censored <- FALSE
}
quantrun$analysis <- "Run summary"
sub$analysis <- "Processed feature-level data"
## if 'Filter' column after feature selection, remove this column in order to match columns with run quantification
filter_column <- is.element(colnames(sub), "Filter")
if (any(filter_column)) {
            sub <- sub[, !filter_column]
}
final <- rbind(sub, quantrun)
final$analysis <- factor(final$analysis)
final$FEATURE <- factor(final$FEATURE)
final$RUN <- as.numeric(final$RUN)
if (any(is.element(colnames(sub), "censored"))) {
final$censored <- factor(final$censored, levels=c('FALSE', 'TRUE'))
ptempall <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
color='analysis',linetype='FEATURE', size='analysis'),
data=final) +
facet_grid(~LABEL) +
geom_line(size=0.5) +
geom_point(aes_string(x='RUN', y='ABUNDANCE',
color='analysis', size='analysis', shape='censored'), data=final) +
scale_colour_manual(values=c("lightgray", "darkred")) +
scale_shape_manual(values=c(16, 1), labels=c("Detected data", "Censored missing data")) +
scale_size_manual(values=c(1.7, 2), guide="none") +
scale_linetype_manual(values=c(rep(1, times=length(unique(final$FEATURE))-1), 2), guide="none") +
scale_x_continuous('MS runs',breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash") +
labs(title=unique(final$PROTEIN)) +
geom_text(data=groupNametemp, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size),
legend.title=element_blank()) +
guides(color=guide_legend(order=1,
title=NULL,
label.theme = element_text(size=10, angle=0)),
shape=guide_legend(order=2,
title=NULL,
label.theme = element_text(size=10, angle=0)))
} else {
ptempall <- ggplot(aes_string(x='RUN', y='ABUNDANCE',
color='analysis', linetype='FEATURE', size='analysis'), data=final) +
facet_grid(~LABEL) +
geom_point(size=dot.size.profile) +
geom_line(size=0.5) +
scale_colour_manual(values=c("lightgray", "darkred")) +
scale_shape_manual(values=c(16)) +
scale_size_manual(values=c(1.7, 2), guide="none") +
scale_linetype_manual(values=c(rep(1, times=length(unique(final$FEATURE))-1), 2), guide="none") +
scale_x_continuous('MS runs',breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash") +
labs(title=unique(final$PROTEIN)) +
geom_text(data=groupNametemp, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5),
legend.position="top",
legend.text=element_text(size=legend.size),
legend.title=element_blank()) +
guides(color=guide_legend(order=1,
title=NULL,
label.theme = element_text(size=10, angle=0)))
          ## draw the points again because some red summary dots could be hidden
ptempall <- ptempall+geom_point(data=final, aes(x=RUN, y=ABUNDANCE, size=analysis, color=analysis))
}
print(ptempall)
message(paste("Drew the Profile plot with summarization for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)), ")"))
}
} # end-loop for each protein
if (address!=FALSE) {
dev.off()
}
}
} # end Profile plot
## QC plot (Quality control plot) ##
## ---------------------------------
if (toupper(type) == "QCPLOT") {
## y-axis labeling
temp <- datafeature[!is.na(datafeature[,"ABUNDANCE"]) & !is.na(datafeature[,"INTENSITY"]), ]
temp <- temp[1, ]
temptest <- abs(log2(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"]) < abs(log10(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"])
if (temptest) {
yaxis.name <- 'Log2-intensities'
} else {
yaxis.name <- 'Log10-intensities'
}
## save the plots as pdf or not
    ## If a file with the same name already exists, append the next number to the file name
if (address != FALSE) {
allfiles <- list.files()
num <- 0
filenaming <- paste0(address,"QCPlot")
finalfile <- paste0(address,"QCPlot.pdf")
while (is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".pdf")
}
pdf(finalfile, width=width, height=height)
}
## assign upper or lower limit
# MC, 2016/04/21, default upper limit is maximum log2(intensity) after normalization+3, then round-up
y.limup <- ceiling(max(datafeature$ABUNDANCE, na.rm=TRUE) + 3)
if (is.numeric(ylimUp)) {
y.limup <- ylimUp
}
y.limdown <- -1
if (is.numeric(ylimDown)) {
y.limdown <- ylimDown
}
## relabel the Run (make it sorted by group first)
datafeature <- datafeature[with(datafeature, order(GROUP_ORIGINAL, SUBJECT_ORIGINAL)), ]
datafeature$RUN <- factor(datafeature$RUN,
levels=unique(datafeature$RUN),
labels=seq(1, length(unique(datafeature$RUN))))
if (length(unique(datafeature$LABEL)) == 2) {
datafeature$LABEL <- factor(datafeature$LABEL, labels=c("Reference", "Endogenous"))
label.color <- c("darkseagreen1", "lightblue")
} else {
if (unique(datafeature$LABEL) == "L") {
datafeature$LABEL <- factor(datafeature$LABEL, labels=c("Endogenous"))
label.color <- c("lightblue")
}
if (unique(datafeature$LABEL) == "H") {
datafeature$LABEL <- factor(datafeature$LABEL, labels=c("Reference"))
label.color <- c("darkseagreen1")
}
}
tempGroupName <- unique(datafeature[, c("GROUP_ORIGINAL", "RUN")])
datafeature <- datafeature[with(datafeature, order(LABEL, GROUP_ORIGINAL, SUBJECT_ORIGINAL)), ]
groupAxis <- as.numeric(xtabs(~GROUP_ORIGINAL, tempGroupName))
cumGroupAxis <- cumsum(groupAxis)
lineNameAxis <- cumGroupAxis[-nlevels(datafeature$GROUP_ORIGINAL)]
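    ## condition labels are centred over each group's block of runs, just below the upper y limit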
groupName <- data.frame("RUN"=c(0, lineNameAxis)+groupAxis / 2 + 0.5,
"ABUNDANCE"=rep(y.limup-1, length(groupAxis)),
"Name"=levels(datafeature$GROUP_ORIGINAL))
## all protein
if (which.Protein == 'all' | which.Protein == 'allonly') {
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE'), data=datafeature) +
facet_grid(~LABEL) +
geom_boxplot(aes_string(fill='LABEL'), outlier.shape=1, outlier.size=1.5) +
scale_fill_manual(values=label.color, guide="none") +
scale_x_discrete('MS runs', breaks=cumGroupAxis) +
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup)) +
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash") +
labs(title="All proteins") +
geom_text(data=groupName, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size,
angle=text.angle,
color="black") +
theme(
panel.background = element_rect(fill='white', colour="black"),
legend.key = element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background = element_rect(fill='gray95'),
strip.text.x = element_text(colour=c("#00B0F6"), size=14),
axis.text.x = element_text(size=x.axis.size,colour="black"),
axis.text.y = element_text(size=y.axis.size,colour="black"),
axis.ticks = element_line(colour="black"),
axis.title.x = element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y = element_text(size=y.axis.size+5, vjust=0.3),
title = element_text(size=x.axis.size+8, vjust=1.5))
print(ptemp)
message("Drew the Quality Contol plot(boxplot) for all proteins.")
}
## each protein
## choose Proteins or not
if (which.Protein != 'allonly') {
if (which.Protein != "all") {
## check which.Protein is name of Protein
if (is.character(which.Protein)) {
temp.name <- which.Protein
## message if name of Protein is wrong.
if (length(setdiff(temp.name, unique(datafeature$PROTEIN))) > 0) {
dev.off()
stop(paste0("Please check protein name. Data set does not have this protein. - ",
toString(temp.name)))
}
}
## check which.Protein is order number of Protein
if (is.numeric(which.Protein)) {
temp.name <- levels(datafeature$PROTEIN)[which.Protein]
## message if name of Protein is wrong.
if (length(levels(datafeature$PROTEIN))<max(which.Protein)) {
dev.off()
stop(paste0("Please check your selection of proteins. There are ",
length(levels(datafeature$PROTEIN)), " proteins in this dataset."))
}
}
## use only assigned proteins
datafeature <- datafeature[which(datafeature$PROTEIN %in% temp.name), ]
datafeature$PROTEIN <- factor(datafeature$PROTEIN)
}
for (i in 1:nlevels(datafeature$PROTEIN)) {
sub <- datafeature[datafeature$PROTEIN == levels(datafeature$PROTEIN)[i], ]
subTemp <- sub[!is.na(sub$ABUNDANCE), ]
sub <- sub[with(sub, order(LABEL, RUN)), ]
## if all measurements are NA,
if (nrow(sub)==sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't the Quality Control plot for ",unique(sub$PROTEIN),
"(",i," of ",length(unique(datafeature$PROTEIN)),
") because all measurements are NAs."))
next()
}
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE'), data=sub)+
facet_grid(~LABEL)+
geom_boxplot(aes_string(fill='LABEL'), outlier.shape=1, outlier.size=1.5)+
scale_fill_manual(values=label.color, guide="none")+
scale_x_discrete('MS runs', breaks=cumGroupAxis)+
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup))+
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash")+
labs(title=unique(sub$PROTEIN))+
geom_text(data=groupName, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size, angle=text.angle, color="black")+
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5))
print(ptemp)
message(paste("Drew the Quality Contol plot(boxplot) for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)), ")"))
} # end-loop
}
if (address != FALSE) {
dev.off()
}
} # end QC plot
## Condition plot ##
## -----------------
if (toupper(type) == "CONDITIONPLOT") {
colnames(datarun)[colnames(datarun) == "Protein"] <- "PROTEIN"
colnames(datarun)[colnames(datarun) == "LogIntensities"] <- "ABUNDANCE"
## choose Proteins or not
if (which.Protein != "all") {
## check which.Protein is name of Protein
if (is.character(which.Protein)) {
temp.name <- which.Protein
## message if name of Protein is wrong.
if (length(setdiff(temp.name, unique(datarun$PROTEIN))) > 0) {
stop(paste("Please check protein name. Dataset does not have this protein. -", toString(temp.name), sep=" "))
}
}
## check which.Protein is order number of Protein
if (is.numeric(which.Protein)) {
temp.name <- levels(datarun$PROTEIN)[which.Protein]
## message if name of Protein is wrong.
if (length(levels(datarun$PROTEIN))<max(which.Protein)) {
stop(paste("Please check your selection of proteins. There are ",
length(levels(datarun$PROTEIN))," proteins in this dataset."))
}
}
## use only assigned proteins
datarun <- datarun[which(datarun$PROTEIN %in% temp.name), ]
datarun$PROTEIN <- factor(datarun$PROTEIN)
}
## save the plots as pdf or not
    ## If a file with the same name already exists, append the next number to the file name
if (address != FALSE) {
allfiles <- list.files()
num <- 0
filenaming <- paste0(address, "ConditionPlot")
finalfile <- paste0(address, "ConditionPlot.pdf")
while (is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".pdf")
}
pdf(finalfile, width=width, height=height)
}
## save all results
resultall <- NULL
## y-axis labeling, find log 2 or log 10
temp <- datafeature[!is.na(datafeature[, "ABUNDANCE"]) & !is.na(datafeature[, "INTENSITY"]), ]
temp <- temp[1,]
temptest <- abs(log2(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"]) < abs(log10(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"])
if (temptest) {
yaxis.name <- 'Log2-intensities'
} else {
yaxis.name <- 'Log10-intensities'
}
for (i in 1:nlevels(datarun$PROTEIN)) {
suball <- NULL
sub <- datarun[datarun$PROTEIN == levels(datarun$PROTEIN)[i], ]
sub <- na.omit(sub)
sub$GROUP_ORIGINAL <- factor(sub$GROUP_ORIGINAL)
sub$SUBJECT_ORIGINAL <- factor(sub$SUBJECT_ORIGINAL)
## if all measurements are NA,
if (nrow(sub) == sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't the Condition plot for ", unique(sub$PROTEIN),
"(", i, " of ",length(unique(datarun$PROTEIN)), ") because all measurements are NAs."))
next()
}
## statistics
sub.mean <- aggregate(ABUNDANCE ~ GROUP_ORIGINAL, data=sub, mean, na.rm=TRUE)
sub.sd <- aggregate(ABUNDANCE ~ GROUP_ORIGINAL, data=sub, sd)
sub.len <- aggregate(ABUNDANCE ~ GROUP_ORIGINAL, data=sub, length)
## make the table for result
colnames(sub.mean)[colnames(sub.mean) == "ABUNDANCE"] <- "Mean"
colnames(sub.sd)[colnames(sub.sd) == "ABUNDANCE"] <- "SD"
colnames(sub.len)[colnames(sub.len) == "ABUNDANCE"] <- "numMeasurement"
suball <- merge(sub.mean, sub.sd, by="GROUP_ORIGINAL")
suball <- merge(suball, sub.len, by="GROUP_ORIGINAL")
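      ## interval half-width ('ciw'): 95% t-based CI of the mean when interval == "CI", or one SD when interval == "SD"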
if (interval == "CI") {
suball$ciw <- qt(0.975, suball$numMeasurement) * suball$SD / sqrt(suball$numMeasurement)
}
if (interval == "SD") {
suball$ciw <- suball$SD
}
if (sum(is.na(suball$ciw)) >= 1) {
suball$ciw[is.na(suball$ciw)] <- 0
}
## assign upper or lower limit
y.limup <- ceiling(max(suball$Mean + suball$ciw))
if (is.numeric(ylimUp)) {
y.limup <- ylimUp
}
y.limdown <- floor(min(suball$Mean - suball$ciw))
if (is.numeric(ylimDown)) {
y.limdown <- ylimDown
}
      ## re-order (1, 10, 2, 3, ... -> 1, 2, 3, ..., 10)
suball <- suball[order(suball$GROUP_ORIGINAL), ]
suball <- data.frame(Protein=unique(sub$PROTEIN), suball)
resultall <- rbind(resultall, suball)
if (!scale) { ## scale: false
## reformat as data.frame
#tempsummary <- data.frame(Label=unique(sub$GROUP_ORIGINAL), mean=as.vector(sub.mean), ciw=as.vector(ciw))
tempsummary <- suball
colnames(tempsummary)[colnames(tempsummary) == "GROUP_ORIGINAL"] <- "Label"
ptemp <- ggplot(aes_string(x='Label', y='Mean'), data=tempsummary)+
geom_errorbar(aes(ymax = Mean + ciw, ymin= Mean - ciw),
data=tempsummary, width=0.1, colour="red")+
geom_point(size = dot.size.condition, colour = "darkred")+
scale_x_discrete('Condition')+
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup))+
geom_hline(yintercept = 0, linetype = "twodash", colour = "darkgrey", size = 0.6)+
labs(title=unique(sub$PROTEIN))+
theme(
panel.background=element_rect(fill='white', colour="black"),
panel.grid.major.y = element_line(colour="grey95"),
panel.grid.minor.y = element_blank(),
axis.text.x=element_text(size=x.axis.size, colour="black", angle=text.angle),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5))
} else {
## scale : true
## extract numeric value, don't use levels (because T1,T10,T3,...)
## reformat as data.frame
tempsummary <- suball
colnames(tempsummary)[colnames(tempsummary) == "GROUP_ORIGINAL"] <- "Label"
tempsummary$Label <- as.numeric(gsub("\\D", "", unique(tempsummary$Label)))
ptemp <- ggplot(aes_string(x='Label', y='Mean'), data=tempsummary)+
geom_errorbar(aes(ymax = Mean + ciw, ymin = Mean - ciw),
data=tempsummary, width=0.1, colour="red")+
geom_point(size=dot.size.condition, colour="darkred")+
scale_x_continuous('Condition', breaks=tempsummary$Label, labels=tempsummary$Label)+
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup))+
geom_hline(yintercept=0, linetype="twodash", colour="darkgrey", size=0.6)+
labs(title=unique(sub$PROTEIN))+
theme(
panel.background=element_rect(fill='white', colour="black"),
panel.grid.major.y = element_line(colour="grey95"),
panel.grid.minor.y = element_blank(),
axis.text.x=element_text(size=x.axis.size, colour="black", angle=text.angle),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5))
}
print(ptemp)
message(paste("Drew the condition plot for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datarun$PROTEIN)), ")"))
} # end-loop
if (address != FALSE) {
dev.off()
}
## save the table for condition plot
if (save_condition_plot_result) {
colnames(resultall)[colnames(resultall) == "GROUP_ORIGINAL"] <- 'Condition'
if (interval == "CI") {
colnames(resultall)[colnames(resultall) == "ciw"] <- '95% CI'
}
if (interval == "SD") {
colnames(resultall)[colnames(resultall) == "ciw"] <- 'SD'
}
if (address != FALSE) {
allfiles <- list.files()
num <- 0
filenaming <- paste0(address, "ConditionPlot_value")
finalfile <- paste(address, "ConditionPlot_value.csv")
while (is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".csv")
}
write.csv(resultall, file=finalfile, row.names=FALSE)
}
}
} # end Condition plot
}
## message if name of Protein is wrong.
if (length(levels(datafeature$PROTEIN))<max(which.Protein)) {
dev.off()
stop(paste0("Please check your selection of proteins. There are ",
length(levels(datafeature$PROTEIN)), " proteins in this dataset."))
}
}
## use only assigned proteins
datafeature <- datafeature[which(datafeature$PROTEIN %in% temp.name), ]
datafeature$PROTEIN <- factor(datafeature$PROTEIN)
}
for (i in 1:nlevels(datafeature$PROTEIN)) {
sub <- datafeature[datafeature$PROTEIN == levels(datafeature$PROTEIN)[i], ]
subTemp <- sub[!is.na(sub$ABUNDANCE), ]
sub <- sub[with(sub, order(LABEL, RUN)), ]
## if all measurements are NA,
if (nrow(sub)==sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't the Quality Control plot for ",unique(sub$PROTEIN),
"(",i," of ",length(unique(datafeature$PROTEIN)),
") because all measurements are NAs."))
next()
}
ptemp <- ggplot(aes_string(x='RUN', y='ABUNDANCE'), data=sub)+
facet_grid(~LABEL)+
geom_boxplot(aes_string(fill='LABEL'), outlier.shape=1, outlier.size=1.5)+
scale_fill_manual(values=label.color, guide="none")+
scale_x_discrete('MS runs', breaks=cumGroupAxis)+
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup))+
geom_vline(xintercept=lineNameAxis+0.5, colour="grey", linetype="longdash")+
labs(title=unique(sub$PROTEIN))+
geom_text(data=groupName, aes(x=RUN, y=ABUNDANCE, label=Name),
size=text.size, angle=text.angle, color="black")+
theme(
panel.background=element_rect(fill='white', colour="black"),
legend.key=element_rect(fill='white', colour='white'),
panel.grid.minor = element_blank(),
strip.background=element_rect(fill='gray95'),
strip.text.x=element_text(colour=c("#00B0F6"), size=14),
axis.text.x=element_text(size=x.axis.size, colour="black"),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5))
print(ptemp)
message(paste("Drew the Quality Contol plot(boxplot) for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datafeature$PROTEIN)), ")"))
} # end-loop
}
if (address != FALSE) {
dev.off()
}
} # end QC plot
## Condition plot ##
## -----------------
if (toupper(type) == "CONDITIONPLOT") {
colnames(datarun)[colnames(datarun) == "Protein"] <- "PROTEIN"
colnames(datarun)[colnames(datarun) == "LogIntensities"] <- "ABUNDANCE"
## choose Proteins or not
if (which.Protein != "all") {
## check which.Protein is name of Protein
if (is.character(which.Protein)) {
temp.name <- which.Protein
## message if name of Protein is wrong.
if (length(setdiff(temp.name, unique(datarun$PROTEIN))) > 0) {
stop(paste("Please check protein name. Dataset does not have this protein. -", toString(temp.name), sep=" "))
}
}
## check which.Protein is order number of Protein
if (is.numeric(which.Protein)) {
temp.name <- levels(datarun$PROTEIN)[which.Protein]
## message if name of Protein is wrong.
if (length(levels(datarun$PROTEIN))<max(which.Protein)) {
stop(paste("Please check your selection of proteins. There are ",
length(levels(datarun$PROTEIN))," proteins in this dataset."))
}
}
## use only assigned proteins
datarun <- datarun[which(datarun$PROTEIN %in% temp.name), ]
datarun$PROTEIN <- factor(datarun$PROTEIN)
}
## save the plots as pdf or not
    ## If a file with the same name already exists, append the next number to the file name
if (address != FALSE) {
allfiles <- list.files()
num <- 0
filenaming <- paste0(address, "ConditionPlot")
finalfile <- paste0(address, "ConditionPlot.pdf")
while (is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".pdf")
}
pdf(finalfile, width=width, height=height)
}
## save all results
resultall <- NULL
## y-axis labeling, find log 2 or log 10
temp <- datafeature[!is.na(datafeature[, "ABUNDANCE"]) & !is.na(datafeature[, "INTENSITY"]), ]
temp <- temp[1,]
temptest <- abs(log2(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"]) < abs(log10(temp[1, "INTENSITY"]) - temp[1, "ABUNDANCE"])
if (temptest) {
yaxis.name <- 'Log2-intensities'
} else {
yaxis.name <- 'Log10-intensities'
}
for (i in 1:nlevels(datarun$PROTEIN)) {
suball <- NULL
sub <- datarun[datarun$PROTEIN == levels(datarun$PROTEIN)[i], ]
sub <- na.omit(sub)
sub$GROUP_ORIGINAL <- factor(sub$GROUP_ORIGINAL)
sub$SUBJECT_ORIGINAL <- factor(sub$SUBJECT_ORIGINAL)
## if all measurements are NA,
if (nrow(sub) == sum(is.na(sub$ABUNDANCE))) {
message(paste("Can't the Condition plot for ", unique(sub$PROTEIN),
"(", i, " of ",length(unique(datarun$PROTEIN)), ") because all measurements are NAs."))
next()
}
## statistics
sub.mean <- aggregate(ABUNDANCE ~ GROUP_ORIGINAL, data=sub, mean, na.rm=TRUE)
sub.sd <- aggregate(ABUNDANCE ~ GROUP_ORIGINAL, data=sub, sd)
sub.len <- aggregate(ABUNDANCE ~ GROUP_ORIGINAL, data=sub, length)
## make the table for result
colnames(sub.mean)[colnames(sub.mean) == "ABUNDANCE"] <- "Mean"
colnames(sub.sd)[colnames(sub.sd) == "ABUNDANCE"] <- "SD"
colnames(sub.len)[colnames(sub.len) == "ABUNDANCE"] <- "numMeasurement"
suball <- merge(sub.mean, sub.sd, by="GROUP_ORIGINAL")
suball <- merge(suball, sub.len, by="GROUP_ORIGINAL")
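      ## ciw is the half-width of the error bar: a 95% CI half-width when interval == "CI",
      ## or one standard deviation when interval == "SD"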
if (interval == "CI") {
suball$ciw <- qt(0.975, suball$numMeasurement) * suball$SD / sqrt(suball$numMeasurement)
}
if (interval == "SD") {
suball$ciw <- suball$SD
}
if (sum(is.na(suball$ciw)) >= 1) {
suball$ciw[is.na(suball$ciw)] <- 0
}
## assign upper or lower limit
y.limup <- ceiling(max(suball$Mean + suball$ciw))
if (is.numeric(ylimUp)) {
y.limup <- ylimUp
}
y.limdown <- floor(min(suball$Mean - suball$ciw))
if (is.numeric(ylimDown)) {
y.limdown <- ylimDown
}
## re-order (1, 10, 2, 3, -> 1, 2, 3, ... , 10)
suball <- suball[order(suball$GROUP_ORIGINAL), ]
suball <- data.frame(Protein=unique(sub$PROTEIN), suball)
resultall <- rbind(resultall, suball)
if (!scale) { ## scale: false
## reformat as data.frame
#tempsummary <- data.frame(Label=unique(sub$GROUP_ORIGINAL), mean=as.vector(sub.mean), ciw=as.vector(ciw))
tempsummary <- suball
colnames(tempsummary)[colnames(tempsummary) == "GROUP_ORIGINAL"] <- "Label"
ptemp <- ggplot(aes_string(x='Label', y='Mean'), data=tempsummary)+
geom_errorbar(aes(ymax = Mean + ciw, ymin= Mean - ciw),
data=tempsummary, width=0.1, colour="red")+
geom_point(size = dot.size.condition, colour = "darkred")+
scale_x_discrete('Condition')+
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup))+
geom_hline(yintercept = 0, linetype = "twodash", colour = "darkgrey", size = 0.6)+
labs(title=unique(sub$PROTEIN))+
theme(
panel.background=element_rect(fill='white', colour="black"),
panel.grid.major.y = element_line(colour="grey95"),
panel.grid.minor.y = element_blank(),
axis.text.x=element_text(size=x.axis.size, colour="black", angle=text.angle),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5))
} else {
## scale : true
## extract numeric value, don't use levels (because T1,T10,T3,...)
## reformat as data.frame
tempsummary <- suball
colnames(tempsummary)[colnames(tempsummary) == "GROUP_ORIGINAL"] <- "Label"
tempsummary$Label <- as.numeric(gsub("\\D", "", unique(tempsummary$Label)))
ptemp <- ggplot(aes_string(x='Label', y='Mean'), data=tempsummary)+
geom_errorbar(aes(ymax = Mean + ciw, ymin = Mean - ciw),
data=tempsummary, width=0.1, colour="red")+
geom_point(size=dot.size.condition, colour="darkred")+
scale_x_continuous('Condition', breaks=tempsummary$Label, labels=tempsummary$Label)+
scale_y_continuous(yaxis.name, limits=c(y.limdown, y.limup))+
geom_hline(yintercept=0, linetype="twodash", colour="darkgrey", size=0.6)+
labs(title=unique(sub$PROTEIN))+
theme(
panel.background=element_rect(fill='white', colour="black"),
panel.grid.major.y = element_line(colour="grey95"),
panel.grid.minor.y = element_blank(),
axis.text.x=element_text(size=x.axis.size, colour="black", angle=text.angle),
axis.text.y=element_text(size=y.axis.size, colour="black"),
axis.ticks=element_line(colour="black"),
axis.title.x=element_text(size=x.axis.size+5, vjust=-0.4),
axis.title.y=element_text(size=y.axis.size+5, vjust=0.3),
title=element_text(size=x.axis.size+8, vjust=1.5))
}
print(ptemp)
message(paste("Drew the condition plot for ", unique(sub$PROTEIN),
"(", i, " of ", length(unique(datarun$PROTEIN)), ")"))
} # end-loop
if (address != FALSE) {
dev.off()
}
## save the table for condition plot
if (save_condition_plot_result) {
colnames(resultall)[colnames(resultall) == "GROUP_ORIGINAL"] <- 'Condition'
if (interval == "CI") {
colnames(resultall)[colnames(resultall) == "ciw"] <- '95% CI'
}
if (interval == "SD") {
colnames(resultall)[colnames(resultall) == "ciw"] <- 'SD'
}
if (address != FALSE) {
allfiles <- list.files()
num <- 0
filenaming <- paste0(address, "ConditionPlot_value")
      finalfile <- paste0(address, "ConditionPlot_value.csv")
while (is.element(finalfile, allfiles)) {
num <- num + 1
finalfile <- paste0(paste(filenaming, num, sep="-"), ".csv")
}
write.csv(resultall, file=finalfile, row.names=FALSE)
}
}
} # end Condition plot
}
|
f824ec3d3e0f34fa0c70458bbc4261ce dungeon_i30-m150-u5-v0.pddl_planlen=14.qdimacs 17906 235009
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i30-m150-u5-v0.pddl_planlen=14/dungeon_i30-m150-u5-v0.pddl_planlen=14.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 92 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pubsub_objects.R
\name{TestIamPermissionsRequest}
\alias{TestIamPermissionsRequest}
\title{TestIamPermissionsRequest Object}
\usage{
TestIamPermissionsRequest(permissions = NULL)
}
\arguments{
\item{permissions}{The set of permissions to check for the `resource`}
}
\value{
TestIamPermissionsRequest object
}
\description{
TestIamPermissionsRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Request message for `TestIamPermissions` method.
}
\seealso{
Other TestIamPermissionsRequest functions: \code{\link{projects.subscriptions.testIamPermissions}},
\code{\link{projects.topics.testIamPermissions}}
}
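\examples{
\dontrun{
## Illustrative sketch only: the permission strings below are placeholders,
## not taken from the package documentation.
req <- TestIamPermissionsRequest(permissions = c("pubsub.topics.get",
                                                 "pubsub.topics.publish"))
}
}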
|
/googlepubsubv1beta2.auto/man/TestIamPermissionsRequest.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false | true | 729 |
rd
|
\name{info.binomial.kgroup}
\alias{info.binomial.kgroup}
\title{
Expected Information Matrix for Single or Multiple Group Binomial
}
\description{
Calculates expected information matrix for a single observation
for single or multiple group binomial distribution.
The natural null hypothesis for a single group is that the probability
is some specified value. For multiple groups, the natural null hypothesis
is that the group probabilities are the same.
}
\usage{
info.binomial.kgroup(p, group.size=1)
}
\arguments{
\item{p}{
Scalar or vector of probability values. The i'th component is the
(alternative hypothesis or true) probability of an event in the i'th
group.
}
\item{group.size}{
Needed only if there are several groups with unequal sample
sizes. The value of the i'th component is the relative sample size of
the i'th group. The calculation made is for a single observation
spread over the several groups in proportion to the specified relative
sizes. If this value is specified, it should be a vector whose
length is the same as p.
}}
\value{
Expected information matrix for a single observation. The matrix is
square with each dimension the number of groups.
}
\references{
Cox, D.R. and Hinkley, D.V. (1974).
\emph{Theoretical Statistics}
Chapman and Hall, London.
}
\seealso{
\code{\link{info.poisson.kgroup}},
\code{\link{info.ordinal.kgroup}},
\code{\link{info.expsurv.kgroup}}
}
\examples{
# Find the information matrix for a 2 sample binomial with
# probability of events .2 and .4 and sample sizes 10 and 11
info.binom <- info.binomial.kgroup(c(.2,.4), c(10,11))
print(info.binom)
}
\keyword{htest}
\concept{information}
% Converted by Sd2Rd version 1.21.
|
/man/info.binomial.kgroup.Rd
|
permissive
|
cran/asypow
|
R
| false | false | 1,783 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proteinpaint_transefer.R
\name{fusions2pp_meta}
\alias{fusions2pp_meta}
\title{Function to convert fusion data to ProteinPaint heatmap meta rows format.}
\usage{
fusions2pp_meta(input_data, input_type = "fusioncatcher",
config_file = system.file("extdata", "config/proteinpaint.toml",
package = "ngstk"), config_list = NULL,
handler_confg_file = system.file("extdata", "config/handler.toml",
package = "ngstk"), mhandler_confg_file = system.file("extdata",
"config/mhandler.toml", package = "ngstk"), handler_funs = NULL,
mhandler_funs = NULL, handler_extra_params = NULL,
mhandler_extra_params = NULL, outfn = NULL)
}
\arguments{
\item{input_data}{A gene fusions data.frame that needs to be converted to ProteinPaint input.}
\item{input_type}{Specifies the input data format (fusioncatcher or others)}
\item{config_file}{ngstk ProteinPaint configuration file path, default is
system.file('extdata', 'config/proteinpaint.toml', package = 'ngstk')}
\item{config_list}{ngstk ProteinPaint configuration, default is NULL and
read from config_file}
\item{handler_confg_file}{ngstk handler configuration file path, default is
system.file('extdata', 'config/handler.toml', package = 'ngstk')}
\item{mhandler_confg_file}{ngstk handler configuration file path, default is
system.file('extdata', 'config/mhandler.toml', package = 'ngstk')}
\item{handler_funs}{handler function for a single column,
default is NULL and the value is read from config_file}
\item{mhandler_funs}{handler function for multiple columns,
default is NULL and the value is read from config_file}
\item{handler_extra_params}{Extra parameters pass to handler}
\item{mhandler_extra_params}{Extra parameters pass to mhandler}
\item{outfn}{Default is NULL and not output the result to file}
}
\value{
A data frame
}
\description{
Function to convert fusion data to ProteinPaint heatmap meta rows format.
}
\examples{
demo_file <- system.file('extdata',
'demo/proteinpaint/fusions2pp_fusioncatcher.txt', package = 'ngstk')
input_data <- read.table(demo_file, sep = '\\t', header = TRUE, stringsAsFactors = FALSE)
disease <- 'B-ALL'
sampletype <- 'diagnose'
input_data <- data.frame(input_data, disease, sampletype)
input_data$disease <- as.character(input_data$disease)
#handler_data <- fusions2pp_meta(input_data, input_type = 'fusioncatcher')
}
|
/man/fusions2pp_meta.Rd
|
permissive
|
JhuangLab/ngstk
|
R
| false | true | 2,388 |
rd
|
library(testthat)
library(dostats)
test_check("dostats")
|
/tests/test_package.R
|
no_license
|
halpo/dostats
|
R
| false | false | 57 |
r
|
#' @title Determine ChIP-Seq Type of Protein Complex Binding
#'
#' @description Estimate the types of protein complex binding. Protein complex
#' binding might act similarly to normal transcription factors, where the
#' changes are symmetrical between two biological conditions (unimodel on fold
#' changes); or the changes might be globally one-side accompanied with some
#' non-changing bindings (bimodel on fold changes). This function help to
#' determine the binding type of given ChIP-Seq samples from two biological
#' conditions using kernal density information of raw fold changes. As this
#' function was designed to work on the raw counts (no normalization needed),
#' only one replicate from each condition is allowed (input a two-column count
#' matrix); otherwise, coverage difference acorss replicates might bias
#' determination.
#'
#' @param count A two-column matrix of read counts or a SummarizedExperiment,
#' where columns are samples and rows are peaks or high coverage bins. This
#' object can be generated by function \code{regionReads}.
#' @param cutoff A numeric cut off on \code{count} matrix. If positive, only
#' peaks/bins with counts larger than \code{cutoff} in at least one sample are
#' used to estimate the binding type. We recommend a larger cutoff since
#' background signal can dramatically mask the right estimation of kernel
#' density, especially for deeply sequenced ChIP-seq samples. (Default: 50)
#' @param fold A numeric threshold to help determine the binding type. In
#' detail, if the top two estimated modes of the smoothed kernel density have a
#' height difference of less than the fold given by \code{fold}, the binding type
#' will be determined as bimodel; otherwise, it is unimodel. This number should be
#' larger than 1. (Default: 10)
#' @param h Initial smoothing factor when estimating the kernel density of raw fold
#' changes for bump hunting. (Default: 0.1)
#' @param plot A logical indicator of whether the M-A plot and smoothed kernel density
#' should be visualized. (Default: TRUE)
#'
#' @importFrom matrixStats rowMaxs
#' @importFrom matrixStats rowDiffs
#' @import SummarizedExperiment
#' @importFrom methods is
#' @importFrom graphics abline
#' @importFrom graphics layout
#' @importFrom stats dnorm
#'
#' @return
#' A character with value either "bimodel" or "unimodel" to represent estimated
#' binding type.
#'
#' @export
#'
#' @examples
#' ## load sample data
#' data(complex)
#' names(complex)
#'
#' ## test sample data
#' chipType(count=complex$counts)
chipType <- function(count, cutoff=50L, fold=10, h=0.1, plot=TRUE){
stopifnot((is.matrix(count) || is(count,"SummarizedExperiment")) &&
ncol(count) == 2)
stopifnot(is.numeric(cutoff) && length(cutoff) == 1 && cutoff >= 0)
stopifnot(is.numeric(fold) && length(fold) == 1 && fold > 1)
stopifnot(is.numeric(h) && length(h) == 1 && h > 0)
stopifnot(is.logical(plot) && length(plot) == 1)
## raw M & A
if(is(count,"SummarizedExperiment")) count <- assay(count,1)
counttmp <- count[rowMaxs(count) >= cutoff,]
logcount <- log2(counttmp + 0.5)
M <- rowDiffs(logcount)
A <- rowMeans(logcount)
    ## kernel density bumps
bump <- c()
ix <- seq(round(min(M), 1), max(M), 0.05)
while(length(bump)<2){
dm <- rowMeans(sapply(M, function(m) dnorm(ix, mean=m, sd=h)))
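        ## a local maximum of the density occurs where the first difference of dm
        ## changes sign from positive to negative, i.e. diff(sign(diff(dm))) == -2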
bump <- which(diff(sign(diff(dm))) == -2) + 1
bump2 <- bump[order(dm[bump],
decreasing=TRUE)[seq_len(min(2,length(bump)))]]
mu <- ix[bump2]
mudm <- dm[bump2]
h <- h * 0.8
}
## protein complex type
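    ## if the two tallest modes have comparable heights (ratio within `fold`),
    ## the fold-change distribution is called bimodel; otherwise unimodel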
enrich <- abs(log(mudm[1] / mudm[2]))
if(enrich <= log(fold)){
cmplxtype <- "bimodel"
}else{
cmplxtype <- "unimodel"
}
## plots
if(plot){
layout(matrix(1:2,1,2))
plot(A,M,pch=20,cex=0.5,main=cmplxtype)
abline(h=0,lty=2,col='red',lwd=2)
plot(density(M,na.rm = TRUE,adjust=1),xlab='M',main=cmplxtype,lwd=2)
abline(v=mu,lty=2,col='blue',lwd=2)
}
cmplxtype
}
|
/R/chipType.R
|
no_license
|
tengmx/ComplexDiff
|
R
| false | false | 4,048 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rf.R
\name{rf}
\alias{rf}
\title{rf models}
\usage{
rf(Y, X, ntree = 500, mtry, Ylabel = NULL)
}
\arguments{
\item{Y}{numerical vector of size n containing the response to predict.}
\item{X}{numerical matrix of dimension n by p containing the regressors}
\item{Ylabel}{a character value containing the name of the Y variable}
}
\description{
rf is a method used to fit random forest models using the randomForest package.
}
\examples{
library(mfe)
data(indicateurs)
X <- indicateurs[, -c(1,2,3)]
Y <- indicateurs[,1]
model <- rf(X = X, Y = Y, Ylabel = colnames(indicateurs)[1])
predict(model, newdata = indicateurs[1,])
}
\seealso{
\code{\link{predict.rf}}
}
|
/man/rf.Rd
|
no_license
|
alex-conanec/OptFilBov
|
R
| false | true | 726 |
rd
|
context("Parsing Tests")
test_that("Entered Data", {
path <- system.file("extdata/excel", package="wateRuse")
exportData <- parseExport(file.path(path,"Export_2010_County.xlsx"),citation=TRUE)
expect_is(exportData, 'list')
expect_equal(length(exportData), 13)
})
test_that("Compare Data", {
})
test_that("Excel Data Gets Normalized", {
path <- system.file("extdata/excel", package="wateRuse")
awuds <- get_awuds_data(path)
expect_equal(nrow(awuds),21)
expect_equal(ncol(awuds),98)
})
test_that("Dump Data Gets Normalized", {
path <- system.file("extdata/dump", package="wateRuse")
awudsdump <- get_awuds_data(path)
expect_equal(nrow(awudsdump),48)
expect_equal(ncol(awudsdump),286)
})
test_that("Excel Data Gets Normalized When Given List of Files", {
fileList <- c(system.file("extdata/excel/Export_2005_County.xlsx", package="wateRuse"),
system.file("extdata/excel/Export_2010_County.xlsx", package="wateRuse"),
system.file("extdata/excel/Export_2015_County.xlsx", package="wateRuse"))
awuds <- get_awuds_data(awuds.data.files=fileList)
expect_equal(nrow(awuds),21)
expect_equal(ncol(awuds),98)
})
test_that("Dump Data gets read in when given as a direct file", {
file <- c(system.file("extdata/dump/exampleAWUDSdump.txt", package="wateRuse"))
awuds <- get_awuds_data(awuds.data.files=file)
expect_equal(nrow(awuds),48)
expect_equal(ncol(awuds),286)
})
|
/tests/testthat/tests_imports.R
|
permissive
|
mamaupin-usgs/wateRuse
|
R
| false | false | 1,438 |
r
|
##### Wesley Janson and Santiago Lacouture
#' Sales Taxes
#' Replication File. Updated on 03/07/2023
#' Step 1: Reduced Form Evidence portion of replication
library(data.table)
library(futile.logger)
library(lfe)
library(multcomp)
setwd("/project/igaarder")
rm(list = ls())
## input filepaths ----------------------------------------------
all_pi <- fread("/project/igaarder/Data/Replication_v2/all_pi.csv")
## output filepaths ----------------------------------------------
output.results.file <- "/project/igaarder/Data/Replication_v2/LRdiff_semesterly_main.csv"
########## Estimations -----------------
### 1. Reduced Form Evidence -----------------
## Set up
formula_lags <- paste0("L", 1:4, ".D.ln_sales_tax", collapse = "+")
formula_leads <- paste0("F", 1:4, ".D.ln_sales_tax", collapse = "+")
formula_RHS <- paste0("D.ln_sales_tax + ", formula_lags, "+", formula_leads)
outcomes <- c("D.ln_cpricei", "D.ln_cpricei2", "D.ln_quantity", "D.ln_quantity2", "D.ln_quantity3")
FE_opts <- c("region_by_module_by_time", "division_by_module_by_time")
## for linear hypothesis tests
lead.vars <- paste(paste0("F", 4:1, ".D.ln_sales_tax"), collapse = " + ")
lag.vars <- paste(paste0("L", 4:1, ".D.ln_sales_tax"), collapse = " + ")
lead.lp.restr <- paste(lead.vars, "= 0")
lag.lp.restr <- paste(lag.vars, "+ D.ln_sales_tax = 0")
total.lp.restr <- paste(lag.vars, "+", lead.vars, "+ D.ln_sales_tax = 0")
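## e.g. lead.lp.restr expands to
## "F4.D.ln_sales_tax + F3.D.ln_sales_tax + F2.D.ln_sales_tax + F1.D.ln_sales_tax = 0"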
# Define samples
samples <- c("all", "non_imp_tax", "non_imp_tax_strong")
LRdiff_res <- data.table(NULL)
## Run
for (s in samples) {
data.est <- all_pi[get(s) == 1,]
for (Y in c(outcomes)) {
for (FE in FE_opts) {
formula1 <- as.formula(paste0(
Y, "~", formula_RHS, "| ", FE, " | 0 | module_by_state"
))
flog.info("Estimating with %s as outcome with %s FE in sample %s.", Y, FE, s)
res1 <- felm(formula = formula1, data = data.est,
weights = data.est$base.sales)
flog.info("Finished estimating with %s as outcome with %s FE in sample %s.", Y, FE, s)
## attach results
flog.info("Writing results...")
res1.dt <- data.table(coef(summary(res1)), keep.rownames=T)
res1.dt[, outcome := Y]
res1.dt[, controls := FE]
res1.dt[, sample := s]
res1.dt[, econ := "none"]
res1.dt[, Rsq := summary(res1)$r.squared]
res1.dt[, adj.Rsq := summary(res1)$adj.r.squared]
LRdiff_res <- rbind(LRdiff_res, res1.dt, fill = T)
fwrite(LRdiff_res, output.results.file)
## sum leads
flog.info("Summing leads...")
lead.test <- glht(res1, linfct = lead.lp.restr)
lead.test.est <- coef(summary(lead.test))[[1]]
lead.test.se <- sqrt(vcov(summary(lead.test)))[[1]]
lead.test.pval <- 2*(1 - pnorm(abs(lead.test.est/lead.test.se)))
## sum lags
flog.info("Summing lags...")
lag.test <- glht(res1, linfct = lag.lp.restr)
lag.test.est <- coef(summary(lag.test))[[1]]
lag.test.se <- sqrt(vcov(summary(lag.test)))[[1]]
lag.test.pval <- 2*(1 - pnorm(abs(lag.test.est/lag.test.se)))
## sum all
flog.info("Summing all...")
total.test <- glht(res1, linfct = total.lp.restr)
total.test.est <- coef(summary(total.test))[[1]]
total.test.se <- sqrt(vcov(summary(total.test)))[[1]]
total.test.pval <- 2*(1 - pnorm(abs(total.test.est/total.test.se)))
## linear hypothesis results
lp.dt <- data.table(
rn = c("Pre.D.ln_sales_tax", "Post.D.ln_sales_tax", "All.D.ln_sales_tax"),
Estimate = c(lead.test.est, lag.test.est, total.test.est),
`Cluster s.e.` = c(lead.test.se, lag.test.se, total.test.se),
`Pr(>|t|)` = c(lead.test.pval, lag.test.pval, total.test.pval),
outcome = Y,
controls = FE,
sample = s,
econ = "none",
Rsq = summary(res1)$r.squared,
adj.Rsq = summary(res1)$adj.r.squared)
LRdiff_res <- rbind(LRdiff_res, lp.dt, fill = T)
fwrite(LRdiff_res, output.results.file)
##### Add the cumulative effect at each lead/lag (relative to -1)
cumul.lead1.est <- 0
cumul.lead1.se <- NA
cumul.lead1.pval <- NA
      #cumul.lead2.est is just equal to minus the change between -2 and -1
cumul.lead2.est <- - coef(summary(res1))[ "F1.D.ln_sales_tax", "Estimate"]
cumul.lead2.se <- coef(summary(res1))[ "F1.D.ln_sales_tax", "Cluster s.e."]
cumul.lead2.pval <- coef(summary(res1))[ "F1.D.ln_sales_tax", "Pr(>|t|)"]
##LEADS
for(j in 3:5) {
## Create a name for estimate, se and pval of each lead
cumul.test.est.name <- paste("cumul.lead", j, ".est", sep = "")
cumul.test.se.name <- paste("cumul.lead", j, ".se", sep = "")
cumul.test.pval.name <- paste("cumul.lead", j, ".pval", sep = "")
## Create the formula to compute cumulative estimate at each lead/lag
cumul.test.form <- paste0("-", paste(paste0("F", (j-1):1, ".D.ln_sales_tax"), collapse = " - "))
cumul.test.form <- paste(cumul.test.form, " = 0")
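        ## e.g. for j = 3 this restriction reads "-F2.D.ln_sales_tax - F1.D.ln_sales_tax = 0"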
## Compute estimate and store in variables names
cumul.test <- glht(res1, linfct = cumul.test.form)
assign(cumul.test.est.name, coef(summary(cumul.test))[[1]])
assign(cumul.test.se.name, sqrt(vcov(summary(cumul.test)))[[1]])
assign(cumul.test.pval.name, 2*(1 - pnorm(abs(coef(summary(cumul.test))[[1]]/sqrt(vcov(summary(cumul.test)))[[1]]))))
}
##LAGS
## On Impact --> Effect = coefficient on D.ln_sales_tax + F1.D.ln_sales_tax
cumul.lag0.est <- coef(summary(res1))[ "D.ln_sales_tax", "Estimate"]
cumul.lag0.se <- coef(summary(res1))[ "D.ln_sales_tax", "Cluster s.e."]
cumul.lag0.pval <- coef(summary(res1))[ "D.ln_sales_tax", "Pr(>|t|)"]
for(j in 1:4) {
        ## Create a name for estimate, se and pval of each lag
cumul.test.est.name <- paste("cumul.lag", j, ".est", sep = "")
cumul.test.se.name <- paste("cumul.lag", j, ".se", sep = "")
cumul.test.pval.name <- paste("cumul.lag", j, ".pval", sep = "")
## Create the formula to compute cumulative estimate at each lead/lag
cumul.test.form <- paste("D.ln_sales_tax + ", paste(paste0("L", 1:j, ".D.ln_sales_tax"), collapse = " + "), sep = "")
cumul.test.form <- paste(cumul.test.form, " = 0")
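        ## e.g. for j = 2 this restriction reads
        ## "D.ln_sales_tax + L1.D.ln_sales_tax + L2.D.ln_sales_tax = 0"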
## Compute estimate and store in variables names
cumul.test <- glht(res1, linfct = cumul.test.form)
assign(cumul.test.est.name, coef(summary(cumul.test))[[1]])
assign(cumul.test.se.name, sqrt(vcov(summary(cumul.test)))[[1]])
assign(cumul.test.pval.name, 2*(1 - pnorm(abs(coef(summary(cumul.test))[[1]]/sqrt(vcov(summary(cumul.test)))[[1]]))))
}
## linear hypothesis results
lp.dt <- data.table(
rn = c("cumul.lead5.D.ln_sales_tax", "cumul.lead4.D.ln_sales_tax", "cumul.lead3.D.ln_sales_tax", "cumul.lead2.D.ln_sales_tax", "cumul.lead1.D.ln_sales_tax", "cumul.lag0.D.ln_sales_tax", "cumul.lag1.D.ln_sales_tax", "cumul.lag2.D.ln_sales_tax", "cumul.lag3.D.ln_sales_tax", "cumul.lag4.D.ln_sales_tax"),
Estimate = c(cumul.lead5.est, cumul.lead4.est, cumul.lead3.est, cumul.lead2.est, cumul.lead1.est, cumul.lag0.est, cumul.lag1.est, cumul.lag2.est, cumul.lag3.est, cumul.lag4.est),
`Cluster s.e.` = c(cumul.lead5.se, cumul.lead4.se, cumul.lead3.se, cumul.lead2.se, cumul.lead1.se, cumul.lag0.se, cumul.lag1.se, cumul.lag2.se, cumul.lag3.se, cumul.lag4.se),
`Pr(>|t|)` = c(cumul.lead5.pval, cumul.lead4.pval, cumul.lead3.pval, cumul.lead2.pval, cumul.lead1.pval, cumul.lag0.pval, cumul.lag1.pval, cumul.lag2.pval, cumul.lag3.pval, cumul.lag4.pval),
outcome = Y,
controls = FE,
sample = s,
econ = "none",
Rsq = summary(res1)$r.squared,
adj.Rsq = summary(res1)$adj.r.squared)
LRdiff_res <- rbind(LRdiff_res, lp.dt, fill = T)
fwrite(LRdiff_res, output.results.file)
}
}
LRdiff_res[sample == s, N_obs := nrow(data.est)]
LRdiff_res[sample == s, N_modules := length(unique(data.est$product_module_code))]
LRdiff_res[sample == s, N_stores := length(unique(data.est$store_code_uc))]
LRdiff_res[sample == s, N_counties := uniqueN(data.est, by = c("fips_state", "fips_county"))]
LRdiff_res[sample == s, N_years := uniqueN(data.est, by = c("year"))] # should be 7 (we lose one because we difference)
LRdiff_res[sample == s, N_county_modules := uniqueN(data.est, by = c("fips_state", "fips_county",
"product_module_code"))]
fwrite(LRdiff_res, output.results.file)
}
|
/Replication/Replication_v3/reduced_form_evidence_v3.R
|
no_license
|
lancelothdf/sales.taxes
|
R
| false | false | 8,763 |
r
|
# Conditional Statements:----
#If-else statement----
# We want to examine whether a variable stored as "quantity" is above 20.
# If quantity greater than 20, print "You sold a lot!" otherwise "Not enough for today."
# Create vector quantity
quantity <- 25
# Set the if-else statement
if (quantity > 20) {
print('You sold a lot!')
} else {
print('Not enough for today')
}
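# With quantity set to 25 above, the condition quantity > 20 is TRUE, so R prints "You sold a lot!"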
# Else-if statement----
# We are interested to know if we sold quantities between 20 and 30.
# If we did, then print: "Average day", if sold quantity is > 30 then print:
# "What a great day!", otherwise print: "Not enough for today".
# Create vector quantity
quantity <- 10
# Create multiple condition statement
if (quantity <20) {
print('Not enough for today')
} else if (quantity >= 20 & quantity <= 30) {
print('Average day')
} else {
print('What a great day!')
}
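# With quantity set to 10 above, the first condition (quantity < 20) is TRUE, so R prints "Not enough for today"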
# Loops: ----
# For Loop----
# Looping over a vector.---------------------------------
# Example 1: We iterate over all the elements of a vector and print the current value.
# Create fruit vector
fruit <- c('Apple', 'Orange', 'Passion fruit', 'Banana')
# Create the for statement
for ( i in fruit){
print(i)
}
# Looping over a matrix.---------------------------------
# Example 3: A matrix has 2-dimension, rows and columns.
# To iterate over a matrix, we define two for loops:
# one for the rows and another for the columns.
# Create a matrix
mat <- matrix(data = seq(1, 18, by=1), nrow = 6, ncol =3)
# Create the loop with r and c to iterate over the matrix
for (r in 1:nrow(mat))
for (c in 1:ncol(mat))
print(paste("Row", r, "and column",c, "have values of", mat[r,c]))
# Looping over a list.--------------------
# Example 4:
# Create a list with three vectors
fruit <- list(Basket = c('Apple','Orange','Passion-fruit','Banana'),
Money = c(10, 12, 15), purchase = FALSE)
# Create a for loop
for (p in fruit)
{
print(p)
}
# While Loop----
# Example: We create a while loop and after each run add 1 to the stored variable. We need to close the loop, therefore we explicitly tell R to stop looping when the variable reaches 3.
#Create a variable with value 1
begin <- 1
#Create the loop
while (begin <= 3){
  # See which loop we are on
cat('This is loop number',begin)
#add 1 to the variable begin after each loop
begin <- begin+1
print(begin)
}
#Infinite Loop if end condition not given:
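# Because the line that increments 'begin' is commented out below, 'begin' stays 1,
# the condition begin <= 3 is always TRUE and the loop never terminates
# (interrupt R, e.g. with Esc, to stop it).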
#Create a variable with value 1
begin <- 1
#Create the loop
while (begin <= 3){
  # See which loop we are on
cat('This is loop number',begin)
#add 1 to the variable begin after each loop
# begin <- begin+1
#print(begin)
}
|
/Loops&conditionalstatements.R
|
no_license
|
bhorkomal/DataAnalysis-R
|
R
| false | false | 2,706 |
r
|
library(mTEC.10x.pipeline)
file_paths <- "/home/kwells4/mTEC_dev/mtec_snakemake/"
save_path <- "/home/kwells4/mTEC_dev/geo_files/"
file_names <- c(aireTrace = "aireTrace",
isoCtl_wk2 = "isoControlBeg",
isoCtl_wk10 = "isoControlEnd",
RANKL_wk2 = "timepoint1",
RANKL_wk4 = "timepoint2",
RANKL_wk6 = "timepoint3",
RANKL_wk10 = "timepoint5",
allSamples = "allSamples",
combinedControl = "controls")
save_matrix <- function(seurat_name, save_name){
object_path <- paste0(file_paths, seurat_name, "/analysis_outs/seurat_",
seurat_name, ".rda")
print(object_path)
seurat_object <- get(load(object_path))
data_matrix <- as.data.frame(as.matrix(seurat_object@data))
write.table(data_matrix, file = paste0(save_path, save_name, ".csv"),
sep = ",", row.names = TRUE, col.names = TRUE)
}
lapply(names(file_names), function(x) save_matrix(file_names[x], x))
|
/scripts/make_geo_files.R
|
permissive
|
kwells4/mtec_analysis
|
R
| false | false | 972 |
r
|
library(mTEC.10x.pipeline)
file_paths <- "/home/kwells4/mTEC_dev/mtec_snakemake/"
save_path <- "/home/kwells4/mTEC_dev/geo_files/"
file_names <- c(aireTrace = "aireTrace",
isoCtl_wk2 = "isoControlBeg",
isoCtl_wk10 = "isoControlEnd",
RANKL_wk2 = "timepoint1",
RANKL_wk4 = "timepoint2",
RANKL_wk6 = "timepoint3",
RANKL_wk10 = "timepoint5",
allSamples = "allSamples",
combinedControl = "controls")
save_matrix <- function(seurat_name, save_name){
object_path <- paste0(file_paths, seurat_name, "/analysis_outs/seurat_",
seurat_name, ".rda")
print(object_path)
seurat_object <- get(load(object_path))
data_matrix <- as.data.frame(as.matrix(seurat_object@data))
write.table(data_matrix, file = paste0(save_path, save_name, ".csv"),
sep = ",", row.names = TRUE, col.names = TRUE)
}
lapply(names(file_names), function(x) save_matrix(file_names[x], x))
|
\name{cor.rect.plot}
\alias{cor.rect.plot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot a visualization of the correlation using colored rectangles
}
\description{
This function creates a scatterplot of the data, then adds colored
rectangles between the points and the mean of x and y to represent the
idea of the correlation coefficient.
}
\usage{
cor.rect.plot(x, y, corr = TRUE, xlab = deparse(substitute(x)),
ylab = deparse(substitute(y)), col = c("#ff000055", "#0000ff55"),
...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The \code{x} value or any object that can be interpreted by \code{xy.coords}
}
\item{y}{
The \code{y} value
}
\item{corr}{
Should the standardized axes (right and top) show the values divided
by the standard deviation (TRUE, which shows correlation ideas) or not
(FALSE, shows covariance idea)
}
\item{xlab}{
The label for the \code{x} axis
}
\item{ylab}{
The label for the \code{y} axis
}
\item{col}{
A vector of length 2 with the colors to use for the fill of the
rectangles, the 1st value will be used for "positive" rectangles and the 2nd value will be used for the "negative" rectangles.
}
\item{\dots}{
Possible further arguments, currently ignored
}
}
\details{
  This will create a scatterplot of the data, draw reference lines at
the mean of \code{x} and the mean of \code{y}, then draw rectangles
from the mean point to the data points. The right and top axes will
show the centered (and possibly scaled if \code{corr=TRUE}) values.
The idea is that the correlation/covariance is based on summing the
area of the "positive" rectangles and subtracting the sum of the areas
of the "negative" rectangles (then dividing by n-1). If the positive
and negative areas are about the same then the correlation/covariance
is near 0, if there is more area in the positive rectangles then the
correlation/covariance will be positive.
}
\value{
This function returns an invisible NULL, it is run for its side effects.
}
\author{Greg Snow, \email{538280@gmail.com}}
\seealso{
\code{\link{cor}}
}
\examples{
## low correlation
x <- rnorm(25)
y <- rnorm(25)
cor(x,y)
cor.rect.plot(x,y)
## Positive correlation
x <- rnorm(25)
y <- x + rnorm(25,3, .5)
cor(x,y)
cor.rect.plot(x,y)
## negative correlation
x <- rnorm(25)
y <- rnorm(25,10,1.5) - x
cor(x,y)
cor.rect.plot(x,y)
## zero correlation but a definite relationship
x <- -5:5
y <- x^2
cor(x,y)
cor.rect.plot(x,y)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ hplot }
|
/man/correct.Rd
|
no_license
|
glsnow/TeachingDemos
|
R
| false | false | 2,723 |
rd
|
\name{cor.rect.plot}
\alias{cor.rect.plot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot a visualization of the correlation using colored rectangles
}
\description{
This function creates a scatterplot of the data, then adds colored
rectangles between the points and the mean of x and y to represent the
idea of the correlation coefficient.
}
\usage{
cor.rect.plot(x, y, corr = TRUE, xlab = deparse(substitute(x)),
ylab = deparse(substitute(y)), col = c("#ff000055", "#0000ff55"),
...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
The \code{x} value or any object that can be interpreted by \code{xy.coords}
}
\item{y}{
The \code{y} value
}
\item{corr}{
Should the standardized axes (right and top) show the values divided
by the standard deviation (TRUE, which shows correlation ideas) or not
(FALSE, shows covariance idea)
}
\item{xlab}{
The label for the \code{x} axis
}
\item{ylab}{
The label for the \code{y} axis
}
\item{col}{
A vector of length 2 with the colors to use for the fill of the
rectangles, the 1st value will be used for "positive" rectangles and the 2nd value will be used for the "negative" rectangles.
}
\item{\dots}{
Possible further arguments, currently ignored
}
}
\details{
  This will create a scatterplot of the data, draw reference lines at
the mean of \code{x} and the mean of \code{y}, then draw rectangles
from the mean point to the data points. The right and top axes will
show the centered (and possibly scaled if \code{corr=TRUE}) values.
The idea is that the correlation/covariance is based on summing the
area of the "positive" rectangles and subtracting the sum of the areas
of the "negative" rectangles (then dividing by n-1). If the positive
and negative areas are about the same then the correlation/covariance
is near 0, if there is more area in the positive rectangles then the
correlation/covariance will be positive.
}
\value{
This function returns an invisible NULL, it is run for its side effects.
}
\author{Greg Snow, \email{538280@gmail.com}}
\seealso{
\code{\link{cor}}
}
\examples{
## low correlation
x <- rnorm(25)
y <- rnorm(25)
cor(x,y)
cor.rect.plot(x,y)
## Positive correlation
x <- rnorm(25)
y <- x + rnorm(25,3, .5)
cor(x,y)
cor.rect.plot(x,y)
## negative correlation
x <- rnorm(25)
y <- rnorm(25,10,1.5) - x
cor(x,y)
cor.rect.plot(x,y)
## zero correlation but a definite relationship
x <- -5:5
y <- x^2
cor(x,y)
cor.rect.plot(x,y)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ hplot }
|
etf4.all<-readRDS("etf4_xts_all")
head(etf4.all)
etf4.all.1 = etf4.all[complete.cases(etf4.all),]
head(etf4.all.1)
etf4.all<-readRDS("D:/FinDB-master/finDB/etf4_xts_all")
head(etf4.all)
str(etf4.all)
etf4.all.1<-etf4.all[complete.cases(etf4.all),]
head(etf4.all.1)
tail(etf4.all.1)
# 0050
library(xts)
data1<-new.env()
data1$prices<-etf4.all.1$`0050`
prices<-data1$prices
prices
sma50<-SMA(prices, 50)
head(sma50, 51)
# 0056
library(xts)
data1<-new.env()
data1$prices<-etf4.all.1$`0056`
prices<-data1$prices
prices
sma56<-SMA(prices, 50)
head(sma56, 51)
# 006205
library(xts)
data1<-new.env()
data1$prices<-etf4.all.1$`006205`
prices<-data1$prices
prices
sma6205<-SMA(prices, 50)
head(sma6205, 51)
# 00646
library(xts)
data1<-new.env()
data1$prices<-etf4.all.1$`00646`
prices<-data1$prices
prices
sma646<-SMA(prices, 50)
head(sma646, 51)
# buy and hold for 0050
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`0050`
data1$weight[] = 1
buy.hold.0050 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.0050 <-bt.run(data1)
#0056
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`0056`
data1$weight[] = 1
buy.hold.0056 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.0056 <-bt.run(data1)
#006205
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`006205`
data1$weight[] = 1
buy.hold.006205 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.006205 <-bt.run(data1)
#00646
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`00646`
data1$weight[] = 1
buy.hold.00646 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.00646 <-bt.run(data1)
# sma 200 for 0050
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.0050 <- bt.run(data1, trade.summary=T)
sma200.0050$trade.summary
#0056
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.0056 <- bt.run(data1, trade.summary=T)
sma200.0056$trade.summary
#006205
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.006205 <- bt.run(data1, trade.summary=T)
sma200.006205$trade.summary
#00646
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.00646 <- bt.run(data1, trade.summary=T)
sma200.00646$trade.summary
# sma 50 for 0050
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.0050 <- bt.run(data1, trade.summary=T)
#0056
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.0056 <- bt.run(data1, trade.summary=T)
#006205
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.006205 <- bt.run(data1, trade.summary=T)
#00646
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.00646 <- bt.run(data1, trade.summary=T)
# sma 50 for 0050, short allowed
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.0050.short <- bt.run(data1, trade.summary=T)
#0056
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.0056.short <- bt.run(data1, trade.summary=T)
#006205
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.006205.short <- bt.run(data1, trade.summary=T)
#00646
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.00646.short <- bt.run(data1, trade.summary=T)
# summary of investment
models<-list("SMA50"= sma50.0050,
"SMA200"= sma200.0050,
"SMA50_short" = sma50.0050.short,
"BH 0050" = buy.hold.0050)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
#0056
models<-list("SMA50"= sma50.0056,
"SMA200"= sma200.0056,
"SMA50_short" = sma50.0056.short,
"BH 0050" = buy.hold.0056)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
#006205
models<-list("SMA50"= sma50.006205,
"SMA200"= sma200.006205,
"SMA50_short" = sma50.006205.short,
"BH 0050" = buy.hold.006205)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
#00646
models<-list("SMA50"= sma50.00646,
"SMA200"= sma200.00646,
"SMA50_short" = sma50.00646.short,
"BH 0050" = buy.hold.00646)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
# You can plot in ggplot2
library(ggplot2)
all.0050<-merge.xts(sma50.0050$equity,
sma50.0050.short$equity,
sma200.0050$equity,
buy.hold.0050$equity)
colnames(all.0050)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.0050)
all.0050.long<-fortify(all.0050, melt=T)
head(all.0050.long)
#
title = "Cumulative returns of 0050s"
p = ggplot(all.0050.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
#0056
library(ggplot2)
all.0056<-merge.xts(sma50.0056$equity,
sma50.0056.short$equity,
sma200.0056$equity,
buy.hold.0056$equity)
colnames(all.0056)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.0056)
all.0056.long<-fortify(all.0056, melt=T)
head(all.0056.long)
#
title = "Cumulative returns of 0056s"
p = ggplot(all.0056.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
#006205
library(ggplot2)
all.006205<-merge.xts(sma50.006205$equity,
sma50.006205.short$equity,
sma200.006205$equity,
buy.hold.006205$equity)
colnames(all.006205)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.006205)
all.006205.long<-fortify(all.006205, melt=T)
head(all.006205.long)
#
title = "Cumulative returns of 006205s"
p = ggplot(all.006205.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
#00646
library(ggplot2)
all.00646<-merge.xts(sma50.00646$equity,
sma50.00646.short$equity,
sma200.00646$equity,
buy.hold.00646$equity)
colnames(all.00646)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.00646)
all.00646.long<-fortify(all.00646, melt=T)
head(all.00646.long)
#
title = "Cumulative returns of 00646s"
p = ggplot(all.00646.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
|
/hw07.R
|
no_license
|
106035007/finDB
|
R
| false | false | 7,702 |
r
|
etf4.all<-readRDS("etf4_xts_all")
head(etf4.all)
etf4.all.1 = etf4.all[complete.cases(etf4.all),]
head(etf4.all.1)
etf4.all<-readRDS("D:/FinDB-master/finDB/etf4_xts_all")
head(etf4.all)
str(etf4.all)
etf4.all.1<-etf4.all[complete.cases(etf4.all),]
head(etf4.all.1)
tail(etf4.all.1)
# 0050
library(xts)
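# Dependency note: SMA() is provided by the TTR package, while bt.prep(), bt.run(),
# bt.run.share(), iif(), strategy.performance.snapshoot() and plotbt.strategy.sidebyside()
# come from the Systematic Investor Toolbox (SIT). This script assumes both have
# already been loaded/sourced in the session.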
data1<-new.env()
data1$prices<-etf4.all.1$`0050`
prices<-data1$prices
prices
sma50<-SMA(prices, 50)
head(sma50, 51)
# 0056
library(xts)
data1<-new.env()
data1$prices<-etf4.all.1$`0056`
prices<-data1$prices
prices
sma56<-SMA(prices, 50)
head(sma56, 51)
# 006205
library(xts)
data1<-new.env()
data1$prices<-etf4.all.1$`006205`
prices<-data1$prices
prices
sma6205<-SMA(prices, 50)
head(sma6205, 51)
# 00646
library(xts)
data1<-new.env()
data1$prices<-etf4.all.1$`00646`
prices<-data1$prices
prices
sma646<-SMA(prices, 50)
head(sma646, 51)
# buy and hold for 0050
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`0050`
data1$weight[] = 1
buy.hold.0050 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.0050 <-bt.run(data1)
#0056
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`0056`
data1$weight[] = 1
buy.hold.0056 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.0056 <-bt.run(data1)
#006205
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`006205`
data1$weight[] = 1
buy.hold.006205 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.006205 <-bt.run(data1)
#00646
bt.prep(data1, align='keep.all')
names(data1)
data1$dates
data1$prices
data1$prices = prices
data1$prices
data1$weight
data1$execution.price = data1$prices = etf4.all.1$`00646`
data1$weight[] = 1
buy.hold.00646 <- bt.run.share(data1, clean.signal=F, trade.summary = TRUE)
buy.hold.00646 <-bt.run(data1)
# sma 200 for 0050
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.0050 <- bt.run(data1, trade.summary=T)
sma200.0050$trade.summary
#0056
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.0056 <- bt.run(data1, trade.summary=T)
sma200.0056$trade.summary
#006205
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.006205 <- bt.run(data1, trade.summary=T)
sma200.006205$trade.summary
#00646
prices<-data1$prices
sma200<-SMA(prices, 200)
head(sma200, 201)
data1$weight[] <- iif(prices >= sma200, 1, 0)
sma200.00646 <- bt.run(data1, trade.summary=T)
sma200.00646$trade.summary
# sma 50 for 0050
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.0050 <- bt.run(data1, trade.summary=T)
#0056
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.0056 <- bt.run(data1, trade.summary=T)
#006205
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.006205 <- bt.run(data1, trade.summary=T)
#00646
sma50<-SMA(prices, 50)
head(sma50, 51)
data1$weight[] <- iif(prices >= sma50, 1, 0)
sma50.00646 <- bt.run(data1, trade.summary=T)
# sma 50 for 0050, short allowed
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.0050.short <- bt.run(data1, trade.summary=T)
#0056
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.0056.short <- bt.run(data1, trade.summary=T)
#006205
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.006205.short <- bt.run(data1, trade.summary=T)
#00646
data1$weight[] <- iif(prices >= sma50, 1, -1)
sma50.00646.short <- bt.run(data1, trade.summary=T)
# summary of investment
models<-list("SMA50"= sma50.0050,
"SMA200"= sma200.0050,
"SMA50_short" = sma50.0050.short,
"BH 0050" = buy.hold.0050)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
#0056
models<-list("SMA50"= sma50.0056,
"SMA200"= sma200.0056,
"SMA50_short" = sma50.0056.short,
"BH 0050" = buy.hold.0056)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
#006205
models<-list("SMA50"= sma50.006205,
"SMA200"= sma200.006205,
"SMA50_short" = sma50.006205.short,
"BH 0050" = buy.hold.006205)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
#00646
models<-list("SMA50"= sma50.00646,
"SMA200"= sma200.00646,
"SMA50_short" = sma50.00646.short,
"BH 0050" = buy.hold.00646)
strategy.performance.snapshoot(models, T)
strategy.performance.snapshoot(models, control=list(comparison=T), sort.performance=T)
plotbt.strategy.sidebyside(models, return.table=T)
# You can plot in ggplot2
library(ggplot2)
all.0050<-merge.xts(sma50.0050$equity,
sma50.0050.short$equity,
sma200.0050$equity,
buy.hold.0050$equity)
colnames(all.0050)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.0050)
all.0050.long<-fortify(all.0050, melt=T)
head(all.0050.long)
#
title = "Cumulative returns of 0050s"
p = ggplot(all.0050.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
#0056
library(ggplot2)
all.0056<-merge.xts(sma50.0056$equity,
sma50.0056.short$equity,
sma200.0056$equity,
buy.hold.0056$equity)
colnames(all.0056)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.0056)
all.0056.long<-fortify(all.0056, melt=T)
head(all.0056.long)
#
title = "Cumulative returns of 0056s"
p = ggplot(all.0056.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
#006205
library(ggplot2)
all.006205<-merge.xts(sma50.006205$equity,
sma50.006205.short$equity,
sma200.006205$equity,
buy.hold.006205$equity)
colnames(all.006205)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.006205)
all.006205.long<-fortify(all.006205, melt=T)
head(all.006205.long)
#
title = "Cumulative returns of 006205s"
p = ggplot(all.006205.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
#00646
library(ggplot2)
all.00646<-merge.xts(sma50.00646$equity,
sma50.00646.short$equity,
sma200.00646$equity,
buy.hold.00646$equity)
colnames(all.00646)<-c("sma50", "sma50 short", "sma200", "BH")
head(all.00646)
all.00646.long<-fortify(all.00646, melt=T)
head(all.00646.long)
#
title = "Cumulative returns of 00646s"
p = ggplot(all.00646.long, aes(x = Index, y = Value)) +
geom_line(aes(linetype = Series, color = Series)) +
#geom_point(aes(shape = Series))+
xlab("year") + ylab("cumulative returns")+
ggtitle(title)
p
|
#' Statistics List
#'
#' Selectable statistics about the Rocket.Chat server. It supports the Offset, Count and Sort Query Parameters along with the Fields and Query Parameters. Requires view-statistics permission.
#'
#' @param tok The token to connect to the app.
#' @param query See Query Parameter Required
#' @param offset See Offset Parameter Optional
#' @param count See Count Parameter Optional
#' @param fields See Fields Parameters Optional
#' @param sort See Sort Query Parameters Optional
#'
#' @export
#' @importFrom httr POST GET add_headers content stop_for_status
#' @importFrom jsonlite toJSON
miscellaneous_statistics_list <- function(tok,
query,
offset = NULL,
count = NULL,
fields = NULL,
sort = NULL) {
params <- list(
query = query,
offset = offset,
count = count,
fields = fields,
sort = sort
)
params <- no_null(params)
params <- toJSON(params, auto_unbox = TRUE)
res <- httr::GET(
add_headers(
"Content-type" = "application/json",
"X-Auth-Token" = tok$data$authToken,
"X-User-Id" = tok$data$userId
),
url = paste0(tok$url, "/api/v1/statistics.list")
)
stop_for_status(res)
content(res)
}
|
/R/miscellaneous_statistics_list.R
|
no_license
|
ColinFay/rrocketchat
|
R
| false | false | 1,398 |
r
|
#' Statistics List
#'
#' Selectable statistics about the Rocket.Chat server. It supports the Offset, Count and Sort Query Parameters along with the Fields and Query Parameters. Requires view-statistics permission.
#'
#' @param tok The token to connect to the app.
#' @param query See Query Parameter Required
#' @param offset See Offset Parameter Optional
#' @param count See Count Parameter Optional
#' @param fields See Fields Parameters Optional
#' @param sort See Sort Query Parameters Optional
#'
#' @export
#' @importFrom httr POST GET add_headers content stop_for_status
#' @importFrom jsonlite toJSON
miscellaneous_statistics_list <- function(tok,
query,
offset = NULL,
count = NULL,
fields = NULL,
sort = NULL) {
params <- list(
query = query,
offset = offset,
count = count,
fields = fields,
sort = sort
)
params <- no_null(params)
params <- toJSON(params, auto_unbox = TRUE)
res <- httr::GET(
add_headers(
"Content-type" = "application/json",
"X-Auth-Token" = tok$data$authToken,
"X-User-Id" = tok$data$userId
),
url = paste0(tok$url, "/api/v1/statistics.list")
)
stop_for_status(res)
content(res)
}
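# Hedged usage sketch (illustrative only; the server URL, credentials and query
# below are made up). `tok` is assumed to be the login object used by this package,
# carrying `url`, `data$authToken` and `data$userId` as read above.
if (FALSE) {
  tok <- list(url = "https://chat.example.org",
              data = list(authToken = "<auth-token>", userId = "<user-id>"))
  stats <- miscellaneous_statistics_list(tok, query = '{"_id": {"$exists": true}}')
  str(stats)
}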
|
library(RLLVMCompile)
# Checks an if statement inside a loop.
#
# Need to also test an if statement with a continue/next/break
f =
function(x, n)
{
ctr = 0L
for(i in 1:n) {
if(x[i] > 2)
ctr = ctr + 1L
}
ctr
}
f = compileFunction(f, Int32Type, list(INTSXPType, Int32Type))
#ee = ExecutionEngine(f)
set.seed(13125)
v = rpois(100, 4)
stopifnot(.llvm(f, v, length(v)) == sum(v > 2))
|
/tests/LoopIf.R
|
no_license
|
duncantl/RLLVMCompile
|
R
| false | false | 404 |
r
|
library(RLLVMCompile)
# Checks an if statement inside a loop.
#
# Need to also test an if statement with a continue/next/break
f =
function(x, n)
{
ctr = 0L
for(i in 1:n) {
if(x[i] > 2)
ctr = ctr + 1L
}
ctr
}
f = compileFunction(f, Int32Type, list(INTSXPType, Int32Type))
#ee = ExecutionEngine(f)
set.seed(13125)
v = rpois(100, 4)
stopifnot(.llvm(f, v, length(v)) == sum(v > 2))
|
library(tidyverse)
library(here)
song_counts = readRDS(here("out/songs_plays.rds"))
artist_info_spotify = read_csv(here("out/artist_info_spotify_clean.csv")) %>%
filter(!is.na(art_id_spotify))
song_info_spotify = read_csv(here("out/song_info_spotify.csv"))
song_spotify_IDs = readRDS(here("out/song_IDs_spotify.rds"))
spotify_info_join = song_spotify_IDs %>%
inner_join(song_info_spotify, by = c("id" = "track_id")) %>%
distinct()
artist_song_plays = song_counts %>%
inner_join(spotify_info_join,
by = c("uuid_artist" = "uuid_artist",
"Artist" = "Artist",
"Song" = "Song"))
artist_song_plays %>%
select(
uuid_artist,
Artist,
Song,
song_number_of_plays_lfm = number_of_plays,
song_number_of_users_listened_lfm = number_of_users_listened,
artist_id_spotify = artists_IDs,
artist_name_spotify = artists,
track_name_spotify = display_name,
track_id_spotify = id,
track_duration_ms_spotify = track_duration_ms,
track_explicit_spotify = track_explicit,
track_popularity_spotify = track_popularity,
album_id_spotify = album_id,
album_name_spotify = album_name,
album_release_date_spotify = album_release_date,
album_release_date_precision_spotify = album_release_date_precision
) %>%
write_csv(., here("out/song_info_spotify_clean.csv"))
|
/code/11_clean_track_info_spotify.R
|
no_license
|
mariobecerra/data_vis_project_2020
|
R
| false | false | 1,385 |
r
|
library(tidyverse)
library(here)
song_counts = readRDS(here("out/songs_plays.rds"))
artist_info_spotify = read_csv(here("out/artist_info_spotify_clean.csv")) %>%
filter(!is.na(art_id_spotify))
song_info_spotify = read_csv(here("out/song_info_spotify.csv"))
song_spotify_IDs = readRDS(here("out/song_IDs_spotify.rds"))
spotify_info_join = song_spotify_IDs %>%
inner_join(song_info_spotify, by = c("id" = "track_id")) %>%
distinct()
artist_song_plays = song_counts %>%
inner_join(spotify_info_join,
by = c("uuid_artist" = "uuid_artist",
"Artist" = "Artist",
"Song" = "Song"))
artist_song_plays %>%
select(
uuid_artist,
Artist,
Song,
song_number_of_plays_lfm = number_of_plays,
song_number_of_users_listened_lfm = number_of_users_listened,
artist_id_spotify = artists_IDs,
artist_name_spotify = artists,
track_name_spotify = display_name,
track_id_spotify = id,
track_duration_ms_spotify = track_duration_ms,
track_explicit_spotify = track_explicit,
track_popularity_spotify = track_popularity,
album_id_spotify = album_id,
album_name_spotify = album_name,
album_release_date_spotify = album_release_date,
album_release_date_precision_spotify = album_release_date_precision
) %>%
write_csv(., here("out/song_info_spotify_clean.csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hints.r
\name{hint.else.active}
\alias{hint.else.active}
\title{Get or set whether hint.else or
auto.hint.else would be triggered.}
\usage{
hint.else.active(activate = NULL, ps = get.ps())
}
\description{
If a hint.stud.call or hint.stud.assign is shown
then a hint.else or auto.hint.else would not be
triggered. This function returns TRUE if hint.else
would still be triggered and FALSE otherwise.
}
\details{
If you set the argument activate you can change this status.
}
|
/man/hint.else.active.Rd
|
no_license
|
skranz/RTutor
|
R
| false | true | 551 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hints.r
\name{hint.else.active}
\alias{hint.else.active}
\title{Get or set whether hint.else or
auto.hint.else would be triggered.}
\usage{
hint.else.active(activate = NULL, ps = get.ps())
}
\description{
If a hint.stud.call or hint.stud.assign is shown
then a hint.else or auto.hint.else would not be
triggered. This function returns TRUE if hint.else
would still be triggered and FALSE otherwise.
}
\details{
If you set the argument activate you can change this status.
}
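% Hedged usage sketch (added for illustration; calls assume an active problem set):
\examples{
\dontrun{
hint.else.active()                 # is hint.else / auto.hint.else still triggered?
hint.else.active(activate = FALSE) # switch the triggering off
}
}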
|
testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = integer(0), item_score = integer(0), person_id = c(0L, -14595414L, 906002649L, -640034343L, -642205512L, 666417152L, 0L, 16L, -14286848L, 0L, 852185L, -1438733632L, 46137343L, -223L, -1426128896L, 0L, 852185L, -1438733632L, 46137343L, 724298528L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result)
|
/dexterMST/inst/testfiles/mutate_booklet_score/libFuzzer_mutate_booklet_score/mutate_booklet_score_valgrind_files/1612726503-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false | false | 435 |
r
|
testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = integer(0), item_score = integer(0), person_id = c(0L, -14595414L, 906002649L, -640034343L, -642205512L, 666417152L, 0L, 16L, -14286848L, 0L, 852185L, -1438733632L, 46137343L, -223L, -1426128896L, 0L, 852185L, -1438733632L, 46137343L, 724298528L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result)
|
### VIP.R: Implementation of VIP (variable importance in projection)(*) for the
### `pls' package.
### $Id: VIP.R,v 1.2 2007/07/30 09:17:36 bhm Exp $
### Copyright © 2006,2007 Bjørn-Helge Mevik
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License version 2 as
### published by the Free Software Foundation.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### A copy of the GPL text is available here:
### http://www.gnu.org/licenses/gpl-2.0.txt
### Contact info:
### Bjørn-Helge Mevik
### bhx6@mevik.net
### Rødtvetvien 20
### N-0955 Oslo
### Norway
### (*) As described in Chong, Il-Gyo & Jun, Chi-Hyuck, 2005, Performance of
### some variable selection methods when multicollinearity is present,
### Chemometrics and Intelligent Laboratory Systems 78, 103--112.
## VIP returns all VIP values for all variables and all number of components,
## as a ncomp x nvars matrix.
VIP <- function(object) {
if (object$method != "oscorespls")
stop("Only implemented for orthogonal scores algorithm. Refit with 'method = \"oscorespls\"'")
if (nrow(object$Yloadings) > 1)
stop("Only implemented for single-response models")
SS <- c(object$Yloadings)^2 * colSums(object$scores^2)
Wnorm2 <- colSums(object$loading.weights^2)
SSW <- sweep(object$loading.weights^2, 2, SS / Wnorm2, "*")
sqrt(nrow(SSW) * apply(SSW, 1, cumsum) / cumsum(SS))
}
## VIPjh returns the VIP of variable j with h components
VIPjh <- function(object, j, h) {
if (object$method != "oscorespls")
stop("Only implemented for orthogonal scores algorithm. Refit with 'method = \"oscorespls\"'")
if (nrow(object$Yloadings) > 1)
stop("Only implemented for single-response models")
b <- c(object$Yloadings)[1:h]
T <- object$scores[,1:h, drop = FALSE]
SS <- b^2 * colSums(T^2)
W <- object$loading.weights[,1:h, drop = FALSE]
Wnorm2 <- colSums(W^2)
sqrt(nrow(W) * sum(SS * W[j,]^2 / Wnorm2) / sum(SS))
}
|
/VIP.R
|
no_license
|
horto2dj/diel_wetland_comm_str
|
R
| false | false | 2,196 |
r
|
### VIP.R: Implementation of VIP (variable importance in projection)(*) for the
### `pls' package.
### $Id: VIP.R,v 1.2 2007/07/30 09:17:36 bhm Exp $
### Copyright © 2006,2007 Bjørn-Helge Mevik
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License version 2 as
### published by the Free Software Foundation.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### A copy of the GPL text is available here:
### http://www.gnu.org/licenses/gpl-2.0.txt
### Contact info:
### Bjørn-Helge Mevik
### bhx6@mevik.net
### Rødtvetvien 20
### N-0955 Oslo
### Norway
### (*) As described in Chong, Il-Gyo & Jun, Chi-Hyuck, 2005, Performance of
### some variable selection methods when multicollinearity is present,
### Chemometrics and Intelligent Laboratory Systems 78, 103--112.
## VIP returns all VIP values for all variables and all number of components,
## as a ncomp x nvars matrix.
VIP <- function(object) {
if (object$method != "oscorespls")
stop("Only implemented for orthogonal scores algorithm. Refit with 'method = \"oscorespls\"'")
if (nrow(object$Yloadings) > 1)
stop("Only implemented for single-response models")
SS <- c(object$Yloadings)^2 * colSums(object$scores^2)
Wnorm2 <- colSums(object$loading.weights^2)
SSW <- sweep(object$loading.weights^2, 2, SS / Wnorm2, "*")
sqrt(nrow(SSW) * apply(SSW, 1, cumsum) / cumsum(SS))
}
## VIPjh returns the VIP of variable j with h components
VIPjh <- function(object, j, h) {
if (object$method != "oscorespls")
stop("Only implemented for orthogonal scores algorithm. Refit with 'method = \"oscorespls\"'")
if (nrow(object$Yloadings) > 1)
stop("Only implemented for single-response models")
b <- c(object$Yloadings)[1:h]
T <- object$scores[,1:h, drop = FALSE]
SS <- b^2 * colSums(T^2)
W <- object$loading.weights[,1:h, drop = FALSE]
Wnorm2 <- colSums(W^2)
sqrt(nrow(W) * sum(SS * W[j,]^2 / Wnorm2) / sum(SS))
}
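### Hedged usage sketch (not from the original author). VIP() expects a
### single-response model fitted with the orthogonal scores algorithm, e.g. via
### pls::plsr(..., method = "oscorespls"); the gasoline data ships with pls.
if (FALSE) {
  library(pls)
  data(gasoline)
  fit <- plsr(octane ~ NIR, ncomp = 5, data = gasoline, method = "oscorespls")
  VIP(fit)                  # ncomp x nvars matrix of VIP scores
  VIPjh(fit, j = 1, h = 3)  # VIP of variable 1 using 3 components
}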
|
library(testthat)
library(imputeMulti)
library(parallel)
context("int- count_levels works")
test_that("errors work", {
df <- data.frame(x=rnorm(100))
expect_error(count_levels(dat=df, hasNA= "absolutely", parallel= FALSE))
expect_error(count_levels(dat=df, hasNA= "count", parallel= FALSE))
expect_error(count_levels(dat=df, hasNA= "count.obs", parallel= TRUE,
leave_cores= -1))
expect_error(count_levels(dat=df, hasNA= "count.obs", parallel= TRUE,
leave_cores= 1.5))
})
test_that("count levels works with all missing data options... parallel = FALSE", {
# create test data
set.seed(12315)
x1 <- factor(sample(1:5, size=100, replace= TRUE))
x2 <- factor(sample(6:10, size=100, replace= TRUE))
x3 <- factor(sample(11:15, size=100, replace= TRUE))
x4 <- factor(sample(16:20, size=100, replace= TRUE))
x5 <- factor(sample(21:26, size=100, replace= TRUE))
dat <- c(x1, x2, x3, x4, x5)
# insert missing values
mis.ind <- sample(1:length(dat), size= 75, replace= FALSE)
dat[mis.ind] <- NA
rm(x1,x2,x3,x4,x5, mis.ind)
dim(dat)<- c(100, 5)
dat <- data.frame(apply(dat, 2, function(x) as.factor(x)))
enum <- expand.grid(sapply(dat, function(x) return(c(levels(x), NA))))
cnt.comp <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= FALSE)
cnt.ob <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= FALSE)
cnt.mis <- count_levels(dat[!complete.cases(dat),], enum_list= enum,
hasNA= "count.miss", parallel= FALSE)
### no missing data tests
expect_equal(sum(cnt.comp$counts), sum(complete.cases(dat)))
expect_equal(sum(cnt.comp$counts == 0), 0)
expect_lt(nrow(cnt.comp), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.comp))
### missing data tests -- count.obs
expect_equal(sum(cnt.ob$counts == 0), 0)
expect_lt(nrow(cnt.ob), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.ob))
### missing data tests -- count.miss
expect_equal(sum(cnt.mis$counts), sum(!complete.cases(dat)))
expect_equal(sum(cnt.mis$counts == 0), 0)
expect_lt(nrow(cnt.mis), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.mis))
expect_equal(sum(cnt.mis$counts) + sum(cnt.comp$counts), nrow(dat))
})
test_that("count levels works with all missing data options... parallel = TRUE", {
# create test data
set.seed(12315)
x1 <- factor(sample(1:5, size=100, replace= TRUE))
x2 <- factor(sample(6:10, size=100, replace= TRUE))
x3 <- factor(sample(11:15, size=100, replace= TRUE))
x4 <- factor(sample(16:20, size=100, replace= TRUE))
x5 <- factor(sample(21:26, size=100, replace= TRUE))
dat <- c(x1, x2, x3, x4, x5)
# insert missing values
mis.ind <- sample(1:length(dat), size= 75, replace= FALSE)
dat[mis.ind] <- NA
rm(x1,x2,x3,x4,x5, mis.ind)
dim(dat)<- c(100, 5)
dat <- data.frame(apply(dat, 2, function(x) as.factor(x)))
enum <- expand.grid(sapply(dat, function(x) return(c(levels(x), NA))))
cnt.comp <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= TRUE)
cnt.ob <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= TRUE)
cnt.mis <- count_levels(dat[!complete.cases(dat),], enum_list= enum,
hasNA= "count.miss", parallel= TRUE)
### no missing data tests
expect_equal(sum(cnt.comp$counts), sum(complete.cases(dat)))
expect_equal(sum(cnt.comp$counts == 0), 0)
expect_lt(nrow(cnt.comp), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.comp))
### missing data tests -- count.obs
expect_equal(sum(cnt.ob$counts == 0), 0)
expect_lt(nrow(cnt.ob), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.ob))
### missing data tests -- count.miss
expect_equal(sum(cnt.mis$counts), sum(!complete.cases(dat)))
expect_equal(sum(cnt.mis$counts == 0), 0)
expect_lt(nrow(cnt.mis), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.mis))
expect_equal(sum(cnt.mis$counts) + sum(cnt.comp$counts), nrow(dat))
})
test_that("(parallel = TRUE) == (parallel = FALSE)", {
# create test data
set.seed(12315)
x1 <- factor(sample(1:5, size=100, replace= TRUE))
x2 <- factor(sample(6:10, size=100, replace= TRUE))
x3 <- factor(sample(11:15, size=100, replace= TRUE))
x4 <- factor(sample(16:20, size=100, replace= TRUE))
x5 <- factor(sample(21:26, size=100, replace= TRUE))
dat <- c(x1, x2, x3, x4, x5)
# insert missing values
mis.ind <- sample(1:length(dat), size= 75, replace= FALSE)
dat[mis.ind] <- NA
rm(x1,x2,x3,x4,x5, mis.ind)
dim(dat)<- c(100, 5)
dat <- data.frame(apply(dat, 2, function(x) as.factor(x)))
enum <- expand.grid(sapply(dat, function(x) return(c(levels(x), NA))))
# parallel = TRUE
cnt.comp1 <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= TRUE)
cnt.ob1 <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= TRUE)
cnt.mis1 <- count_levels(dat, enum_list= enum, hasNA= "count.miss", parallel= TRUE)
# parallel = FALSE
cnt.comp2 <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= FALSE)
cnt.ob2 <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= FALSE)
cnt.mis2 <- count_levels(dat, enum_list= enum, hasNA= "count.miss", parallel= FALSE)
### equality with parallel options
expect_equal(cnt.comp1, cnt.comp2)
expect_equal(cnt.ob1, cnt.ob2)
expect_equal(cnt.mis1, cnt.mis2)
})
|
/fuzzedpackages/imputeMulti/tests/testthat/test-count_levels.R
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 5,648 |
r
|
library(testthat)
library(imputeMulti)
library(parallel)
context("int- count_levels works")
test_that("errors work", {
df <- data.frame(x=rnorm(100))
expect_error(count_levels(dat=df, hasNA= "absolutely", parallel= FALSE))
expect_error(count_levels(dat=df, hasNA= "count", parallel= FALSE))
expect_error(count_levels(dat=df, hasNA= "count.obs", parallel= TRUE,
leave_cores= -1))
expect_error(count_levels(dat=df, hasNA= "count.obs", parallel= TRUE,
leave_cores= 1.5))
})
test_that("count levels works with all missing data options... parallel = FALSE", {
# create test data
set.seed(12315)
x1 <- factor(sample(1:5, size=100, replace= TRUE))
x2 <- factor(sample(6:10, size=100, replace= TRUE))
x3 <- factor(sample(11:15, size=100, replace= TRUE))
x4 <- factor(sample(16:20, size=100, replace= TRUE))
x5 <- factor(sample(21:26, size=100, replace= TRUE))
dat <- c(x1, x2, x3, x4, x5)
# insert missing values
mis.ind <- sample(1:length(dat), size= 75, replace= FALSE)
dat[mis.ind] <- NA
rm(x1,x2,x3,x4,x5, mis.ind)
dim(dat)<- c(100, 5)
dat <- data.frame(apply(dat, 2, function(x) as.factor(x)))
enum <- expand.grid(sapply(dat, function(x) return(c(levels(x), NA))))
cnt.comp <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= FALSE)
cnt.ob <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= FALSE)
cnt.mis <- count_levels(dat[!complete.cases(dat),], enum_list= enum,
hasNA= "count.miss", parallel= FALSE)
### no missing data tests
expect_equal(sum(cnt.comp$counts), sum(complete.cases(dat)))
expect_equal(sum(cnt.comp$counts == 0), 0)
expect_lt(nrow(cnt.comp), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.comp))
### missing data tests -- count.obs
expect_equal(sum(cnt.ob$counts == 0), 0)
expect_lt(nrow(cnt.ob), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.ob))
### missing data tests -- count.miss
expect_equal(sum(cnt.mis$counts), sum(!complete.cases(dat)))
expect_equal(sum(cnt.mis$counts == 0), 0)
expect_lt(nrow(cnt.mis), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.mis))
expect_equal(sum(cnt.mis$counts) + sum(cnt.comp$counts), nrow(dat))
})
test_that("count levels works with all missing data options... parallel = TRUE", {
# create test data
set.seed(12315)
x1 <- factor(sample(1:5, size=100, replace= TRUE))
x2 <- factor(sample(6:10, size=100, replace= TRUE))
x3 <- factor(sample(11:15, size=100, replace= TRUE))
x4 <- factor(sample(16:20, size=100, replace= TRUE))
x5 <- factor(sample(21:26, size=100, replace= TRUE))
dat <- c(x1, x2, x3, x4, x5)
# insert missing values
mis.ind <- sample(1:length(dat), size= 75, replace= FALSE)
dat[mis.ind] <- NA
rm(x1,x2,x3,x4,x5, mis.ind)
dim(dat)<- c(100, 5)
dat <- data.frame(apply(dat, 2, function(x) as.factor(x)))
enum <- expand.grid(sapply(dat, function(x) return(c(levels(x), NA))))
cnt.comp <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= TRUE)
cnt.ob <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= TRUE)
cnt.mis <- count_levels(dat[!complete.cases(dat),], enum_list= enum,
hasNA= "count.miss", parallel= TRUE)
### no missing data tests
expect_equal(sum(cnt.comp$counts), sum(complete.cases(dat)))
expect_equal(sum(cnt.comp$counts == 0), 0)
expect_lt(nrow(cnt.comp), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.comp))
### missing data tests -- count.obs
expect_equal(sum(cnt.ob$counts == 0), 0)
expect_lt(nrow(cnt.ob), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.ob))
### missing data tests -- count.miss
expect_equal(sum(cnt.mis$counts), sum(!complete.cases(dat)))
expect_equal(sum(cnt.mis$counts == 0), 0)
expect_lt(nrow(cnt.mis), nrow(enum))
expect_lt(ncol(enum), ncol(cnt.mis))
expect_equal(sum(cnt.mis$counts) + sum(cnt.comp$counts), nrow(dat))
})
test_that("(parallel = TRUE) == (parallel = FALSE)", {
# create test data
set.seed(12315)
x1 <- factor(sample(1:5, size=100, replace= TRUE))
x2 <- factor(sample(6:10, size=100, replace= TRUE))
x3 <- factor(sample(11:15, size=100, replace= TRUE))
x4 <- factor(sample(16:20, size=100, replace= TRUE))
x5 <- factor(sample(21:26, size=100, replace= TRUE))
dat <- c(x1, x2, x3, x4, x5)
# insert missing values
mis.ind <- sample(1:length(dat), size= 75, replace= FALSE)
dat[mis.ind] <- NA
rm(x1,x2,x3,x4,x5, mis.ind)
dim(dat)<- c(100, 5)
dat <- data.frame(apply(dat, 2, function(x) as.factor(x)))
enum <- expand.grid(sapply(dat, function(x) return(c(levels(x), NA))))
# parallel = TRUE
cnt.comp1 <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= TRUE)
cnt.ob1 <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= TRUE)
cnt.mis1 <- count_levels(dat, enum_list= enum, hasNA= "count.miss", parallel= TRUE)
# parallel = FALSE
cnt.comp2 <- count_levels(dat[complete.cases(dat),], enum_list= enum,
hasNA= "no", parallel= FALSE)
cnt.ob2 <- count_levels(dat, enum_list= enum, hasNA= "count.obs", parallel= FALSE)
cnt.mis2 <- count_levels(dat, enum_list= enum, hasNA= "count.miss", parallel= FALSE)
### equality with parallel options
expect_equal(cnt.comp1, cnt.comp2)
expect_equal(cnt.ob1, cnt.ob2)
expect_equal(cnt.mis1, cnt.mis2)
})
|
/HW4/HW4/HW4Part1.R
|
no_license
|
SlightlyUnorthodox/DialogueSys
|
R
| false | false | 3,304 |
r
| ||
# Input:
# - counts
# - class
#
# Output:
# - p-values
# - BH-adjusted p-values
library(limma)
library(edgeR)
library(NBPSeq)
NBPSeq.dgelist = DGEList(counts = counts, group = factor(class))
NBPSeq.dgelist = calcNormFactors(NBPSeq.dgelist, method = "TMM")
NBPSeq.norm.factors = as.vector(NBPSeq.dgelist$samples$norm.factors)
NBPSeq.test = nbp.test(counts = counts, grp.ids = class,
grp1 = 1, grp2 = 2, norm.factors = NBPSeq.norm.factors)
NBPSeq.pvalues = NBPSeq.test$p.values
NBPSeq.adjpvalues = NBPSeq.test$q.values
|
/analysis-nbpseq.R
|
no_license
|
jdblischak/dsc-soneson-2013
|
R
| false | false | 549 |
r
|
# Input:
# - counts
# - class
#
# Output:
# - p-values
# - BH-adjusted p-values
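#
# Hedged illustration (not part of the original script) of the expected inputs:
# `counts` is a genes x samples matrix of raw counts and `class` holds the group
# label of each sample, matching grp1 = 1 and grp2 = 2 below, e.g.
# counts <- matrix(rnbinom(1000 * 6, mu = 10, size = 1), nrow = 1000,
#                  dimnames = list(paste0("g", 1:1000), paste0("s", 1:6)))
# class  <- rep(c(1, 2), each = 3)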
library(limma)
library(edgeR)
library(NBPSeq)
NBPSeq.dgelist = DGEList(counts = counts, group = factor(class))
NBPSeq.dgelist = calcNormFactors(NBPSeq.dgelist, method = "TMM")
NBPSeq.norm.factors = as.vector(NBPSeq.dgelist$samples$norm.factors)
NBPSeq.test = nbp.test(counts = counts, grp.ids = class,
grp1 = 1, grp2 = 2, norm.factors = NBPSeq.norm.factors)
NBPSeq.pvalues = NBPSeq.test$p.values
NBPSeq.adjpvalues = NBPSeq.test$q.values
|
\name{is_dso_loaded-methods}
\docType{methods}
\alias{is_dso_loaded}
\alias{is_dso_loaded-methods}
\alias{is_dso_loaded,cxxdso-method}
\title{Tell if a \code{cxxdso} object is loaded}
\description{
  The \code{is_dso_loaded} function tells if the dynamic shared object (DSO, or DLL) in
an object of \code{cxxdso}, created by function \code{\link{cxxfunctionplus}},
is loaded.
}
\section{Methods}{
\describe{
\item{\code{signature(x = "cxxdso")}}{Tell if a \code{cxxdso} object is
loaded in the sense that the contained DSO is loaded or not.}}
}
\seealso{
\code{\linkS4class{cxxdso}}
}
\examples{
\dontrun{
dso <- cxxfunctionplus(signature(), "return R_NilValue ;")
print(is_dso_loaded(dso))
}}
|
/cxxfunplus/man/is_dso_loaded.Rd
|
no_license
|
maverickg/cxxfunplus
|
R
| false | false | 715 |
rd
|
\name{is_dso_loaded-methods}
\docType{methods}
\alias{is_dso_loaded}
\alias{is_dso_loaded-methods}
\alias{is_dso_loaded,cxxdso-method}
\title{Tell if a \code{cxxdso} object is loaded}
\description{
  The \code{is_dso_loaded} function tells if the dynamic shared object (DSO, or DLL) in
an object of \code{cxxdso}, created by function \code{\link{cxxfunctionplus}},
is loaded.
}
\section{Methods}{
\describe{
\item{\code{signature(x = "cxxdso")}}{Tell if a \code{cxxdso} object is
loaded in the sense that the contained DSO is loaded or not.}}
}
\seealso{
\code{\linkS4class{cxxdso}}
}
\examples{
\dontrun{
dso <- cxxfunctionplus(signature(), "return R_NilValue ;")
print(is_dso_loaded(dso))
}}
|
\name{Nonpareil.fit_model}
\alias{Nonpareil.fit_model}
\title{Nonpareil fit model}
\description{Fit the sigmoidal model to the rarefied coverage}
\usage{Nonpareil.fit_model(np, weights.exp)}
\arguments{
\item{np}{`Nonpareil.Curve` object}
\item{weights.exp}{Numeric; See `Nonpareil.curve` for details}
}
\author{Luis M. Rodriguez-R [aut, cre]}
|
/utils/Nonpareil/man/Nonpareil.fit_model.Rd
|
permissive
|
lptolik/nonpareil
|
R
| false | false | 374 |
rd
|
\name{Nonpareil.fit_model}
\alias{Nonpareil.fit_model}
\title{Nonpareil fit model}
\description{Fit the sigmoidal model to the rarefied coverage}
\usage{Nonpareil.fit_model(np, weights.exp)}
\arguments{
\item{np}{`Nonpareil.Curve` object}
\item{weights.exp}{Numeric; See `Nonpareil.curve` for details}
}
\author{Luis M. Rodriguez-R [aut, cre]}
|
#matrix()
?matrix #nrow * ncol must equal the number of values in the matrix
mydata <- 1:20
A <- matrix(mydata, 4, 5)
A[2,3]
A
B <- matrix(mydata, 4, 5, byrow = T)
B[2,5]
A
#rbind()
r1 <- c("I", "am", "happy")
r2 <- c("what", "a", "day")
r3 <- c(1,2,3) # cannot mix types; these numbers become characters
rbind(r1,r2,r3)
#cbind()
c1 <- 1:5
c2 <- -1:-5
cbind(c1,c2)
#Named Vectors
charlie <- 1:5
#give names
names(charlie) <- c("a", "b", "c", "d", "e")
names(charlie)
charlie["b"]
#------------------------
#naming matrix dimensions 1
temp.vec <- rep(c("a", "b", "Zz"), each=3)
temp.vec
Bravo <- matrix(temp.vec, 3, 3)
Bravo
rownames(Bravo) <- c("How", "Are", "You?")
Bravo
colnames(Bravo) <- c("x", "y", "z")
Bravo
Bravo[2,2]
Bravo["Are", "y"] <- 0 # change things inside a matrix!
rownames(Bravo) <- NULL
Bravo
|
/Tutorial10CreatingMatrixs.R
|
no_license
|
lucaslytel/R_A-to-Z
|
R
| false | false | 826 |
r
|
#matrix()
?matrix #nrow * ncol must equal the number of values in the matrix
mydata <- 1:20
A <- matrix(mydata, 4, 5)
A[2,3]
A
B <- matrix(mydata, 4, 5, byrow = T)
B[2,5]
A
#rbind()
r1 <- c("I", "am", "happy")
r2 <- c("what", "a", "day")
r3 <- c(1,2,3) # cannot mix types; these numbers become characters
rbind(r1,r2,r3)
#cbind()
c1 <- 1:5
c2 <- -1:-5
cbind(c1,c2)
#Named Vectors
charlie <- 1:5
#give names
names(charlie) <- c("a", "b", "c", "d", "e")
names(charlie)
charlie["b"]
#------------------------
#naming matrix dimensions 1
temp.vec <- rep(c("a", "b", "Zz"), each=3)
temp.vec
Bravo <- matrix(temp.vec, 3, 3)
Bravo
rownames(Bravo) <- c("How", "Are", "You?")
Bravo
colnames(Bravo) <- c("x", "y", "z")
Bravo
Bravo[2,2]
Bravo["Are", "y"] <- 0 # change things inside a matrix!
rownames(Bravo) <- NULL
Bravo
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{SticklebackPlates}
\alias{SticklebackPlates}
\title{Number of Lateral Plates in Sticklebacks}
\format{A data frame with 344 observations on the following 2 variables.
\describe{ \item{genotype}{a factor with levels \code{mm},
\code{Mm}, and \code{MM}} \item{plates}{number of plates} }}
\source{
Colosimo, P.F., C.L. Peichel, K. Nereng, B.K. Blackman, M.D.
Shapiro, D. Schluter, and D.M. Kingsley. 2004. The genetic architecture of
parallel armor plate reduction in threespine sticklebacks. \emph{PLoS
Biology} 2: 635-641.
\url{http://www.plosbiology.org/article/info:doi/10.1371/journal.pbio.0020109}
}
\description{
Number of lateral plates (\code{plates}) in threespine sticklebacks
(\emph{Gasterosteus aculeatus}) with three different \emph{Ectodysplasin}
genotypes (\code{mm}, \code{Mm}, and \code{MM}).
}
\examples{
aggregate(plates ~ genotype, SticklebackPlates, FUN = favstats)
histogram( ~ plates | genotype, SticklebackPlates,
layout = c(1,3),
n = 15,
xlab = "Number of Lateral Body Plates"
)
densityplot( ~ plates | genotype, SticklebackPlates,
xlab = "Number of Lateral Body Plates",
layout = c(1,3)
)
}
\references{
Colosimo P.F., K.E. Hosemann, S. Balabhadra, G. Villarreal, M.
Dickson, J. Grimwood, J Schmutz, R.M. Myers, D. Schluter, D.M. Kingsley.
2005. Widespread parallel evolution in sticklebacks by repeated fixation of
ectodysplasin alleles. \emph{Science }307: 1928-33.
\url{http://www.sciencemag.org/cgi/content/full/307/5717/1928}
}
\keyword{datasets}
|
/man/SticklebackPlates.Rd
|
no_license
|
cran/abd
|
R
| false | false | 1,612 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{SticklebackPlates}
\alias{SticklebackPlates}
\title{Number of Lateral Plates in Sticklebacks}
\format{A data frame with 344 observations on the following 2 variables.
\describe{ \item{genotype}{a factor with levels \code{mm},
\code{Mm}, and \code{MM}} \item{plates}{number of plates} }}
\source{
Colosimo, P.F., C.L. Peichel, K. Nereng, B.K. Blackman, M.D.
Shapiro, D. Schluter, and D.M. Kingsley. 2004. The genetic architecture of
parallel armor plate reduction in threespine sticklebacks. \emph{PLoS
Biology} 2: 635-641.
\url{http://www.plosbiology.org/article/info:doi/10.1371/journal.pbio.0020109}
}
\description{
Number of lateral plates (\code{plates}) in threespine sticklebacks
(\emph{Gasterosteus aculeatus}) with three different \emph{Ectodysplasin}
genotypes (\code{mm}, \code{Mm}, and \code{MM}).
}
\examples{
aggregate(plates ~ genotype, SticklebackPlates, FUN = favstats)
histogram( ~ plates | genotype, SticklebackPlates,
layout = c(1,3),
n = 15,
xlab = "Number of Lateral Body Plates"
)
densityplot( ~ plates | genotype, SticklebackPlates,
xlab = "Number of Lateral Body Plates",
layout = c(1,3)
)
}
\references{
Colosimo P.F., K.E. Hosemann, S. Balabhadra, G. Villarreal, M.
Dickson, J. Grimwood, J Schmutz, R.M. Myers, D. Schluter, D.M. Kingsley.
2005. Widespread parallel evolution in sticklebacks by repeated fixation of
ectodysplasin alleles. \emph{Science }307: 1928-33.
\url{http://www.sciencemag.org/cgi/content/full/307/5717/1928}
}
\keyword{datasets}
|
#' Validate biblio.csv
#'
#' @param biblio (data.frame) A \code{data.frame} read in from \code{biblio.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_biblio <- function(biblio) {
stopifnot(is.data.frame(biblio))
if (nrow(biblio) <= 0) {
print(biblio)
stop(call. = FALSE,
"biblio.csv must have at least one row of data")
}
}
#' Validate attributes.csv
#'
#' @param attributes (data.frame) A \code{data.frame} read in from
#' \code{attributes.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_attributes <- function(attributes) {
stopifnot(is.data.frame(attributes))
if (nrow(attributes) <= 0) {
stop(call. = FALSE,
"attributes.csv must have at least one row of data")
}
}
#' Validate access.csv
#'
#' @param access (data.frame) A \code{data.frame} read in from \code{access.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_access <- function(access) {
stopifnot(is.data.frame(access))
if (nrow(access) <= 0) {
stop(call. = FALSE,
"access.csv must have at least one row of data")
}
}
#' Validate creators.csv
#'
#' @param creators (data.frame) A \code{data.frame} read in from
#' \code{creators.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_creators <- function(creators) {
stopifnot(is.data.frame(creators))
if (nrow(creators) <= 0) {
stop(call. = FALSE,
"creators.csv must have at least one row of data")
}
}
|
/R/validate_metadata.R
|
permissive
|
tdjames1/dataspice
|
R
| false | false | 1,592 |
r
|
#' Validate biblio.csv
#'
#' @param biblio (data.frame) A \code{data.frame} read in from \code{biblio.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_biblio <- function(biblio) {
stopifnot(is.data.frame(biblio))
if (nrow(biblio) <= 0) {
print(biblio)
stop(call. = FALSE,
"biblio.csv must have at least one row of data")
}
}
#' Validate attributes.csv
#'
#' @param attributes (data.frame) A \code{data.frame} read in from
#' \code{attributes.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_attributes <- function(attributes) {
stopifnot(is.data.frame(attributes))
if (nrow(attributes) <= 0) {
stop(call. = FALSE,
"attributes.csv must have at least one row of data")
}
}
#' Validate access.csv
#'
#' @param access (data.frame) A \code{data.frame} read in from \code{access.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_access <- function(access) {
stopifnot(is.data.frame(access))
if (nrow(access) <= 0) {
stop(call. = FALSE,
"access.csv must have at least one row of data")
}
}
#' Validate creators.csv
#'
#' @param creators (data.frame) A \code{data.frame} read in from
#' \code{creators.csv}
#'
#' @return Nothing. Side-effect: Can \code{stop} execution if validation fails.
validate_creators <- function(creators) {
stopifnot(is.data.frame(creators))
if (nrow(creators) <= 0) {
stop(call. = FALSE,
"creators.csv must have at least one row of data")
}
}
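# Hedged usage sketch (the file path below is only an example; the package's own
# helpers may read these CSVs from a different location):
if (FALSE) {
  biblio <- read.csv(file.path("data", "metadata", "biblio.csv"),
                     stringsAsFactors = FALSE)
  validate_biblio(biblio)  # stops with an informative error if there are no data rows
}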
|
# The MIT License
#
# Copyright (c) 2017 Piero Dalle Pezze
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to
# deal in the Software without restriction, including
# without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom
# the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This script runs some quality control for the counts table
# Most of the quality control is performed on normalised counts tables.
# These tables are generated only if they don't already exist.
# download DESeq2 from:
# source("https://bioconductor.org/biocLite.R")
# biocLite("DESeq2")
library("DESeq2")
library('vsn')
library("dplyr")
library("ggplot2")
library(grid)
library(gridExtra)
library("hexbin")
library("pheatmap")
library("RColorBrewer")
library("PoiClaClu")
source('../utilities/plots.R')
# select the file containing the data
location <- "../data"
filename.counts <- "summarised_mirna_counts_after_mapping_filtered"
filename.counts.metadata <- "summarised_mirna_counts_after_mapping_filtered_metadata"
suffix <-".csv"
#####################################
# Read Counts Table and Samples Table
#####################################
# load counts
counts <- read.table(paste0(location,"/",filename.counts,suffix), sep=",",fill=T,header=T,row.names=1)
# load counts metadata
counts.metadata <- read.table(paste0(location,"/",filename.counts.metadata,suffix), sep=",",fill=T,header=T,row.names=1)
# cast the column `time` from numeric to factor
counts.metadata$time <- as.factor(counts.metadata$time)
# cast the column `replicate` from numeric to factor
counts.metadata$replicate <- as.factor(counts.metadata$replicate)
#######################
# Prepare DESeq dataset
#######################
# create the dataset for DESeq. We use this as an annotated SummarizedExperiment object
# use assay(se), colData(se), design(se) to read the most important info
se <- DESeqDataSetFromMatrix(countData = counts, colData=counts.metadata, design = ~ 1)
######################
# Estimate dispersions
######################
# estimate the size factors from counts, counts.metadata
se <- estimateSizeFactors(se)
se <- estimateDispersions(se)
#str(fitInfo(se))
png(paste0(filename.counts,"_DESeq_dispersions_estimates",".png"), width=2000, height=2000, pointsize=14, res=300)
plotDispEsts(se)
dev.off()
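# Quick sanity check (added illustration, not in the original script): the size
# factors estimated by DESeq2 should roughly track total library size, so
# printing both side by side is a cheap way to spot problematic samples.
# sizeFactors() and counts() are the standard DESeq2 accessors.
print(data.frame(sample      = colnames(se),
                 size_factor = sizeFactors(se),
                 lib_size    = colSums(counts(se))))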
############################
# Normalise the counts table
############################
# See workflow: http://www.bioconductor.org/help/workflows/rnaseqGene/
# 1) transformations of the counts in order to visually explore sample relationships.
# 2) original raw counts for statistical testing.
# => This is critical because the statistical testing methods rely on original count data (not scaled or transformed)
# for calculating the precision of measurements.
# Many common statistical methods for exploratory analysis of multidimensional data,
# for example clustering and principal components analysis (PCA), work best for data
# that generally has the same range of variance at different ranges of the mean values.
# When the expected amount of variance is approximately the same across different mean
# values, the data is said to be homoskedastic. For RNA-seq counts, however, the expected
# variance grows with the mean. For example, if one performs PCA directly on a matrix of
# counts or normalized counts (e.g. correcting for differences in sequencing depth), the
# resulting plot typically depends mostly on the genes with highest counts because they
# show the largest absolute differences between samples. A simple and often used strategy
# to avoid this is to take the logarithm of the normalized count values plus a pseudocount of 1;
# however, depending on the choice of pseudocount, now the genes with the very lowest counts
# will contribute a great deal of noise to the resulting plot, because taking the logarithm
# of small counts actually inflates their variance.
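# Toy illustration of the point above (added for clarity; it is not part of the
# original analysis and uses simulated Poisson counts, not the real data): after
# a log2(x + 1) transform, genes with low mean counts end up with far larger
# variance than genes with high mean counts, which is why they dominate
# distance and PCA plots computed on log counts.
set.seed(1)
toy_low  <- rpois(1000, lambda = 2)     # low-expression gene
toy_high <- rpois(1000, lambda = 2000)  # high-expression gene
var(log2(toy_low  + 1))                 # comparatively large
var(log2(toy_high + 1))                 # close to zero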
# Plot SD vs mean
# We plot the standard deviation of each row (genes) against the mean
p <- meanSdPlot(assay(se), ranks=FALSE)
ggsave(paste0(filename.counts, "_meanSdPlot_counts.png"), plot=p$gg, width=4, height=4, dpi=300)
# The logarithm with a small pseudocount amplifies differences when the values
# are close to 0. The low count genes with low signal-to-noise ratio will overly
# contribute to sample-sample distances and PCA plots.
# apply a log(counts + 1) transformation
norm.log<-normTransform(se)
p <- meanSdPlot(assay(norm.log), ranks = FALSE)
ggsave(paste0(filename.counts, "_meanSdPlot_log_counts.png"), plot=p$gg, width=4, height=4, dpi=300)
# The logarithm with a small pseudocount amplifies differences when the values are close to 0.
# The low count genes with low signal-to-noise ratio will overly contribute to sample-sample distances and PCA plots.
# As a solution, DESeq2 offers two transformations for count data that stabilize the variance
# across the mean: the regularized-logarithm transformation or rlog (Love, Huber, and Anders 2014),
# and the variance stabilizing transformation (VST) for negative binomial data with a
# dispersion-mean trend (Anders and Huber 2010), implemented in the vst function.
# For genes with high counts, the rlog and VST (see below) will give similar result to the ordinary log2
# transformation of normalized counts.
# For genes with lower counts, however, the values are shrunken towards the genes’ averages across all samples.
# The rlog-transformed or VST data then becomes approximately homoskedastic, and can be used directly for
# computing distances between samples, making PCA plots, or as input to downstream methods which perform
# best with homoskedastic data.
# Which transformation to choose?
# The rlog tends to work well on small datasets (n < 30), sometimes outperforming the VST when there is a
# large range of sequencing depth across samples (an order of magnitude difference). The VST is much faster
# to compute and is less sensitive to high count outliers than the rlog. We therefore recommend the VST for
# large datasets (hundreds of samples). You can perform both transformations and compare the meanSdPlot or
# PCA plots generated, as described below.
# Note that the two transformations offered by DESeq2 are provided for applications other than differential
# testing. For differential testing we recommend the DESeq function applied to raw counts, as described later
# in this workflow, which also takes into account the dependence of the variance of counts on the mean value
# during the dispersion estimation step.
# As alternative, compute rlog as implemented in DESeq2. The rlog tends to work well on small datasets (n < 30)
# In the below function calls, we specified blind = FALSE, which means that differences between cell lines and
# treatment (the variables in the design) will not contribute to the expected variance-mean trend of the experiment.
# The experimental design is not used directly in the transformation, only in estimating the global amount of
# variability in the counts. For a fully unsupervised transformation, one can set blind = TRUE (which is the default).
rld <- rlog(se, blind = FALSE)
vst <- varianceStabilizingTransformation(se, blind = FALSE)
df <- bind_rows(
#as_data_frame(log2(counts(se, normalized=TRUE)[, 1:2]+1)) %>% mutate(transformation = "log2(x + 1)"),
as_data_frame(assay(norm.log)[, 1:2]) %>% mutate(transformation = "log2(x+1)"),
as_data_frame(assay(rld)[, 1:2]) %>% mutate(transformation = "rlog"),
as_data_frame(assay(vst)[, 1:2]) %>% mutate(transformation = "vst"))
colnames(df)[1:2] <- c("x", "y")
ggplot(df, aes(x = x, y = y)) + geom_hex(bins = 80) + coord_fixed() + facet_grid( . ~ transformation)
ggsave(paste0(filename.counts, "_norm_compare.png"), width=5, height=2, dpi=300)
##################################
# Write normalised tables to files
###################################
# convert rownames to a new column
norm.log.counts <- assay(norm.log)
norm.log.counts <- cbind(miRNA = rownames(norm.log.counts), norm.log.counts)
write.csv(norm.log.counts,file=paste0(location,"/",filename.counts,"_norm_log",suffix),quote=FALSE,row.names=FALSE)
# convert rownames to a new column
rld.counts <- assay(rld)
rld.counts <- cbind(miRNA = rownames(rld.counts), rld.counts)
write.csv(rld.counts,file=paste0(location,"/",filename.counts,"_norm_rlog",suffix),quote=FALSE,row.names=FALSE)
# convert rownames to a new column
vst.counts <- assay(vst)
vst.counts <- cbind(miRNA = rownames(vst.counts), vst.counts)
write.csv(vst.counts,file=paste0(location,"/",filename.counts,"_norm_vst",suffix),quote=FALSE,row.names=FALSE)
##################
# Quality Control
##################
# Sample distances
# A useful first step in an RNA-seq analysis is often to assess overall similarity between samples:
# Which samples are similar to each other, which are different? Does this fit to the expectation from the experiment’s design?
# We use the R function dist to calculate the Euclidean distance between samples.
# To ensure we have a roughly equal contribution from all genes, we use it on the rlog-transformed data.
# We need to transpose the matrix of values using t, because the dist function expects the different samples
# to be rows of its argument, and different dimensions (here, genes) to be columns.
# In order to plot the sample distance matrix with the rows/columns arranged by the distances
# in our distance matrix, we manually provide sampleDists to the clustering_distance argument
# of the pheatmap function. Otherwise the pheatmap function would assume that the matrix contains
# the data values themselves, and would calculate distances between the rows/columns of the distance
# matrix, which is not desired. We also manually specify a blue color palette using the colorRampPalette
# function from the RColorBrewer package.
# norm.log
sampleDists <- dist(t(assay(norm.log)))
sampleDistMatrix <- as.matrix( sampleDists )
rownames(sampleDistMatrix) <- paste( norm.log$strain, norm.log$time, sep=" - " )
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
png(paste0(filename.counts,"_strain_time_sample_distances_norm_log",".png"), width=2000, height=2000, res=300)
pheatmap(sampleDistMatrix,
clustering_distance_rows = sampleDists,
clustering_distance_cols = sampleDists,
col = colors)
dev.off()
# rlog
sampleDists <- dist(t(assay(rld)))
sampleDistMatrix <- as.matrix( sampleDists )
#print(sampleDistMatrix)
rownames(sampleDistMatrix) <- paste( rld$strain, rld$time, sep=" - " )
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
png(paste0(filename.counts,"_strain_time_sample_distances_norm_rlog",".png"), width=2000, height=2000, res=300)
pheatmap(sampleDistMatrix,
clustering_distance_rows = sampleDists,
clustering_distance_cols = sampleDists,
col = colors)
dev.off()
# vst
sampleDists <- dist(t(assay(vst)))
sampleDistMatrix <- as.matrix( sampleDists )
rownames(sampleDistMatrix) <- paste( vst$strain, vst$time, sep=" - " )
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
png(paste0(filename.counts,"_strain_time_sample_distances_norm_vst",".png"), width=2000, height=2000, res=300)
pheatmap(sampleDistMatrix,
clustering_distance_rows = sampleDists,
clustering_distance_cols = sampleDists,
col = colors)
dev.off()
# Heatmap of sample-to-sample distances using the original count matrix values and PoissonDistance.
# Another option for calculating sample distances is to use the Poisson Distance (Witten 2011),
# implemented in the PoiClaClu package. This measure of dissimilarity between counts also takes
# the inherent variance structure of counts into consideration when calculating the distances
# between samples. The PoissonDistance function takes the original count matrix (not normalized)
# with samples as rows instead of columns, so we need to transpose the counts in se.
poisd <- PoissonDistance(t(counts(se)))
samplePoisDistMatrix <- as.matrix( poisd$dd )
rownames(samplePoisDistMatrix) <- paste( rld$strain, rld$time, sep=" - " )
colnames(samplePoisDistMatrix) <- NULL
png(paste0(filename.counts,"_strain_time_sample_distances_PoissonDist",".png"), width=2000, height=2000, res=300)
pheatmap(samplePoisDistMatrix,
clustering_distance_rows = poisd$dd,
clustering_distance_cols = poisd$dd,
col = colors)
dev.off()
# PCA with log2(x+1), rlog or vst (VarianceStabilizingTransformation). vst is generally used for datasets with many samples
# log
# we redefine the plot so that we can change the theme. To do so, we need to return the data.
# This plot redesign is done by the functions plotPrettyPCA1Var() and plotPrettyPCA2Var().
# p1 <- plotPCA(norm.log, intgroup = c("time"))
p1 <- plotPrettyPCA1Var(norm.log, intgroup = c("time"), plotColor=FALSE)
#ggsave(paste0(filename.counts, "_pca_time_norm_log.png"), width=4, height=4, dpi=300)
p2 <- plotPrettyPCA1Var(norm.log, intgroup = c("strain"))
#ggsave(paste0(filename.counts, "_pca_strain_norm_log.png"), width=4, height=4, dpi=300)
p3 <- plotPrettyPCA2Var(norm.log, intgroup = c("strain", "time"))
#ggsave(paste0(filename.counts, "_pca_time_strain_norm_log.png"), width=4, height=4, dpi=300)
p.combined <- arrangeGrob(p1,p2,p3,ncol=2)
ggsave(paste0(filename.counts, "_pca_combined_norm_log.png"), plot=p.combined, width=7, height=7, dpi=300)
# rlog
p1 <- plotPrettyPCA1Var(rld, intgroup = c("time"), plotColor=FALSE)
#ggsave(paste0(filename.counts, "_pca_time_norm_rlog.png"), width=4, height=4, dpi=300)
p2 <- plotPrettyPCA1Var(rld, intgroup = c("strain"))
#ggsave(paste0(filename.counts, "_pca_strain_norm_rlog.png"), width=4, height=4, dpi=300)
p3 <- plotPrettyPCA2Var(rld, intgroup = c("strain", "time"))
#ggsave(paste0(filename.counts, "_pca_time_strain_norm_rlog.png"), width=4, height=4, dpi=300)
p.combined <- arrangeGrob(p1,p2,p3,ncol=2)
ggsave(paste0(filename.counts, "_pca_combined_norm_rlog.png"), plot=p.combined, width=7, height=7, dpi=300)
# vst
p1 <- plotPrettyPCA1Var(vst, intgroup = c("time"), plotColor=FALSE)
#ggsave(paste0(filename.counts, "_pca_time_norm_vst.png"), width=4, height=4, dpi=300)
p2 <- plotPrettyPCA1Var(vst, intgroup = c("strain"))
#ggsave(paste0(filename.counts, "_pca_strain_norm_vst.png"), width=4, height=4, dpi=300)
p3 <- plotPrettyPCA2Var(vst, intgroup = c("strain", "time"))
#ggsave(paste0(filename.counts, "_pca_time_strain_norm_vst.png"), width=4, height=4, dpi=300)
p.combined <- arrangeGrob(p1,p2,p3,ncol=2)
ggsave(paste0(filename.counts, "_pca_combined_norm_vst.png"), plot=p.combined, width=7, height=7, dpi=300)
|
/3_quality_control_w_deseq2/quality_control_w_deseq2.R
|
permissive
|
pdp10/pip3.mirna.seq
|
R
| false | false | 15,583 |
r
|
|
rm(list=ls())
setwd("C:/cloud/Dropbox/lupine")
library(dplyr)
library(tidyr)
library(testthat)
library(bbmle)
library(lme4)
options(stringsAsFactors = F)
source("analysis/format_data/format_scripts.R")
source("analysis/format_data/format_functions.R")
# data
lupine_df <- read.csv( "data/lupine_all.csv")
enso <- read.csv( "data/enso_data.csv")
clim <- read.csv( "data/prism_point_reyes_87_18.csv")
# data format --------------------------------------------------------------
flow <- subset(lupine_df, !is.na(flow_t1) ) %>%
subset( area_t1 != 0) %>%
mutate( log_area_t1 = log(area_t1),
log_area_t12 = log(area_t1)^2,
year = year + 1 ) %>%
subset( !(year %in% 2015) )
# climate format ----------------------------------------------------------------
years <- unique(flow$year)
m_obs <- 5
m_back <- 36
# calculate yearly anomalies
year_anom <- function(x, var ){
# set names of climate variables
clim_names <- paste0( var,c('_t0','_tm1') )
mutate(x,
avgt0 = x %>% select(V1:V12) %>% rowSums,
avgtm1 = x %>% select(V13:V24) %>% rowSums ) %>%
select(year, avgt0, avgtm1) %>%
setNames( c('year',clim_names) )
}
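# Toy check of year_anom (added for illustration; the real input comes from
# prism_clim_form / month_clim_form, so the fake columns below only mimic its
# V1..V24 layout): V1:V12 are summed into the current-year value (*_t0) and
# V13:V24 into the previous-year value (*_tm1).
toy_clim <- cbind(data.frame(year = 2001:2002),
                  as.data.frame(matrix(rep(1:2, each = 24), nrow = 2, byrow = TRUE)))
print( year_anom(toy_clim, 'ppt') )   # ppt_t0 = 12, 24 ; ppt_tm1 = 12, 24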
# format climate - need to select climate predictor first
ppt_mat <- subset(clim, clim_var == "ppt") %>%
prism_clim_form("precip", years, m_back, m_obs) %>%
year_anom('ppt')
tmp_mat <- subset(clim, clim_var == 'tmean') %>%
prism_clim_form('tmean', years, m_back, m_obs) %>%
year_anom('tmp')
enso_mat <- subset(enso, clim_var == 'oni' ) %>%
month_clim_form('oni', years, m_back, m_obs) %>%
year_anom('oni')
# put together all climate
clim_mat <- Reduce( function(...) full_join(...),
list(ppt_mat,tmp_mat,enso_mat) )
# demography plus clim
flow_clim <- left_join(flow, clim_mat) %>%
subset( !is.na(location) )
# fit "structural" models ----------------------------------------------
structure_mods <- list(
# no quadratic
flow_t1 ~ log_area_t1 + (1 | year),
flow_t1 ~ log_area_t1 + (1 | year) + (1 | location),
flow_t1 ~ log_area_t1 + (1 | year) + (1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + (1 | year) + (1 | newid),
flow_t1 ~ log_area_t1 + (1 | location),
flow_t1 ~ log_area_t1 + (1 | newid),
flow_t1 ~ log_area_t1 + (1 | newid) + (1 | location),
# quadratic
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | year),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | year) + (1 | location),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | year) + (1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | year) + (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | location),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | newid) + (1 | location),
# random slope no quadratic
flow_t1 ~ log_area_t1 + (log_area_t1 | year),
flow_t1 ~ log_area_t1 + (log_area_t1 | year) + (1 | location),
flow_t1 ~ log_area_t1 + (log_area_t1 | year) + (1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + (log_area_t1 | year) + (1 | newid),
# random slope quadratic
flow_t1 ~ log_area_t1 + log_area_t12 + (log_area_t1 | year),
flow_t1 ~ log_area_t1 + log_area_t12 + (log_area_t1 | year) + (1 | location),
flow_t1 ~ log_area_t1 + log_area_t12 + (log_area_t1 | year) + (1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + (log_area_t1 | year) + (1 | newid),
# year + location random slope
flow_t1 ~ log_area_t1 + (log_area_t1 | year) + (log_area_t1 | location),
flow_t1 ~ log_area_t1 + (1 | year) + (log_area_t1 | location),
flow_t1 ~ log_area_t1 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + (1 | year) + (log_area_t1 | location) + (1 | newid),
# year + location random slope + quadratic effect
flow_t1 ~ log_area_t1 + log_area_t12 + (log_area_t1 | year) + (log_area_t1 | location),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | year) + (log_area_t1 | location),
flow_t1 ~ log_area_t1 + log_area_t12 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + (1 | year) + (log_area_t1 | location) + (1 | newid)
)
# fit models
mods <- lapply( structure_mods,
function(x) glmer(x, data=flow_clim, family='binomial') ) %>%
setNames( c( 'yr', 'yr_loc', 'yr_loc_id',
'yr_id', 'loc', 'id', 'id_loc',
'yr_2', 'yr_loc_2', 'yr_loc_id_2',
'yr_id_2', 'loc_2', 'id_2', 'id_loc_2',
'yr_rs', 'yr_loc_rs', 'yr_loc_id_rs',
'yr_id_rs',
'yr_rs_2', 'yr_loc_rs_2', 'yr_loc_id_rs_2',
'yr_id_rs_2',
'yr_rsyl','yr_rsy', 'yr_id_rsyl','yr_id_rsy',
'yr_rsyl_2','yr_rsy_2', 'yr_id_rsyl_2','yr_id_rsy_2' ) )
# best model has YEAR, LOCATION, and log_area_t12
AICtab(mods, weights=T)
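# Programmatic counterpart to the table above (added for convenience; AICtab is
# only printed, so this pulls the label of the best-supported structure out of
# the fitted list using plain stats::AIC).
best_structure <- names(which.min(sapply(mods, AIC)))
best_structure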
# fit climate models -------------------------------------------------
climate_mods <- list(
# null
flow_t1 ~ log_area_t1 + log_area_t12 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
# precipitation
flow_t1 ~ log_area_t1 + log_area_t12 + ppt_t0 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + ppt_tm1 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
# temperature
flow_t1 ~ log_area_t1 + log_area_t12 + tmp_t0 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + tmp_tm1 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
# enso
flow_t1 ~ log_area_t1 + log_area_t12 + oni_t0 + (log_area_t1 | year) + (log_area_t1 | location)+ (1 | newid),
flow_t1 ~ log_area_t1 + log_area_t12 + oni_tm1 + (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid)
)
# fit models
mod_clim <- lapply( climate_mods,
function(x) glmer(x, data = flow_clim, family='binomial') ) %>%
setNames( c( 'null',
'ppt_t0', 'ppt_tm1',
'tmp_t0', 'tmp_tm1',
'oni_t0', 'oni_tm1') )
# flowering model
AICtab(mod_clim, weights=T)
# out data frame
aic_tab <- AICtab(mod_clim, weights=T)
out <- data.frame( model  = attr(aic_tab, 'row.names'),
                   dAIC   = aic_tab$dAIC,
                   df     = aic_tab$df,
                   weight = aic_tab$weight )
write.csv(out, 'results/ml_mod_sel/flow/flow_mod_sel.csv', row.names = F)
# fit best model with all data
best_mod <- glmer(flow_t1 ~ log_area_t1 + log_area_t12 + tmp_t0
+ (log_area_t1 | year) + (log_area_t1 | location) + (1 | newid),
data = flow_clim, family='binomial' )
# random effects
re_df <- ranef(best_mod)$year %>%
tibble::add_column(.before=1, coef = row.names(.) ) %>%
bind_rows( ranef(best_mod)$location %>%
tibble::add_column(.before=1, coef = row.names(.) )
) %>%
bind_rows( ranef(best_mod)$newid %>%
tibble::add_column(.before=1, coef = row.names(.) )
) %>%
mutate( type_coef = 'ranef' )
# out (binding fixed effects)
out_df <- fixef(best_mod) %>%
t %>% t %>%
as.data.frame %>%
tibble::add_column(.before=1, ranef = row.names(.) ) %>%
mutate( type_coef = 'fixef' ) %>%
bind_rows( re_df )
write.csv(out_df,
'results/ml_mod_sel/flow/flow_best_mod.csv',
row.names=F)
# plot -----------------------------------------------------------
coefs <- best_mod %>% fixef
# quantiles of climate predictor
clim_quant <- flow_clim$tmp_t0 %>% unique %>% quantile
x_seq <- seq( min(flow_clim$log_area_t1, na.rm=T),
max(flow_clim$log_area_t1, na.rm=T),
length.out = 100 )
y_low <- coefs[1] +
coefs[2] * x_seq +
coefs[3] * x_seq^2 +
coefs[4] * clim_quant['25%']
y_high <- coefs[1] +
coefs[2] * x_seq +
coefs[3] * x_seq^2 +
coefs[4] * clim_quant['75%']
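# Optional cross-check (added; not in the original script): y_low / y_high above
# are hand-built, fixed-effects-only predictions on the link scale. They should
# agree with lme4's predict() once random effects are switched off via
# re.form = NA and the response scale is requested; the newdata frame mirrors x_seq.
nd_low <- data.frame(log_area_t1  = x_seq,
                     log_area_t12 = x_seq^2,
                     tmp_t0       = clim_quant['25%'])
# all.equal(unname(predict(best_mod, newdata = nd_low, re.form = NA, type = 'response')),
#           unname(boot::inv.logit(y_low)))   # expected TRUE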
tiff('results/ml_mod_sel/flow.tiff',
unit="in", width=6.3, height=6.3, res=400, compression="lzw")
par( mar = c(3.5,3.5,0.2,0.2), mgp = c(2.1,0.7,0), cex.lab = 2 )
plot(jitter(flow_t1) ~ log_area_t1, data = flow_clim,
     ylab = 'flowering probability')
lines(x_seq, boot::inv.logit(y_low), lwd=2, col = 'blue')
lines(x_seq, boot::inv.logit(y_high), lwd=2, col = 'red')
legend(-0.5,0.8,
c('low tmp t0','high tmp t0'), lwd = 2,
col = c('blue','red'), bty = 'n', cex = 2)
dev.off()
|
/analysis/vital_rates/flow_ml_no15.R
|
no_license
|
AldoCompagnoni/lupine
|
R
| false | false | 8,997 |
r
|
|
## retrieve_Hodgson<-function(species,TRAITS,rest,data_csr){
## url<-"http://people.exeter.ac.uk/rh203/plant-scientist-recent-science-functional-types-allocating-csr-csr-lookup-table.xls"
## temp_dest<-tempfile(fileext=".xls")
## download.file(url,temp_dest,mode="wb")
## catminat_df<-read_excel(temp_dest,sheet=1,col_names=T,col_types=rep("text",2))
## catminat_df<-catminat_df[grep("[0-9]+",catminat_df$rang_taxinomiqu,invert=T),]
## ##### TODO: correct the species names
## ## remove entries for which CHOROLOGIE=="?"
## catminat_df<-catminat_df[catminat_df$CHOROLOGIE!="?",]
## ## change columns' name in order to
## ## avoid possible conflicts with non-
## ## ascii
## ## french chars
## recode_catminat_values<-c("Lumi.{1}re"="ell_L_fr",
## "Temp.{1}rature"="elle_T_fr",
## "Continentalit.{1}"="ell_C_fr",
## "Humidit.{1}_.{1}daphique"="ell_U_fr",
## "R.{1}action_du_sol_.pH."="ell_R_fr",
## "Niveau_trophique"="ell_N_fr",
## "Salinit.*"="ell_S_fr",
## "Texture"="Soil_texture_fr",
## "Mati.*re_organique"="organic_matter_fr",
## "Humidit.*_atmosph.*rique"="ell_U_atm_fr",
## "pollinisation"="poll_vect_fr",
## "diss.*mination"="dissemination_fr",
## "couleur_fleur"="flower_colour_fr",
## "fruit"="fruit_type_fr",
## "sexualit.*"="sex_reprod_fr",
## "ordre_maturation"="order_of_maturation",
## "inflorescence"="inflorescence_fr",
## "Nom.Phytobase"="species_name",
## "TYPE_BIOLOGIQUE"="li_form_fr"
## )
## for(i in names(recode_catminat_values)){
## names(catminat_df)<-gsub(i,recode_catminat_values[i],names(catminat_df))
## }
## catminat_df$species_name<-as.character(catminat_df$species_name)
## catminat_df<-catminat_df[!is.na(catminat_df$species_name),]
## catminat_df$species_name<-gsub("&","&",catminat_df$species_name,perl=TRUE)
## catminat_df$species_name<-gsub(";","",catminat_df$species_name,perl=TRUE)
## catminat_df$species_name<-gsub("\\s+\\*$","",catminat_df$species_name,perl=TRUE)
## catminat_df$species_name<-gsub("\\s+A$","",catminat_df$species_name,perl=TRUE)
## catminat_df$species_name<-gsub("\\s+B$","",catminat_df$species_name,perl=TRUE)
## ## I split flowering dates into 2 columns,
## ## flower begin and end
## flowering<-as.character(catminat_df$floraison)
## beg_flow_fr<-as.numeric(gsub("^([0-9]+)+\\-([0-9])+$","\\1",flowering))
## end_flow_fr<-as.numeric(gsub("^([0-9]+)+\\-([0-9])+$","\\2",flowering))
## catminat_df<-data.frame(catminat_df,beg_flow_fr,end_flow_fr)
## ## I choose only a subset of the available columns in baseflor dataset
## selected_columns_catminat<-c(
## "species_name",
## "CHOROLOGIE",
## "inflorescence_fr",
## "sex_reprod_fr",
## "order_of_maturation",
## "poll_vect_fr",
## "fruit_type_fr",
## "dissemination_fr",
## "flower_colour_fr",
## "macule",
## "type_ligneux",
## "li_form_fr",
## "ell_L_fr",
## "elle_T_fr",
## "ell_C_fr",
## "ell_U_atm_fr",
## "ell_U_fr",
## "ell_R_fr",
## "ell_N_fr",
## "ell_S_fr",
## "Soil_texture_fr",
## "organic_matter_fr",
## "beg_flow_fr",
## "end_flow_fr",
## "PhytobaseID"
## )
## catminat_df<-catminat_df[,selected_columns_catminat]
## ## recode inflorescenses types
## inflorescences<-c(
## "capitule de capitules$"="Compound capitulum",
## "capitule simple$"="Capitulum",
## "c.*ne$"="Cone",
## "corymbe$"="Corymb",
## "corymbe de capitules$"="Corymb of capitula",
## "cyathe$"="Cyathium",
## "cyme bipare$"="Dichasia cyme",
## "cyme biscorpio.*de$"="Scorpioid cyme",
## "cyme capituliforme$"="Capituliform cyme ",
## "cyme d'.+pis$"="Cyme of spikes",
## "cyme d'ombelles$"="Cyme of umbels",
## "cyme de capitules$"="Cyme of capitula",
## "cyme de glom.+rules$"="Cyme of glomerula",
## "cyme multipare$"="Pleiochasium",
## "cyme unipare h.*lico.*de$"="Helicoid cyme",
## "cyme unipare scorpio.*de$"="Scorpioid cyme",
## ".*pi d'.+pillets$"="Spike of spikelets",
## ".*pi de capitules$"="Spike of capitula",
## ".+pi de cymes triflores$"="Spike of three-flowers cymes",
## ".+pi simple$"="Simple spike",
## "fleur solitaire lat.+rale$"="Solitary lateral flower",
## "fleur solitaire terminale$"="Solitary terminal flower",
## "glom.+rules$"="Glomerula",
## "glom.*rules spiciformes$"="Spike-like glomerula",
## "ombelle d'ombellules$"="Umbel of umbels",
## "ombelle simple$"="Simple umbel",
## "ombelle simple d'.+pis$"="Simple umbel of spikes",
## "ombelle simple de capitules$"="Simple umbel of capitula",
## "panicule d'.+pillets$"="Panicle of spikelets",
## "panicule spiciforme$"="Spike-like panicle",
## "rac.{1}me capituliforme$"="Capitulum-like raceme",
## "rac.{1}me d'.+pis$"="Raceme of spikes",
## "rac.{1}me d'ombelles$"="Raceme of umbels",
## "rac.{1}me de capitules$"="Raceme of capitula",
## "ra.{1}me de cymes bipares$"="Raceme of dichasia cymes",
## "rac.{1}me de cymes unipares h.{1}lico.{1}des$"="Raceme of helicoid cymes",
## "rac.{1}me de cymes unipares scorpio.{1}des$"="Raceme of scorpioid cymes",
## "rac.{1}me de rac.{1}mes$"="Raceme of racemes",
## "rac.{1}me simple$"="Simple raceme",
## "rac.{1}me de cymes bipares$"="Raceme of dichasia cymes",
## "spadice$"="Spadix",
## "verticille d'ombelles$"="Verticil of umbels"
## )
## catminat_df$inflorescence_fr<-catminat_replace(catminat_df$inflorescence_fr,inflorescences)
## ## recode fruit types
## fruit_types=c("ak.{1}ne$"="achene",
## "baie$"="berry",
## "capsule$"="capsule",
## "caryopse$"="caryopsis",
## "c.{1}ne$"="cone",
## "drupe$"="drupe",
## "follicule$"="follicle",
##                 "gousse$"="legume",
## "pyxide$"="pyxid",
## "samare$"="samara",
## "silique$"="silique"
## )
## catminat_df$fruit_type_fr<-catminat_replace(catminat_df$fruit_type_fr,fruit_types)
## ## recode flower colours
## flower_colours<-c("blanc$"="white",
## "jaune$"="yellow",
## "vert$"="green",
## "marron$"="brown",
## "bleu$"="blue",
## "jaune$"="yellow",
## "jauna$"="yellow",
## "noir$"="black"
## )
## catminat_df$flower_colour_fr<-catminat_replace(catminat_df$flower_colour_fr,flower_colours)
## ## recode dissemination types
## dissemination<-c(
## "an.*mochore$"="anemochores",
## "myrm.*cochore$"="myrmecochores",
## "myrm.*cochore$"="myrmecochores",
## "autochore$"="autochores",
## "barochore$"="barochores",
## "endozoochore$"="endozoochores",
## "endozoochorie$"="endozoochores",
## ".+pizoochore$"="epizoochores",
## "dyszoochore$"="dyszoochores",
## "hydrochore$"="hydrochores"
## )
## catminat_df$dissemination_fr<-catminat_replace(catminat_df$dissemination_fr,dissemination)
## ## recode sexual reproduction types
## sex_reprod<-c(
## "androdio.{1}que$"="Androdioecy",
## "gynomono.{1}que" ="Gynomonoecious",
## "gynodio.{1}que$"="Gynodioecious",
## "polygame$"="Polygamous",
## "mono.{1}que$"="Monoecious",
## "dio.{1}que$"="Dioecious",
## "hermaphrodite$"="Hermaphroditic",
## "polygame$"="Polygamous"
## )
## catminat_df$sex_reprod_fr<-catminat_replace(catminat_df$sex_reprod_fr,sex_reprod)
## ## remove species names where the phrase
## ## "sans nom" is found
## catminat_df<-catminat_df[grep("sans nom",catminat_df$species_name,invert=TRUE),]
## ## recode pollen vector
## poll_vec<-c(
## "an.{1}mogame"="wind",
## "autogame"="self",
## "apogame"="apogamy",
## "entomogame"="insect",
## "hydrogame"="water"
## )
## catminat_df$poll_vect_fr<-catminat_replace(as.character(catminat_df$poll_vect_fr),poll_vec)
## ## recode life_form_fr
## ## -- for the moment leave the original values
## ## remove entries without species names
## catminat_df<-catminat_df[catminat_df$species_name!="",]
## ## Remove double entries
## ## beware: catminat is now read with readxl package and empty cells are coded as <NA>. not as empty strings
## ## catminat_df<-catminat_df[!(catminat_df$species_name=="Juncus articulatus" & catminat_df$sex_reprod_fr==""),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Juncus articulatus" & is.na(catminat_df$sex_reprod_fr)),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Capparis spinosa" & catminat_df$PhytobaseID=="13450"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Centaurium ery. erythraea" & catminat_df$PhytobaseID=="2361"),]
## ## catminat_df<-catminat_df[!(catminat_df$species_name=="Centaurium ery. erythraea" & catminat_df$PhytobaseID=="2361"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Chenopodium ambrosioides" & is.na(catminat_df$flower_colour_fr)),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Chenopodium opulifolium" & catminat_df$CHOROLOGIE=="cosmopolite"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Cornus sanguinea" & catminat_df$PhytobaseID=="2258"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Crataegus mon. monogyna" & catminat_df$PhytobaseID=="1657"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Daphne laureola" & catminat_df$PhytobaseID=="2084"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Eryngium bourgatii" & catminat_df$PhytobaseID=="11913"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Festuca ovi. ovina" & catminat_df$PhytobaseID=="352"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Gaudinia fragilis" & catminat_df$li_form_fr=="test(hbis)"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Genista salzmannii" & catminat_df$PhytobaseID=="12481"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Medicago turbinata" & catminat_df$PhytobaseID=="12456"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Ophrys bertolonii" & catminat_df$PhytobaseID=="725"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Populus nigra" & catminat_df$PhytobaseID=="7038"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Rhamnus saxatilis" & catminat_df$PhytobaseID=="16181"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Rosmarinus officinalis" & catminat_df$PhytobaseID=="16080"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Salix rosmarinifolia" & catminat_df$PhytobaseID=="795"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Salvia officinalis" & catminat_df$PhytobaseID=="14526"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Taraxacum sagittilobum" & catminat_df$PhytobaseID=="15326"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Thlaspi rot. rotundifolium" & catminat_df$PhytobaseID=="1274"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Linum bienne" & catminat_df$li_form_fr=="test(hbis)"),]
## ## remove double entries
## catminat_df<-catminat_df[!(duplicated(catminat_df)),]
## ## fix flowering dates for Medicago turbinata
## catminat_df[catminat_df$PhytobaseID==12456,c("beg_flow_fr","end_flow_fr")]<-c(4,6)
## ## and remove double entry
## catminat_df<-catminat_df[catminat_df$PhytobaseID!=11083,]
## ## fix CHOROLOGIE for Rubus cinerascens and Rubus conspicuus and then remove double entries
## catminat_df<-catminat_df[(1:nrow(catminat_df))!=which(catminat_df$species_name=="Rubus cinerascens")[1],]
## catminat_df<-catminat_df[(1:nrow(catminat_df))!=which(catminat_df$species_name=="Rubus conspicuus")[1],]
## ##catminat_df<-catminat_df[row.names(catminat_df)!=(which(catminat_df$species_name=="Erodium glandulosum")[1]),]
## ##catminat_df<-catminat_df[row.names(catminat_df)!=(which(catminat_df$species_name=="Erodium rupicola")[1]),]
## ##catminat_df<-catminat_df[row.names(catminat_df)!=(which(catminat_df$species_name=="Onosma echioides")[1]),]
## ## remove CHOROLOGIE column
## catminat_df<-catminat_df[,names(catminat_df)!="CHOROLOGIE"]
## save(file=file.path(directory,"catminat.Rda"),catminat_df)
## }
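## Note (added): catminat_replace() is used throughout the commented-out code
## above but is not defined in this file; it presumably lives elsewhere in the
## package. A minimal sketch of what such a regex-based recoding helper could
## look like (an assumption, not the package's actual implementation):
## catminat_replace <- function(x, codes) {
##   x <- as.character(x)
##   for (pattern in names(codes)) {
##     x <- gsub(pattern, codes[[pattern]], x)
##   }
##   x
## }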
|
/R/Hodgson.R
|
no_license
|
GioBo/TR8
|
R
| false | false | 13,772 |
r
|
## "rac.{1}me de cymes unipares h.{1}lico.{1}des$"="Raceme of helicoid cymes",
## "rac.{1}me de cymes unipares scorpio.{1}des$"="Raceme of scorpioid cymes",
## "rac.{1}me de rac.{1}mes$"="Raceme of racemes",
## "rac.{1}me simple$"="Simple raceme",
## "rac.{1}me de cymes bipares$"="Raceme of dichasia cymes",
## "spadice$"="Spadix",
## "verticille d'ombelles$"="Verticil of umbels"
## )
## catminat_df$inflorescence_fr<-catminat_replace(catminat_df$inflorescence_fr,inflorescences)
## ## recode fruit types
## fruit_types=c("ak.{1}ne$"="achene",
## "baie$"="berry",
## "capsule$"="capsule",
## "caryopse$"="caryopsis",
## "c.{1}ne$"="cone",
## "drupe$"="drupe",
## "follicule$"="follicle",
##               "gousse$"="legume",
## "pyxide$"="pyxid",
## "samare$"="samara",
## "silique$"="silique"
## )
## catminat_df$fruit_type_fr<-catminat_replace(catminat_df$fruit_type_fr,fruit_types)
## ## recode flower colours
## flower_colours<-c("blanc$"="white",
## "jaune$"="yellow",
## "vert$"="green",
## "marron$"="brown",
## "bleu$"="blue",
## "jaune$"="yellow",
## "jauna$"="yellow",
## "noir$"="black"
## )
## catminat_df$flower_colour_fr<-catminat_replace(catminat_df$flower_colour_fr,flower_colours)
## ## recode dissemination types
## dissemination<-c(
## "an.*mochore$"="anemochores",
## "myrm.*cochore$"="myrmecochores",
## "myrm.*cochore$"="myrmecochores",
## "autochore$"="autochores",
## "barochore$"="barochores",
## "endozoochore$"="endozoochores",
## "endozoochorie$"="endozoochores",
## ".+pizoochore$"="epizoochores",
## "dyszoochore$"="dyszoochores",
## "hydrochore$"="hydrochores"
## )
## catminat_df$dissemination_fr<-catminat_replace(catminat_df$dissemination_fr,dissemination)
## ## recode sexual reproduction types
## sex_reprod<-c(
## "androdio.{1}que$"="Androdioecy",
## "gynomono.{1}que" ="Gynomonoecious",
## "gynodio.{1}que$"="Gynodioecious",
## "polygame$"="Polygamous",
## "mono.{1}que$"="Monoecious",
## "dio.{1}que$"="Dioecious",
## "hermaphrodite$"="Hermaphroditic",
## "polygame$"="Polygamous"
## )
## catminat_df$sex_reprod_fr<-catminat_replace(catminat_df$sex_reprod_fr,sex_reprod)
## ## remove species names where the phrase
## ## "sans nom" is found
## catminat_df<-catminat_df[grep("sans nom",catminat_df$species_name,invert=TRUE),]
## ## recode pollen vector
## poll_vec<-c(
## "an.{1}mogame"="wind",
## "autogame"="self",
## "apogame"="apogamy",
## "entomogame"="insect",
## "hydrogame"="water"
## )
## catminat_df$poll_vect_fr<-catminat_replace(as.character(catminat_df$poll_vect_fr),poll_vec)
## ## recode life_form_fr
## ## -- for the moment leave the original values
## ## remove entries without species names
## catminat_df<-catminat_df[catminat_df$species_name!="",]
## ## Remove double entries
## ## beware: catminat is now read with readxl package and empty cells are coded as <NA>. not as empty strings
## ## catminat_df<-catminat_df[!(catminat_df$species_name=="Juncus articulatus" & catminat_df$sex_reprod_fr==""),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Juncus articulatus" & is.na(catminat_df$sex_reprod_fr)),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Capparis spinosa" & catminat_df$PhytobaseID=="13450"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Centaurium ery. erythraea" & catminat_df$PhytobaseID=="2361"),]
## ## catminat_df<-catminat_df[!(catminat_df$species_name=="Centaurium ery. erythraea" & catminat_df$PhytobaseID=="2361"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Chenopodium ambrosioides" & is.na(catminat_df$flower_colour_fr)),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Chenopodium opulifolium" & catminat_df$CHOROLOGIE=="cosmopolite"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Cornus sanguinea" & catminat_df$PhytobaseID=="2258"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Crataegus mon. monogyna" & catminat_df$PhytobaseID=="1657"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Daphne laureola" & catminat_df$PhytobaseID=="2084"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Eryngium bourgatii" & catminat_df$PhytobaseID=="11913"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Festuca ovi. ovina" & catminat_df$PhytobaseID=="352"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Gaudinia fragilis" & catminat_df$li_form_fr=="test(hbis)"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Genista salzmannii" & catminat_df$PhytobaseID=="12481"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Medicago turbinata" & catminat_df$PhytobaseID=="12456"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Ophrys bertolonii" & catminat_df$PhytobaseID=="725"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Populus nigra" & catminat_df$PhytobaseID=="7038"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Rhamnus saxatilis" & catminat_df$PhytobaseID=="16181"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Rosmarinus officinalis" & catminat_df$PhytobaseID=="16080"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Salix rosmarinifolia" & catminat_df$PhytobaseID=="795"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Salvia officinalis" & catminat_df$PhytobaseID=="14526"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Taraxacum sagittilobum" & catminat_df$PhytobaseID=="15326"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Thlaspi rot. rotundifolium" & catminat_df$PhytobaseID=="1274"),]
## catminat_df<-catminat_df[!(catminat_df$species_name=="Linum bienne" & catminat_df$li_form_fr=="test(hbis)"),]
## ## remove double entries
## catminat_df<-catminat_df[!(duplicated(catminat_df)),]
## ## fix flowering dates for Medicago turbinata
## catminat_df[catminat_df$PhytobaseID==12456,c("beg_flow_fr","end_flow_fr")]<-c(4,6)
## ## and remove double entry
## catminat_df<-catminat_df[catminat_df$PhytobaseID!=11083,]
## ## fix CHOROLOGIE for Rubus cinerascens and Rubus conspicuus and then remove double entries
## catminat_df<-catminat_df[(1:nrow(catminat_df))!=which(catminat_df$species_name=="Rubus cinerascens")[1],]
## catminat_df<-catminat_df[(1:nrow(catminat_df))!=which(catminat_df$species_name=="Rubus conspicuus")[1],]
## ##catminat_df<-catminat_df[row.names(catminat_df)!=(which(catminat_df$species_name=="Erodium glandulosum")[1]),]
## ##catminat_df<-catminat_df[row.names(catminat_df)!=(which(catminat_df$species_name=="Erodium rupicola")[1]),]
## ##catminat_df<-catminat_df[row.names(catminat_df)!=(which(catminat_df$species_name=="Onosma echioides")[1]),]
## ## remove CHOROLOGIE column
## catminat_df<-catminat_df[,names(catminat_df)!="CHOROLOGIE"]
## save(file=file.path(directory,"catminat.Rda"),catminat_df)
## }
|
paged_teaching_r_questions <- data.frame(
stringsAsFactors = FALSE,
question = c("What's your age?",
"Which best describes your gender?",
"Which best describes your gender?","Which best describes your gender?",
"Which best describes your gender?",
"Which best describes your gender?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?",
"What was your first language?","In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?","In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?","In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?","In what language do you read most often?",
"In what language do you read most often?",
"Have you ever learned to program in R?",
"Have you ever learned to program in R?",
"If yes, how many years have you been using R?",
"Have you ever learned a programming language (other than R)?",
"Have you ever learned a programming language (other than R)?",
"If yes, which language(s) and how many years have you been using each language?",
"Have you ever completed a data analysis?",
"Have you ever completed a data analysis?",
"If yes, approximately how many data analyses have you completed?",
"If yes, approximately how many data analyses have you completed?",
"If yes, approximately how many data analyses have you completed?",
"If yes, approximately how many data analyses have you completed?"),
option = c("25","Female","Male",
"Prefer not to say","Prefer to self describe",NA,
"Did not attend high school","Some high school",
"High school graduate","Some college","College","Graduate Work","Arabic",
"Armenian","Chinese","English","French","Creole",
"German","Greek","Gujarati","Hebrew","Hindi",
"Italian","Japanese","Other",NA,"Arabic","Armenian",
"Chinese","English","French","Creole","German","Greek",
"Gujarati","Hebrew","Hindi","Italian","Japanese",
"Other",NA,"Yes","No","5","Yes","No",NA,"Yes","No",
"0 to 5","5 to 10","10 to 15","15+"),
input_type = c("numeric","mc","mc","mc",
"mc","text","select","select","select","select",
"select","select","select","select","select","select",
"select","select","select","select","select","select",
"select","select","select","select","text","select",
"select","select","select","select","select",
"select","select","select","select","select","select",
"select","select","text","y/n","y/n","numeric","y/n",
"y/n","text","y/n","y/n","mc","mc","mc","mc"),
input_id = c("age","gender","gender",
"gender","gender","self_describe_gender",
"education_attained","education_attained","education_attained",
"education_attained","education_attained","education_attained",
"first_language","first_language","first_language",
"first_language","first_language","first_language",
"first_language","first_language","first_language",
"first_language","first_language","first_language",
"first_language","first_language","first_language_other",
"read_language","read_language","read_language",
"read_language","read_language","read_language","read_language",
"read_language","read_language","read_language",
"read_language","read_language","read_language","read_language",
"read_language_other","learned_r","learned_r",
"years_using_r","learned_programming_not_r",
"learned_programming_not_r","years_programming_not_r",
"completed_data_analysis","completed_data_analysis",
"number_completed_data_analysis","number_completed_data_analysis",
"number_completed_data_analysis","number_completed_data_analysis"),
dependence = c(NA,NA,NA,NA,NA,"gender",
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
NA,NA,NA,NA,NA,"first_language",NA,NA,NA,NA,
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,"read_language",
NA,NA,"learned_r",NA,NA,"learned_programming_not_r",
NA,NA,"completed_data_analysis",
"completed_data_analysis","completed_data_analysis","completed_data_analysis"),
dependence_value = c(NA,NA,NA,NA,NA,
"Prefer to self describe",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,"Other",NA,NA,
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,"Other",
NA,NA,"Yes",NA,NA,"Yes",NA,NA,"Yes","Yes","Yes",
"Yes"),
required = c(TRUE,TRUE,TRUE,TRUE,TRUE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,TRUE,TRUE,FALSE,TRUE,
TRUE,FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE),
page = c("intro","intro","intro",
"intro","intro","intro","intro","intro","intro","intro",
"intro","intro","mid","mid","mid","mid","mid",
"mid","mid","mid","mid","mid","mid","mid","mid",
"mid","mid","mid","mid","mid","mid","mid","mid",
"mid","mid","mid","mid","mid","mid","mid","mid","mid",
"finale","finale","finale","finale","finale",
"finale","finale","finale","finale","finale","finale",
"finale")
)
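# Note (added): rows with non-NA `dependence`/`dependence_value` define conditional questions --
# shinysurveys shows them only when the input named in `dependence` takes the value given in
# `dependence_value` (e.g. the self-describe gender text box appears only when gender is
# "Prefer to self describe").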
test_that("surveyOutput() works - paged_questions", {
local_edition(3)
expect_snapshot_output(shiny::fluidPage(
shinysurveys::surveyOutput(df = paged_teaching_r_questions,
survey_title = "Test This MultiPaged Survey")
))
})
|
/tests/testthat/test-surveyOutput-paged_questions.R
|
permissive
|
nklepeis/shinysurveys
|
R
| false | false | 7,713 |
r
|
paged_teaching_r_questions <- data.frame(
stringsAsFactors = FALSE,
question = c("What's your age?",
"Which best describes your gender?",
"Which best describes your gender?","Which best describes your gender?",
"Which best describes your gender?",
"Which best describes your gender?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What is the highest level of education you have attained?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?","What was your first language?",
"What was your first language?",
"What was your first language?","In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?","In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?","In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?",
"In what language do you read most often?","In what language do you read most often?",
"In what language do you read most often?",
"Have you ever learned to program in R?",
"Have you ever learned to program in R?",
"If yes, how many years have you been using R?",
"Have you ever learned a programming language (other than R)?",
"Have you ever learned a programming language (other than R)?",
"If yes, which language(s) and how many years have you been using each language?",
"Have you ever completed a data analysis?",
"Have you ever completed a data analysis?",
"If yes, approximately how many data analyses have you completed?",
"If yes, approximately how many data analyses have you completed?",
"If yes, approximately how many data analyses have you completed?",
"If yes, approximately how many data analyses have you completed?"),
option = c("25","Female","Male",
"Prefer not to say","Prefer to self describe",NA,
"Did not attend high school","Some high school",
"High school graduate","Some college","College","Graduate Work","Arabic",
"Armenian","Chinese","English","French","Creole",
"German","Greek","Gujarati","Hebrew","Hindi",
"Italian","Japanese","Other",NA,"Arabic","Armenian",
"Chinese","English","French","Creole","German","Greek",
"Gujarati","Hebrew","Hindi","Italian","Japanese",
"Other",NA,"Yes","No","5","Yes","No",NA,"Yes","No",
"0 to 5","5 to 10","10 to 15","15+"),
input_type = c("numeric","mc","mc","mc",
"mc","text","select","select","select","select",
"select","select","select","select","select","select",
"select","select","select","select","select","select",
"select","select","select","select","text","select",
"select","select","select","select","select",
"select","select","select","select","select","select",
"select","select","text","y/n","y/n","numeric","y/n",
"y/n","text","y/n","y/n","mc","mc","mc","mc"),
input_id = c("age","gender","gender",
"gender","gender","self_describe_gender",
"education_attained","education_attained","education_attained",
"education_attained","education_attained","education_attained",
"first_language","first_language","first_language",
"first_language","first_language","first_language",
"first_language","first_language","first_language",
"first_language","first_language","first_language",
"first_language","first_language","first_language_other",
"read_language","read_language","read_language",
"read_language","read_language","read_language","read_language",
"read_language","read_language","read_language",
"read_language","read_language","read_language","read_language",
"read_language_other","learned_r","learned_r",
"years_using_r","learned_programming_not_r",
"learned_programming_not_r","years_programming_not_r",
"completed_data_analysis","completed_data_analysis",
"number_completed_data_analysis","number_completed_data_analysis",
"number_completed_data_analysis","number_completed_data_analysis"),
dependence = c(NA,NA,NA,NA,NA,"gender",
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
NA,NA,NA,NA,NA,"first_language",NA,NA,NA,NA,
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,"read_language",
NA,NA,"learned_r",NA,NA,"learned_programming_not_r",
NA,NA,"completed_data_analysis",
"completed_data_analysis","completed_data_analysis","completed_data_analysis"),
dependence_value = c(NA,NA,NA,NA,NA,
"Prefer to self describe",NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,"Other",NA,NA,
NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,"Other",
NA,NA,"Yes",NA,NA,"Yes",NA,NA,"Yes","Yes","Yes",
"Yes"),
required = c(TRUE,TRUE,TRUE,TRUE,TRUE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,
FALSE,FALSE,FALSE,FALSE,TRUE,TRUE,FALSE,TRUE,
TRUE,FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,FALSE),
page = c("intro","intro","intro",
"intro","intro","intro","intro","intro","intro","intro",
"intro","intro","mid","mid","mid","mid","mid",
"mid","mid","mid","mid","mid","mid","mid","mid",
"mid","mid","mid","mid","mid","mid","mid","mid",
"mid","mid","mid","mid","mid","mid","mid","mid","mid",
"finale","finale","finale","finale","finale",
"finale","finale","finale","finale","finale","finale",
"finale")
)
test_that("surveyOutput() works - paged_questions", {
local_edition(3)
expect_snapshot_output(shiny::fluidPage(
shinysurveys::surveyOutput(df = paged_teaching_r_questions,
survey_title = "Test This MultiPaged Survey")
))
})
|
/codes/Q4.R
|
no_license
|
marcelo-tibau/mc2
|
R
| false | false | 2,120 |
r
| ||
.onAttach <- function(lib, pkg)
cat( c( "skogR version 0.3", "Type skogR.News() to see new features/changes/bug fixes."), sep="\n")
|
/R/skogR.R
|
no_license
|
hansoleorka/skogR
|
R
| false | false | 134 |
r
|
.onAttach <- function(lib, pkg)
cat( c( "skogR version 0.3", "Type skogR.News() to see new features/changes/bug fixes."), sep="\n")
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 124282
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 124281
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 124281
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b18_PR_4_2.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 40826
c no.of clauses 124282
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 124281
c
c QBFLIB/Sauer-Reimer/ITC99/b18_PR_4_2.qdimacs 40826 124282 E1 [1] 0 524 40207 124281 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Sauer-Reimer/ITC99/b18_PR_4_2/b18_PR_4_2.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 724 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 124282
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 124281
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 124281
c
c Input Parameter (command line, file):
c input filename QBFLIB/Sauer-Reimer/ITC99/b18_PR_4_2.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 40826
c no.of clauses 124282
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 124281
c
c QBFLIB/Sauer-Reimer/ITC99/b18_PR_4_2.qdimacs 40826 124282 E1 [1] 0 524 40207 124281 RED
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze_binary.R
\name{deepstate_harness_analyze_pkg}
\alias{deepstate_harness_analyze_pkg}
\title{Analyze Harness for the Package}
\usage{
deepstate_harness_analyze_pkg(path, testfiles = "all", max_inputs = "all")
}
\arguments{
\item{path}{path of the test package to analyze}
\item{testfiles}{number of functions to analyze in the package}
\item{max_inputs}{maximum number of inputs to run on the executable under valgrind. defaults to all}
}
\value{
A list of data tables with inputs, error messages, address trace and line numbers for specified testfiles.
}
\description{
Analyze all the function specific testharness in the package under valgrind.
}
\examples{
path <- system.file("testpkgs/testSAN", package = "RcppDeepState")
analyzed.harness <- deepstate_harness_analyze_pkg(path)
print(analyzed.harness)
}
|
/man/deepstate_harness_analyze_pkg.Rd
|
no_license
|
akhikolla/RcppDeepState
|
R
| false | true | 895 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze_binary.R
\name{deepstate_harness_analyze_pkg}
\alias{deepstate_harness_analyze_pkg}
\title{Analyze Harness for the Package}
\usage{
deepstate_harness_analyze_pkg(path, testfiles = "all", max_inputs = "all")
}
\arguments{
\item{path}{path of the test package to analyze}
\item{testfiles}{number of functions to analyze in the package}
\item{max_inputs}{maximum number of inputs to run on the executable under valgrind. defaults to all}
}
\value{
A list of data tables with inputs, error messages, address trace and line numbers for specified testfiles.
}
\description{
Analyze all the function specific testharness in the package under valgrind.
}
\examples{
path <- system.file("testpkgs/testSAN", package = "RcppDeepState")
analyzed.harness <- deepstate_harness_analyze_pkg(path)
print(analyzed.harness)
}
|
library(data.table)
# the directory where all data is located
dataDir <- file.path(getwd(), "UCI HAR Dataset")
# the file with activity labels
actfile <- file.path(dataDir, "activity_labels.txt")
# the file with features
featfile <- file.path(dataDir, "features.txt")
# read the feature names
features <- read.table(featfile)
featureNames <- features$V2
# we are only interested in the means and the standard deviations
# these are feature names with mean(), meanFreq() and std() in it.
extractedFeatures <- grepl("mean\\(\\)|meanFreq\\(\\)|std\\(\\)", featureNames)
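# Added illustration: on two hypothetical feature names the pattern keeps mean()/std() style
# columns and drops the rest, e.g.
# grepl("mean\\(\\)|meanFreq\\(\\)|std\\(\\)", c("tBodyAcc-mean()-X", "tBodyAcc-max()-X"))
# #> TRUE FALSE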
# read the activity labels
activity_labels <- read.table(actfile)
# the train data and test data must be handled in the same way
# to be sure that this is satisfied it is captured in a function
# that can read them both.
ReadDataFrame <- function(mode="train") {
# the directory where the train/test data is located
dir <- file.path(dataDir, mode)
# the names of the files
subfile <- file.path(dir, paste0("subject_", mode, ".txt"))
xfile <- file.path(dir, paste0("X_", mode, ".txt"))
  yfile <- file.path(dir, paste0("y_", mode, ".txt"))
# read the three files
x <- read.table(xfile)
y <- read.table(yfile)
subjects <- read.table(subfile)
# give the features of the x-file descriptive names
names(x) <- featureNames
# and select only those columns we are interested in.
x <- x[,extractedFeatures]
# add a column indicating whether it is train or test data
x$train_test_status <- mode
# give the activities descriptive labels instead of numbers
y[,1] <- activity_labels[y[,1], 2]
# give the columns descriptive names
names(y) <- "Activity_Label"
names(subjects) <- "Subject"
# combine the subjects, y and x in a single data frame
cbind(subjects, y, x)
}
# read the training data
traindf <- ReadDataFrame("train")
# read the test data
testdf <- ReadDataFrame("test")
# combine them in a single frame
df <- rbind(traindf, testdf)
# calculate the average for every column grouped for every person and activity
# of course, we use dplyr and tidyr
library(dplyr)
library(tidyr)
# to avoid taking the average of the "train_test_status", this is part of the split
tidydf <-
df %>%
group_by (Subject, Activity_Label, train_test_status) %>%
summarise_each(funs(mean))
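# Note (added): summarise_each()/funs() are superseded in current dplyr; assuming a recent
# dplyr version, summarise(across(everything(), mean)) is the equivalent modern spelling.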
# write out the results
datafile <- file.path(dataDir, "data.txt")
tidydatafile <- file.path(dataDir, "tidy_data.txt")
# the whole table is not required, only the tidy table
# write.table(df, file = datafile)
write.table(tidydf, file = tidydatafile, row.names=FALSE)
|
/run_analysis.R
|
no_license
|
fritss/GettingAndCleaningData
|
R
| false | false | 2,574 |
r
|
library(data.table)
# the directory where all data is located
dataDir <- file.path(getwd(), "UCI HAR Dataset")
# the file with activity labels
actfile <- file.path(dataDir, "activity_labels.txt")
# the file with features
featfile <- file.path(dataDir, "features.txt")
# read the feature names
features <- read.table(featfile)
featureNames <- features$V2
# we are only interested in the means and the standard deviations
# these are feature names with mean(), meanFreq() and std() in it.
extractedFeatures <- grepl("mean\\(\\)|meanFreq\\(\\)|std\\(\\)", featureNames)
# read the activity labels
activity_labels <- read.table(actfile)
# the train data and test data must be handled in the same way
# to be sure that this is satisfied it is captured in a function
# that can read them both.
ReadDataFrame <- function(mode="train") {
# the directory where the train/test data is located
dir <- file.path(dataDir, mode)
# the names of the files
subfile <- file.path(dir, paste0("subject_", mode, ".txt"))
xfile <- file.path(dir, paste0("X_", mode, ".txt"))
yfile <- file.path(dir, paste0("Y_", mode, ".txt"))
# read the three files
x <- read.table(xfile)
y <- read.table(yfile)
subjects <- read.table(subfile)
# give the features of the x-file descriptive names
names(x) <- featureNames
# and select only those columns we are interested in.
x <- x[,extractedFeatures]
# add a column indicating whether it is train or test data
x$train_test_status <- mode
# give the activities descriptive labels instead of numbers
y[,1] <- activity_labels[y[,1], 2]
# give the columns descriptive names
names(y) <- "Activity_Label"
names(subjects) <- "Subject"
# combine the subjects, y and x in a single data frame
cbind(subjects, y, x)
}
# read the training data
traindf <- ReadDataFrame("train")
# read the test data
testdf <- ReadDataFrame("test")
# combine them in a single frame
df <- rbind(traindf, testdf)
# calculate the average for every column grouped for every person and activity
# of course, we use dplyr and tidyr
library(dplyr)
library(tidyr)
# to avoid taking the average of the "train_test_status", this is part of the split
tidydf <-
df %>%
group_by (Subject, Activity_Label, train_test_status) %>%
summarise_each(funs(mean))
# write out the results
datafile <- file.path(dataDir, "data.txt")
tidydatafile <- file.path(dataDir, "tidy_data.txt")
# the whole table is not required, only the tidy table
# write.table(df, file = datafile)
write.table(tidydf, file = tidydatafile, row.names=FALSE)
|
#' Set values of Initial Richness
#'
#' This function sets the initial species richness values
#'
#' @param r A \code{raster} object
#'
#' @param draster A \code{raster} object with distance to nearest oak TODO (to be completed)
#'
#' @param r_range A \code{data frame} with three columns: \code{value} of land use
#' (\code{integer}: 0 = "Other", 1 = "Pine plantation", 2 = "Natural Forests",
#' 3 = "Crop"); \code{lowRich} and \code{upRich} (lower and upper value of the
#' range of Richness: See Gomez-Aparicio et al 2009)
#'
#' @param treedensity density of the pine plantation (\code{integer})
#'
#' @param pastUse the past land use of the pine plantation (\code{character}).
#' One of "Oak", "Shrubland", "Pasture" or "Crop"
#'
#' @param rescale If "TRUE" the results are rescaled
#'
#' @return A \code{raster} object with values of initial Richness for each
#' pixel.
#'
#' @references
#'
#' Gomez-Aparicio L, Zavala MA, Bonet FJ, Zamora R (2009) Are pine plantations
#' valid tools for restoring Mediterranean forests? An assessment along abiotic
#' and biotic gradients. Ecological Applications, 19: 2124 - 2141.
#'
#'
initRichness <- function(r, draster, r_range, treedensity, pastUse, rescale=TRUE){
# --- N cells
ncell_pp <- ncell(r[r == 1])
ncell_nf <- ncell(r[r == 2])
ncell_crop <- ncell(r[r == 3])
# --- Potential Richness values
## Ranges
range_pp <- r_range[which(r_range$value == 1), ]
range_nf <- r_range[which(r_range$value == 2), ]
range_crop <- r_range[which(r_range$value == 3), ]
## Potential vectors
potR_pp <- runif(ncell_pp*3, range_pp$lowRich, range_pp$upRich)
potR_nf <- runif(ncell_nf*3, range_nf$lowRich, range_nf$upRich)
potR_crop <- runif(ncell_crop*3, range_crop$lowRich, range_crop$upRich)
# --- Reclassify
r[r == 0] <- NA
r[r == 1] <- -100
r[r == 2] <- -200
r[r == 3] <- -300
# --- Pine plantation
## ~ TreeDensity
### Fraction of Potential Richness (tree Density Eq. 3 Gomez Aparicio et al. 2009)
ftreeden <- exp(-0.5*((treedensity - 0.22)/1504.1)^2)
## ~ Distance to Seed Source
### Compute diversity raster (See Gonzalez-Moreno et al. 2011)
sh <- calc(draster, fun=function(x){1.7605 - 0.0932*(sqrt(sqrt(x)))})
### Create a stack with the shanon diversity raster and landuse raster, and then compute values for pine plantations
s <- calc(stack(r, sh), fun=function(x) ifelse(x[1] == -100 , (x[1]/-100)*x[2], NA))
### Scale the distance effect from 0 to 1
sh_scaled <- (s - cellStats(s, "min"))/(cellStats(s, "max") - cellStats(s, "min"))
## ~ PastUSE
### Past Land Use
fplu <- ifelse(pastUse == 'Oak', .9999,
ifelse(pastUse == 'Shrubland', .4982,
ifelse(pastUse == 'Crop', .0279, .0001)))
## Combine factor to correct pine plantations
f_pine <- (sh_scaled*0.35) + (.45*ftreeden + .2*fplu)
r[r == -100] <- sample(potR_pp, ncell_pp, replace = TRUE)
r <- calc(stack(r, f_pine), fun = function(x) ifelse(x[1] < -100, x[1], x[1]*x[2]))
# --- Crops
r[r == -300] <- sample(potR_crop, ncell_crop, replace = TRUE)
# --- Natural forest
r[r == -200] <- sample(potR_nf, ncell_nf, replace = TRUE)
# Rescale results
if (rescale)
r <- (r - cellStats(r, "min"))/(cellStats(r, "max") - cellStats(r, "min"))
return(r)
}
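## Usage sketch (added; all objects below are hypothetical, not part of the original package):
## `lu` is a land-use raster coded 0-3, `d_oak` a distance-to-oak raster on the same grid,
## and `rich_range` a data frame with columns value, lowRich and upRich as described above.
# library(raster)
# lu <- raster(matrix(sample(0:3, 100, replace = TRUE), nrow = 10))
# d_oak <- raster(matrix(runif(100, 0, 500), nrow = 10))
# rich_range <- data.frame(value = 0:3, lowRich = c(0, 12, 13, 1), upRich = c(0, 13, 16, 2))
# ri <- initRichness(lu, d_oak, rich_range, treedensity = 1000, pastUse = "Oak")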
|
/R/initRichness.R
|
no_license
|
ajpelu/respineDocencia
|
R
| false | false | 3,301 |
r
|
#' Set values of Initial Richness
#'
#' This function sets the initial species richness values
#'
#' @param r A \code{raster} object
#'
#' @param draster A \code{raster} object with distance to nearest oak TODO (to be completed)
#'
#' @param r_range A \code{data frame} with three columns: \code{value} of land use
#' (\code{integer}: 0 = "Other", 1 = "Pine plantation", 2 = "Natural Forests",
#' 3 = "Crop"); \code{lowRich} and \code{upRich} (lower an upper value of the
#' range of Richness: See Gomez-Aparicio et al 2009)
#'
#' @param treedensity density of the pine plantation (\code{integer})
#'
#' @param pastUse the past land use of the pine plantation (\code{character}).
#' One of "Oak", "Shrubland", "Pasture" or "Crop"
#'
#' @param rescale If "TRUE" the results are rescaled
#'
#' @return A \code{raster} object with values of initial Richness for each
#' pixel.
#'
#' @references
#'
#' Gomez-Aparicio L, Zavala MA, Bonet FJ, Zamora R (2009) Are pine plantations
#' valid tools for restoring Mediterranean forests? An assessment along abiotic
#' and biotic gradients. Ecological Applications, 19: 2124 - 2141.
#'
#'
initRichness <- function(r, draster, r_range, treedensity, pastUse, rescale=TRUE){
# --- N cells
ncell_pp <- ncell(r[r == 1])
ncell_nf <- ncell(r[r == 2])
ncell_crop <- ncell(r[r == 3])
# --- Potential Richness values
## Ranges
range_pp <- r_range[which(r_range$value == 1), ]
range_nf <- r_range[which(r_range$value == 2), ]
range_crop <- r_range[which(r_range$value == 3), ]
## Potential vectors
potR_pp <- runif(ncell_pp*3, range_pp$lowRich, range_pp$upRich)
potR_nf <- runif(ncell_nf*3, range_nf$lowRich, range_nf$upRich)
potR_crop <- runif(ncell_crop*3, range_crop$lowRich, range_crop$upRich)
# --- Reclassify
r[r == 0] <- NA
r[r == 1] <- -100
r[r == 2] <- -200
r[r == 3] <- -300
# --- Pine plantation
## ~ TreeDensity
### Fraction of Potential Richness (tree Density Eq. 3 Gomez Aparicio et al. 2009)
ftreeden <- exp(-0.5*((treedensity - 0.22)/1504.1)^2)
## ~ Distance to Seed Source
### Compute diversity raster (See Gonzalez-Moreno et al. 2011)
sh <- calc(draster, fun=function(x){1.7605 - 0.0932*(sqrt(sqrt(x)))})
### Create a stack with the shanon diversity raster and landuse raster, and then compute values for pine plantations
s <- calc(stack(r, sh), fun=function(x) ifelse(x[1] == -100 , (x[1]/-100)*x[2], NA))
### Scale the distance effect from 0 to 1
sh_scaled <- (s - cellStats(s, "min"))/(cellStats(s, "max") - cellStats(s, "min"))
## ~ PastUSE
### Past Land Use
fplu <- ifelse(pastUse == 'Oak', .9999,
ifelse(pastUse == 'Shrubland', .4982,
ifelse(pastUse == 'Crop', .0279, .0001)))
## Combine factor to correct pine plantations
f_pine <- (sh_scaled*0.35) + (.45*ftreeden + .2*fplu)
r[r == -100] <- sample(potR_pp, ncell_pp, replace = TRUE)
r <- calc(stack(r, f_pine), fun = function(x) ifelse(x[1] < -100, x[1], x[1]*x[2]))
# --- Crops
r[r == -300] <- sample(potR_crop, ncell_crop, replace = TRUE)
# --- Natural forest
r[r == -200] <- sample(potR_nf, ncell_nf, replace = TRUE)
# Rescale results
if (rescale)
r <- (r - cellStats(r, "min"))/(cellStats(r, "max") - cellStats(r, "min"))
return(r)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/draw.arc.R
\name{getYmult}
\alias{getYmult}
\title{Correct for aspect and coordinate ratio}
\usage{
getYmult()
}
\description{
From plotrix
}
|
/man/getYmult.Rd
|
no_license
|
antiphon/Kdirectional
|
R
| false | true | 220 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/draw.arc.R
\name{getYmult}
\alias{getYmult}
\title{Correct for aspect and coordinate ratio}
\usage{
getYmult()
}
\description{
From plotrix
}
|
countOdds <- function(x) {
k <- 0
for (n in x) {
if (n%%2 == 1) k <- k+1
}
return(k)
}
countEvens <- function(x) {
k <- 0
for (n in x) {
if (n%%2 == 0) k <- k+1
}
return(k)
}
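# Note (added): vectorized one-liners give the same counts without an explicit loop,
# e.g. sum(x %% 2 == 1) for countOdds(x) and sum(x %% 2 == 0) for countEvens(x).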
hypothenuseLength <- function(a, b) {
a > 0
b > 0
return(sqrt(a^2 + b^2))
}
lawofCosines <- function(a,b,t) {
a > 0
b > 0
t > 0 & t < 2*pi
return(sqrt(a^2+b^2 - 2*a*b*cos(t)))
}
thetafromLengths = function(a,b,c) {
a > 0
b > 0
c > 0
return(acos((a^2 + b^2 - c^2) / (2*a*b)))
}
thetaFromLengthsTest = function(a,b,t) {
a > 0
b > 0
t > 0 & t < 2*pi
c <- sqrt(a^2+b^2 - 2*a*b*cos(t))
theta = acos((a^2 + b^2 - c^2) / (2*a*b))
return(theta - t)
} #when I run it, it returns 0, which it should, because it compares theta to theta
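# Quick numeric checks (added as an illustration; the expected values follow from the formulas above)
hypothenuseLength(3, 4) # 5, the classic 3-4-5 right triangle
lawofCosines(3, 4, pi/2) # also ~5, since cos(pi/2) = 0 reduces it to the Pythagorean case
thetafromLengths(3, 4, 5) # ~pi/2 (1.5708), recovering the right angle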
|
/Functions for Graphics.R
|
no_license
|
skrgovic/CS121-Work
|
R
| false | false | 790 |
r
|
countOdds <- function(x) {
k <- 0
for (n in x) {
if (n%%2 == 1) k <- k+1
}
return(k)
}
countEvens <- function(x) {
k <- 0
for (n in x) {
if (n%%2 == 0) k <- k+1
}
return(k)
}
hypothenuseLength <- function(a, b) {
a > 0
b > 0
return(sqrt(a^2 + b^2))
}
lawofCosines <- function(a,b,t) {
a > 0
b > 0
t > 0 & t < 2*pi
return(sqrt(a^2+b^2 - 2*a*b*cos(t)))
}
thetafromLengths = function(a,b,c) {
a > 0
b > 0
c > 0
return(acos((a^2 + b^2 - c^2) / (2*a*b)))
}
thetaFromLengthsTest = function(a,b,t) {
a > 0
b > 0
t > 0 & t < 2*pi
c <- sqrt(a^2+b^2 - 2*a*b*cos(t))
theta = acos((a^2 + b^2 - c^2) / (2*a*b))
return(theta - t)
} #when I run it, it returns 0, which it should, because it compares theta to theta
|
##' reroot a tree
##'
##'
##' @rdname reroot-methods
##' @exportMethod reroot
setMethod("reroot", signature(object="phylo"),
function(object, node, ...) {
pos <- 0.5* object$edge.length[which(object$edge[,2] == node)]
## @importFrom phytools reroot
phytools <- "phytools"
require(phytools, character.only = TRUE)
phytools_reroot <- eval(parse(text="phytools::reroot"))
tree <- phytools_reroot(object, node, pos)
attr(tree, "reroot") <- TRUE
node_map <- reroot_node_mapping(object, tree)
attr(tree, "node_map") <- node_map
return(tree)
})
##' @rdname reroot-methods
##' @exportMethod reroot
setMethod("reroot", signature(object="treedata"),
function(object, node, ...) {
# warning message
message("The use of this method may cause some node data to become incorrect (e.g. bootstrap values).")
newobject <- object
# ensure nodes/tips have a label to properly map @anc_seq/@tip_seq
tree <- object@phylo
if (is.null(tree$tip.label)) {
tree$tip.label <- as.character(1:Ntip(tree))
}
if (is.null(tree$node.label)) {
tree$node.label <- as.character((1:tree$Nnode) + Ntip(tree))
}
# reroot tree
tree <- reroot(tree, node, ...)
newobject@phylo <- tree
# update node numbers in data
n.tips <- Ntip(tree)
node_map<- attr(tree, "node_map")
update_data <- function(data, node_map) {
newdata <- data
newdata[match(node_map$from, data$node), 'node'] <- node_map$to
# clear root data
root <- newdata$node == (n.tips + 1)
newdata[root,] <- NA
newdata[root,'node'] <- n.tips + 1
return(newdata)
}
if (nrow(newobject@data) > 0) {
newobject@data <- update_data(object@data, node_map)
}
if (nrow(object@extraInfo) > 0) {
newobject@extraInfo <- update_data(object@extraInfo, node_map)
}
return(newobject)
})
|
/R/method-reroot.R
|
no_license
|
lzh93/ggtree
|
R
| false | false | 2,366 |
r
|
##' reroot a tree
##'
##'
##' @rdname reroot-methods
##' @exportMethod reroot
setMethod("reroot", signature(object="phylo"),
function(object, node, ...) {
pos <- 0.5* object$edge.length[which(object$edge[,2] == node)]
## @importFrom phytools reroot
phytools <- "phytools"
require(phytools, character.only = TRUE)
phytools_reroot <- eval(parse(text="phytools::reroot"))
tree <- phytools_reroot(object, node, pos)
attr(tree, "reroot") <- TRUE
node_map <- reroot_node_mapping(object, tree)
attr(tree, "node_map") <- node_map
return(tree)
})
##' @rdname reroot-methods
##' @exportMethod reroot
setMethod("reroot", signature(object="treedata"),
function(object, node, ...) {
# warning message
message("The use of this method may cause some node data to become incorrect (e.g. bootstrap values).")
newobject <- object
# ensure nodes/tips have a label to properly map @anc_seq/@tip_seq
tree <- object@phylo
if (is.null(tree$tip.label)) {
tree$tip.label <- as.character(1:Ntip(tree))
}
if (is.null(tree$node.label)) {
tree$node.label <- as.character((1:tree$Nnode) + Ntip(tree))
}
# reroot tree
tree <- reroot(tree, node, ...)
newobject@phylo <- tree
# update node numbers in data
n.tips <- Ntip(tree)
node_map<- attr(tree, "node_map")
update_data <- function(data, node_map) {
newdata <- data
newdata[match(node_map$from, data$node), 'node'] <- node_map$to
# clear root data
root <- newdata$node == (n.tips + 1)
newdata[root,] <- NA
newdata[root,'node'] <- n.tips + 1
return(newdata)
}
if (nrow(newobject@data) > 0) {
newobject@data <- update_data(object@data, node_map)
}
if (nrow(object@extraInfo) > 0) {
newobject@extraInfo <- update_data(object@extraInfo, node_map)
}
return(newobject)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_lineup_data.R
\name{make_null_dat}
\alias{make_null_dat}
\title{Function to permute y column in dataframe}
\usage{
make_null_dat(dat, xname = "x", yname = "y", bootstrap = FALSE,
jitter = 0)
}
\arguments{
\item{dat}{dataframe}
\item{xname}{column name from dat for x-dimension in plots (defaults to "x")}
\item{yname}{column name from dat for y-dimension in plots (defaults to "y")}
\item{bootstrap}{logical indicator of whether bootstrapping should be used following permutation}
\item{jitter}{amount of jitter to add to point locations - units of sds}
}
\value{
dataframe with columns x, permy and type
}
\description{
Function to permute y column in dataframe
}
\examples{
dat=data.frame(x=rnorm(5))
dat$y=dat$x+rnorm(5)
null_dat <- make_null_dat(dat)
null_dat <- make_null_dat(dat, bootstrap=TRUE)
null_dat <- make_null_dat(dat, bootstrap=TRUE, jitter=1)
}
|
/man/make_null_dat.Rd
|
no_license
|
kmaurer/teaTasteR
|
R
| false | true | 939 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_lineup_data.R
\name{make_null_dat}
\alias{make_null_dat}
\title{Function to permute y column in dataframe}
\usage{
make_null_dat(dat, xname = "x", yname = "y", bootstrap = FALSE,
jitter = 0)
}
\arguments{
\item{dat}{dataframe}
\item{xname}{column name from dat for x-dimension in plots (defaults to "x")}
\item{yname}{column name from dat for y-dimension in plots (defaults to "y")}
\item{bootstrap}{logical indicator of whether bootstrapping should be used following permutation}
\item{jitter}{amount of jitter to add to point locations - units of sds}
}
\value{
dataframe with columns x, permy and type
}
\description{
Function to permute y column in dataframe
}
\examples{
dat=data.frame(x=rnorm(5))
dat$y=dat$x+rnorm(5)
null_dat <- make_null_dat(dat)
null_dat <- make_null_dat(dat, bootstrap=TRUE)
null_dat <- make_null_dat(dat, bootstrap=TRUE, jitter=1)
}
|
#This came from Chris Mack on youtube--I added as many comments as possible for you to follow along
#Generalized Linear Model
#import data in a csv format
#sample data
y = c(4.26, 5.68, 7.24, 4.82, 6.95, 8.81, 8.04, 8.33, 10.84, 7.58, 9.96)
#x values spaced from 4-14
x = 4:14
x = seq(4,14,1)
#plot data
plot(y ~ x)
#linear regression model
model = lm(y ~ x)
summary (model)
#generalized linear model
model.glm = glm(y ~ x, family = gaussian(link = "identity"))
summary(model.glm)
#family default link
#binomial (link = "logit")
#gaussian (link = "identity")
#gamma (link = "inverse")
#inverse.gaussian (link = "1/mu^2")
#poisson (link = "log")
#quasi (link = "identity", variance = "constant")
#quasibinomial (link = "logit")
#quasipoisson (link = "log")
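#--- Added illustration (hedged): any family above can be paired with a non-default link by
#--- passing it explicitly; the toy data from the top of the script are refit with a gaussian
#--- family and log link purely to show the syntax.
model.glm.log = glm(y ~ x, family = gaussian(link = "log"))
summary(model.glm.log)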
#Example of logistic regression
#who survived the sinking of the Titanic
#https://www.r-bloggers.com/how-to-perform-a-logistic-regression-in-r/
install.packages("titanic")
library(titanic)
data.raw = titanic_train
#data has blanks....convert them to "NA"
data.raw[data.raw==""] <- NA
#check for missing values using the sapply() function
sapply(data.raw, function(x) sum(is.na(x)))
length(data.raw$Pclass) #Number of data points
levels(factor(data.raw$Sex)) #lists the distinct levels (categories) present in a column
#drop columns we aren't going to use
data <- subset(data.raw, select = c(2,3,5,6,7,8,10,12))
#perform logistic regression
#we are just determining if someone survived or not, hence the use of binomial(0 or 1)
#If you give glm() a column with only 2 possible outcomes, R will code it as 0 or 1 automatically.
model <- glm(Survived ~ Sex, family = binomial(link = 'logit'), data = data)
summary(model)
confint(model) #confidence intervals for the coefficients
#ratio for male and females
exp(model$coefficients)
#lines(x, m) # leftover from the earlier linear example; 'm' is never defined, so this line would error if run
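#--- Added example (hedged): convert the fitted log-odds into predicted survival probabilities;
#--- the newdata grid below is hypothetical, not part of the original script.
predict(model, newdata = data.frame(Sex = c("female", "male")), type = "response")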
|
/glm_example.r
|
no_license
|
raikon123/SampleCode
|
R
| false | false | 1,959 |
r
|
#This came from Chris Mack on youtube--I added as many comments as possible for you to follow along
#Generalized Linear Model
#import data in a csv format
#sample data
y = c(4.26, 5.68, 7.24, 4.82, 6.95, 8.81, 8.04, 8.33, 10.84, 7.58, 9.96)
#x values spaced from 4-14
x = 4:14
x = seq(4,14,1)
#plot data
plot(y ~ x)
#linear regression model
model = lm(y ~ x)
summary (model)
#generalized linear model
model.glm = glm(y ~ x, family = gaussian(link = "identity"))
summary(model.glm)
#family default link
#binomial (link = "logit")
#gaussian (link = "identity")
#gamma (link = "inverse")
#inverse.gaussian (link = "1/mu^2")
#poisson (link = "log")
#quasi (link = "identity", variance = "constant")
#quasibinomial (link = "logit")
#quasipoisson (link = "log")
#Example of logistic regression
#who survived the sinking of the Titanic
#https://www.r-bloggers.com/how-to-perform-a-logistic-regression-in-r/
install.packages("titanic")
library(titanic)
data.raw = titanic_train
#data has blanks....convert them to "NA"
data.raw[data.raw==""] <- NA
#check for missing values using the sapply() function
sapply(data.raw, function(x) sum(is.na(x)))
length(data.raw$Pclass) #Number of data points
levels(factor(data.raw$Sex)) #lists the distinct levels (categories) present in a column
#drop columns we aren't going to use
data <- subset(data.raw, select = c(2,3,5,6,7,8,10,12))
#perform logistic regression
#we are just determining if someone survived or not, hence the use of binomial(0 or 1)
#If you give glm() a column with only 2 possible outcomes, R will code it as 0 or 1 automatically.
model <- glm(Survived ~ Sex, family = binomial(link = 'logit'), data = data)
summary(model)
confint(model) #confidence intervals for the coefficients
#ratio for male and females
exp(model$coefficients)
#lines(x, m) # leftover from the earlier linear example; 'm' is never defined, so this line would error if run
|
`ll.toRCI.X` =
function(par0, yi, ind.lst, X, twosex, iphi, l.tau.r, l.tau.a){
if(twosex){
b0f = par0[1]
b0m = par0[2]
betas = par0[-(1:2)]
}else{
b0f = par0[1]
betas = par0[-1]
}
etas = rep(0, length(yi)) #AxA, fem AxA, mal, AxB, mal #3, 7, 5
etas[ind.lst[[4]]] = b0f #BxB, fem #4
etas[ind.lst[[1]]] = log1p(exp(l.tau.r[ind.lst[[1]]] + b0f)) + l.tau.a[ind.lst[[1]]]#AxB, fem #1
etas[ind.lst[[2]]] = log1p(exp(l.tau.r[ind.lst[[2]]] + b0f)) + l.tau.a[ind.lst[[2]]]#BxA, fem #2
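  # note (added): log1p(exp(z)) evaluates log(1 + exp(z)); the term is kept on the log scale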
if(twosex){
etas[ind.lst[[6]]] = b0m #BxA, mal #6
etas[ind.lst[[8]]] = b0m #BxB, mal #8
}
lmu = c(X%*%betas) + etas
logL = -loglikNB(iphi=iphi, lmu=lmu, y=yi)
return(logL)
}
|
/R/ll.toRCI.X.R
|
no_license
|
cran/rxSeq
|
R
| false | false | 1,047 |
r
|
`ll.toRCI.X` =
function(par0, yi, ind.lst, X, twosex, iphi, l.tau.r, l.tau.a){
if(twosex){
b0f = par0[1]
b0m = par0[2]
betas = par0[-(1:2)]
}else{
b0f = par0[1]
betas = par0[-1]
}
etas = rep(0, length(yi)) #AxA, fem AxA, mal, AxB, mal #3, 7, 5
etas[ind.lst[[4]]] = b0f #BxB, fem #4
etas[ind.lst[[1]]] = log1p(exp(l.tau.r[ind.lst[[1]]] + b0f)) + l.tau.a[ind.lst[[1]]]#AxB, fem #1
etas[ind.lst[[2]]] = log1p(exp(l.tau.r[ind.lst[[2]]] + b0f)) + l.tau.a[ind.lst[[2]]]#BxA, fem #2
if(twosex){
etas[ind.lst[[6]]] = b0m #BxA, mal #6
etas[ind.lst[[8]]] = b0m #BxB, mal #8
}
lmu = c(X%*%betas) + etas
logL = -loglikNB(iphi=iphi, lmu=lmu, y=yi)
return(logL)
}
|
################################### Set up and run in bayesian ###################################
# set WD to location where post.summ function exists
# session > set working directory > To project location
#load packages
library(R2OpenBUGS)
library(rjags)
library(coda)
source("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/Repro_analysis/post_summ_function.R")
#read and prepare data
#seamap reproductive analyses
repros <- read.csv("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/SEAMAP_2017/Reproductive_analysis/seamap_frozencrabs.csv")
head(repros)
sapply(repros, class)
#select by unique ID for one row per crab, top rows contain the mean egg diameters and mean egg volumes
repros <- repros[!duplicated(repros$Unique_ID),]
#write.csv(repros,"L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/SEAMAP_2017/Reproductive_analysis/seamap_onerow.csv")
eggs <- repros[which(repros$ovigerous == 'yes'),]
eggs$Estimated_fecundity <- as.numeric(as.character(eggs$Estimated_fecundity))
noeggs <- repros[which(repros$ovigerous == 'no'),]
eggs$Estimated_fecundity <- eggs$Estimated_fecundity/1e+06
eggs[which(eggs$Estimated_fecundity < 2),]
#only using eggs for any fecundity analysis
dat <- eggs
head(dat); tail(dat)
# start with fecundity and carapace width
#check classes
sapply(dat, class)
plot(Estimated_fecundity ~ Carapace_width, data = dat)
pred.x <- seq(min(dat$Carapace_width), max(dat$Carapace_width), length = 30)
########################## Making Jags List ################################
# compile data into a list to pass to BUGS
jags.dat = list(n.obs = nrow(dat), y = dat$Estimated_fecundity, x1 = dat$Carapace_width,
pred.x = pred.x, n.pred = length(pred.x))
##### SPECIFY MODEL CODE #####
##### MODEL 1 ALL SEAMAP CRABS #####
mod = function() {
# PRIORS
b0 ~ dnorm(0,.001) #small precision = big variance
b1 ~ dnorm(0,.001)
sig ~ dunif(0,10)
tau <- 1/(sig^2) #precision
# LIKELIHOOD
for (i in 1:n.obs) {
y[i] ~ dnorm(y.hat[i], tau)
    y.hat[i] <- b0 + b1 * x1[i] #x1 is carapace width in the data list
resid[i] <- y[i] - y.hat[i]
}
for (i in 1:n.pred) {
pred.y[i] <- b0 + b1 * pred.x[i]
}
#Posterior Predictive check
for (i in 1:n.obs) {
# generate data under the model
y.new[i] ~ dnorm(y.hat[i], tau)
# use pearson's residuals
obs.resid[i] <- (y[i] - y.hat[i])/sig
new.resid[i] <- (y.new[i] - y.hat[i])/sig
# calculate squared residuals
D.obs[i] <- pow(obs.resid[i], 2)
D.new[i] <- pow(new.resid[i], 2)
}
# calculate deviations
fit.obs <- sum(D.obs[])
fit.new <- sum(D.new[])
# "Bayesian P-value"
bp <- step(fit.obs - fit.new)
}
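# Note (added): bp is the posterior predictive ("Bayesian") p-value -- the posterior probability
# that the squared-residual discrepancy of the observed data exceeds that of data simulated from
# the fitted model; values near 0.5 suggest adequate fit, values near 0 or 1 suggest lack of fit.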
# write model to a text file
model.file = "model.txt"
R2WinBUGS::write.model(mod, model.file)
##### INITIAL VALUES #####
inits1 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits2 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits = list(inits1, inits2)
##### PARAMETERS TO MONITOR #####
params = c("b0", "b1", "sig","resid",'pred.y', "y.hat","bp","fit.obs","fit.new")
##### MCMC DIMENSIONS #####
ni = 15000 # number of post-burn-in samples per chain
nb = 5000 # number of burn-in samples
nt = 1 # thinning rate
nc = 2 # number of chains
##### RUN THE MODEL IN BUGS #####
starttime = Sys.time()
jmod = jags.model(file = model.file, data = jags.dat, n.chains = nc, inits = inits, n.adapt = 1000)
update(jmod, n.iter = nb, by = 1, progress.bar = 'text')
mod1_dic <- dic.samples(jmod, n.iter = ni, thin = nt)
post = coda.samples(jmod, params, n.iter = ni, thin = nt)
Sys.time() - starttime
##### CONVERGENCE DIAGNOSTIC #####
# view BGR convergence diagnostic
#gelman.diag(post, multivariate = F)
# visualize trace and posterior plots
#windows(record = T)
#plot(post)
# plot(post[,'b0'])
# plot(post[,'b1'])
##### MAKE INFERENCE #####
b0.est = post.summ(post, "b0"); b0.est
b1.est = post.summ(post, "b1"); b1.est
b0.samps = post.summ(post, "b0")['mean']
b1.samps = post.summ(post, "b1")['mean']
bp.est = post.summ(post, "bp")
# plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)', ylab = 'Estimated Fecundity')
# for (i in 1:20) {
# abline(c(b0.samps[i], b1.samps[i]), col = "grey")
# }
#abline(c(b0.est[1], b1.est[1]), lwd = 2, lty = 2, col = "blue")
## CONFIDENCE INTERVALS: don't run until we add prediction code ####
pred.y = post.summ(post, "pred.y[")
pred.y
dev.off()
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/linear_glm.png")
plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
ylab = 'Estimated Fecundity (millions)', las = 1)
lines(pred.y["mean",] ~ jags.dat$pred.x, lty = 2, col = "blue", lwd = 2)
lines(pred.y["2.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
lines(pred.y["97.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
dev.off()
#points(Estimated_fecundity ~Standard_cw, data = dat, col = 'black')
#residuals
resi <- post.summ(post, "resid[")
y.hat = post.summ(post,"y.hat[")
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/linear_glm_resids.png")
plot(resi[1,] ~ y.hat[1,], type = 'h', ylab = "Residual Values", las = 1) #want them spread above and below evenly
abline(h = 0, col = 'blue')
dev.off()
hist(resi[1,], main = 'Residual Frequencies', xlab = "Residual Values")
fit.obs <- post.summ(post, "fit.obs")
fit.est <- post.summ(post, "fit.new")
############################### Model 2 log(fecundity) ############################
#try non-linear
# par(mfrow = c(1,2))
# plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
# ylab = 'Estimated Fecundity (1e+06)', main = 'Fecundity ~ Carapace Width (mm)')
#
# plot(log(Estimated_fecundity) ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
# ylab = 'Log(Fecundity) (1e+06)', main = 'log(Fecundity) ~ Carapace Width (mm)')
#
# dev.off()
#for the credibility intervals
pred.x <- seq(min(dat$Carapace_width), max(dat$Carapace_width), length = 30)
# compile data into a list to pass to BUGS
jags.dat = list(n.obs = nrow(dat), y = dat$Estimated_fecundity,x1 = dat$Carapace_width,
pred.x = pred.x, n.pred = length(pred.x))
##### MODEL 2 ALL SEAMAP CRABS, log-fecundity #####
##### SPECIFY MODEL CODE #####
mod = function() {
# PRIORS
b0 ~ dnorm(0,.001) #small precision = big variance
b1 ~ dnorm(0,.001)
sig ~ dunif(0,10)
tau <- 1/(sig^2)
# LIKELIHOOD
for (i in 1:n.obs) {
y[i] ~ dlnorm(log(y.hat[i]), tau)
    log(y.hat[i]) <- b0 + b1 * x1[i] #x1 is the carapace width data
resid[i] <- y[i] - y.hat[i]
}
for (i in 1:n.pred) {
log(pred.y[i]) <- b0 + b1 * pred.x[i]
}
#Posterior Predictive check
for (i in 1:n.obs) {
# generate data under the model
y.new[i] ~ dlnorm(log(y.hat[i]), tau)
# use pearson's residuals
obs.resid[i] <- (y[i] - y.hat[i])/sig
new.resid[i] <- (y.new[i] - y.hat[i])/sig
# calculate squared residuals
D.obs[i] <- pow(obs.resid[i], 2)
D.new[i] <- pow(new.resid[i], 2)
}
# calculate deviations
fit.obs <- sum(D.obs[])
fit.new <- sum(D.new[])
# "Bayesian P-value"
bp <- step(fit.obs - fit.new)
}
# write model to a text file
model.file = "model.txt"
R2WinBUGS::write.model(mod, model.file)
##### INITIAL VALUES #####
inits1 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits2 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits = list(inits1, inits2)
##### PARAMETERS TO MONITOR #####
params = c("b0", "b1", "sig","resid",'pred.y', "y.hat","bp")
##### MCMC DIMENSIONS #####
ni = 25000 # number of post-burn-in samples per chain
nb = 10000 # number of burn-in samples
nt = 1 # thinning rate
nc = 2 # number of chains
##### RUN THE MODEL IN BUGS #####
starttime = Sys.time()
jmod = jags.model(file = model.file, data = jags.dat, n.chains = nc, inits = inits, n.adapt = 1000)
update(jmod, n.iter = nb, by = 1, progress.bar = 'text')
mod2_dic <- dic.samples(jmod, n.iter = ni, thin = nt)
post = coda.samples(jmod, params, n.iter = ni, thin = nt)
Sys.time() - starttime
source("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/GAM_plays/Zuuretal_2016_ZIM_AllData/MCMCSupportHighstatV4.R")
##### CONVERGENCE DIAGNOSTIC #####
# view BGR convergence diagnostic
#gelman.diag(post, multivariate = F)
# visualize trace and posterior plots
#plot(post)
#plot(post[,'b0'])
#plot(post[,'b1'])
b0.est = post.summ(post, "b0"); b0.est
b1.est = post.summ(post, "b1"); b1.est
bp.est = post.summ(post, "bp"); bp.est
## CONFIDENCE INTERVALS: don't run until we add prediction code ####
pred.y = post.summ(post, "pred.y[")
pred.y
#dev.off()
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/log_glm.png")
plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
ylab = 'Estimated Fecundity (millions)', las = 1)
lines(pred.y["mean",] ~ jags.dat$pred.x, lty = 2, col = "blue", lwd = 2)
lines(pred.y["2.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
lines(pred.y["97.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
dev.off()
#points(Estimated_fecundity ~Standard_cw, data = dat, col = 'black')
#residuals, something is off with residuals, log transformation makes them all positive
resi <- post.summ(post, "resid[")
y.hat = post.summ(post,"y.hat[")
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/log_glm_resids.png")
plot(resi[1,] ~ y.hat[1,], type = 'h', ylab = "Residual Values", las = 1) #want them spread above and below evenly
abline(h = 0, col = 'blue')
dev.off()
plot(resi[1,] ~ y.hat[1,])
#hist(resi[1,], main = 'Residual Frequencies', xlab = "Residual Values")
#################### model 3 bayesian GLM, gamma family, log link ##################################
##### SPECIFY MODEL CODE #####
#
# #reparameterization by mean and sd
# mod = function() {
# # PRIORS
# b0 ~ dnorm(0,.001) #small precision = big variance
# b1 ~ dnorm(0,.001)
#
# # parameterized by mean (mu) and standard deviation (sd)
# sh <- pow(mu,2) / pow(sd,2)
# 	  ra <- mu/pow(sd,2)
# mu ~ dunif(0,100)
# sd ~ dunif(0,100)
#
# # likelihood
# for(i in 1:n.obs) {
# y[i] ~ dgamma(y.hat[i], ra)
# y.hat[i] <- exp(b0 + b1 * x1[i])
# resid <- y[i] - y.hat[i]
# }
#
# # calculate residuals
# for (i in 1:n.pred) {
# pred.y[i] <- exp(b0 + b1 * pred.x[i])
#
# }
#
#
# }
#
# # write model to a text file
# model.file = "model.txt"
# write.model(mod, model.file)
#
# ##### INITIAL VALUES #####
# inits1 = list(b0 = rnorm(1), b1 = rnorm(1), mu = rnorm(1), sd = rlnorm(1))
# inits2 = list(b0 = rnorm(1), b1 = rnorm(1), mu = rnorm(1), sd = rlnorm(1))
# inits = list(inits1, inits2)
#
# ##### PARAMETERS TO MONITOR #####
# params = c("b0", "b1", "mu", "sd","resid",'pred.y', "y.hat")
#
# ##### MCMC DIMENSIONS #####
# ni = 30000 # number of post-burn-in samples per chain
# nb = 15000 # number of burn-in samples
# nt = 1 # thinning rate
# nc = 2 # number of chains
#
# ##### RUN THE MODEL IN BUGS #####
# starttime = Sys.time()
# jmod = jags.model(file = model.file, data = jags.dat, n.chains = nc, inits = inits, n.adapt = 1000)
# update(jmod, n.iter = nb, by = 1, progress.bar = 'text')
# post = coda.samples(jmod, params, n.iter = ni, thin = nt)
# Sys.time() - starttime
#
# #### look at convergence ####
# # view BGR convergence diagnostic
# gelman.diag(post, multivariate = F)
#
# # visualize trace and posterior plots
# #plot(post)
# #plot(post[,'b0'])
# #plot(post[,'b1'])
#
# pred.y = post.summ(post, "pred.y[")
# pred.y
# #dev.off()
# plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
# ylab = 'Fecundity (1e+06)', main = 'Gamma log-link')
# lines(pred.y["mean",] ~ jags.dat$pred.x, lty = 2, col = "blue", lwd = 2)
# lines(pred.y["2.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
# lines(pred.y["97.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
# #points(Estimated_fecundity ~Standard_cw, data = dat, col = 'black')
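## Added sketch (illustrative values only): the commented-out gamma model above uses the
## mean/sd parameterisation shape = mu^2/sd^2, rate = mu/sd^2; a quick moment-matching check:
mu_chk <- 10; sd_chk <- 3
samp <- rgamma(1e5, shape = mu_chk^2/sd_chk^2, rate = mu_chk/sd_chk^2)
c(mean = mean(samp), sd = sd(samp))  # should be close to c(10, 3)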
######################## Try a glm for it ############
#### model selection ####
#Carapace Width
#gaussian
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = gaussian(link = 'log'))
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = gaussian(link = 'identity'))
#Gamma
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = Gamma(link = 'identity'))
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = Gamma(link = 'log'))
#best model
cw_mod <- glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = Gamma(link = 'log'))
#coefficients, Confidence intervals
coef(cw_mod);confint.default(cw_mod)
#plot best model
range(dat$Carapace_width) #get range of x values
xcw <- seq(120, 200, 0.1)
ypred <- predict(cw_mod, list(Carapace_width = xcw),type="response")
low_pred <- ypred- (sd(dat$Carapace_width))/sqrt(length(dat$Carapace_width))
high_pred <- ypred+ (sd(dat$Carapace_width))/sqrt(length(dat$Carapace_width))
plot(Estimated_fecundity ~ Carapace_width, data = dat, main = 'GLM Gamma log link', ylab = 'Fecundity (10e6)')
lines(xcw,ypred)
lines(xcw,low_pred)
lines(xcw,high_pred)
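## Alternative interval sketch (added; assumes a Wald-type band on the link scale is acceptable):
## predict() with se.fit gives model-based standard errors instead of the crude SE used above.
pr <- predict(cw_mod, list(Carapace_width = xcw), type = "link", se.fit = TRUE)
ci_low  <- exp(pr$fit - 1.96 * pr$se.fit)
ci_high <- exp(pr$fit + 1.96 * pr$se.fit)
# lines(xcw, ci_low, lty = 3); lines(xcw, ci_high, lty = 3)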
#standard carapace width
#gaussian
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = gaussian(link = 'log'))
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = gaussian(link = 'identity'))
#Gamma
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = Gamma(link = 'identity'))
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = Gamma(link = 'log'))
sw_mod <- glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = Gamma(link = 'log'))
#plot models
range(dat$Standard_cw)
xsw <- seq(90,150,0.1)
ypred.sw <- predict(sw_mod, list(Standard_cw = xsw), type = 'response')
low_pred_sw <- ypred.sw - (sd(dat$Standard_cw))/sqrt(length(dat$Standard_cw))
high_pred_sw <- ypred.sw + (sd(dat$Standard_cw))/sqrt(length(dat$Standard_cw))
plot(Estimated_fecundity ~ Standard_cw, data = dat, main = 'Fecundity ~ Standard Width', las = 1,
xlab = "Standard Carapace Width", ylab = "Estimated Fecundity (millions)")
lines(xsw,ypred.sw)
lines(xsw,low_pred_sw)
lines(xsw,high_pred_sw)
|
/2019_edits/R/fecundity_analyses/bayesian_fecundity.R
|
no_license
|
adamkemberling/Seamap_offshore_modeling
|
R
| false | false | 14,909 |
r
|
################################### Set up and run in bayesian ###################################
# set WD to location where post.summ function exists
# session > set working directory > To project location
#load packages
library(R2OpenBUGS)
library(rjags)
library(coda)
source("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/Repro_analysis/post_summ_function.R")
#read and prepare data
#seamap reproductive analyses
repros <- read.csv("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/SEAMAP_2017/Reproductive_analysis/seamap_frozencrabs.csv")
head(repros)
sapply(repros, class)
#select by unique ID for one row per crab, top rows contain the mean egg diameters and mean egg volumes
repros <- repros[!duplicated(repros$Unique_ID),]
#write.csv(repros,"L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/SEAMAP_2017/Reproductive_analysis/seamap_onerow.csv")
eggs <- repros[which(repros$ovigerous == 'yes'),]
eggs$Estimated_fecundity <- as.numeric(as.character(eggs$Estimated_fecundity))
noeggs <- repros[which(repros$ovigerous == 'no'),]
eggs$Estimated_fecundity <- eggs$Estimated_fecundity/1e+06
eggs[which(eggs$Estimated_fecundity < 2),]
#only using eggs for any fecundity analysis
dat <- eggs
head(dat); tail(dat)
# start with fecundity and carapace width
#check classes
sapply(dat, class)
plot(Estimated_fecundity ~ Carapace_width, data = dat)
pred.x <- seq(min(dat$Carapace_width), max(dat$Carapace_width), length = 30)
########################## Making Jags List ################################
# compile data into a list to pass to BUGS
jags.dat = list(n.obs = nrow(dat), y = dat$Estimated_fecundity, x1 = dat$Carapace_width,
pred.x = pred.x, n.pred = length(pred.x))
##### SPECIFY MODEL CODE #####
##### MODEL 1 ALL SEAMAP CRABS #####
mod = function() {
# PRIORS
b0 ~ dnorm(0,.001) #small precision = big variance
b1 ~ dnorm(0,.001)
sig ~ dunif(0,10)
tau <- 1/(sig^2) #precision
# LIKELIHOOD
for (i in 1:n.obs) {
y[i] ~ dnorm(y.hat[i], tau)
    y.hat[i] <- b0 + b1 * x1[i] #x1 is the carapace width data
resid[i] <- y[i] - y.hat[i]
}
for (i in 1:n.pred) {
pred.y[i] <- b0 + b1 * pred.x[i]
}
#Posterior Predictive check
for (i in 1:n.obs) {
# generate data under the model
y.new[i] ~ dnorm(y.hat[i], tau)
# use pearson's residuals
obs.resid[i] <- (y[i] - y.hat[i])/sig
new.resid[i] <- (y.new[i] - y.hat[i])/sig
# calculate squared residuals
D.obs[i] <- pow(obs.resid[i], 2)
D.new[i] <- pow(new.resid[i], 2)
}
# calculate deviations
fit.obs <- sum(D.obs[])
fit.new <- sum(D.new[])
# "Bayesian P-value"
bp <- step(fit.obs - fit.new)
}
# write model to a text file
model.file = "model.txt"
R2WinBUGS::write.model(mod, model.file)
##### INITIAL VALUES #####
inits1 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits2 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits = list(inits1, inits2)
##### PARAMETERS TO MONITOR #####
params = c("b0", "b1", "sig","resid",'pred.y', "y.hat","bp","fit.obs","fit.new")
##### MCMC DIMENSIONS #####
ni = 15000 # number of post-burn-in samples per chain
nb = 5000 # number of burn-in samples
nt = 1 # thinning rate
nc = 2 # number of chains
##### RUN THE MODEL IN BUGS #####
starttime = Sys.time()
jmod = jags.model(file = model.file, data = jags.dat, n.chains = nc, inits = inits, n.adapt = 1000)
update(jmod, n.iter = nb, by = 1, progress.bar = 'text')
mod1_dic <- dic.samples(jmod, n.iter = ni, thin = nt)
post = coda.samples(jmod, params, n.iter = ni, thin = nt)
Sys.time() - starttime
##### CONVERGENCE DIAGNOSTIC #####
# view BGR convergence diagnostic
#gelman.diag(post, multivariate = F)
# visualize trace and posterior plots
#windows(record = T)
#plot(post)
# plot(post[,'b0'])
# plot(post[,'b1'])
##### MAKE INFERENCE #####
b0.est = post.summ(post, "b0"); b0.est
b1.est = post.summ(post, "b1"); b1.est
b0.samps = post.summ(post, "b0")['mean']
b1.samps = post.summ(post, "b1")['mean']
bp.est = post.summ(post, "bp")
# plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)', ylab = 'Estimated Fecundity')
# for (i in 1:20) {
# abline(c(b0.samps[i], b1.samps[i]), col = "grey")
# }
#abline(c(b0.est[1], b1.est[1]), lwd = 2, lty = 2, col = "blue")
## CONFIDENCE INTERVALS: don't run until we add prediction code ####
pred.y = post.summ(post, "pred.y[")
pred.y
dev.off()
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/linear_glm.png")
plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
ylab = 'Estimated Fecundity (millions)', las = 1)
lines(pred.y["mean",] ~ jags.dat$pred.x, lty = 2, col = "blue", lwd = 2)
lines(pred.y["2.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
lines(pred.y["97.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
dev.off()
#points(Estimated_fecundity ~Standard_cw, data = dat, col = 'black')
#residuals
resi <- post.summ(post, "resid[")
y.hat = post.summ(post,"y.hat[")
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/linear_glm_resids.png")
plot(resi[1,] ~ y.hat[1,], type = 'h', ylab = "Residual Values", las = 1) #want them spread above and below evenly
abline(h = 0, col = 'blue')
dev.off()
hist(resi[1,], main = 'Residual Frequencies', xlab = "Residual Values")
fit.obs <- post.summ(post, "fit.obs")
fit.est <- post.summ(post, "fit.new")
############################### Model 2 log(fecundity) ############################
#try non-linear
# par(mfrow = c(1,2))
# plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
# ylab = 'Estimated Fecundity (1e+06)', main = 'Fecundity ~ Carapace Width (mm)')
#
# plot(log(Estimated_fecundity) ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
# ylab = 'Log(Fecundity) (1e+06)', main = 'log(Fecundity) ~ Carapace Width (mm)')
#
# dev.off()
#for the credibility intervals
pred.x <- seq(min(dat$Carapace_width), max(dat$Carapace_width), length = 30)
# compile data into a list to pass to BUGS
jags.dat = list(n.obs = nrow(dat), y = dat$Estimated_fecundity,x1 = dat$Carapace_width,
pred.x = pred.x, n.pred = length(pred.x))
##### MODEL 2 ALL SEAMAP CRABS, log-fecundity #####
##### SPECIFY MODEL CODE #####
mod = function() {
# PRIORS
b0 ~ dnorm(0,.001) #small precision = big variance
b1 ~ dnorm(0,.001)
sig ~ dunif(0,10)
tau <- 1/(sig^2)
# LIKELIHOOD
for (i in 1:n.obs) {
y[i] ~ dlnorm(log(y.hat[i]), tau)
    log(y.hat[i]) <- b0 + b1 * x1[i] #x1 is the carapace width data
resid[i] <- y[i] - y.hat[i]
}
for (i in 1:n.pred) {
log(pred.y[i]) <- b0 + b1 * pred.x[i]
}
#Posterior Predictive check
for (i in 1:n.obs) {
# generate data under the model
y.new[i] ~ dlnorm(log(y.hat[i]), tau)
# use pearson's residuals
obs.resid[i] <- (y[i] - y.hat[i])/sig
new.resid[i] <- (y.new[i] - y.hat[i])/sig
# calculate squared residuals
D.obs[i] <- pow(obs.resid[i], 2)
D.new[i] <- pow(new.resid[i], 2)
}
# calculate deviations
fit.obs <- sum(D.obs[])
fit.new <- sum(D.new[])
# "Bayesian P-value"
bp <- step(fit.obs - fit.new)
}
# write model to a text file
model.file = "model.txt"
R2WinBUGS::write.model(mod, model.file)
##### INITIAL VALUES #####
inits1 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits2 = list(b0 = rnorm(1), b1 = rnorm(1), sig = rlnorm(1))
inits = list(inits1, inits2)
##### PARAMETERS TO MONITOR #####
params = c("b0", "b1", "sig","resid",'pred.y', "y.hat","bp")
##### MCMC DIMENSIONS #####
ni = 25000 # number of post-burn-in samples per chain
nb = 10000 # number of burn-in samples
nt = 1 # thinning rate
nc = 2 # number of chains
##### RUN THE MODEL IN BUGS #####
starttime = Sys.time()
jmod = jags.model(file = model.file, data = jags.dat, n.chains = nc, inits = inits, n.adapt = 1000)
update(jmod, n.iter = nb, by = 1, progress.bar = 'text')
mod2_dic <- dic.samples(jmod, n.iter = ni, thin = nt)
post = coda.samples(jmod, params, n.iter = ni, thin = nt)
Sys.time() - starttime
source("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/GAM_plays/Zuuretal_2016_ZIM_AllData/MCMCSupportHighstatV4.R")
##### CONVERGENCE DIAGNOSTIC #####
# view BGR convergence diagnostic
#gelman.diag(post, multivariate = F)
# visualize trace and posterior plots
#plot(post)
#plot(post[,'b0'])
#plot(post[,'b1'])
b0.est = post.summ(post, "b0"); b0.est
b1.est = post.summ(post, "b1"); b1.est
bp.est = post.summ(post, "bp"); bp.est
## CONFIDENCE INTERVALS: don't run until we add prediction code ####
pred.y = post.summ(post, "pred.y[")
pred.y
#dev.off()
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/log_glm.png")
plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
ylab = 'Estimated Fecundity (millions)', las = 1)
lines(pred.y["mean",] ~ jags.dat$pred.x, lty = 2, col = "blue", lwd = 2)
lines(pred.y["2.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
lines(pred.y["97.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
dev.off()
#points(Estimated_fecundity ~Standard_cw, data = dat, col = 'black')
#residuals, something is off with residuals, log transformation makes them all positive
resi <- post.summ(post, "resid[")
y.hat = post.summ(post,"y.hat[")
png("L:/Dropbox (The Craboratory)/The Craboratory/Kemberling/Rwork/reproductive_analysis/log_glm_resids.png")
plot(resi[1,] ~ y.hat[1,], type = 'h', ylab = "Residual Values", las = 1) #want them spread above and below evenly
abline(h = 0, col = 'blue')
dev.off()
plot(resi[1,] ~ y.hat[1,])
#hist(resi[1,], main = 'Residual Frequencies', xlab = "Residual Values")
#################### model 3 bayesian GLM, gamma family, log link ##################################
##### SPECIFY MODEL CODE #####
#
# #reparameterization by mean and sd
# mod = function() {
# # PRIORS
# b0 ~ dnorm(0,.001) #small precision = big variance
# b1 ~ dnorm(0,.001)
#
# # parameterized by mean (mu) and standard deviation (sd)
# sh <- pow(mu,2) / pow(sd,2)
# 	  ra <- mu/pow(sd,2)
# mu ~ dunif(0,100)
# sd ~ dunif(0,100)
#
# # likelihood
# for(i in 1:n.obs) {
# y[i] ~ dgamma(y.hat[i], ra)
# y.hat[i] <- exp(b0 + b1 * x1[i])
# resid <- y[i] - y.hat[i]
# }
#
# # calculate residuals
# for (i in 1:n.pred) {
# pred.y[i] <- exp(b0 + b1 * pred.x[i])
#
# }
#
#
# }
#
# # write model to a text file
# model.file = "model.txt"
# write.model(mod, model.file)
#
# ##### INITIAL VALUES #####
# inits1 = list(b0 = rnorm(1), b1 = rnorm(1), mu = rnorm(1), sd = rlnorm(1))
# inits2 = list(b0 = rnorm(1), b1 = rnorm(1), mu = rnorm(1), sd = rlnorm(1))
# inits = list(inits1, inits2)
#
# ##### PARAMETERS TO MONITOR #####
# params = c("b0", "b1", "mu", "sd","resid",'pred.y', "y.hat")
#
# ##### MCMC DIMENSIONS #####
# ni = 30000 # number of post-burn-in samples per chain
# nb = 15000 # number of burn-in samples
# nt = 1 # thinning rate
# nc = 2 # number of chains
#
# ##### RUN THE MODEL IN BUGS #####
# starttime = Sys.time()
# jmod = jags.model(file = model.file, data = jags.dat, n.chains = nc, inits = inits, n.adapt = 1000)
# update(jmod, n.iter = nb, by = 1, progress.bar = 'text')
# post = coda.samples(jmod, params, n.iter = ni, thin = nt)
# Sys.time() - starttime
#
# #### look at convergence ####
# # view BGR convergence diagnostic
# gelman.diag(post, multivariate = F)
#
# # visualize trace and posterior plots
# #plot(post)
# #plot(post[,'b0'])
# #plot(post[,'b1'])
#
# pred.y = post.summ(post, "pred.y[")
# pred.y
# #dev.off()
# plot(Estimated_fecundity ~ Carapace_width, data = dat, xlab = 'Carapace Width (mm)',
# ylab = 'Fecundity (1e+06)', main = 'Gamma log-link')
# lines(pred.y["mean",] ~ jags.dat$pred.x, lty = 2, col = "blue", lwd = 2)
# lines(pred.y["2.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
# lines(pred.y["97.5%",] ~ jags.dat$pred.x, lty = 2, col = "grey", lwd = 2)
# #points(Estimated_fecundity ~Standard_cw, data = dat, col = 'black')
######################## Try a glm for it ############
#### model selection ####
#Carapace Width
#gaussian
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = gaussian(link = 'log'))
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = gaussian(link = 'identity'))
#Gamma
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = Gamma(link = 'identity'))
glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = Gamma(link = 'log'))
#best model
cw_mod <- glm(formula = Estimated_fecundity ~ Carapace_width, data = dat,family = Gamma(link = 'log'))
#coefficients, Confidence intervals
coef(cw_mod);confint.default(cw_mod)
#plot best model
range(dat$Carapace_width) #get range of x values
xcw <- seq(120, 200, 0.1)
ypred <- predict(cw_mod, list(Carapace_width = xcw),type="response")
low_pred <- ypred- (sd(dat$Carapace_width))/sqrt(length(dat$Carapace_width))
high_pred <- ypred+ (sd(dat$Carapace_width))/sqrt(length(dat$Carapace_width))
plot(Estimated_fecundity ~ Carapace_width, data = dat, main = 'GLM Gamma log link', ylab = 'Fecundity (10e6)')
lines(xcw,ypred)
lines(xcw,low_pred)
lines(xcw,high_pred)
#standard carapace width
#gaussian
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = gaussian(link = 'log'))
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = gaussian(link = 'identity'))
#Gamma
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = Gamma(link = 'identity'))
glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = Gamma(link = 'log'))
sw_mod <- glm(formula = Estimated_fecundity ~ Standard_cw, data = dat,family = Gamma(link = 'log'))
#plot models
range(dat$Standard_cw)
xsw <- seq(90,150,0.1)
ypred.sw <- predict(sw_mod, list(Standard_cw = xsw), type = 'response')
low_pred_sw <- ypred.sw - (sd(dat$Standard_cw))/sqrt(length(dat$Standard_cw))
high_pred_sw <- ypred.sw + (sd(dat$Standard_cw))/sqrt(length(dat$Standard_cw))
plot(Estimated_fecundity ~ Standard_cw, data = dat, main = 'Fecundity ~ Standard Width', las = 1,
xlab = "Standard Carapace Width", ylab = "Estimated Fecundity (millions)")
lines(xsw,ypred.sw)
lines(xsw,low_pred_sw)
lines(xsw,high_pred_sw)
|
## hyperLOPIT U2OS with endosomes
## assumed packages (not attached in the original script): pRoloc, pRolocdata, coda,
## clusterProfiler (with org.Hs.eg.db installed), reshape2, ggplot2; novelty-TAGM
## helpers such as tagmNoveltyProcess/tagmNoveltyPredict are assumed sourced elsewhere.
library(pRoloc)
library(pRolocdata)
library(coda)
library(clusterProfiler)
library(reshape2)
library(ggplot2)
setStockcol(paste0(getStockcol(), 90))
load("hlU2OSTagmaddMarkers_2.rda")
data(hyperLOPITU2OS2018)
load("endosomeMarkers.rda")
fData(hyperLOPITU2OS2018)$nt.markers <- fData(hyperLOPITU2OS2018)$markers
levels(fData(hyperLOPITU2OS2018)$nt.markers) <- factor(c(levels(fData(hyperLOPITU2OS2018)$markers), "ENDOSOME"),
levels = c("CHROMATIN","CYTOSOL","ER","GA","LYSOSOME","MITOCHONDRION","NUCLEUS",
"PEROXISOME","PM","PROTEASOME","RIBOSOME 40S","RIBOSOME 60S","ENDOSOME","unknown" ))
fData(hyperLOPITU2OS2018)[endosomeMarkers,]$nt.markers <- "ENDOSOME" #make sure levels are in right order!!
# check convergence
outliers <- mcmc_get_outliers(hlU2OSTagmaddMarkers_2)
u2osdiag <- gelman.diag(outliers[-c(1,4,6)]) # converged yay
u2ostagmNov_conv <- hlU2OSTagmaddMarkers_2[-c(1,4,6)]
u2osTagmNoveltyRes <- tagmNoveltyProcess(object = hyperLOPITU2OS2018, params = u2ostagmNov_conv, fcol = "nt.markers")
u2ostagmNov_conv <- tagmMcmcProcess(u2ostagmNov_conv)
u2ostagm <- tagmPredict(object = hyperLOPITU2OS2018, params = u2ostagmNov_conv, fcol = "nt.markers")
save(u2osTagmNoveltyRes, file = "u2osTagmNoveltyRes_endo.rda")
## attach information to MSnset
u2ostagm <- tagmNoveltyPredict(object = u2ostagm, params = u2osTagmNoveltyRes)
fData(u2ostagm)$tagm.mcmc.allocation[fData(u2ostagm)$tagm.mcmc.allocation == "endosome"] <- "ENDOSOME"
ptsze <- exp(fData(u2ostagm)$tagm.mcmc.probability) - 1.5
ptsze[fData(u2ostagm)$tagm.mcmc.outlier > 10^{-12}] <- 0.01
ptsze[fData(u2ostagm)$tagm.mcmc.allocation == "Phenotype 1"] <- 0.01
plot2D(u2ostagm, fcol = "tagm.mcmc.allocation", cex = ptsze, main = "PCA of U2OS hyperLOPIT data", cex.main = 2, grid = FALSE)
addLegend(u2ostagm, fcol = "tagm.mcmc.allocation", ncol = 5, where = "topleft", cex = 1)
cls <- getStockcol()[as.factor(fData(u2ostagm)$tagm.mcmc.allocation)]
plot(fData(u2ostagm)$tagm.mcmc.probability,
fData(u2ostagm)$tagm.mcmc.mean.shannon,
col = cls, pch = 19,
xlab = "Localisation probability",
ylab = "Shannon entropy")
plot(u2ostagmNov_conv, "A6NGN9")
unknownLoc <- rownames(u2ostagm)[(fData(u2ostagm)$tagm.mcmc.probability * (1 - fData(u2ostagm)$tagm.mcmc.outlier) < 0.99)]
mf1u2os_endo <- enrichGO(gene = unknownLoc,
OrgDb = "org.Hs.eg.db",
keyType = "UNIPROT",
ont = "MF",
universe = rownames(fData(u2ostagm)))
bp1u2os_endo <- enrichGO(gene = unknownLoc,
OrgDb = "org.Hs.eg.db",
keyType = "UNIPROT",
ont = "BP",
universe = rownames(fData(u2ostagm)))
tempNames <- rownames(fData(u2ostagm)[fData(u2ostagm)$tagm.mcmc.allocation == "endosome",])[order(fData(u2ostagm)$tagm.mcmc.mean.shannon[fData(u2ostagm)$tagm.mcmc.allocation == "endosome"], decreasing = T)]
library(patchwork)
## following fixes capitalisation
ch <-chains(u2ostagmNov_conv)
rownames(u2ostagmNov_conv@chains@chains[[1]]@ComponentParam@mk)[rownames(u2ostagmNov_conv@chains@chains[[1]]@ComponentParam@mk) == "endosome"] <- "ENDOSOME"
gg1 <- plot(u2ostagmNov_conv, "Q92738") # Interesting
gg2 <- plot(u2ostagmNov_conv, "Q15833" ) # vesicle
gg3 <- plot(u2ostagmNov_conv, "P61020") # known RAB5B
gg4 <- plot(u2ostagmNov_conv, "O15498") # maybe
# "Q92738" "Q15833" "P61020" "O15498"
tempNames2 <- rownames(fData(u2ostagm)[fData(u2ostagm)$tagm.mcmc.allocation == "PM",])[order(fData(u2ostagm)$tagm.mcmc.mean.shannon[fData(u2ostagm)$tagm.mcmc.allocation == "PM"], decreasing = T)]
gg5 <- plot(u2ostagmNov_conv, tempNames2[3]) # probably not
gg6 <- plot(u2ostagmNov_conv, tempNames2[9]) # trafficking from endosome to membrane
gg7 <- plot(u2ostagmNov_conv, tempNames2[18]) # known RAB
gg8 <- plot(u2ostagmNov_conv, tempNames2[34]) # KIF16B
gg9 <- plot(u2ostagmNov_conv, tempNames2[35]) # known PM, LYS, ENDO
# "O00186" "Q9NZN3" "P20339-2" "Q96L93-6" "Q8NHG8"
gg1 + gg3 + gg4 + gg6 + gg7 + gg8 + gg9 + plot_layout(ncol = 3)
## Barplot of different number of proteins
numMarkers <- sum(fData(hyperLOPITU2OS2018)$markers != "unknown")
allocThres <- (fData(u2ostagm)$tagm.mcmc.probability * (1 - fData(u2ostagm)$tagm.mcmc.outlier) > 0.999)
tb <- table((fData(u2ostagm)$tagm.mcmc.allocation[allocThres]))
numAlloc <- sum(tb[!(names(tb) %in% c("ENDOSOME", "Phenotype 1"))])
numAlloc_withend <- sum(tb[!(names(tb) %in% c("Phenotype 1"))])
totalProteins <- nrow(u2ostagm)
# 240 endosome allocations
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
df <- matrix(NA, ncol = 1, nrow = 4)
names(df) <- c("Markers", "TAGM allocation", "Reannotation allocations")
#df[, 1] <- c(numMarkers, totalProteins - numMarkers, 0, 0)
#df[, 2] <- c(numMarkers, numAlloc - numMarkers, totalProteins - (numAlloc), 0)
df[, 1] <- c(numMarkers, numAlloc - numMarkers, numAlloc_withend - (numAlloc), totalProteins - (numAlloc_withend))
df_long <- melt(df)
#df_long$Var1 <- c("a", "d", "b", "c", "a", "b", "d", "c", "a", "b", "c", "d")
df_long$Var1 <- c("a", "b", "c", "d")
df_long$Var1[df_long$Var1 == "a"] <- c("Markers")
df_long$Var1[df_long$Var1 == "b"] <- c("Protein allocations")
df_long$Var1[df_long$Var1 == "c"] <- c("Reannotation allocations")
df_long$Var1[df_long$Var1 == "d"] <- c("Unknown")
df_long$Var2 <- c("TAGM allocations")
gg <- ggplot(df_long, aes(x = Var2, y = value, fill = Var1, width = 0.5)) + geom_bar(stat="identity", position = position_stack(reverse = TRUE)) #+ coord_flip()
gg <- gg + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
text = element_text(size=20)) + scale_fill_manual(values=cbPalette, name = "Legend")
gg <- gg + ylab("Number of Proteins ") + xlab("Method") + ggtitle("Protein allocations")
gg
|
/code/codeforHPC/hluso2reanalysis.R
|
no_license
|
ococrook/2019-noveltyTagm
|
R
| false | false | 5,995 |
r
|
## hyperLOPIT U2OS with endosomes
## assumed packages (not attached in the original script): pRoloc, pRolocdata, coda,
## clusterProfiler (with org.Hs.eg.db installed), reshape2, ggplot2; novelty-TAGM
## helpers such as tagmNoveltyProcess/tagmNoveltyPredict are assumed sourced elsewhere.
library(pRoloc)
library(pRolocdata)
library(coda)
library(clusterProfiler)
library(reshape2)
library(ggplot2)
setStockcol(paste0(getStockcol(), 90))
load("hlU2OSTagmaddMarkers_2.rda")
data(hyperLOPITU2OS2018)
load("endosomeMarkers.rda")
fData(hyperLOPITU2OS2018)$nt.markers <- fData(hyperLOPITU2OS2018)$markers
levels(fData(hyperLOPITU2OS2018)$nt.markers) <- factor(c(levels(fData(hyperLOPITU2OS2018)$markers), "ENDOSOME"),
levels = c("CHROMATIN","CYTOSOL","ER","GA","LYSOSOME","MITOCHONDRION","NUCLEUS",
"PEROXISOME","PM","PROTEASOME","RIBOSOME 40S","RIBOSOME 60S","ENDOSOME","unknown" ))
fData(hyperLOPITU2OS2018)[endosomeMarkers,]$nt.markers <- "ENDOSOME" #make sure levels are in right order!!
# check convergence
outliers <- mcmc_get_outliers(hlU2OSTagmaddMarkers_2)
u2osdiag <- gelman.diag(outliers[-c(1,4,6)]) # converged yay
u2ostagmNov_conv <- hlU2OSTagmaddMarkers_2[-c(1,4,6)]
u2osTagmNoveltyRes <- tagmNoveltyProcess(object = hyperLOPITU2OS2018, params = u2ostagmNov_conv, fcol = "nt.markers")
u2ostagmNov_conv <- tagmMcmcProcess(u2ostagmNov_conv)
u2ostagm <- tagmPredict(object = hyperLOPITU2OS2018, params = u2ostagmNov_conv, fcol = "nt.markers")
save(u2osTagmNoveltyRes, file = "u2osTagmNoveltyRes_endo.rda")
## attach information to MSnset
u2ostagm <- tagmNoveltyPredict(object = u2ostagm, params = u2osTagmNoveltyRes)
fData(u2ostagm)$tagm.mcmc.allocation[fData(u2ostagm)$tagm.mcmc.allocation == "endosome"] <- "ENDOSOME"
ptsze <- exp(fData(u2ostagm)$tagm.mcmc.probability) - 1.5
ptsze[fData(u2ostagm)$tagm.mcmc.outlier > 10^{-12}] <- 0.01
ptsze[fData(u2ostagm)$tagm.mcmc.allocation == "Phenotype 1"] <- 0.01
plot2D(u2ostagm, fcol = "tagm.mcmc.allocation", cex = ptsze, main = "PCA of U2OS hyperLOPIT data", cex.main = 2, grid = FALSE)
addLegend(u2ostagm, fcol = "tagm.mcmc.allocation", ncol = 5, where = "topleft", cex = 1)
cls <- getStockcol()[as.factor(fData(u2ostagm)$tagm.mcmc.allocation)]
plot(fData(u2ostagm)$tagm.mcmc.probability,
fData(u2ostagm)$tagm.mcmc.mean.shannon,
col = cls, pch = 19,
xlab = "Localisation probability",
ylab = "Shannon entropy")
plot(u2ostagmNov_conv, "A6NGN9")
unknownLoc <- rownames(u2ostagm)[(fData(u2ostagm)$tagm.mcmc.probability * (1 - fData(u2ostagm)$tagm.mcmc.outlier) < 0.99)]
mf1u2os_endo <- enrichGO(gene = unknownLoc,
OrgDb = "org.Hs.eg.db",
keyType = "UNIPROT",
ont = "MF",
universe = rownames(fData(u2ostagm)))
bp1u2os_endo <- enrichGO(gene = unknownLoc,
OrgDb = "org.Hs.eg.db",
keyType = "UNIPROT",
ont = "BP",
universe = rownames(fData(u2ostagm)))
tempNames <- rownames(fData(u2ostagm)[fData(u2ostagm)$tagm.mcmc.allocation == "endosome",])[order(fData(u2ostagm)$tagm.mcmc.mean.shannon[fData(u2ostagm)$tagm.mcmc.allocation == "endosome"], decreasing = T)]
library(patchwork)
## following fixes capitalisation
ch <-chains(u2ostagmNov_conv)
rownames(u2ostagmNov_conv@chains@chains[[1]]@ComponentParam@mk)[rownames(u2ostagmNov_conv@chains@chains[[1]]@ComponentParam@mk) == "endosome"] <- "ENDOSOME"
gg1 <- plot(u2ostagmNov_conv, "Q92738") # Interesting
gg2 <- plot(u2ostagmNov_conv, "Q15833" ) # vesicle
gg3 <- plot(u2ostagmNov_conv, "P61020") # known RAB5B
gg4 <- plot(u2ostagmNov_conv, "O15498") # maybe
# "Q92738" "Q15833" "P61020" "O15498"
tempNames2 <- rownames(fData(u2ostagm)[fData(u2ostagm)$tagm.mcmc.allocation == "PM",])[order(fData(u2ostagm)$tagm.mcmc.mean.shannon[fData(u2ostagm)$tagm.mcmc.allocation == "PM"], decreasing = T)]
gg5 <- plot(u2ostagmNov_conv, tempNames2[3]) # probably not
gg6 <- plot(u2ostagmNov_conv, tempNames2[9]) # trafficking from endosome to membrane
gg7 <- plot(u2ostagmNov_conv, tempNames2[18]) # known RAB
gg8 <- plot(u2ostagmNov_conv, tempNames2[34]) # KIF16B
gg9 <- plot(u2ostagmNov_conv, tempNames2[35]) # known PM, LYS, ENDO
# "O00186" "Q9NZN3" "P20339-2" "Q96L93-6" "Q8NHG8"
gg1 + gg3 + gg4 + gg6 + gg7 + gg8 + gg9 + plot_layout(ncol = 3)
## Barplot of different number of proteins
numMarkers <- sum(fData(hyperLOPITU2OS2018)$markers != "unknown")
allocThres <- (fData(u2ostagm)$tagm.mcmc.probability * (1 - fData(u2ostagm)$tagm.mcmc.outlier) > 0.999)
tb <- table((fData(u2ostagm)$tagm.mcmc.allocation[allocThres]))
numAlloc <- sum(tb[!(names(tb) %in% c("ENDOSOME", "Phenotype 1"))])
numAlloc_withend <- sum(tb[!(names(tb) %in% c("Phenotype 1"))])
totalProteins <- nrow(u2ostagm)
# 240 endosome allocations
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
df <- matrix(NA, ncol = 1, nrow = 4)
names(df) <- c("Markers", "TAGM allocation", "Reannotation allocations")
#df[, 1] <- c(numMarkers, totalProteins - numMarkers, 0, 0)
#df[, 2] <- c(numMarkers, numAlloc - numMarkers, totalProteins - (numAlloc), 0)
df[, 1] <- c(numMarkers, numAlloc - numMarkers, numAlloc_withend - (numAlloc), totalProteins - (numAlloc_withend))
df_long <- melt(df)
#df_long$Var1 <- c("a", "d", "b", "c", "a", "b", "d", "c", "a", "b", "c", "d")
df_long$Var1 <- c("a", "b", "c", "d")
df_long$Var1[df_long$Var1 == "a"] <- c("Markers")
df_long$Var1[df_long$Var1 == "b"] <- c("Protein allocations")
df_long$Var1[df_long$Var1 == "c"] <- c("Reannotation allocations")
df_long$Var1[df_long$Var1 == "d"] <- c("Unknown")
df_long$Var2 <- c("TAGM allocations")
gg <- ggplot(df_long, aes(x = Var2, y = value, fill = Var1, width = 0.5)) + geom_bar(stat="identity", position = position_stack(reverse = TRUE)) #+ coord_flip()
gg <- gg + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
text = element_text(size=20)) + scale_fill_manual(values=cbPalette, name = "Legend")
gg <- gg + ylab("Number of Proteins ") + xlab("Method") + ggtitle("Protein allocations")
gg
|
# practice with functions
df <- data.frame(a=1:10, b=seq(200,400,length=10),c=11:20,d=NA)
my_function <- function(a) {
value <- (a - min(a)) / (max(a) - min(a))
return(value)
}
my_function(df$a)
my_function(df$b)
my_function(df$c)
my_function(df$d)
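# my_function(df$d) gives NA because column d is all NA; a hedged variant (added,
# not required by the exercise) that ignores NAs when rescaling:
my_function2 <- function(a) {
  rng <- range(a, na.rm = TRUE)
  (a - rng[1]) / (rng[2] - rng[1])
}
my_function2(c(1, 5, NA, 10))  # NA stays NA, the rest are rescaled to [0, 1]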
# next exercise
install.packages("bio3d", dependencies=TRUE)
library(bio3d)
s1 <- read.pdb("4AKE") # kinase with drug
s2 <- read.pdb("1AKE") # kinase no drug
s3 <- read.pdb("1E4Y") # kinase with drug
s1.chainA <- trim.pdb(s1, chain="A", elety="CA")
s2.chainA <- trim.pdb(s2, chain="A", elety="CA")
s3.chainA <- trim.pdb(s3, chain="A", elety="CA")
s1.b <- s1.chainA$atom$b
s2.b <- s2.chainA$atom$b
s3.b <- s3.chainA$atom$b
plotb3(s1.b, sse=s1.chainA, typ="l", ylab="Bfactor")
plotb3(s2.b, sse=s2.chainA, typ="l", ylab="Bfactor")
plotb3(s3.b, sse=s3.chainA, typ="l", ylab="Bfactor")
hc <- hclust( dist( rbind(s1.b, s2.b, s3.b) ) )
plot(hc)
s1.chainA$atom
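# Added sketch: the repeated steps above could be wrapped into one function
# (assumes the chain A CA B-factors are always the quantity of interest):
analyze_protein <- function(pdb_id) {
  s <- read.pdb(pdb_id)                             # fetch/parse the structure
  chainA <- trim.pdb(s, chain = "A", elety = "CA")  # keep chain A alpha carbons
  plotb3(chainA$atom$b, sse = chainA, typ = "l", ylab = "Bfactor")
  invisible(chainA$atom$b)                          # return B-factors for further use
}
# analyze_protein("4AKE")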
|
/class06/class06_practice.R
|
no_license
|
macatbu/bggn213
|
R
| false | false | 922 |
r
|
# practice with functions
df <- data.frame(a=1:10, b=seq(200,400,length=10),c=11:20,d=NA)
my_function <- function(a) {
value <- (a - min(a)) / (max(a) - min(a))
return(value)
}
my_function(df$a)
my_function(df$b)
my_function(df$c)
my_function(df$d)
# next exercise
install.packages("bio3d", dependencies=TRUE)
library(bio3d)
s1 <- read.pdb("4AKE") # kinase with drug
s2 <- read.pdb("1AKE") # kinase no drug
s3 <- read.pdb("1E4Y") # kinase with drug
s1.chainA <- trim.pdb(s1, chain="A", elety="CA")
s2.chainA <- trim.pdb(s2, chain="A", elety="CA")
s3.chainA <- trim.pdb(s3, chain="A", elety="CA")
s1.b <- s1.chainA$atom$b
s2.b <- s2.chainA$atom$b
s3.b <- s3.chainA$atom$b
plotb3(s1.b, sse=s1.chainA, typ="l", ylab="Bfactor")
plotb3(s2.b, sse=s2.chainA, typ="l", ylab="Bfactor")
plotb3(s3.b, sse=s3.chainA, typ="l", ylab="Bfactor")
hc <- hclust( dist( rbind(s1.b, s2.b, s3.b) ) )
plot(hc)
s1.chainA$atom
|
library('microbenchmark')
x <- matrix(rnorm(10), 5)
w <- 1:5
cen <- colMeans(w*x)/sum(w)
## benchmark alternative column sums
microbenchmark(
colSums(w*x),
(rep(1,nrow(x))%*%(w*x))[1,],
times=1e4)
## benchmark matrix transpose
microbenchmark(t(x), aperm(x), times=1e5)
## benchmark centering columns of matrix
microbenchmark(
scale(x, center=cen, scale=F),
sweep(x, 2, cen),
t(t(x) - cen),
t.default(t.default(x) - cen),
times=1e4)
x <- scale(x, center=cen, scale=F)
## t() is expensive; test alternative
microbenchmark(
crossprod(t(t(x) - cen)*sqrt(w)),
tcrossprod((t(x) - cen)*matrix(sqrt(w),dim(x)[2],dim(x)[1],byrow=T)),
times=1e4)
## benchmark computing second moments
microbenchmark(
cov.wt(x, w, center=F, method='ML')$cov*sum(w),
crossprod(sqrt(w)*x),
times=1e4)
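## Added sanity check: confirm the two second-moment expressions agree before trusting the timings
all.equal(cov.wt(x, w, center = F, method = 'ML')$cov * sum(w),
          crossprod(sqrt(w) * x))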
|
/benchmark_tests.R
|
no_license
|
jarretrt/zucchini
|
R
| false | false | 803 |
r
|
library('microbenchmark')
x <- matrix(rnorm(10), 5)
w <- 1:5
cen <- colMeans(w*x)/sum(w)
## benchmark alternative column sums
microbenchmark(
colSums(w*x),
(rep(1,nrow(x))%*%(w*x))[1,],
times=1e4)
## benchmark matrix transpose
microbenchmark(t(x), aperm(x), times=1e5)
## benchmark centering columns of matrix
microbenchmark(
scale(x, center=cen, scale=F),
sweep(x, 2, cen),
t(t(x) - cen),
t.default(t.default(x) - cen),
times=1e4)
x <- scale(x, center=cen, scale=F)
## t() is expensive; test alternative
microbenchmark(
crossprod(t(t(x) - cen)*sqrt(w)),
tcrossprod((t(x) - cen)*matrix(sqrt(w),dim(x)[2],dim(x)[1],byrow=T)),
times=1e4)
## benchmark computing second moments
microbenchmark(
cov.wt(x, w, center=F, method='ML')$cov*sum(w),
crossprod(sqrt(w)*x),
times=1e4)
|
#
# Exploratory Data Analysis
# Project #1
#
# 2015-10-09
# jptanguay
#
#
# plot3.r
#
#############################
#
#############################
# set the working directory to the correct path
# and load the common script that loads and prepares data
setwd("C:/Users/jptanguay/Documents/coursera/ExploratoryDataAnalysis/project1-ver2")
source(file="project1_common.r")
#############################
#plot 3
#############################
par(mfrow=c(1,1))
plot(dat2$datetime, dat2$Sub_metering_1, xlab= "", ylab = "Energy sub metering", type = "l", col="black")
lines(dat2$datetime, dat2$Sub_metering_2, ylab = "Energy sub metering", type = "l", col="red")
lines(dat2$datetime, dat2$Sub_metering_3, ylab = "Energy sub metering", type = "l", col="blue")
legend('topright', c("Sub_metering_1", "Sub_metering_2" ,"Sub_metering_3"), lty=1, col=c('black', 'red', 'blue'), bty='o', cex=.75)
dev.copy(png,filename="plot3.png");
dev.off ();
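# Alternative sketch (assumes the usual 480x480 device for this assignment): plotting
# straight to the png device avoids the resizing that dev.copy() can introduce.
# png(filename = "plot3.png", width = 480, height = 480)
# ...same plot(), lines() and legend() calls as above...
# dev.off()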
|
/plot3.r
|
no_license
|
jptanguay/ExData_Plotting1
|
R
| false | false | 952 |
r
|
#
# Exploratory Data Analysis
# Project #1
#
# 2015-10-09
# jptanguay
#
#
# plot3.r
#
#############################
#
#############################
# set the working directory to the correct path
# and load the common script that loads and prepares data
setwd("C:/Users/jptanguay/Documents/coursera/ExploratoryDataAnalysis/project1-ver2")
source(file="project1_common.r")
#############################
#plot 3
#############################
par(mfrow=c(1,1))
plot(dat2$datetime, dat2$Sub_metering_1, xlab= "", ylab = "Energy sub metering", type = "l", col="black")
lines(dat2$datetime, dat2$Sub_metering_2, ylab = "Energy sub metering", type = "l", col="red")
lines(dat2$datetime, dat2$Sub_metering_3, ylab = "Energy sub metering", type = "l", col="blue")
legend('topright', c("Sub_metering_1", "Sub_metering_2" ,"Sub_metering_3"), lty=1, col=c('black', 'red', 'blue'), bty='o', cex=.75)
dev.copy(png,filename="plot3.png");
dev.off ();
|
testlist <- list(n = c(NA_integer_, NA_integer_))
result <- do.call(gdalcubes:::libgdalcubes_set_threads,testlist)
str(result)
|
/gdalcubes/inst/testfiles/libgdalcubes_set_threads/libFuzzer_libgdalcubes_set_threads/libgdalcubes_set_threads_valgrind_files/1609874498-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 126 |
r
|
testlist <- list(n = c(NA_integer_, NA_integer_))
result <- do.call(gdalcubes:::libgdalcubes_set_threads,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClassFunctions.R
\name{print.summary.Lines3D}
\alias{print.summary.Lines3D}
\title{Print a summary of a \code{Lines3D} \code{object}}
\usage{
\method{print}{summary.Lines3D}(x, ...)
}
\arguments{
\item{x}{An \code{object} of class \code{"summary.Lines3D"},
generated by \code{summary.Lines3D}.}
\item{\dots}{Additional parameters for \code{print}.}
}
\value{
None
}
\description{
Prints some information about the \code{object}.
}
\seealso{
\code{\link{print.Lines3D}},
\code{\link{summary.Lines3D}},
and \code{\link{plot.Lines3D}}
}
|
/man/print.summary.Lines3D.Rd
|
no_license
|
elvanceyhan/pcds
|
R
| false | true | 613 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ClassFunctions.R
\name{print.summary.Lines3D}
\alias{print.summary.Lines3D}
\title{Print a summary of a \code{Lines3D} \code{object}}
\usage{
\method{print}{summary.Lines3D}(x, ...)
}
\arguments{
\item{x}{An \code{object} of class \code{"summary.Lines3D"},
generated by \code{summary.Lines3D}.}
\item{\dots}{Additional parameters for \code{print}.}
}
\value{
None
}
\description{
Prints some information about the \code{object}.
}
\seealso{
\code{\link{print.Lines3D}},
\code{\link{summary.Lines3D}},
and \code{\link{plot.Lines3D}}
}
|
require(RJDBC)
require(dplyr)
require(mapvizieR)
require(tidyr)
require(purrr)
require(readr)
source("data/map_all_silo.R")
source("data/current_ps_roster.R")
source("munge/01-A.R")
map_w16 <- map_viz$cdf %>%
mutate(cohort = map_year_academic + 1 + 12 - grade) %>%
filter(termname == "Winter 2015-2016")
m_goal_scores <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9]ritscore"))
m_goal_stderr <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9]stderr"))
m_goal_names <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9](name)"))
m_goal_range <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9](range)"))
# time to gather
m_goal_names_long <- m_goal_names %>%
gather(key = variable, value = goal_name, goal1name:goal8name)
m_goal_scores_long <- m_goal_scores %>%
gather(key = variable, value = goal_score, goal1ritscore:goal8ritscore)
m_goal_stderr_long <- m_goal_stderr %>%
gather(key = variable, value = goal_stderr, goal1stderr:goal8stderr)
m_goal_range_long <- m_goal_range %>%
gather(key = variable, value = goal_range, goal1range:goal8range)
m_goals <- m_goal_names_long %>%
mutate(goal_score = m_goal_scores_long$goal_score,
goal_stderr = m_goal_stderr_long$goal_stderr,
goal_low = round(goal_score - goal_stderr),
goal_high = round(goal_score + goal_stderr),
goal_range = m_goal_range_long$goal_range
) %>%
select(-variable) %>%
filter(!is.na(goal_name))
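# The mutate() above assumes the four long tables are row-aligned; a hedged
# sanity check (added, not part of the original workflow):
stopifnot(
  identical(m_goal_names_long$studentid, m_goal_scores_long$studentid),
  identical(m_goal_names_long$studentid, m_goal_stderr_long$studentid),
  identical(m_goal_names_long$studentid, m_goal_range_long$studentid)
)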
mean_strand_scores<-m_goals %>%
group_by(measurementscale, grade, schoolname, goal_name) %>%
summarize(n_tests = n(),
avg_score = round(mean(goal_score),1)
)
mean_rit_scores <- map_w16 %>%
group_by(measurementscale, grade, schoolname) %>%
summarise(n_tests = n(),
avg_score = round(mean(testritscore),1)) %>%
mutate(goal_name = "1. Overall RIT Score")
mean_scores <- bind_rows(mean_strand_scores, mean_rit_scores) %>%
rename(score_type = goal_name)
# averages for KAMS ####
mean_scores_wide <- mean_scores %>%
filter(grepl("Ascend Middle", schoolname)) %>%
split(list(.$measurementscale, .$schoolname)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide %>%
map(~ write_csv(., path=sprintf("%s_%s_mean_scores.csv",
abbreviate(unique(.$schoolname)),
unique(.$measurementscale)
)
)
)
# for all schools #####
mean_scores_wide_6_plus <- mean_scores %>%
filter(grade >= 6) %>%
split(list(.$measurementscale)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide_2_5 <- mean_scores %>%
filter(grade %in% c(2:5)) %>%
split(list(.$measurementscale)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide_mpg <- mean_scores %>%
filter(grade < 2) %>%
split(list(.$measurementscale)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide_6_plus %>%
map(~ write_csv(., path=sprintf("reports/%s_mean_scores_6_plus.csv",
unique(.$measurementscale)
)
)
)
mean_scores_wide_2_5 %>%
map(~ write_csv(., path=sprintf("reports/%s_mean_scores_2_5.csv",
unique(.$measurementscale)
)
)
)
mean_scores_wide_mpg %>%
map(~ write_csv(., path=sprintf("reports/%s_mean_scores_mpg.csv",
unique(.$measurementscale)
)
)
)
|
/MAP Analysis/2015-2016/src/Mean_RIT_and_goal_Scores.R
|
no_license
|
kippchicago/Data_Analysis
|
R
| false | false | 3,828 |
r
|
require(RJDBC)
require(dplyr)
require(mapvizieR)
require(tidyr)
require(purrr)
require(readr)
source("data/map_all_silo.R")
source("data/current_ps_roster.R")
source("munge/01-A.R")
map_w16 <- map_viz$cdf %>%
mutate(cohort = map_year_academic + 1 + 12 - grade) %>%
filter(termname == "Winter 2015-2016")
m_goal_scores <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9]ritscore"))
m_goal_stderr <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9]stderr"))
m_goal_names <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9](name)"))
m_goal_range <- map_w16 %>%
select(studentid, testid, measurementscale, schoolname, cohort, termname, fallwinterspring, map_year_academic, grade,
matches("(goal)[0-9](range)"))
# time to gather
m_goal_names_long <- m_goal_names %>%
gather(key = variable, value = goal_name, goal1name:goal8name)
m_goal_scores_long <- m_goal_scores %>%
gather(key = variable, value = goal_score, goal1ritscore:goal8ritscore)
m_goal_stderr_long <- m_goal_stderr %>%
gather(key = variable, value = goal_stderr, goal1stderr:goal8stderr)
m_goal_range_long <- m_goal_range %>%
gather(key = variable, value = goal_range, goal1range:goal8range)
m_goals <- m_goal_names_long %>%
mutate(goal_score = m_goal_scores_long$goal_score,
goal_stderr = m_goal_stderr_long$goal_stderr,
goal_low = round(goal_score - goal_stderr),
goal_high = round(goal_score + goal_stderr),
goal_range = m_goal_range_long$goal_range
) %>%
select(-variable) %>%
filter(!is.na(goal_name))
mean_strand_scores<-m_goals %>%
group_by(measurementscale, grade, schoolname, goal_name) %>%
summarize(n_tests = n(),
avg_score = round(mean(goal_score),1)
)
mean_rit_scores <- map_w16 %>%
group_by(measurementscale, grade, schoolname) %>%
summarise(n_tests = n(),
avg_score = round(mean(testritscore),1)) %>%
mutate(goal_name = "1. Overall RIT Score")
mean_scores <- bind_rows(mean_strand_scores, mean_rit_scores) %>%
rename(score_type = goal_name)
# averages for KAMS ####
mean_scores_wide <- mean_scores %>%
filter(grepl("Ascend Middle", schoolname)) %>%
split(list(.$measurementscale, .$schoolname)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide %>%
map(~ write_csv(., path=sprintf("%s_%s_mean_scores.csv",
abbreviate(unique(.$schoolname)),
unique(.$measurementscale)
)
)
)
# for all schools #####
mean_scores_wide_6_plus <- mean_scores %>%
filter(grade >= 6) %>%
split(list(.$measurementscale)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide_2_5 <- mean_scores %>%
filter(grade %in% c(2:5)) %>%
split(list(.$measurementscale)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide_mpg <- mean_scores %>%
filter(grade < 2) %>%
split(list(.$measurementscale)) %>%
map(~spread(., score_type, avg_score))
mean_scores_wide_6_plus %>%
map(~ write_csv(., path=sprintf("reports/%s_mean_scores_6_plus.csv",
unique(.$measurementscale)
)
)
)
mean_scores_wide_2_5 %>%
map(~ write_csv(., path=sprintf("reports/%s_mean_scores_2_5.csv",
unique(.$measurementscale)
)
)
)
mean_scores_wide_mpg %>%
map(~ write_csv(., path=sprintf("reports/%s_mean_scores_mpg.csv",
unique(.$measurementscale)
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tk_ts.R
\name{tk_ts}
\alias{tk_ts}
\alias{tk_ts_}
\title{Coerce time series objects and tibbles with date/date-time columns to ts.}
\usage{
tk_ts(data, select = NULL, start = 1, end = numeric(), frequency = 1,
deltat = 1, ts.eps = getOption("ts.eps"), silent = FALSE)
tk_ts_(data, select = NULL, start = 1, end = numeric(), frequency = 1,
deltat = 1, ts.eps = getOption("ts.eps"), silent = FALSE)
}
\arguments{
\item{data}{A time-based tibble or time-series object.}
\item{select}{\strong{Applicable to tibbles and data frames only}.
The column or set of columns to be coerced to \code{ts} class.}
\item{start}{the time of the first observation. Either a single
number or a vector of two integers, which specify a natural time
unit and a (1-based) number of samples into the time unit. See
the examples for the use of the second form.}
\item{end}{the time of the last observation, specified in the same way
as \code{start}.}
\item{frequency}{the number of observations per unit of time.}
\item{deltat}{the fraction of the sampling period between successive
observations; e.g., 1/12 for monthly data. Only one of
\code{frequency} or \code{deltat} should be provided.}
\item{ts.eps}{time series comparison tolerance. Frequencies are
considered equal if their absolute difference is less than
\code{ts.eps}.}
\item{silent}{Used to toggle printing of messages and warnings.}
}
\value{
Returns a \code{ts} object.
}
\description{
Coerce time series objects and tibbles with date/date-time columns to ts.
}
\details{
\code{tk_ts()} is a wrapper for \code{stats::ts()} that is designed
to coerce \code{tibble} objects that have a "time-base" (meaning the values vary with time)
to \code{ts} class objects. There are two main advantages:
\enumerate{
\item Non-numeric columns get removed instead of being populated by NA's.
\item The returned \code{ts} object retains a "timekit index" (and various other attributes) if detected.
The "timekit index" can be used to coerce between \code{tbl}, \code{xts}, \code{zoo}, and \code{ts} data types.
}
The \code{select} argument is used to select subsets
of columns from the incoming data.frame.
Only columns containing numeric data are coerced. \emph{At a minimum, a \code{frequency}
and a \code{start} should be specified.}
For non-data.frame object classes (e.g. \code{xts}, \code{zoo}, \code{timeSeries}, etc) the objects are coerced
using \code{stats::ts()}.
\code{tk_ts_} is a nonstandard evaluation method.
}
\examples{
library(tidyverse)
library(timekit)
### tibble to ts: Comparison between tk_ts() and stats::ts()
data_tbl <- tibble::tibble(
date = seq.Date(as.Date("2016-01-01"), by = 1, length.out = 5),
x = rep("chr values", 5),
y = cumsum(1:5),
z = cumsum(11:15) * rnorm(1))
# as.ts: Character columns introduce NA's; Result does not retain index
stats::ts(data_tbl[,-1], start = 2016)
# tk_ts: Only numeric columns get coerced; Result retains index in numeric format
data_ts <- tk_ts(data_tbl, start = 2016)
data_ts
# timekit index
tk_index(data_ts, timekit_idx = FALSE) # Regularized index returned
tk_index(data_ts, timekit_idx = TRUE) # Original date index returned
# Coerce back to tibble
data_ts \%>\% tk_tbl(timekit_idx = TRUE)
### Using select
tk_ts(data_tbl, select = y)
### NSE: Enables programming
select <- "y"
tk_ts_(data_tbl, select = select)
}
\seealso{
\code{\link[=tk_index]{tk_index()}}, \code{\link[=tk_tbl]{tk_tbl()}}, \code{\link[=tk_xts]{tk_xts()}}, \code{\link[=tk_zoo]{tk_zoo()}}, \code{\link[=tk_zooreg]{tk_zooreg()}}
}
|
/man/tk_ts.Rd
|
no_license
|
datactivist/timekit
|
R
| false | true | 3,664 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tk_ts.R
\name{tk_ts}
\alias{tk_ts}
\alias{tk_ts_}
\title{Coerce time series objects and tibbles with date/date-time columns to ts.}
\usage{
tk_ts(data, select = NULL, start = 1, end = numeric(), frequency = 1,
deltat = 1, ts.eps = getOption("ts.eps"), silent = FALSE)
tk_ts_(data, select = NULL, start = 1, end = numeric(), frequency = 1,
deltat = 1, ts.eps = getOption("ts.eps"), silent = FALSE)
}
\arguments{
\item{data}{A time-based tibble or time-series object.}
\item{select}{\strong{Applicable to tibbles and data frames only}.
The column or set of columns to be coerced to \code{ts} class.}
\item{start}{the time of the first observation. Either a single
number or a vector of two integers, which specify a natural time
unit and a (1-based) number of samples into the time unit. See
the examples for the use of the second form.}
\item{end}{the time of the last observation, specified in the same way
as \code{start}.}
\item{frequency}{the number of observations per unit of time.}
\item{deltat}{the fraction of the sampling period between successive
observations; e.g., 1/12 for monthly data. Only one of
\code{frequency} or \code{deltat} should be provided.}
\item{ts.eps}{time series comparison tolerance. Frequencies are
considered equal if their absolute difference is less than
\code{ts.eps}.}
\item{silent}{Used to toggle printing of messages and warnings.}
}
\value{
Returns a \code{ts} object.
}
\description{
Coerce time series objects and tibbles with date/date-time columns to ts.
}
\details{
\code{tk_ts()} is a wrapper for \code{stats::ts()} that is designed
to coerce \code{tibble} objects that have a "time-base" (meaning the values vary with time)
to \code{ts} class objects. There are two main advantages:
\enumerate{
\item Non-numeric columns get removed instead of being populated by NA's.
\item The returned \code{ts} object retains a "timekit index" (and various other attributes) if detected.
The "timekit index" can be used to coerce between \code{tbl}, \code{xts}, \code{zoo}, and \code{ts} data types.
}
The \code{select} argument is used to select subsets
of columns from the incoming data.frame.
Only columns containing numeric data are coerced. \emph{At a minimum, a \code{frequency}
and a \code{start} should be specified.}
For non-data.frame object classes (e.g. \code{xts}, \code{zoo}, \code{timeSeries}, etc) the objects are coerced
using \code{stats::ts()}.
\code{tk_ts_} is a nonstandard evaluation method.
}
\examples{
library(tidyverse)
library(timekit)
### tibble to ts: Comparison between tk_ts() and stats::ts()
data_tbl <- tibble::tibble(
date = seq.Date(as.Date("2016-01-01"), by = 1, length.out = 5),
x = rep("chr values", 5),
y = cumsum(1:5),
z = cumsum(11:15) * rnorm(1))
# as.ts: Character columns introduce NA's; Result does not retain index
stats::ts(data_tbl[,-1], start = 2016)
# tk_ts: Only numeric columns get coerced; Result retains index in numeric format
data_ts <- tk_ts(data_tbl, start = 2016)
data_ts
# timekit index
tk_index(data_ts, timekit_idx = FALSE) # Regularized index returned
tk_index(data_ts, timekit_idx = TRUE) # Original date index returned
# Coerce back to tibble
data_ts \%>\% tk_tbl(timekit_idx = TRUE)
### Using select
tk_ts(data_tbl, select = y)
### NSE: Enables programming
select <- "y"
tk_ts_(data_tbl, select = select)
}
\seealso{
\code{\link[=tk_index]{tk_index()}}, \code{\link[=tk_tbl]{tk_tbl()}}, \code{\link[=tk_xts]{tk_xts()}}, \code{\link[=tk_zoo]{tk_zoo()}}, \code{\link[=tk_zooreg]{tk_zooreg()}}
}
|
rm(list=ls())
##Main task: restrict the items in the test set to those that appear in the training set
library(plyr)
library(dplyr)
library(reshape2)
library(data.table)
##describe the data
data_path <- '/Users/baifrank/Desktop/硕士毕业论文/数据集/ml-1m/'
col_names <- c('user_id','item_id','rating','timestamp')
ua.base <- fread(paste(data_path,'ratings.dat',sep=''),header=FALSE)
ua.base <- ua.base[,c(1,3,5,7)]
##user id | item id | rating | timestamp
colnames(ua.base) <- col_names
user_all <- unique(ua.base$user_id)##6040
movie_all <- unique(ua.base$item_id)##3706
sort(unique(ua.base$rating))##only integer ratings are present
##make continuous id:both users and movies
user_id_df <- data.frame(original = user_all , user_transform = 1:length(user_all))
item_id_df <- data.frame(original = movie_all , transform = 1:length(movie_all))
#write.csv(user_id_df,file="/Users/baifrank/Desktop/recomm_output/ml_1m_output/uid_transform.csv",row.names=FALSE)
#write.csv(item_id_df,file="/Users/baifrank/Desktop/recomm_output/ml_1m_output/item_transform.csv",row.names=FALSE)
ua.base_transform <- left_join(ua.base,user_id_df,c("user_id" = "original"))
ua.base_transform <- ua.base_transform[,c(1,2,3,5)]
colnames(ua.base_transform) <- c("user_id_original","item_id_original","rating","user_id")
ua.base_transform <- left_join(ua.base_transform,item_id_df,c("item_id_original" = "original"))
ua.base_transform <- plyr::rename(ua.base_transform,c("transform" = "item_id"))
ua.base <- select(ua.base_transform,user_id,item_id,rating)
##shuffle the rows into random order
ua.base <- mutate(ua.base,sort_factor = rnorm(nrow(ua.base)))
ua.base <- arrange(ua.base,user_id,sort_factor)
##choose 10 ratings for each user,make the test set
##count the number of movies rated by each user
ua.base_group <- dplyr::group_by(ua.base,user_id)%>%
dplyr::summarise(count_ratings = n())
seq_count <- sapply(ua.base_group$count_ratings,function(i) seq(1,i))
seq_count <- unlist(seq_count)
##take 10 indices per user (the rows were shuffled above, so this is a random draw)
index_test <- which(seq_count <= 10)
ua.train <- ua.base[-index_test,]
ua.test <- ua.base[index_test,]
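##(added) sanity check: after the split every user should contribute exactly 10 test ratings
stopifnot(all(table(ua.test$user_id) == 10))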
##compute each user's mean rating in the training set (used to standardise ratings)
ua.train_group <- dplyr::group_by(ua.train,user_id)%>%
dplyr::summarise(rating_avg = mean(rating))
ua.train <- left_join(ua.train,ua.train_group)[,c(1,2,3,5)]
ua.test <- ua.test[,c(1,2,3)]
##write out the constructed training and test sets of the 1M data set
#write.table(ua.train,file='/Users/baifrank/Desktop/recomm_output/ml_1m_output/ua.train.txt',row.names=FALSE)
#write.table(ua.test,file='/Users/baifrank/Desktop/recomm_output/ml_1m_output/ua.test.txt',row.names=FALSE)
##read them back in
#ua.train <- read.table("/Users/baifrank/Desktop/recomm_output/ml_1m_output/ua.train.txt",header=T)
#ua.test <- read.table("/Users/baifrank/Desktop/recomm_output/ml_1m_output/ua.test.txt",header=T)
movie_train <- unique(ua.train$item_id)
movie_test <- unique(ua.test$item_id)
movie_union <- union(movie_train,movie_test)
##align the set of movies between the training and test sets
##approach: manually add the missing movies to the train/test sets, with rating recorded as NA
item_add_train <- setdiff(movie_union,movie_train)
ua.train_fill <- rbind(ua.train,data.frame(user_id=1,item_id=item_add_train,rating=NA,rating_avg=NA))
ua.train <- ua.train_fill
item_add_test <- setdiff(movie_union,movie_test)
ua.test_fill <- rbind(ua.test,data.frame(user_id=1,item_id=item_add_test,rating=NA))
ua.test <- ua.test_fill
##the real-valued training and test data frames are finished at this point
##define a function that converts a data frame into a matrix
df_to_mat <- function(df){
mat <- dcast(df,user_id~item_id)
uid <- mat$user_id
iid <- colnames(mat)
mat <- as.matrix(mat[,-1])
rownames(mat) <- uid
colnames(mat) <- iid[-1]
return(mat)
}
##convert the training set into matrix form: 6040 x 3706, user x item
mat_train <- df_to_mat(ua.train[,1:3])
mat_test <- df_to_mat(ua.test[,1:3])
##save both matrices directly as RData
path_RData <- paste("/Users/baifrank/Desktop/recomm_output/ml_1m_output/",'matrix_original.RData',sep='')
#save("mat_test","mat_train",file=path_RData)
##also normalize the ratings in the data frame: subtract each user's average rating, then binarize to 0/1
ua_base_group <- group_by(ua.base,user_id)%>%
summarise(rating_mean = mean(rating))
ua_base_group <- as.data.frame(ua_base_group)
ua_base_scale <- left_join(ua.base,ua_base_group)
ua_base_scale <- mutate(ua_base_scale,rating_binary=as.numeric(rating>=rating_mean))
##ratings >= 3 are coded as 1, otherwise 0
ua_base_scale$rating_binary_3 <- 0
index_gt_3 <- which(ua_base_scale$rating>=3)
ua_base_scale$rating_binary_3[index_gt_3] <- 1
path_binary <- '/Users/baifrank/Desktop/recomm_output/MovieLens_10M_output/data_binary/'
path_binary_train <- paste(path_binary,'ua_base_df_continuous_binary.txt',sep='') ##sep='' so no space is inserted into the path
#write.table(ua_base_scale,file=path_binary_train,row.names=FALSE)
ua_test_scale <- left_join(ua.test,ua_base_group)
ua_test_scale <- mutate(ua_test_scale,rating_binary=as.numeric(rating>=rating_mean))
ua_test_scale$rating_binary_3 <- 0
index_gt_3 <- which(ua_test_scale$rating>=3)
ua_test_scale$rating_binary_3[index_gt_3] <- 1
path_binary_test <- paste(path_binary,'ua_test_df_continuous_binary.txt',sep='') ##sep='' so no space is inserted into the path
#write.table(ua_test_scale,file=path_binary_test,row.names=FALSE)
##define a function that converts a data frame into a matrix (same definition as above)
df_to_mat <- function(df){
mat <- dcast(df,user_id~item_id)
uid <- mat$user_id
iid <- colnames(mat)
mat <- as.matrix(mat[,-1])
rownames(mat) <- uid
colnames(mat) <- iid[-1]
return(mat)
}
##convert the training set into matrix form, user x item (the 69878 x 10667 figures were carried over from the 10M version of this script; for ML-1M it is 6040 x 3706)
mat_train <- df_to_mat(ua.base)
##for the test set, manually add items that are absent from it but present in the training set
item_add <- setdiff(colnames(mat_train),ua.test$item_id)
ua.test_fill <- rbind(ua.test[,1:3],data.frame(user_id=1,item_id=item_add,rating=NA))
ua.test_fill <- mutate(ua.test_fill,item_id=as.numeric(item_id))
mat_test <- df_to_mat(ua.test_fill)
##save both matrices directly as RData
path_output <- "/Users/baifrank/Desktop/recomm_output/ml_1m_output/" ##assumed output folder, matching the paths used above
path_RData <- paste(path_output,'matrix_original.RData',sep='')
# save("mat_test","mat_train",file=path_RData)
load(path_RData)
##based on the training set: compute each user's mean rating, then binarize (normalize)
user_rating_mean <- apply(mat_train,1,mean,na.rm=T)
##a rating at or above the user's own mean counts as a positive rating
mat_to_binary <- function(mat){
mat_scale <- mat-user_rating_mean
mat_scale[which(mat_scale>=0)] <- 1
mat_scale[which(mat_scale<0)] <- 0
mat_scale[which(is.na(mat_scale)==T)] <- -1
return(mat_scale)
}
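##Toy illustration of the encoding (hypothetical values, not part of the pipeline):
##with user_rating_mean <- 3, mat_to_binary(matrix(c(5, NA, 1, 4), nrow = 1))
##returns 1 (>= the user's mean), -1 (missing rating), 0 (< the user's mean), 1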
mat_train_scale <- mat_to_binary(mat_train)
mat_test_scale <- mat_to_binary(mat_test)
##save the binarized rating matrices as RData
path_RData <- paste(path_binary,'matrix_binary.RData',sep='')
# save("mat_test_scale","mat_train_scale",file=path_RData)
load(path_RData)
##binarize: ratings >= 3 are assigned 1, otherwise 0
##a rating of at least 3 counts as a positive rating
mat_to_binary_3 <- function(mat){
mat_scale <- mat
mat_scale[which(mat>=3)] <- 1
mat_scale[which(mat<3)] <- 0
mat_scale[which(is.na(mat)==T)] <- -1
return(mat_scale)
}
mat_train_scale_3 <- mat_to_binary_3(mat_train)
mat_test_scale_3 <- mat_to_binary_3(mat_test)
##save the binarized rating matrices as RData
path_RData <- paste(path_binary,'matrix_binary_3.RData',sep='')
# save("mat_test_scale_3","mat_train_scale_3",file=path_RData)
load(path_RData)
##fallback plan for users without near neighbours
##real-valued case: impute with each item's average rating
##output the vector of per-item average ratings
vec_rating_mean <- apply(mat_train,2,mean,na.rm=TRUE)
value_mean <- mean(mat_train,na.rm=TRUE)
##if an item's average is NA (nobody rated it), fall back to the overall average rating
index_na <- which(is.na(vec_rating_mean)==TRUE)
vec_rating_mean[index_na] <- value_mean
path_RData <- '/Users/baifrank/Desktop/recomm_output/MovieLens_10M_output/item_rating_mean.RData'
#save(vec_rating_mean,file=path_RData)
load(path_RData)
##compute each item's majority rating: use the rating given by most users as the standard
##count, for each item, how many users gave a rating of 1 and how many gave 0
mat_train_scale_1 <- matrix(0,nrow(mat_train_scale),ncol(mat_train_scale))
mat_train_scale_1[which(mat_train_scale == 1)] <- 1
cnt_rating_1 <- apply(mat_train_scale_1,2,sum)
mat_train_scale_0 <- matrix(0,nrow(mat_train_scale),ncol(mat_train_scale))
mat_train_scale_0[which(mat_train_scale == 0)] <- 1
cnt_rating_0 <- apply(mat_train_scale_0,2,sum)
##vector with the majority rating for each item
vec_rating_mode <- as.numeric(cnt_rating_1 > cnt_rating_0)
path_RData <- '/Users/baifrank/Desktop/recomm_output/MovieLens_10M_output/data_binary/item_rating_mode.RData'
#save(vec_rating_mode,file=path_RData)
load(path_RData)
##gt3
mat_train_scale_3_1 <- matrix(0,nrow(mat_train_scale_3),ncol(mat_train_scale_3))
mat_train_scale_3_1[which(mat_train_scale_3 == 1)] <- 1
cnt_rating_1 <- apply(mat_train_scale_3_1,2,sum)
mat_train_scale_3_0 <- matrix(0,nrow(mat_train_scale_3),ncol(mat_train_scale_3))
mat_train_scale_3_0[which(mat_train_scale_3 == 0)] <- 1
cnt_rating_0 <- apply(mat_train_scale_3_0,2,sum)
##vector with the majority rating for each item
vec_rating_mode <- as.numeric(cnt_rating_1 > cnt_rating_0)
path_RData <- '/Users/baifrank/Desktop/recomm_output/MovieLens_10M_output/data_binary/item_rating_mode_3.RData'
#save(vec_rating_mode,file=path_RData)
load(path_RData)
|
/MovieLens_1m_数据预处理.R
|
no_license
|
baizhongliu/collaborative-recommender-on-R
|
R
| false | false | 9,274 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{convert_df_to_matrix}
\alias{convert_df_to_matrix}
\title{Convert a dataframe to a matrix for chord diagrams}
\usage{
convert_df_to_matrix(df, column_var, cell)
}
\arguments{
\item{df}{A data frame with three columns: a row variable, a column variable,
and a value variable.}
\item{column_var}{The name of the variable to use for columns.}
\item{cell}{The name of the variable to use for filling the matrix.}
}
\value{
A square matrix.
}
\description{
Convert a dataframe to a matrix for chord diagrams
}
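% Illustrative example added for clarity; the data frame and column names below are hypothetical.
\examples{
\dontrun{
# a three-column data frame: row variable, column variable, value variable
flows <- data.frame(origin      = c("A", "A", "B"),
                    destination = c("B", "C", "C"),
                    trips       = c(10, 5, 2))
m <- convert_df_to_matrix(flows, column_var = "destination", cell = "trips")
}
}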
|
/man/convert_df_to_matrix.Rd
|
no_license
|
transportfoundry/tfplotr
|
R
| false | true | 603 |
rd
|
setwd("~/workspace/R/")
# free memory
rm(list = ls())
gc()
# dados$x <- NULL ## removes column x
dfM <- read.csv("./data/prfAcidentesATratar.csv") ## original data, without treatment of the inconsistencies
## splitting by BR (federal highway)
df.Br104 <- subset(dfM,BR=='104')
## find a given kilometre interval
## to determine which municipality it belongs to
library(plyr)
df.Br104.Mun <- subset(df.Br104, df.Br104$KM >= 85 & df.Br104$KM < 95)
|
/verificandoMunicipios.R
|
no_license
|
otluiz/R-twitters
|
R
| false | false | 448 |
r
|
library(tidyverse)
load("rda/murders.rda")
murders %>% mutate(abb = reorder(abb,rate)) %>%
ggplot(aes(abb,rate)) +
geom_bar(width = 0.5, stat = "identity", color ="black")+
coord_flip()
ggsave("figs/barplot.png")
|
/analysis.R
|
no_license
|
sszymanska/murders
|
R
| false | false | 221 |
r
|
library(mlr3verse)
library(data.table)
library(matrixStats)
ames = data.table(AmesHousing::make_ames())
ts = fread("data/energy_usage.csv")
ts[, `:=`(MIN = rowMins(as.matrix(.SD), na.rm=T),
MAX = rowMaxs(as.matrix(.SD), na.rm=T),
AVG = rowMeans(.SD, na.rm=T),
SUM = rowSums(.SD, na.rm=T))]
ames_ext = cbind(ames, ts[, .(MIN, MAX, AVG, SUM)])
t1 = TaskRegr$new(ames, id = "ames", target = "Sale_Price")
t2 = TaskRegr$new(ames_ext, id = "ames_with_ts_feats", target = "Sale_Price")
r = rsmp("cv")
r$instantiate(t1)
grid = benchmark_grid(list(t1, t2), lrn("regr.ranger"), r)
res = benchmark(grid)
print(res$aggregate(msr("regr.mae")))
learner = lrn("regr.ranger", importance = "permutation")
learner$train(t2)
print(learner$importance())
|
/data/code/test_timeseries_pred_quality.R
|
permissive
|
mlr-org/mlr3book
|
R
| false | false | 778 |
r
|
plot_nwis_timeseries <- function(fileout, site_data_styled_file, width = 12, height = 7, units = 'in'){
site_data_styled = read_csv(site_data_styled_file)
ggplot(data = site_data_styled, aes(x = dateTime, y = water_temperature, color = station_name)) +
geom_line() + theme_bw()
ggsave(fileout, width = width, height = height, units = units)
}
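# Example call (hypothetical file names; assumes the CSV has the dateTime,
# water_temperature and station_name columns used above):
# plot_nwis_timeseries(fileout = "3_visualize/out/timeseries.png",
#                      site_data_styled_file = "2_process/out/site_data_styled.csv")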
|
/3_visualize/src/plot_timeseries.R
|
no_license
|
jsadler2/ds-pipelines-2
|
R
| false | false | 352 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Feasibility.R
\name{assessFeasibility}
\alias{assessFeasibility}
\title{Execute the Feasibility assessment}
\usage{
assessFeasibility(
connectionDetails,
cdmDatabaseSchema,
cohortDatabaseSchema,
cohortTable,
outputFolder,
databaseId,
databaseName = databaseId,
databaseDescription = databaseId,
cdmVersion = "5",
createNegativeControlCohorts = TRUE,
runCohortDiagnostics = TRUE
)
}
\arguments{
\item{connectionDetails}{An object of type \code{connectionDetails} as created using the
\code{\link[DatabaseConnector]{createConnectionDetails}} function in the
DatabaseConnector package.}
\item{cdmDatabaseSchema}{Schema name where your patient-level data in OMOP CDM format resides.
Note that for SQL Server, this should include both the database and
schema name, for example 'cdm_data.dbo'.}
\item{outputFolder}{Name of local folder to place results; make sure to use forward slashes
(/). Do not use a folder on a network drive since this greatly impacts
performance.}
\item{databaseName}{A short string for identifying the database (e.g.
'Synpuf').}
\item{cdmVersion}{Version of the Common Data Model used. Currently only version 5 is supported.}
\item{createNegativeControlCohorts}{Create the negative control outcome and nesting cohorts?}
\item{outcomeDatabaseSchema}{Schema name where outcome data can be stored. You will need to have
write privileges in this schema. Note that for SQL Server, this should
include both the database and schema name, for example 'cdm_data.dbo'.}
\item{outcomeTable}{The name of the table that will be created in the outcomeDatabaseSchema.
This table will hold the outcome cohorts used in this
study.}
\item{exposureDatabaseSchema}{For PanTher only: Schema name where exposure data can be stored. You will need to have
write privileges in this schema. Note that for SQL Server, this should
include both the database and schema name, for example 'cdm_data.dbo'.}
\item{exposureTable}{For PanTher only: The name of the table that will be created in the exposureDatabaseSchema
This table will hold the exposure cohorts used in this
study.}
\item{nestingCohortDatabaseSchema}{Schema name where nesting cohort data can be stored. You will need to have
write privileges in this schema. Note that for SQL Server, this should
include both the database and schema name, for example 'cdm_data.dbo'.}
\item{nestingCohortTable}{The name of the table that will be created in the nestingCohortDatabaseSchema
This table will hold the nesting cohorts used in this
study.}
\item{maxCores}{How many parallel cores should be used? If more cores are made available
this can speed up the analyses.}
\item{imputeExposureLengthForPanther}{For PanTher only: impute exposure length?}
\item{synthesizePositiveControls}{Should positive controls be synthesized?}
\item{runCohortMethod}{Perform the cohort method analyses?}
\item{runSelfControlledCaseSeries}{Perform the SCCS analyses?}
\item{runSelfControlledCohort}{Perform the SCC analyses?}
\item{runCaseControl}{Perform the case-control analyses?}
\item{runCaseCrossover}{Perform the case-crossover analyses?}
\item{createCharacterization}{Create a general characterization of the database population?}
\item{packageResults}{Should results be packaged for later sharing and viewing?}
}
\description{
Execute the Feasibility assessment
}
\details{
This function executes the study.
The \code{createCohorts}, \code{synthesizePositiveControls}, \code{runAnalyses}, and \code{runDiagnostics} arguments
are intended to be used to run parts of the full study at a time, but none of the parts are considered to be optional.
}
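% Illustrative call added for clarity; the connection details, schema names, table name,
% output folder, and database id below are placeholders rather than values from this study package.
\examples{
\dontrun{
connectionDetails <- DatabaseConnector::createConnectionDetails(dbms = "postgresql",
                                                                server = "localhost/ohdsi",
                                                                user = "joe",
                                                                password = "secret")

assessFeasibility(connectionDetails = connectionDetails,
                  cdmDatabaseSchema = "cdm_data.dbo",
                  cohortDatabaseSchema = "scratch.dbo",
                  cohortTable = "feasibility_cohorts",
                  outputFolder = "s:/feasibility",
                  databaseId = "Synpuf")
}
}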
|
/man/assessFeasibility.Rd
|
no_license
|
ohdsi-studies/VaccineEffectivenessEvaluation
|
R
| false | true | 3,698 |
rd
|
# Parametric Bayes simulation
para_cov<-rep(0, 1000)
nonpara_cov<-rep(0, 1000)
for (i in 1:1000){
times<-rexp(30, 5)
#Our parameter of interest in parametric form is theta
#Set a prior on theta of Gamma(.01,.01)
#Then our posterior on theta is gamma with parameters:
alpha= .01+length(times)
beta = .01+sum(times)
#Approx posterior dist of theta
thetas=rgamma(10000, alpha, beta)
  #Get a 95% CI on the probability of having failed by time = 0.1 (the same time point used for the truth and the BSP interval below)
##Now let's compare that to the BSP method
prior<-bsp(c(.1,1,2), c(.05,.8, .99), precision = .01)
posterior<-bspPosterior(prior, cbind(times,1))
#Now let's compare the series
##First let's calculate the truth Series~exp(10)
truth<-pexp(.1,10)
series_thetas<-sample(thetas, 10000, replace=T)+sample(thetas,10000, replace=T)
  ci =quantile(pexp(.1, series_thetas),c(.025,.975)) #evaluate at t = 0.1 so the parametric interval targets the same quantity as the truth
para_cov[i]<-as.numeric(ci[1]<truth & truth<ci[2])
a=bspFromMoments(E1E2_series(posterior, posterior))
ci<-bspConfint(a, .1)
nonpara_cov[i]<-as.numeric(ci[1,]<truth & truth<ci[2,])
}
mean(para_cov)
mean(nonpara_cov)
|
/Simulation/ParametricSim.R
|
no_license
|
jntrcs/BnpSysRel
|
R
| false | false | 1,071 |
r
|
#' function to simulate multiple sparse graphs
#' @param p number of features
#' @param N number of tasks
#' @param seedNum seed number for random simulation
#' @param s controls sparsity of the generated graph
#' @param ss controls sparsity of the generated graph
#' @return a list of N related sparse pXp precision matrices (graphs)
#' @import MASS
#' @importFrom stats rbinom
simulateGraph <-
function(p = 20,
N = 2,
seedNum = 37,
s = 0.1,
ss = 0.1) {
#library(MASS)
#set parameters
# p = 1000
# N = 3
set.seed(seedNum)
graphs <- list()
I = diag(1, p, p)
graph_shared = matrix(1, p, p)
for (j in 1:p) {
for (k in j:p) {
graph_shared[j, k] = 0.5 * rbinom(1, 1, ss)
graph_shared[k, j] = graph_shared[j, k]
}
}
    #generate the simulated graphs
    #each off-diagonal element is 0.5 with probability s*N (plus the shared component), and 0 otherwise
for (i in 1:N) {
graphs[[i]] <- matrix(1, p, p)
}
for (i in 1:N) {
for (j in 1:p) {
for (k in j:p) {
graphs[[i]][j, k] = 0.5 * rbinom(1, 1, s * N)
graphs[[i]][k, j] = graphs[[i]][j, k]
}
}
graphs[[i]] = graphs[[i]] + graph_shared
}
for (i in 1:N) {
for (j in 1:p) {
graphs[[i]][j, j] = 1
}
}
for (i in 1:N) {
graphs[[i]] = (graphs[[i]] - I) + (abs(min(eigen(graphs[[i]] - I)$value)) + 1) * I
}
out = list(graphs = graphs, share = graph_shared)
class(out) = "simulation"
return(out)
}
#' function to generate samples from a single precision matrix
#' @param precision pxp precision matrix (generated from simulateGraph)
#' @param n number of samples
#' @return a list of nXp randomly generated gaussian samples from pxp precision matrix
generateSamples <-
function(precision,
n = 100)
{
invg <- solve(precision)
mu = matrix(0, 1, dim(precision)[1])
samples = mvrnorm(n = n,
mu = mu,
Sigma = invg)
return(samples)
}
#' function to generate a list of samples from simulatedGraph result
#' @param simulate result from simulateGraph
#' @param n a vector of corresponding size to indicate number of samples for each task
#' @return a list of length(n) data matrices
#' @details if n is c(100,200,300) and p is 20, the function will return a list of 3 data matrices of size (100x20,200x20,300x20)
generateSampleList <-
function(simulate, n)
{
K = length(simulate$graphs)
out = list()
for (i in 1:K) {
out[[i]] = generateSamples(simulate$graphs[[i]], n[i])
}
return(out)
}
# this is the main function
#' simulate multiple sparse graphs and generate samples
#' @param p number of features (number of nodes)
#' @param n a vector indicating number of samples and tasks, for example c(100,200,300) for 3 tasks and 100,200 and 300 samples for task 1, 2 and 3
#' @param seedNum seed number for random simulation
#' @param s positive number that controls sparsity of the generated graphs
#' @param ss positive number that controls sparsity of the shared part of generated graphs
#' @return a list comprising $simulatedgraphs (multiple related simulated graphs) and $simulatedsamples (samples generated from multiple related graphs)
#' @import MASS
#' @export
#' @examples
#' library(JointNets)
#' simulateresult = simulation(p = 20, n = c(100,100))
#' plot(simulateresult$simulatedgraphs)
simulation <- function(p = 20,
n,
seedNum = 37,
s = 0.1,
ss = 0.1
)
{
N = length(n)
graphs = simulateGraph(p,N,seedNum,s,ss)
samples = generateSampleList(graphs,n)
out = list(simulatedgraphs = graphs, simulatedsamples = samples)
return(out)
}
|
/R/simulation.R
|
permissive
|
QData/JointNets
|
R
| false | false | 3,826 |
r
|
#Swift radiation parameter----
#Swift 1976
#Courtney Giebink
#clgiebink@gmail.com
#February 2020
#calculates daily solar radiation
#daily total in gram calories per square centimeter
#by integration equations
#inputs
#lat, slope, aspect, julian dates
swift_rad <- function(TRE_CN, LAT, SLOPE, ASPECT){
#create dataframe with empty rows for julian date solar radiation
sol_rad_df <- data.frame(TRE_CN, LAT, SLOPE, ASPECT,
Julian = 1:365, L1 = NA, L2 = NA, R4 = NA)
LAT <- sol_rad_df$LAT[1]
SLOPE <- sol_rad_df$SLOPE[1]
ASPECT <- sol_rad_df$ASPECT[1]
#calculate constants for tree
#L1 & L2 for given slope
sol_rad_df$L1 <- asin(cos(SLOPE) * sin(LAT) + sin(SLOPE) * cos(LAT) * cos(ASPECT))
#alternative route for poleward facing slope
#when sum of the slope inclination + latitude of the slope exceeds 66
#north poleward is azimuth/aspect <= 45 or >= 315
if((LAT+SLOPE > 66) & (LAT <= 45 | LAT >= 315)){
D1 <- cos(SLOPE) * cos(LAT) - sin(SLOPE) * sin(LAT) * cos(ASPECT)
ifelse(D1 == 0, D1 <- 0.0000000001, D1 <- D1)
L2 = atan(sin(SLOPE)*sin(ASPECT)/D1)
ifelse(D1 < 0, L2 <- L2+180, L2 <- L2)
}
else{
L2 <- atan((sin(SLOPE) * sin(ASPECT))/
(cos(SLOPE) * cos(LAT) - sin(SLOPE) * sin(LAT) * cos(ASPECT)))
}
sol_rad_df$L2 <- L2
#julian date in row
#func1
#Z = W - X * cos((J+Y) * 0.986)
#D = solar declination
#D = func1
for(i in 1:nrow(sol_rad_df)){
J <- sol_rad_df$Julian[i]
D <- asin(0.39785 * sin(278.9709 + 0.9856*J
+ 1.9163 * sin(356.6153+0.9856*J))) #complex
#E is radius vector of sun
#E = func1
E <- 1 - 0.0167 * cos((J - 3) * 0.986)
    #R1 is the solar constant for a 60 min period, adjusted for the eccentricity of the Earth's orbit
R1 <- 60 * 1.95 / (E*E)
#RO solar constant
#frank and lee (1966) use 2
#drummond et al. (1986) use 1.95
#func2 calculation of sunrise and sunset times
#Z = arcos(-tan(Y) * tan(D))
#T = func2
S <- acos(-tan(sol_rad_df$L1[i]) * tan(D))
#TODO check undefined
#prints undefined S values
if(is.nan(S)){
cat("L1[", i, "]=",sol_rad_df$L1[i],"\n")
cat("D=",D, ", tan(D)=",tan(D), "\n")
cat("TRE_CN[",i,"]=",TRE_CN[i], "\n\n")
}
T7 <- S - sol_rad_df$L2[i]
T6 <- -S - sol_rad_df$L2[i]
S2 <- acos(-tan(sol_rad_df$LAT[i]) * tan(D))
T1 <- S2
T0 <- -S2
T3 <- NA
T2 <- NA
ifelse(T7 < T1, T3 <- T7, T3 <- T1)
ifelse(T6 > T0, T2 <- T6, T2 <- T0)
#R4 is potential solar radiation for mountain slope
if((LAT+SLOPE > 66) & (LAT <= 45 | LAT >= 315)){
ifelse(T3 < T2, (T2 <- 0) & (T3 <- 0), (T2 <- T2) & (T3 <- T3))
T6 <- T6 + 360
if(T6 < T1){
T8 <- T6
T9 <- T1
R4 <- R1 + (sin(D) * sin(sol_rad_df$L1[i]) * (T3-T2) / 15 +
cos(D) * cos(sol_rad_df$L1[i]) * (sin(T3+sol_rad_df$L2[i]) - sin(T2+sol_rad_df$L2[i])) * 12/pi) +
R1 + (sin(D) * sin(sol_rad_df$L1[i]) * (T9-T8) / 15 +
cos(D) * cos(sol_rad_df$L1[i]) * (sin(T9+sol_rad_df$L2[i]) - sin(T8+sol_rad_df$L2[i])) * 12/pi)
} else {
T7 <- T7 - 360
if(T7 > T0){
T8 <- T0
T9 <- T7
R4 <- R1 + (sin(D) * sin(sol_rad_df$L1[i]) * (T3-T2) / 15 +
cos(D) * cos(sol_rad_df$L1[i]) * (sin(T3+sol_rad_df$L2[i]) - sin(T2+sol_rad_df$L2[i])) * 12/pi) +
R1 + (sin(D) * sin(sol_rad_df$L1[i]) * (T9-T8) / 15 +
cos(D) * cos(sol_rad_df$L1[i]) * (sin(T9+sol_rad_df$L2[i]) - sin(T8+sol_rad_df$L2[i])) * 12/pi)
} else {
R4 <- R1 + (sin(D) * sin(sol_rad_df$L1[i]) * (T3-T2) / 15 +
cos(D) * cos(sol_rad_df$L1[i]) * (sin(T3+sol_rad_df$L2[i]) - sin(T2+sol_rad_df$L2[i])) * 12/pi)
}
}
} else {
R4 <- R1 + (sin(D) * sin(sol_rad_df$L1[i]) * (T3-T2) / 15 +
cos(D) * cos(sol_rad_df$L1[i]) * (sin(T3+sol_rad_df$L2[i]) -
sin(T2+sol_rad_df$L2[i])) * 12/pi)
#TODO check for negatives
}
sol_rad_df$R4[i] <- R4
}
return(sol_rad_df$R4)
}
#load data
load("./data/formatted/data_all.Rdata")
#test function
sol_rad_test <- data_all %>%
ungroup()%>%
filter(SPCD %in% c(93, 122, 202)) %>%
select(TRE_CN,LAT,SLOPE,tASPECT) %>%
distinct()%>% #only unique values
slice(rep(1:n(), each = 365)) %>% #repeat each row 365 times
group_by(TRE_CN) %>% #loop function over each tree
mutate(Day = seq(1,365,1),
R4 = swift_rad(TRE_CN = TRE_CN,LAT = LAT,SLOPE = SLOPE,ASPECT = tASPECT))
summary(sol_rad_all$R4) #NAs
#TODO check NAs
solrad_check <- data_all %>%
ungroup()%>%
filter(SPCD %in% c(93, 122, 202)) %>%
select(TRE_CN,LAT,SLOPE,tASPECT) %>%
distinct()%>%
mutate(limit = LAT + SLOPE) #check paper - this needs to be a certain number or what???
#once the above works...
#calculate seasonal and annual solar radiation
seas_solrad <- sol_rad_all %>%
group_by(TRE_CN) %>%
summarise(mean_annual = mean(R4),
mean_seas_JanApr = mean(R4[Day %in% c(1:120)]),
mean_seas_MayAug = mean(R4[Day %in% c(121:243)]),
mean_seas_SepDec = mean(R4[Day %in% c(244:365)]),
ttl_annual = sum(R4),
ttl_seas_JanApr = sum(R4[Day %in% c(1:120)]),
ttl_seas_MayAug = sum(R4[Day %in% c(121:243)]),
ttl_seas_SepDec = sum(R4[Day %in% c(244:365)]))
# 70 trees with NAs
# Notes
#R3 is potential solar radiation for a horizontal surface (note only, kept commented out:
#D, L0, T0, T1 and R1 are loop-local variables inside swift_rad above)
#R3 = R1 + (sin(D) * sin(L0) * (T1-T0) / 15 +
#             cos(D) * cos(L0) * (sin(T1) - sin(T0)) * 12/pi)
#F ratio of potential solar radiation for a slope surface to horizontal surface
#could be used to estimate actual solar radiation
#if R2 (radiation measured on nearby horizontal surface) not available
#F can be used as index of relative energy
#F = R4 / R3 #note only, kept commented out so the built-in F is not masked
#continue if R2 input
#if poleward-facing slope
#when the sum of the slope inclination plus the absolute value of the latitude of the slope exceeds 66
#solrad ----
library(solrad)
load("./data/formatted/incr_calcov.Rdata")
incr_calcov <- incr_calcov %>%
mutate(tASPECT = ifelse(is.na(ASPECT) & SLOPE <= 5, 0, ASPECT))
miss_asp <- unique(incr_calcov$TRE_CN[is.na(incr_calcov$tASPECT)])
asp_check_df <- incr_calcov %>%
filter(TRE_CN %in% miss_asp) %>%
select(TRE_CN,SPCD,ASPECT,SLOPE) %>% #most are 106 - pinyon
distinct()
save(incr_calcov, file = "./data/formatted/incr_calcov.Rdata")
un_tre_cov <- incr_calcov %>%
select(-c(Year,RW,DIA_C)) %>%
ungroup() %>%
distinct() #same as per_cov
length(unique(un_tre_cov$TRE_CN)) #568
save(un_tre_cov, file = "./data/formatted/un_tre_cov.Rdata")
#function applied to all trees.
seas_dirad <- function(begin, end, Lat, Lon, Elevation, Slope, Aspect) {
DOY <- seq(1,365,1)
aspect_s <- ifelse(Aspect[1] <= 180, Aspect[1] + 180, Aspect[1] - 180)
yr_dirad <- DirectRadiation(DOY = DOY, Lat = abs(Lat[1]), Lon = abs(Lon[1]), #lat & lon in degrees
SLon = -105, DS = 60, #Slon and DS for UT; SLon = -7*15, DS = 60 minutes
Elevation = Elevation[1]/3.281, #from ft to meters
Slope[1], Aspect = aspect_s) #Aspect
sum_rad = sum(yr_dirad[begin:end])
return(sum_rad) #W/m2
}
#JanApr = (1:120)
#MayAug = (121:243)
#SepDec = (244:365)
incr_calcov <- incr_calcov %>%
group_by(TRE_CN) %>%
mutate(solrad_an = seas_dirad(begin = 1, end = 365, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = tASPECT),
solrad_JanApr = seas_dirad(begin = 1, end = 120, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = tASPECT),
solrad_MayAug = seas_dirad(begin = 121, end = 243, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = tASPECT),
solrad_SepDec = seas_dirad(begin = 244, end = 365, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = tASPECT))
save(incr_calcov,file = "./data/formatted/incr_calcov.Rdata")
#Validation data ----
val_dset <- val_dset %>%
mutate(sin = sin((ASPECT * (pi/180)) - 0.7854) * SLOPE,
cos = cos((ASPECT * (pi/180)) - 0.7854) * SLOPE)
#function applied to all trees.
#seas_dirad
#JanApr = (1:120)
#MayAug = (121:243)
#SepDec = (244:365)
val_dset <- val_dset %>%
group_by(TRE_CN) %>%
mutate(solrad_an = seas_dirad(begin = 1, end = 365, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = ASPECT),
solrad_JanApr = seas_dirad(begin = 1, end = 120, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = ASPECT),
solrad_MayAug = seas_dirad(begin = 121, end = 243, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = ASPECT),
solrad_SepDec = seas_dirad(begin = 244, end = 365, Lat = LAT, Lon = LON,
Elevation = ELEV, Slope = SLOPE, Aspect = ASPECT))
save(val_dset,file = "./data/formatted/val_dset.Rdata")
|
/scripts/sol_rad.R
|
no_license
|
clgiebink/UT_FVS
|
R
| false | false | 9,303 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.R
\name{load_sample_counts_matrix}
\alias{load_sample_counts_matrix}
\title{Read in Gene Expression and Antibody Capture data from a 10x Genomics Cell
Ranger sparse matrix or from a text file.}
\usage{
load_sample_counts_matrix(sample_name, path, log_file = NULL)
}
\arguments{
\item{sample_name}{A character that will be used as a prefix for all cell names.}
\item{path}{Path to directory containing 10x matrix, or path to a text file.}
\item{log_file}{Filename for the logfile.}
}
\value{
Named list of matrices. One matrix for each data type. Currently the
only two data types are 'Gene Expression' and 'Antibody Capture'
}
\description{
Read in Gene Expression and Antibody Capture data from a 10x Genomics Cell
Ranger sparse matrix or from a text file.
}
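% Illustrative example added for clarity; the sample name and path below are placeholders.
\examples{
\dontrun{
counts <- load_sample_counts_matrix(sample_name = "sample1",
                                    path = "cellranger/sample1/filtered_feature_bc_matrix")
gene_expr <- counts[["Gene Expression"]]
adt       <- counts[["Antibody Capture"]]
}
}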
|
/man/load_sample_counts_matrix.Rd
|
permissive
|
JhuangLabReferences/scooter
|
R
| false | true | 847 |
rd
|
library(ggplot2)
library(ggseqlogo)
library(stringr)
library(gridExtra)
# function for getting all patterns that exist in the form of sequence files
find_all_pattern<-function(pslist,samples){
pattern.seqfile<-list()
i=1
for(i in 1:length(samples)){
pattern.seqfile[[samples[i]]]<-pslist[[samples[i]]]$Pattern
}
all.pattern<-unique(unname(unlist(pattern.seqfile)))
return(all.pattern)
}
# function for generating logomaps for each pattern
generate_logomap<-function(path,pattern,samples,methodin,ticktype,fileformat){
if (any(is.null(samples))){return()}
i=1
seqlogo.ls<-list()
pattern<-gsub("\\[|\\]", "", pattern)
pattern<-gsub("\\.","x",pattern)
if(ticktype=="1,2,3..."){
ticks<-c(1:nchar(pattern))
}
if(ticktype=="...,P2,P1,P1',P2'...(for even length only)"){
midpoint<-nchar(pattern)/2
ticks<-rep(NA,nchar(pattern))
ticks[midpoint:1]<-paste0("P",c(1:(nchar(pattern)/2)))
ticks[(midpoint+1):nchar(pattern)]<-paste0("P",c(1:(nchar(pattern)/2)),"'")
}
if(ticktype=="...-1,0,1,...(for odd length only)"){
midpoint<-floor(nchar(pattern)/2)
ticks<-as.character(c((-midpoint):midpoint))
}
for (i in 1:length(samples)){
sample<-samples[i]
ls.seq<-list()
path1<-paste0(path, sample,"/patterns/")
if(file.exists(paste0(path1, pattern,"_sequences.txt"))){
ls.seq[[pattern]]<-as.character(read.delim(paste0(path1,pattern,"_sequences.txt"),
header=FALSE,check.names = FALSE)[,1])
pcharind<-c(1:nchar(pattern))[-as.integer(gregexpr("x", pattern)[[1]])]
g<-ggseqlogo(data=ls.seq, method=methodin)
seqlogo.ls[[sample]]<-g+ggtitle(sample)+
scale_x_discrete(limits=ticks)+
annotate('rect', xmin=(pcharind-0.5), xmax=(pcharind+0.5), ymin = -0.05, ymax = max(g[["layers"]][[1]][["data"]][["y"]]), alpha =0.15,fill='red')+
theme(text=element_text(family="mono"))
}
}
logomap.pattern<-grid.arrange(grobs=seqlogo.ls)
return(logomap.pattern)
}
generate_summarytable<-function(pattern.summary.list, pattern, samples){
i=1
tbl<-data.frame(Sample=NA, Sample.Frequency=NA, Foreground.Frequency=NA,
Foreground.Size=NA, Enrichment.Score=NA)
for (i in 1:length(samples)){
sample<-samples[i]
ind<-which(pattern.summary.list[[sample]]$Pattern==pattern)
if(length(ind)!=0){
tbl<-rbind(tbl,c(sample,unname(pattern.summary.list[[sample]][ind,-1])))}
}
tbl<-tbl[-1,]
return(tbl)
}
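# Example usage (hypothetical paths and sample names; assumes a pattern summary
# list shaped like the one these functions expect):
# samples  <- c("sampleA", "sampleB")
# patterns <- find_all_pattern(pattern.summary.list, samples)
# generate_logomap(path = "results/", pattern = patterns[1], samples = samples,
#                  methodin = "bits", ticktype = "1,2,3...", fileformat = "png")
# generate_summarytable(pattern.summary.list, patterns[1], samples)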
|
/logomaps.R
|
no_license
|
LangeLab/Rolim_DE
|
R
| false | false | 2,552 |
r
|
\name{GSEA.VarFilter}
\alias{GSEA.VarFilter}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
GSEA.VarFilter
}
\description{
Filters the rows (genes) of an expression matrix, keeping only those whose
values vary sufficiently across samples: the row maximum must be at least
\code{fold} times the row minimum, and the difference between row maximum and
row minimum must be at least \code{delta}.
}
\usage{
GSEA.VarFilter(V, fold, delta, gene.names = "")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{V}{
a numeric matrix of expression values with genes in rows and samples in columns.
}
  \item{fold}{
minimum ratio of a row's maximum to its minimum for the gene to be kept.
}
  \item{delta}{
minimum difference between a row's maximum and minimum for the gene to be kept.
}
  \item{gene.names}{
optional character vector of gene names corresponding to the rows of \code{V}.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
If \code{gene.names} is left at its default, the filtered matrix is returned.
Otherwise a list with components:
  \item{V }{the filtered expression matrix}
  \item{new.list }{the gene names of the retained rows}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (V, fold, delta, gene.names = "")
{
cols <- length(V[1, ])
rows <- length(V[, 1])
row.max <- apply(V, MARGIN = 1, FUN = max)
row.min <- apply(V, MARGIN = 1, FUN = min)
flag <- array(dim = rows)
flag <- (row.max/row.min >= fold) & (row.max - row.min >=
delta)
size <- sum(flag)
B <- matrix(0, nrow = size, ncol = cols)
j <- 1
if (length(gene.names) == 1) {
for (i in 1:rows) {
if (flag[i]) {
B[j, ] <- V[i, ]
j <- j + 1
}
}
return(B)
}
else {
new.list <- vector(mode = "character", length = size)
for (i in 1:rows) {
if (flag[i]) {
B[j, ] <- V[i, ]
new.list[j] <- gene.names[i]
j <- j + 1
}
}
return(list(V = B, new.list = new.list))
}
}
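## A minimal, hypothetical illustration with synthetic data (assumes the
## package providing GSEA.VarFilter is attached):
\dontrun{
set.seed(1)
V <- matrix(runif(40, min = 1, max = 10), nrow = 10, ncol = 4)
res <- GSEA.VarFilter(V, fold = 2, delta = 3, gene.names = paste0("gene", 1:10))
str(res)
}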
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/R/external_packages/ssgsea.GBM.classification/man/GSEA.VarFilter.Rd
|
no_license
|
gaberosser/qmul-bioinf
|
R
| false | false | 2,319 |
rd
|
|
# colnames(df) <- c("Date", "DayofWeek", "gender", "age", "si", 'gu','dong', 'category', 'calls')
library(tidyverse)
library(plotly)
library(gapminder)
df <- read.csv('dataset/chicken.csv',
header=T,
fill=T,
fileEncoding = 'euc-kr',
encoding = 'utf-8',
skip=1)
colnames(df) <- c("기준일", "요일", "성별", "연령대", "city", "시군구", "d", "c", "통화건수")
head(df)
str(df)
names(df)
# Cleaning Data
# split 기준일 (base date) into 연 (year) / 월 (month) / 일 (day)
date <- as.Date(as.character(df$기준일), format = "%Y%m%d")
lt <- unclass(as.POSIXlt(date))
df[, c("날짜", "연", "월", "일")] <- with(lt, data.frame(날짜 = date
, 연 = year + 1900
, 월 = mon + 1
, 일 = mday))
df2 <- cbind(df[,11:13], df[,2:4], df[,6:7], df[9])
head(df)
head(df2)
# 성별 (gender): 남 (male) = 1, 여 (female) = 0
df2$성별 <- factor(df2$성별,
levels=c('남', '여'))
levels(df2$성별) <- c(1, 0)
# 요일 (day of week): 월 (Mon) = 1, ..., 일 (Sun) = 7
df2$요일 <- factor(df2$요일,
levels=c('월', '화', '수', '목', '금', '토', '일'))
levels(df2$요일) <- c(1, 2, 3, 4, 5, 6, 7)
head(df2)
View(df2)
# 연령대 (age group)
df2$연령대 <- factor(df2$연령대,
levels=c('10대', '20대', '30대', '40대', '50대', '60대이상'))
levels(df2$연령대) <- c(10, 20, 30, 40, 50, 60)
####### quantile #############
df <- read.csv('dataset/chicken.csv',
header=T,
fill=T,
fileEncoding = 'euc-kr',
encoding = 'utf-8')
summary(df$통화건수)
hist(df$통화건수)
r <- df$통화건수
# index vectors: 하 = values at or below the 33rd percentile, 중 = at or below
# the 66th percentile, 상 = at or below the maximum (i.e. all non-NA values)
하 <- which(r <= quantile(r, 0.33, na.rm = T))
중 <- which(r <= quantile(r, 0.66, na.rm = T))
상 <- which(r <= quantile(r, na.rm = T)[5])
r[상]
r[중]
r[하]
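# A possible alternative (sketch, not in the original script): encode the same
# cut points as a labelled factor instead of index vectors.
# df$통화량등급 <- cut(r, breaks = quantile(r, c(0, 0.33, 0.66, 1), na.rm = TRUE),
#                      labels = c("하", "중", "상"), include.lowest = TRUE)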
#########describe#############
library(COUNT)
#Example
ch <- read.csv('dataset/chicken.csv',
header=T,
fileEncoding = 'euc-kr',
encoding = 'utf-8')
names(ch)
str(ch)
myTable(ch$성별)
myTable(ch$연령대)
summary(ch$기준일)
range(ch$기준일)
#group date
ch$date <- cut(ch$기준일, seq(20190101,20190131,7),right = F)
ch$date
levels(ch$date) <- c("w1","w2","w3","w4")
myTable(ch$date)
ch$re.date <- cut(ch$기준일,
br=c(20190100,20190106,20190113,20190120,20190127,20190132),
labels=c("w1","w2","w3","w4","w5"))
myTable(ch$re.date)
ch$re.date
######## 2.2 Making graphs
# Bar Plots
barplot(table(ch$re.date))
barplot(table(ch$re.date), horiz = T)
library(ggplot2)
ggplot(ch, aes(x=re.date)) + geom_bar(stat = "count")
qplot(re.date, data=ch, geom="bar")
# Pie chart
# 'guest.house' is not defined in this script (leftover from another example),
# so the call is kept only as a commented-out template:
# pie(table(guest.house), col= c("white","yellow","gray","blue","green"))
mytable <- table(ch$re.date)
date <- paste(names(mytable), "\n", mytable, sep="")
pie(mytable, labels = date, main="Pie Chart of Week (with sample sizes)")
library(plotrix)
pie3D(mytable,labels=date, explode=0.1, main="Pie Chart of Week (with sample sizes)")
pie3D(mytable,labels=date, explode=0.1,
col=c("white","gray90","yellow","sky blue","green"),
main="Pie Chart of Week")
# Histogram
# 'hh' is not defined in this script (leftover from another example):
# hist(table(hh$new))
hist(ch$기준일, breaks=6)
hist(ch$기준일, breaks=6, probability = TRUE, col = "gray90")
lines(density(ch$기준일))
par(family = "AppleGothic")
hist(ch$기준일, col = "gray90", breaks= seq(20190101, 20190131, by=5))
hist(ch$기준일, col = "gray90", breaks= seq(20190101, 20190131, by=5),
main="An Example Histogram",
xlab="THE X-AXIS LABEL", ylab="THE Y-AXIS LABEL")
#################### done up to this point.
head(df2)
View(df2)
p <- df2 %>%
plot_ly(
x = ~연령대,
y = ~통화건수,
color = ~시군구,
frame = ~일,
type = 'scatter',
mode = 'markers'
)
p  # p is already a plotly object and can be printed directly; ggplotly() expects a ggplot
View(gapminder)
dat <- map_data("world", "canada") %>% group_by(group)
p <- plot_mapbox(dat, x = ~long, y = ~lat) %>%
add_paths(size = I(2)) %>%
  add_segments(x = -100, xend = -50, y = 50, yend = 75) %>%
layout(mapbox = list(zoom = 0,
center = list(lat = ~median(lat),
lon = ~median(long))
))
####################
# 20190325
date <- as.Date(as.character(ch_17$기준일), format = "%Y%m%d")
lt <- unclass(as.POSIXlt(date))
date_vector <- c("날짜", "연", "월", "일")
ch_17[, date_vector ] <- with(lt,
data.frame(날짜 = date
, 연 = year + 1900
, 월 = mon + 1
, 일 = mday))
# 190326
min(df2$세대당인구)
max(df2$세대당인구)
p <- df2$세대당인구
하 <- which(p <= quantile(p, 0.33, na.rm = T))
중 <- which(p <= quantile(p, 0.66, na.rm = T))
상 <- which(p <= quantile(p, na.rm = T)[5])
p[상]
p[중]
p[하]
########
# as.factor
cols <- c("기간", "자치구")
seoul_pop[cols] <- lapply(seoul_pop[cols], factor)
head(seoul_pop)
names(seoul_pop)
unique(seoul_pop$기간)
# factor levels
unique(ch_2017$시군구)
# 기준일 (date)
library(readxl)  # needed for read_excel()
str_df <- read_excel("dataset/stress_14-17.xlsx")
head(str_df)
#############
# mean 통화건수 (call counts) by 연령대 (age group) and 시군구 (district)
ch_by_age <- chs %>%
group_by(연령대, 시군구) %>%
summarise(re.통화건수 = mean(통화건수))
head(ch_by_age)
tail(ch_by_age)
# mean 통화건수 (call counts) by 성별 (gender) and 시군구 (district)
ch_by_sex <- chs %>%
group_by(성별, 시군구) %>%
summarise(re.통화건수 = mean(통화건수))
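# A possible follow-up (sketch, not in the original script; assumes ch_by_age
# computed above): visualize the grouped means per district and age group.
# ggplot(ch_by_age, aes(x = 시군구, y = re.통화건수, fill = 연령대)) +
#   geom_col(position = "dodge") +
#   theme(axis.text.x = element_text(angle = 45, hjust = 1))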
|
/R/R_study/ex_chicken.r
|
no_license
|
roseline124/Data-Analysis
|
R
| false | false | 5,600 |
r
|
|
## The cacheMatrix functions provide means of caching the inversion
## of a matrix. Two functions are provided; makeCacheMatrix and cacheSolve.
## makeCacheMatrix is a function to create a special matrix that is capable
## of caching its own inversion and thereby speeding up any process that
## requires the inverse of a matrix to be computed a number of times.
makeCacheMatrix <- function(x = matrix()) {
## Initialise the cached inverse (inv) to NULL
inv <- NULL
## set function to assign a new matrix to the caching Matrix
## The inverse is NULL'd to force a recalculation
set <- function(a) {
x <<- a
inv <<- NULL
}
## get function to return the matrix within the caching Matrix
get <- function() x
## setInverse function to store the inverse of the matrix
setInverse <- function(v) inv <<- v
## getInverse to retrieve the inverse of the matrix
getInverse <- function() inv
  ## Return a list exposing the four accessor functions above,
  ## so they can be called on the special "matrix" object
list(set = set,
get = get,
setinverse = setInverse,
getinverse = getInverse )
}
## cacheSolve is a function for solving the inversion of a caching matrix
## If the inversion has been calculated previously then the cached result
## is used otherwise it is calculated using the solve function.
cacheSolve <- function(x, ...) {
## Retrieve the inverse from the caching matrix
inv <- x$getinverse()
## If the inverse is not NULL then it has already
## been computed and the cached copy can be used
if(!is.null(inv)) {
message("getting cached data")
return (inv)
}
## If the inverse is NULL, then the matrix is retrieved
## and the solve function called to compute the inverse
data <- x$get()
inv <- solve(data, ...)
## The computed inverse is stored back into the caching matrix
x$setinverse(inv)
## The result is returned
inv
}
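## A brief usage sketch (illustrative only, not part of the assignment code):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(cm)   # first call computes the inverse and caches it
## cacheSolve(cm)   # second call prints "getting cached data" and reuses it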
|
/cachematrix.R
|
no_license
|
g4zz4/ProgrammingAssignment2
|
R
| false | false | 1,974 |
r
|
|
library(ggvis)
library(shiny)
# Basic dynamic example
mtc1 <- reactive({
invalidateLater(2000, NULL);
mtcars[sample(nrow(mtcars), 10), ]
})
ggvis(mtc1, props(x = ~wt, y = ~mpg)) + layer_point()
# Rapidly changing dynamic example
df <- data.frame(x = runif(20), y = runif(20))
# Reactive that jitters the points every 20 ms
mtc1 <- reactive({
invalidateLater(20, NULL);
df$x <<- df$x + runif(20, -0.05, 0.05)
df$y <<- df$y + runif(20, -0.05, 0.05)
df
})
ggvis(mtc1, props(x = ~x, y = ~y)) +
layer_point() +
dscale("x", "numeric", domain = c(0, 1))
# Two separate data sets, equal in the tree
mtc1 <- reactive({
invalidateLater(2000, NULL);
mtcars[sample(nrow(mtcars), 10), ]
})
mtc2 <- reactive({
invalidateLater(2000, NULL);
mtcars[sample(nrow(mtcars), 10), ]
})
ggvis(props(x = ~wt, y = ~mpg)) +
layer(mtc1, layer_point(props(stroke := "black", fill := "black"))) +
layer(mtc2, layer_point(props(fill := "red", size := 40)))
# With a transform
mtc1 <- reactive({
invalidateLater(1000, NULL)
mtcars[sample(nrow(mtcars), 10), ]
})
ggvis(mtc1, props(x = ~wt, y = ~mpg)) +
layer_point() +
layer_smooth()
# Data points moving from right to left
# (currently transitions aren't quite right)
set.seed(430)
dat <- data.frame(time = 1:10, value = runif(10))
ddat <- reactive({
invalidateLater(2000, NULL)
dat$time <<- c(dat$time[-1], dat$time[length(dat$time)] + 1)
dat$value <<- c(dat$value[-1], runif(1))
dat
})
ggvis(ddat, props(x = ~time, y = ~value, key := ~time)) +
layer_point() +
mark_path()
# Bars moving from right to left
set.seed(430)
dat <- data.frame(time = 1:10, value = runif(10))
ddat <- reactive({
invalidateLater(2000, NULL);
dat$time <<- c(dat$time[-1], dat$time[length(dat$time)] + 1)
dat$value <<- c(dat$value[-1], runif(1))
dat
})
ggvis(ddat, props(
x = ~time, x.enter = ~time + 1, x.exit = ~time - 1,
y = ~value, y.enter = 0, y.exit = 0,
y2 = 0, y2.enter = 0, y2.exit = 0,
fill := "#aaa",
fillOpacity := 1, fillOpacity.enter := 0, fillOpacity.exit := 0,
strokeOpacity := 1, strokeOpacity.enter := 0, strokeOpacity.exit := 0,
width = band(),
key := ~time
)) +
dscale("y", "numeric", domain = 0:1) +
dscale("x", "nominal", range = "width", padding = 0, points = FALSE) +
mark_rect()
# Dynamic stacked bars
dat <- data.frame(
g1 = rep(letters[1:4], 3),
g2 = rep(LETTERS[1:3], each = 4),
value = runif(12)
)
ddat <- reactive({
invalidateLater(3000, NULL)
dat$value <<- runif(12)
dat
})
ggvis(ddat, transform_stack(),
props(x = ~g1, y = ~value, fill = ~g2, fillOpacity := 0.5)) +
dscale("x", "nominal", range = "width", padding = 0, points = FALSE) +
mark_rect(props(y = ~ymin__, y2 = ~ymax__, width = band()))
|
/demo/dynamic.r
|
no_license
|
FvD/ggvis
|
R
| false | false | 2,727 |
r
|
|