column         type            range / cardinality
content        large_string    lengths 0 to 6.46M
path           large_string    lengths 3 to 331
license_type   large_string    2 values
repo_name      large_string    lengths 5 to 125
language       large_string    1 value
is_vendor      bool            2 classes
is_generated   bool            2 classes
length_bytes   int64           4 to 6.46M
extension      large_string    75 values
text           string          lengths 0 to 6.46M
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vs_trawl.R
\name{vs_trawl}
\alias{vs_trawl}
\title{Variance signature values}
\usage{
vs_trawl(object, h, method = "sample", ...)
}
\arguments{
\item{object}{object containing all the specifications for the process, see details}

\item{h}{vector containing the observation frequencies at which to compute the variance signature values}

\item{method}{"sample" for the empirical values, "vs_C" or "vs_SY" for theoretical values when estimated by these respective methods with fit_trawl}

\item{\dots}{any other passthrough parameters}
}
\description{
Compute empirical and theoretical variance signature values.
}
\details{
The object is either an output object from sim_trawl for the empirical variance signature values, or an output object from fit_trawl for the theoretical ones. When computing the sample variance signature plot, it is possible to add the argument 'multi' to reduce the estimation variance. See Shephard and Yang (2017) for a definition of the variance signature values.
}
\examples{
# simulate a trawl process
sim <- sim_trawl(list("levy_seed" = "Skellam", levy_par = c(0.13, 0.11), b = 0.3))
h <- exp(seq(log(1e-2), log(60), length.out = 51))

# empirical variance signature values
vsS <- vs_trawl(sim, h)

# theoretical values when estimated with "vs_C"
sim$method <- "vs_C"                       # add fitting method
sim$h <- h                                 # add grid on which to fit
ft <- fit_trawl(sim)                       # fit the trawl process
vsC <- vs_trawl(ft, h, method = "vs_C")    # compute fitted variance signature values

# theoretical values when estimated with "vs_SY"
sim$method <- "vs_SY"                      # add fitting method
sim$h <- h                                 # add grid on which to fit
ft <- fit_trawl(sim)                       # fit the trawl process
vsSY <- vs_trawl(ft, h, method = "vs_SY")  # compute fitted variance signature values

# plot resulting fits
plot(log(h), vsS)
lines(log(h), vsC, col = "blue")
lines(log(h), vsSY, col = "red")
legend("topright", c("vs_C", "vs_SY"), lwd = c(2, 2), col = c("blue", "red"))

# trawl process estimated by fitting the autocorrelation function
sim <- sim_trawl(list())
sim$h <- 0.5
sim$trawl <- "exp"
sim$lag_max <- 3
sim$method <- "acf"
ft <- fit_trawl(sim)

h <- exp(seq(log(1e-2), log(60), length.out = 51))
vsS <- vs_trawl(sim, h)
vsACF <- vs_trawl(ft, h, method = "acf")
plot(log(h), vsS)
lines(log(h), vsACF, col = "blue")
}
\references{
Shephard, N., & Yang, J. J. (2017). Continuous time analysis of fleeting discrete price moves. Journal of the American Statistical Association, 112(519), 1090-1106.
}
\seealso{
\code{\link{fit_trawl}}, \code{\link{sim_trawl}}
}
\author{
Dries Cornilly
}
\concept{trawl}
/man/vs_trawl.Rd
no_license
cdries/rTrawl
R
false
true
2,700
rd
context("Parse clarion file") test_that("file is parsed correctly", { object <- parser(file = "wiki_example.clarion", dec = ",") expect_is(object, "Clarion") expect_equal(object$get_delimiter(), "|") expect_equal(object$get_factors(), object$metadata[, c("key", "factor1", "factor2")]) expect_equal(object$get_id(), "id") expect_equal(object$get_name(), "name") expect_equal(object$is_delimited(names(object$data)), c(rep(FALSE, 3), TRUE, rep(FALSE, 8))) })
/tests/testthat/test-parser.R
permissive
loosolab/wilson
R
false
false
474
r
context("Parse clarion file") test_that("file is parsed correctly", { object <- parser(file = "wiki_example.clarion", dec = ",") expect_is(object, "Clarion") expect_equal(object$get_delimiter(), "|") expect_equal(object$get_factors(), object$metadata[, c("key", "factor1", "factor2")]) expect_equal(object$get_id(), "id") expect_equal(object$get_name(), "name") expect_equal(object$is_delimited(names(object$data)), c(rep(FALSE, 3), TRUE, rep(FALSE, 8))) })
add2 <- function(x, y) {
  x + y
}

above10 <- function(x) {
  use <- x > 10
  x[use]
}

above <- function(x, n = 10) {
  use <- x > n
  x[use]
}

columnmean <- function(y) {
  nc <- ncol(y)  # was nol(y), which is not an R function
  means <- numeric(nc)
  for (i in 1:nc) {
    means[i] <- mean(y[, i])
  }
  means
}

cube <- function(x, n) {
  x^3  # n is unused; thanks to lazy evaluation the call works without supplying it
}

f <- function(x) {
  g <- function(y) {
    y + z  # z is found in the enclosing environment (lexical scoping)
  }
  z <- 4
  x + g(x)
}
/Function.R
no_license
kennethchung/HopkinsDataScience
R
false
false
356
r
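A brief usage sketch of the functions above; the small data frame m is made up here purely for illustration:

add2(3, 5)                          # 8
above(c(1, 12, 25), n = 10)         # 12 25
m <- data.frame(a = 1:4, b = 5:8)   # illustrative input for columnmean()
columnmean(m)                       # 2.5 6.5
cube(3)                             # 27; the unused argument n is never evaluated
f(2)                                # 8, because g(2) = 2 + 4 via lexical scoping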
#' #' Return C++ code generated by \pkg{rstan} #' #' @param object a \code{bdm} class object #' @param ... additional arguments to generic function #' #' @export getcpp <- function(object, ...) UseMethod("getcpp") #' #' @rdname getcpp #' @export getcpp.bdm <- function(object, ...) { if (length(object@model_cpp) == 0) { stanc_list_return <- stanc(model_code = object@model_code, model_name = object@model_name) cpp <- stanc_list_return$cppcode } else { cpp <- object@model_cpp$model_cppcode } return(cpp) }
/R/getcpp.R
no_license
cttedwards/bdm
R
false
false
561
r
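A hedged usage sketch for getcpp(): mdl below is assumed to be an existing bdm object built with the cttedwards/bdm package, and rstan must be available because getcpp.bdm() falls back to stanc() when no compiled C++ is stored on the object.

library(rstan)            # provides stanc(), used when object@model_cpp is empty
cpp <- getcpp(mdl)        # S3 dispatch to getcpp.bdm()
cat(substr(cpp, 1, 300))  # inspect the first characters of the generated C++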
library(sp)
library(raster)
library(rgdal)
library(rgeos)
library(caTools)
library(icesTAF)

# one-time setup; run manually if any of these packages are missing
# install.packages("rgdal")
# install.packages("rgeos")
# install.packages("caTools")
# install.packages("icesTAF")

cy0.data.path <- "cors.csv"
cy0.data <- read.csv(cy0.data.path, check.names = FALSE)

start.year <- 1999
end.year <- 2017

# leftover interactive debugging values; plot(mapp.cp) only works after the
# loop below has created mapp.cp
# year <- 2012
# c <- 6
# plot(mapp.cp)

for (year in c(start.year:end.year)) {
  sleuth.data.path <- sprintf("/Users/kaso/Downloads/R/LandScan/lspop%s.tif", year)
  mapp <- raster(sleuth.data.path)
  column.blank1 <- rep.int(0, 143)
  for (c in c(1:143)) {
    mapRange <- 0.0435 * (189 - c)**0.5
    cy <- cy0.data["city"][[1]][c]
    coy <- cy0.data["corY"][[1]][c]
    cox <- cy0.data["corX"][[1]][c]
    urban.map.ext <- extent(cox - mapRange, cox + mapRange, coy - mapRange, coy + mapRange)
    mapp.cp <- crop(mapp, urban.map.ext)
    values(mapp.cp)[values(mapp.cp) < 1000] <- 0
    values(mapp.cp)[values(mapp.cp) >= 1000] <- 1
    alal <- sum(na.omit(values(mapp.cp)))
    column.blank1[c] <- alal
  }
  clm.name1 <- sprintf("%s_ls", year)
  cy0.data[, clm.name1] <- column.blank1
}

write.csv(cy0.data, "ls_hist.csv")
/python/.ipynb_checkpoints/pick-checkpoint.R
no_license
kamomehz/graduate-git
R
false
false
1,134
r
library(httr) library(jsonlite) library(dplyr) library(plotly) source("api.R") scatter_plot <- function(dataset, year_start, year_end, genre, rating_low, rating_high) { # create new column called "release_year", get rid of NA for "release_year" # column dataset$release_year <- as.numeric(format(as.Date( dataset$release_date, "%Y-%m-%d" ), "%Y")) complete_vec <- complete.cases(dataset[, "release_year"]) dataset <- dataset[complete_vec, ] # get genre list response <- GET(paste0( "https://api.themoviedb.org/3/genre/movie/list?api_key=", api_key )) response_content <- content(response, type = "text") genre_list <- fromJSON(response_content)$genres # get language list response_new <- GET(paste0( "https://api.themoviedb.org/3/configuration/languages?api_key=", api_key )) response_content_new <- content(response_new, type = "text") language_list <- fromJSON(response_content_new) # join dataset with language list language_list$original_language <- language_list$iso_639_1 language <- select(language_list, original_language, english_name) dataset <- full_join(dataset, language) # max for x-axis and y-axis xmax <- max(dataset$release_year) ymax <- max(dataset$vote_average) # filter dataset if (genre != "") { genre <- as.character(genre_list[genre_list$name == genre, ]$id) } new_data <- dataset %>% filter(release_year >= year_start & release_year <= year_end) %>% filter(grepl(genre, genre_ids)) %>% filter(vote_average >= rating_low & vote_average <= rating_high) # scatter plot plot_ly(new_data, x = ~ release_year, y = ~ vote_average, hoverinfo = "text", text = ~ paste( "Movie: ", title, " (", original_title, ")", "<br>Year: ", release_date, "<br>Rating: ", vote_average, "<br>Language: ", english_name ), color = ~ english_name, mode = "markers", marker = list(opacity = .7, size = 10) ) %>% layout( title = "Movie Release Year vs Vote Average", xaxis = list(range = c(1920, xmax), title = "Release Year"), yaxis = list(range = c(0, ymax), title = "Vote Average") ) %>% return() }
/scripts/scatter_plot.R
permissive
katiechen00/Final-Project
R
false
false
2,217
r
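A hypothetical call to scatter_plot(): movies stands in for a data frame of TMDB results with the columns the function expects (release_date, vote_average, genre_ids, title, original_title, original_language), and api.R must define a valid api_key.

p <- scatter_plot(dataset = movies,
                  year_start = 2000, year_end = 2015,
                  genre = "Comedy",
                  rating_low = 5, rating_high = 9)
p   # plotly scatter of release year vs vote average, coloured by language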
#Input dataset ExpDegree <- read.csv("ExpectedDegree.csv", header=FALSE) ActualDegreeADR <- read.csv("ActualDegreeADR.csv", header=FALSE) ActualDegreeTRPW <- read.csv("ActualDegreeTRPW.csv", header=FALSE) ExpTriangle <- read.csv("ExpectedTriangleDegree.csv", header=FALSE) ActualTriangleADR <- read.csv("ActualTriangleDegreeADR.csv", header=FALSE) ActualTriangleTRPW <- read.csv("ActualTriangleDegreeTRPW.csv", header=FALSE) ExpCoefficient <- read.csv("ExpectedCoefficient.csv", header=FALSE) ActualCoefficientADR <- read.csv("ActualCoefficientADR.csv", header=FALSE) ActualCoefficientTRPW <- read.csv("ActualCoefficientTRPW.csv", header=FALSE) #Rename names(ExpDegree) <- c("ExpDegree") names(ActualDegreeADR) <- c("ActualDegreeADR") names(ActualDegreeTRPW) <- c("ActualDegreeTRPW") names(ExpTriangle) <- c("ExpTriangle") names(ActualTriangleADR) <- c("ActualTriangleADR") names(ActualTriangleTRPW) <- c("ActualTriangleTRPW") names(ExpCoefficient) <- c("ExpCoefficient") names(ActualCoefficientADR) <- c("ActualCoefficientADR") names(ActualCoefficientTRPW) <- c("ActualCoefficientTRPW") #Round ExpDegreeRound <- round(ExpDegree,0) ExpTriangleRound <- round(ExpTriangle,0) ExpCoefficientRound <- round(ExpCoefficient,1) ActualCoefficientADRRound <- round(ActualCoefficientADR,1) ActualCoefficientTRPWRound <- round(ActualCoefficientTRPW,1) #Sort ExpDegreeSorted <- data.frame(sort(ExpDegreeRound$ExpDegree)) ActualDegreeADRSorted <- data.frame(sort(ActualDegreeADR$ActualDegreeADR)) ActualDegreeTRPWSorted <- data.frame(sort(ActualDegreeTRPW$ActualDegreeTRPW)) ExpTriangleSorted <- data.frame(sort(ExpTriangleRound$ExpTriangle)) ActualTriangleADRSorted <- data.frame(sort(ActualTriangleADR$ActualTriangleADR)) ActualTriangleTRPWSorted <- data.frame(sort(ActualTriangleTRPW$ActualTriangleTRPW)) ExpCoefficientSorted <- data.frame(sort(ExpCoefficientRound$ExpCoefficient)) ActualCoefficientADRSorted <- data.frame(sort(ActualCoefficientADRRound$ActualCoefficientADR)) ActualCoefficientTRPWSorted <- data.frame(sort(ActualCoefficientTRPWRound$ActualCoefficientTRPW)) #Find times of exisits ExpDegreeCount <- data.frame(table(ExpDegreeSorted$sort.ExpDegreeRound.ExpDegree.)) names(ExpDegreeCount) <- c("Degree", "Count") ActualDegreeADRCount <- data.frame(table(ActualDegreeADRSorted$sort.ActualDegreeADR.ActualDegreeADR.)) names(ActualDegreeADRCount) <- c("Degree", "Count") ActualDegreeTRPWCount <- data.frame(table(ActualDegreeTRPWSorted$sort.ActualDegreeTRPW.ActualDegreeTRPW.)) names(ActualDegreeTRPWCount) <- c("Degree", "Count") ExpTriangleCount <- data.frame(table(ExpTriangleSorted$sort.ExpTriangleRound.ExpTriangle.)) names(ExpTriangleCount) <- c("TriangleDegree", "Count") ActualTriangleADRCount <- data.frame(table(ActualTriangleADRSorted$sort.ActualTriangleADR.ActualTriangleADR.)) names(ActualTriangleADRCount) <- c("TriangleDegree", "Count") ActualTriangleTRPWCount <- data.frame(table(ActualTriangleTRPWSorted$sort.ActualTriangleTRPW.ActualTriangleTRPW.)) names(ActualTriangleTRPWCount) <- c("TriangleDegree", "Count") ExpCoefficientCount <- data.frame(table(ExpCoefficientSorted$sort.ExpCoefficientRound.ExpCoefficient.)) names(ExpCoefficientCount) <- c("Coefficient", "Count") ActualCoefficientADRCount <- data.frame(table(ActualCoefficientADRSorted$sort.ActualCoefficientADRRound.ActualCoefficientADR.)) names(ActualCoefficientADRCount) <- c("Coefficient", "Count") ActualCoefficientTRPWCount <- data.frame(table(ActualCoefficientTRPWSorted$sort.ActualCoefficientTRPWRound.ActualCoefficientTRPW.)) names(ActualCoefficientTRPWCount) <- 
c("Coefficient", "Count") #Calculate Percentages ExpDegreeFinal <- within(ExpDegreeCount, { percentage = Count / nrow(ExpDegreeSorted) group="Expected" }) ActualDegreeADRFinal <- within(ActualDegreeADRCount, { percentage = Count / nrow(ActualDegreeADRSorted) group="ADR" }) ActualDegreeTRPWFinal <- within(ActualDegreeTRPWCount, { percentage = Count / nrow(ActualDegreeTRPWSorted) group="TRPW" }) ExpTriangleFinal <- within(ExpTriangleCount, { percentage = Count / nrow(ExpTriangleSorted) group="Expected" }) ActualTriangleADRFinal <- within(ActualTriangleADRCount, { percentage = Count / nrow(ActualTriangleADRSorted) group="ADR" }) ActualTriangleTRPWFinal <- within(ActualTriangleTRPWCount, { percentage = Count / nrow(ActualTriangleTRPWSorted) group="TRPW" }) ExpCoefficientFinal <- within(ExpCoefficientCount, { percentage = Count / nrow(ExpCoefficientSorted) group="Expected" }) ActualCoefficientADRFinal <- within(ActualCoefficientADRCount, { percentage = Count / nrow(ActualCoefficientADRSorted) group="ADR" }) ActualCoefficientTRPWFinal <- within(ActualCoefficientTRPWCount, { percentage = Count / nrow(ActualCoefficientTRPWSorted) group="TRPW" }) #Plot library(ggplot2) #Bind data DegreeFinal <- rbind(ExpDegreeFinal, ActualDegreeADRFinal, ActualDegreeTRPWFinal) TriangleFinal <- rbind(ExpTriangleFinal, ActualTriangleADRFinal, ActualTriangleTRPWFinal) CoefficientFinal <- rbind(ExpCoefficientFinal, ActualCoefficientADRFinal, ActualCoefficientTRPWFinal) #Vertex Degree ggplot(DegreeFinal, aes(x=Degree,y=percentage,group=group,color=group,shape=group))+ geom_point()+ geom_line()+ scale_y_log10(limits=c(1e-3,1))+ # scale_y_log10()+ coord_cartesian(xlim=c(0,20))+ # scale_x_discrete(breaks=seq(0,20,by=1))+ # coord_cartesian(xlim=c(0,100))+ # scale_x_discrete(breaks=seq(0,100,by=5))+ labs(title="Vertex Degree Distribution AsSkitter", x="Vertex Degree", y="Percentage Vertices") #Triangle Degree ggplot(TriangleFinal, aes(x=TriangleDegree,y=percentage,group=group,color=group,shape=group))+ geom_point()+ geom_line()+ scale_y_log10(limits=c(1e-4,1))+ # scale_y_log10()+ coord_cartesian(xlim=c(0,20))+ scale_x_discrete(breaks=seq(0, 20, by=1))+ labs(title="Triangle Degree Distribution AsSkitter", x="Triangle Degree", y="Percentage Vertices") #Clustering Coefficient ggplot(CoefficientFinal, aes(x=Coefficient,y=percentage,group=group,color=group,shape=group))+ geom_point()+ geom_line()+ scale_y_log10()+ coord_cartesian(xlim=c(0,8))+ scale_x_discrete(breaks=seq(0, 1, by=0.1))+ labs(title="Clustering Coefficient Distribution AsSkitter", x="Clustering Coefficient Degree", y="Percentage Vertices") #Triangle count ExpTriangleCount <- data.frame(sum(as.numeric(ExpTriangleFinal$TriangleDegree)*as.numeric(ExpTriangleFinal$Count))) names(ExpTriangleCount) <- c("TriangleDegree") ADRTriangleCount <- data.frame(sum(as.numeric(ActualTriangleADRFinal$TriangleDegree)*as.numeric(ActualTriangleADRFinal$Count))) names(ADRTriangleCount) <- c("TriangleDegree") TRPWTriangleCount <- data.frame(sum(as.numeric(ActualTriangleTRPWFinal$TriangleDegree)*as.numeric(ActualTriangleTRPWFinal$Count))) names(TRPWTriangleCount) <- c("TriangleDegree") ExpTriangleCountFinal <- within(ExpTriangleCount, { group = "Expected" }) ADRTriangleCountFinal <- within(ADRTriangleCount, { group = "ADR" }) TRPWTriangleCountFinal <- within(TRPWTriangleCount, { group = "TRPW" }) TriangleCount <- rbind(ExpTriangleCountFinal, ADRTriangleCountFinal, TRPWTriangleCountFinal) ggplot(TriangleCount, aes(x=group,y=TriangleDegree,group=group,fill=group))+ 
geom_bar(stat="identity")+ labs(title="Tiangle Count AsSkitter",x="Algorithm",y="Number of Triangles")+ guides(fill=guide_legend(title="Algorithm"))
/Experiments/AsSkitter/AsSkitter.r
no_license
t-liu93/2IMW20-Project
R
false
false
7,335
r
\name{coefOther}
\alias{coefOther}
\title{Exogenous Coefficients of the Fitted Model in the \code{ivmodel} Object}
\description{\code{coefOther} returns the point estimates, standard errors, test statistics and p values for the exogenous covariates associated with the outcome. It returns a list of matrices where each matrix is one of the k-Class estimates from an \code{ivmodel} object.}
\usage{
coefOther(ivmodel)
}
\arguments{
\item{ivmodel}{\code{ivmodel} object.}
}
\value{
A list of matrices where each matrix summarizes the estimated coefficients from one of the k-Class estimates.
}
\author{Hyunseung Kang}
\seealso{See also \code{\link{ivmodel}} for details on the instrumental variables model.}
\examples{
data(card.data)
Y = card.data[, "lwage"]
D = card.data[, "educ"]
Z = card.data[, "nearc4"]
Xname = c("exper", "expersq", "black", "south", "smsa", "reg661", "reg662",
          "reg663", "reg664", "reg665", "reg666", "reg667", "reg668", "smsa66")
X = card.data[, Xname]
foo = ivmodel(Y = Y, D = D, Z = Z, X = X)
coefOther(foo)
}
/man/coefOther.Rd
permissive
hyunseungkang/ivmodel
R
false
false
1,030
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/GauPro_S3.R \name{summary.GauPro} \alias{summary.GauPro} \title{if (F) { # Plot is automatically dispatched, same with print and format #' Plot for class GauPro #' #' @param x Object of class GauPro #' @param ... Additional parameters #' #' @return Nothing #' @export #' #' @examples #' n <- 12 #' x <- matrix(seq(0,1,length.out = n), ncol=1) #' y <- sin(2*pi*x) + rnorm(n,0,1e-1) #' gp <- GauPro(X=x, Z=y, parallel=FALSE) #' if (requireNamespace("MASS", quietly = TRUE)) { #' plot(gp) #' } #' plot.GauPro <- function(x, ...) { x$plot(...) # if (x$D == 1) { # x$cool1Dplot(...) # } else if (x$D == 2) { # x$plot2D(...) # } else { # # stop("No plot method for higher than 2 dimension") # x$plotmarginal() # } } } Summary for GauPro object} \usage{ \method{summary}{GauPro}(object, ...) } \arguments{ \item{object}{GauPro R6 object} \item{...}{Additional arguments passed to summary} } \value{ Summary } \description{ if (F) { # Plot is automatically dispatched, same with print and format #' Plot for class GauPro #' #' @param x Object of class GauPro #' @param ... Additional parameters #' #' @return Nothing #' @export #' #' @examples #' n <- 12 #' x <- matrix(seq(0,1,length.out = n), ncol=1) #' y <- sin(2*pi*x) + rnorm(n,0,1e-1) #' gp <- GauPro(X=x, Z=y, parallel=FALSE) #' if (requireNamespace("MASS", quietly = TRUE)) { #' plot(gp) #' } #' plot.GauPro <- function(x, ...) { x$plot(...) # if (x$D == 1) { # x$cool1Dplot(...) # } else if (x$D == 2) { # x$plot2D(...) # } else { # # stop("No plot method for higher than 2 dimension") # x$plotmarginal() # } } } Summary for GauPro object }
/man/summary.GauPro.Rd
no_license
CollinErickson/GauPro
R
false
true
1,845
rd
rm(list = ls()) home = '~/Google Drive/projects/TFbenchmark/' setwd(home) # Load expression exp = get(load('~/data/GTEx_expressionatlas/raw/GTex_v6_atlas.genes.voom.batchcor.merged.rdata')) fpkms = get(load('~/data/GTEx_expressionatlas/raw/GTex_v6_atlas.genes.fpkms.merged.rdata')) # Load samples annotation load('~/data/GTEx_expressionatlas/annot/E-MTAB-5214.sdrf.rdata') # Load TFs TFs = read.delim(file = 'data/TF_census/vaquerizas/TF_census.txt', header = F, stringsAsFactors = F)[,1] # Load gene annot and convert gene names exp = exp[ rownames(exp) %in% ensemblgenes_annot$ensembl_gene_id, ] rownames(exp) = ensemblgenes_annot$hgnc_symbol[ match(rownames(exp), ensemblgenes_annot$ensembl_gene_id) ] exp = exp[ ! duplicated(rownames(exp)), ] fpkms = fpkms[ rownames(fpkms) %in% ensemblgenes_annot$ensembl_gene_id, ] rownames(fpkms) = ensemblgenes_annot$hgnc_symbol[ match(rownames(fpkms), ensemblgenes_annot$ensembl_gene_id) ] fpkms = fpkms[ ! duplicated(rownames(fpkms)), ] # Filter exp for TFs table(rownames(exp) %in% TFs) table(TFs %in% rownames(exp)) exp = exp[ rownames(exp) %in% TFs, ] fpkms = fpkms[ rownames(fpkms) %in% TFs, ] # plot profiles tissues = sample_annotation$Comment.histological.type.[ match(colnames(exp), sample_annotation$Source.Name)] idx = sample(1:ncol(exp), 1000) tissue_cluster = tissues[idx] library(Rtsne) library(RColorBrewer) colors = sample(c(brewer.pal(n = 10, name = "Paired"), brewer.pal(n = 8, name = "Accent"), brewer.pal(n = 8, name = "Dark2"), brewer.pal(n = 4, name = "Set1")[3:4])[1:length(unique(tissue_cluster))]) names(colors) = unique(tissue_cluster) tsne = Rtsne(scale(t(exp[, idx ])), dims = 2, perplexity=50, verbose=TRUE, max_iter = 1000) plot(tsne$Y, t='n', main="tSNE") text(tsne$Y, labels=tissue_cluster, col=colors[tissue_cluster], cex = .7) tissues = sample_annotation$Comment.histological.type.[ sample_annotation$Source.Name %in% colnames(exp) ] ########################################################################################################################################################################################################################################## # differential expression per tissue type ########################################################################################################################################################################################################################################## ggplot( sample_annotation[ sample_annotation$Source.Name %in% colnames(exp) , ], aes (x = Comment.histological.type.) 
) + geom_bar() + theme_classic(18) + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .5)) + xlab('tissue') table(tissues) results = list() for (ti in sort(unique(tissues))){ message(ti) design = model.matrix(~factor((sample_annotation$Comment.histological.type.[ match(colnames(exp), sample_annotation$Source.Name) ] == ti) + 0)) ## fit the same linear model now to the enrichment scores fit = lmFit(exp, design) ## estimate moderated t-statistics fit = eBayes(fit, robust=TRUE, trend=TRUE) ## set1 is differentially expressed results[[ti]] = topTable(fit, coef = 2, p.value = 1, number = nrow(exp)) results[[ti]]$TF = rownames(results[[ti]]) } df = melt(results, id.vars = names(results[[1]])) df$adj.P.Val_global = p.adjust(df$P.Value, method = 'fdr') save(df, file = 'data/TF_target_sources/reverse_engenieered_networks/tf_differential_expression.rdata') # pvalue heatmap df$value = df$adj.P.Val +1E-322 M = -log10(acast(df, formula = L1~TF)) df$value = df$logFC M = M * sign(acast(df, formula = L1~TF)) pheatmap::pheatmap(M) # Counts df$veredict = 'neutral' df$veredict[ df$adj.P.Val < 0.01 & df$logFC < 0 ] = '-' df$veredict[ df$adj.P.Val < 0.01 & df$logFC > 0 ] = '+' ggplot(df, aes(x= veredict, fill = veredict)) + geom_bar() + theme_bw(18) + xlab('TF status in tissue') + scale_fill_manual(values = c('steelblue3', 'coral3', 'grey')) + theme(legend.position = 'none') ddf = melt(ddply(df, 'TF', function(x) table(x$veredict) ), id.vars = 'TF') ddf$value = as.factor(ddf$value) ggplot(ddf[ ddf$variable != 'neutral', ], aes(x= value, fill = variable) ) + geom_bar(position = 'dodge') + scale_fill_manual(values = c('steelblue3', 'coral3'), name = 'TF status in tissue') + theme_bw(18) + ylab('number of TFs') + xlab('number of tissues') + ggtitle('TF expression in 30 tissues') ########################################################################################################################################################################################################################################## ########################################################################################################################################################################################################################################## # 25th > 5 fpkms ########################################################################################################################################################################################################################################## results = list() for (ti in sort(unique(tissues))){ message(ti) results[[ti]]$active = names(which(apply(fpkms[, colnames(fpkms) %in% sample_annotation$Source.Name[ sample_annotation$Comment.histological.type. == ti ] ], 1, quantile, 0.25) > 1)) results[[ti]]$inactive = names(which(apply(fpkms[, colnames(fpkms) %in% sample_annotation$Source.Name[ sample_annotation$Comment.histological.type. == ti ] ], 1, quantile, 0.75) < 1)) } df = melt(results) ddf = as.data.frame(rbind( cbind(table(df$value[ df$L2 == 'active']), 'active'), cbind(table(df$value[ df$L2 == 'inactive']), 'inactive')), stringsAsFactors = F) ddf$V1 = as.factor(as.integer(ddf$V1)) ggplot(ddf, aes(x= V1, fill = V2) ) + geom_bar(position = 'dodge') + scale_fill_manual(values = c('steelblue3', 'coral3'), name = 'TF status in tissue') + theme_bw(18) + ylab('number of TFs') + xlab('number of tissues') + ggtitle('TF expression in 30 tissues') save(df, file = 'data/TF_target_sources/reverse_engenieered_networks/tf_tissues_call_percentile.rdata')
/code/TF_targets/inferred/old/research/differentialy_expressed_TFs.r
no_license
Ran485/TFbenchmark
R
false
false
6,172
r
createPlot3 <- function() { require(data.table) plotData<-fread("exdata_data_household_power_consumption\\household_power_consumption.txt", colClasses=c("Date", "time", "character","character","character","character","character","character","character" )) #str(plot1Data) plotData$Global_active_power[plotData$Global_active_power == "?"] <- NA plotData$Date <- as.Date(plotData$Date, "%d/%m/%Y") plotData <- plotData[plotData$Date >="2007-02-01" & plotData$Date <= "2007-02-02"] png("plot3.png", width=480, height=480) g_range <- range(0, plotData$Sub_metering_1) plot(as.numeric(plotData$Sub_metering_1) , type="l", axes=FALSE, xlab="", ylab="Energy Sub Metering") lines(as.numeric(plotData$Sub_metering_2), type="l", col="red") lines(as.numeric(plotData$Sub_metering_3), type="l", col="blue") axis(1, at=1, lab=c("Thu"), xlab="NULL") axis(1, at=nrow(plotData)/2, lab=c("Fri"),xlab="NULL") axis(1, at=nrow(plotData), lab=c("Sat"),xlab="NULL") axis(2, las=1, at=10*0:g_range[2]) box() legend("topright", c("Sub Metering 1" ,"Sub Metering 2","Sub Metering 3"), col=c("black","red", "blue"), lty=1); dev.off() }
/plot3.R
no_license
richbenmintz/ExData_Plotting1
R
false
false
1,285
r
geodistance <- function(longvar,latvar,lotarget,latarget,dcoor=FALSE) { latvar <- 2*pi*latvar/360 longvar <- 2*pi*longvar/360 lotarget <- 2*pi*lotarget/360 latarget <- 2*pi*latarget/360 dnorth <- NULL deast <- NULL dist <- pmin(sin(latvar)*sin(latarget) + cos(latvar)*cos(latarget)*cos(lotarget-longvar), 1) dist <- acos(dist)*3958 if (dcoor==TRUE) { dnorth <- pmin(sin(latvar)*sin(latarget) + cos(latvar)*cos(latarget)*cos(0), 1) dnorth <- acos(dnorth)*3958 dnorth <- ifelse(latvar<latarget, -dnorth, dnorth) } if (dcoor==TRUE) { deast <- pmin(sin(latvar)^2 + (cos(latvar)^2)*cos(lotarget-longvar), 1) deast <- acos(deast)*3958 deast <- ifelse(longvar<lotarget, -deast, deast) } out <- list(dist,dnorth,deast) names(out) <- c("dist","dnorth","deast") return(out) }
/McSpatial/R/geodistance.R
no_license
albrizre/spatstat.revdep
R
false
false
827
r
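geodistance() implements the spherical law of cosines with an Earth radius of 3958 miles, so distances come back in miles; the coordinates below are illustrative only.

lon <- c(-87.63, -118.24)   # two observation points (roughly Chicago and Los Angeles)
lat <- c(41.88, 34.05)
out <- geodistance(longvar = lon, latvar = lat,
                   lotarget = -74.01, latarget = 40.71)   # target roughly New York
out$dist                    # great-circle distances in miles

geodistance(lon, lat, -74.01, 40.71, dcoor = TRUE)   # also returns signed north/east components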
###################### # AUXILLIARY FUNCTIONS ###################### generate_png <- function(origin_file_name, output_file_name) { system(paste0("sips -s formatOptions best -s format png ", origin_file_name, " --out ", output_file_name)) file.info(output_file_name) } trim_png_read <- function(file_path) { png_file <- png::readPNG(file_path) trim_png_blanks(png_file) } are_all_one <- function(x) { all(x == 1) } trim_png_blanks <- function(png_mat) { chosen_rows <- !apply(png_mat[, , 1], 1, are_all_one) chosen_cols <- !apply(png_mat[, , 1], 2, are_all_one) png_mat[chosen_rows, chosen_cols, ] } make_grob_png <- function(...) { grid::rasterGrob(trim_png_read(...), interpolate = TRUE) } # from stackoverflow # question 22488563 # ggplot2-annotate-layer-position-in-r annotate_textp <- function(label, x, y, facets = NULL, hjust = 0, vjust = 0, color = "black", alpha = NA, family = thm$text$family, size = thm$text$size, fontface = 1, line_height = 1.0, box_just = ifelse(c(x, y) < 0.5, 0, 1), margin = grid::unit(size / 2, "pt"), thm = theme_get()) { x <- scales::squish_infinite(x) y <- scales::squish_infinite(y) tg <- grid::textGrob(label, x = 0, y = 0, hjust = hjust, vjust = vjust, gp = grid::gpar(col = alpha(color, alpha), fontsize = size, fontfamily = family, fontface = fontface, lineheight = line_height)) ts <- grid::unit.c(grid::grobWidth(tg), grid::grobHeight(tg)) vp <- grid::viewport(x = x, y = y, width = ts[1], height = ts[2], just = box_just) tg <- grid::editGrob(tg, x = ts[1] * hjust, y = ts[2] * vjust, vp = vp) unt <- grid::unit(1, "npc") - margin * 2 inr <- grid::grobTree(tg, vp = grid::viewport(width = unt, height = unt)) layer(data = NULL, stat = StatIdentity, position = PositionIdentity, geom = GeomCustomAnn, inherit.aes = TRUE, params = list(grob = grid::grobTree(inr), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf)) } clip_vec_hdi <- function(data, probs = c(0.005, 0.995)) { lim_x <- quantile_hdi(data$param, probs = probs) data %>% dplyr::filter(param >= lim_x[[1]] & param <= lim_x[[2]]) } empty_panel_w_txt <- function(color, label) { grid::grobTree(grid::rectGrob(width = grid::unit(0.915, "npc"), height = grid::unit(0.9, "npc"), just = 0.45, gp = grid::gpar(fill = color)), grid::textGrob(label, gp = grid::gpar(fontsize = 15, col = "black"), hjust = 0.4)) } sim_pp_mean <- function(data, model, type) { posteriors <- as.data.frame(model) data <- dplyr::filter(data, sample == tidyselect::all_of(type)) y_rep <- matrix(0, nrow(posteriors), nrow(data)) for (j in seq_len(ncol(y_rep))) { target_phi <- paste0("r_phi[", data$pond[j], "]") target_ka <- paste0("r_ka[", data$pond[j], "]") target_ke <- paste0("r_ke[", data$pond[j], "]") phi <- posteriors[, target_phi] ka <- posteriors[, target_ka] ke <- posteriors[, target_ke] sg <- posteriors[, "sigma"] mu <- calc_eqn_1(data$day[j], ka, ke, phi) y_rep[, j] <- rnorm(rep(1, nrow(y_rep)), mu, sg) } y_rep } ############### # PAPER FIGURES ############### make_fig_1 <- function(dest, fig_out_folder, ...) { pdf(dest, width = 5.819004, height = 2.950226) p <- fig_1(...) 
print(p) grid::grid.text("a", x = grid::unit(0.11, "npc"), y = grid::unit(0.82, "npc"), gp = grid::gpar("fontsize" = 13, "fontface" = "bold")) grid::grid.text("b", x = grid::unit(0.58, "npc"), y = grid::unit(0.82, "npc"), gp = grid::gpar("fontsize" = 13, "fontface" = "bold")) dev.off() } fig_1 <- function(post_pred_means, my_cols_treatment, phy_png, zoo_png) { polygons <- post_pred_means$polygons linesd <- post_pred_means$lines tp <- polygons %>% dplyr::distinct(sample, treatment) %>% dplyr::mutate(x = 1) ggplot(data = polygons, mapping = aes(x = x, y = y_cred)) + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + ggnewscale::new_scale("fill") + geom_polygon(data = polygons, mapping = aes(x = x, y = y_cred, fill = treatment)) + geom_line(data = linesd, mapping = aes(x = x, y = mean, group = treatment), col = "black", lty = 1, size = 0.5) + geom_polygon(data = polygons, mapping = aes(x = x, y = y_cred, fill = treatment), alpha = 0.7, inherit.aes = FALSE) + scale_fill_manual(labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + facet_wrap(~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(0.4, 0.1, 0.1, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 14), legend.position = c(0.5, 1.2), legend.direction = "horizontal", legend.text = element_text(size = 12), legend.title = element_blank()) + ylab(substitute("Excess "^15 * "N"["%"] * ", " * chi)) + xlab("Time (days)") + layer(data = polygons %>% dplyr::filter(sample == "phytoplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 0.5, ymax = 0.65, xmin = 43, xmax = 63)) + layer(data = polygons %>% dplyr::filter(sample == "zooplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 0.3, ymax = 0.45, xmin = 45, xmax = 65)) } make_fig_2 <- function(dest, fig_out_folder, ...) { p <- fig_2(...) 
pdf(dest, width = 5.737556, height = 6.950226) print(p) grid.text(substitute(kappa["a"] * " (d"^-1 * ")"), x = grid::unit(0.56, "npc"), y = grid::unit(0.775, "npc"), gp = grid::gpar("fontsize" = 10)) grid.text(substitute(kappa["e"] * " (d"^-1 * ")"), x = grid::unit(0.56, "npc"), y = grid::unit(0.54, "npc"), gp = grid::gpar("fontsize" = 10)) grid.text(substitute(italic(phi) * " (% d)"), x = grid::unit(0.565, "npc"), y = grid::unit(0.31, "npc"), gp = grid::gpar("fontsize" = 10)) grid.text(substitute("Efficiency of N transfer (" * bar(epsilon) * ")"), x = grid::unit(0.54, "npc"), y = grid::unit(0.075, "npc"), gp = grid::gpar("fontsize" = 10)) # legends grid.text("a", x = grid::unit(0.13, "npc"), y = grid::unit(0.985, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) grid.text("b", x = grid::unit(0.13, "npc"), y = grid::unit(0.76, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) grid.text("c", x = grid::unit(0.13, "npc"), y = grid::unit(0.525, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) grid.text("d", x = grid::unit(0.13, "npc"), y = grid::unit(0.295, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) dev.off() } fig_2 <- function(data_eff, data_ka, data_ke, data_phi, my_cols_treatment, phy_png, zoo_png) { nreps <- nrow(data_eff) plot_data <- do.call("rbind.data.frame", list(data_ka, data_ke, data_phi, data_eff)) %>% dplyr::mutate(name = rep(c("a_kas", "b_kes", "c_phi", "d_eff"), each = nreps)) %>% plyr::ddply(.(name, sample, treatment), clip_vec_hdi) tp <- plot_data %>% dplyr::distinct(sample, name) %>% dplyr::mutate(param = 1) ggplot(data = plot_data, mapping = aes(x = param)) + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + ggnewscale::new_scale("fill") + geom_density(data = plot_data, adjust = 2, trim = TRUE, mapping = aes(x = param, fill = treatment), colour = NA) + geom_density(adjust = 2, alpha = 0.7, trim = TRUE, mapping = aes(fill = treatment, colour = treatment), show.legend = FALSE) + scale_fill_manual(values = rev(my_cols_treatment), labels = c("Ambient", "Warmed")) + facet_wrap(name~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(-0.3, 0.1, 0.6, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.y = grid::unit(-1.2, "lines"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.x = element_blank(), axis.title.y = element_text(size = 14), legend.position = c(0.5, -0.1), legend.direction = "horizontal", legend.text = element_text(size = 12), legend.title = element_blank()) + ylab("Posterior density (99% C.I.)\n") + scale_color_manual(values = rev(my_cols_treatment)) + scale_y_continuous(expand = expansion(mult = c(0, 0.05))) + coord_cartesian(clip = "off") + layer(data = plot_data %>% dplyr::filter(sample == "phytoplankton" & name == "a_kas"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 2.2, ymax = 3, xmin = 1, xmax = 1.25)) + layer(data = plot_data %>% dplyr::filter(sample == "zooplankton" & name == "a_kas"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 6, ymax = 10.5, xmin = 0.62, xmax = 0.74)) } make_fig_3 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, fig_3(...), device = "pdf", width = 3.87, height = 5.46, units = "in", onefile = FALSE) } fig_3 <- function(data, phy_png, zoo_png, my_cols_treatment) { cols <- rep(c(my_cols_treatment[2], my_cols_treatment[1]), 2) data <- data %>% dplyr::group_by(sample, pond, treatment) %>% dplyr::summarise(av_C_ug_L = mean(av_C_ug_L), ln_av_C_ug_L = log(av_C_ug_L)) a <- ggplot(data = data, mapping = aes(y = ln_av_C_ug_L, x = treatment)) + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 1.8, ymax = 1.8 + (10.7 - 1.8) / 2), fill = "#F6F1EB") + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 1.8 + (10.7 - 1.8) / 2, ymax = 10.7), fill = "#CCECE6") + geom_boxplot(mapping = aes(colour = interaction(treatment, sample)), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 1, size = 0.3) + geom_point(mapping = aes(fill = interaction(treatment, sample), shape = sample), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(21, 22)) + labs(x = "Treatment", y = substitute("Total Biomass (" * mu * "g C L"^-1 * ")")) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_x_discrete(expand = c(1, -0.5), labels = c("Ambient", "Warmed")) + scale_y_continuous(limits = c(1.8, 10.7), expand = c(0, 0), breaks = c(log(8), log(50), log(400), log(3e3), log(22e3)), labels = c(8, 50, 400, 3e3, 22e3)) a$coordinates$clip <- "off" a + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = phy_png, ymin = 6.6, ymax = 7.4, xmin = 0.3, xmax = 1.3)) + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = zoo_png, ymin = 4.5, ymax = 6, xmin = 2, xmax = 2.5)) } ####################### # EXTENDED DATA FIGURES ####################### make_internal_ed_fig_1d <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, internal_ed_fig_1d(...), device = "pdf", width = 8.5, height = 3.5, units = "in", onefile = FALSE) } internal_ed_fig_1d <- function(physchem_data, my_cols_treatment) { make_bef_aft_data <- function(data, vrb) { data$y_var <- data[[vrb]] data <- data %>% dplyr::select(Date, pairs, treatment, pond, y_var) %>% tidyr::drop_na() bef <- data %>% dplyr::filter(Date < as.Date("2013-07-16")) %>% dplyr::group_by(pairs, treatment, pond) %>% dplyr::summarise(mean = mean(y_var)) %>% dplyr::mutate(period = "before") %>% data.frame() aft <- data %>% dplyr::filter(Date > as.Date("2013-07-16") & Date < as.Date("2013-07-20")) %>% dplyr::group_by(pairs, treatment, pond) %>% dplyr::summarise(mean = mean(y_var)) %>% dplyr::mutate(period = "after") %>% data.frame() rbind(bef, aft) } plot_bef_aft <- function(data, vrb, ylab, my_cols_treatment) { data$vrb <- data[[vrb]] ggplot(data = data, mapping = aes(y = vrb, x = period, fill = treatment, shape = treatment)) + geom_boxplot(colour = "black", show.legend = FALSE, width = 0.6, size = 0.3, outlier.shape = NA) + geom_point(position = position_dodge(0.4), size = 2.5) + labs(x = "Treatment", y = ylab) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_x_discrete(labels = c("Before", "After")) + scale_fill_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_shape_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = c(24, 25)) } pdosat <- make_bef_aft_data(physchem_data, "DOsat") %>% plot_bef_aft("mean", "DO saturation (%)", my_cols_treatment) + scale_y_continuous(trans = "log10") + theme(legend.position = "none") pph <- make_bef_aft_data(physchem_data, "pH") %>% plot_bef_aft("mean", "pH", my_cols_treatment) + theme(legend.position = "none") gridExtra::grid.arrange(pdosat, pph, ncol = 2) } make_ed_fig_2 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, ed_fig_2(...), device = "pdf", width = 12, height = 4, units = "in", onefile = FALSE) } ed_fig_2 <- function(data, my_cols_treatment) { my_theme <- function(leg = FALSE) { theme_bw() + theme(axis.title.y = element_text(size = 14, margin = margin(t = 0, r = 10, b = 0, l = 10)), axis.title.x = element_text(size = 14, margin = margin(t = 10, r = 0, b = 10, l = 0)), axis.text.x = element_text(size = 12, margin = margin(t = 4, r = 0, b = 0, l = 0)), axis.text.y = element_text(size = 12, margin = margin(t = 0, r = 4, b = 0, l = 0)), axis.ticks.length = grid::unit(5, "pt"), strip.background = element_blank(), legend.position = if (leg) c(0.8, 0.8) else "none") } unique_ggplot <- function(data, vrb, my_cols_treatment, my_ylab, leg = FALSE) { data <- data %>% dplyr::mutate(sample_date = as.Date(sample_date, format = "%Y-%m-%d")) %>% dplyr::select(vrb = tidyselect::all_of(vrb), "sample_date", "treatment") %>% tidyr::drop_na() %>% dplyr::group_by(sample_date, treatment) %>% dplyr::summarise(mean = mean(vrb), sd = sd(vrb), se = sd / sqrt(n()), conf = se * 1.96) ggplot(data = data, mapping = aes(x = sample_date, y = mean, fill = treatment, shape = treatment)) + geom_errorbar(mapping = aes(ymax = mean + conf, ymin = ifelse(mean - conf < 0, 0, mean - conf)), width = 0.2, color = "black", lwd = 0.3, position = position_dodge(width = 1)) + geom_point(size = 2, position = position_dodge(width = 1)) + geom_vline(mapping = aes(xintercept = as.Date("2013-07-16")), color = "black", size = 0.5, lty = 2) + scale_fill_manual(values = my_cols_treatment, labels = c("Ambient", "Control", "Warmed"), name = "Treatment") + scale_shape_manual(values = c(24, 23, 25), labels = c("Ambient", "Control", "Warmed"), name = "Treatment") + ylab(my_ylab) + xlab("Sampling date (2013)") + my_theme(leg) } my_cols_treatment <- c(my_cols_treatment, "white") names(my_cols_treatment) <- c("H", "A", "C") no2 <- unique_ggplot(data, "NO2_uM", my_cols_treatment, substitute(mu * "mol " * a * " L"^-1, list(a = chemf("NO2-"))), leg = TRUE) no3 <- unique_ggplot(data, "NO3_uM", my_cols_treatment, substitute(mu * "mol " * a * " L"^-1, list(a = chemf("NO3-")))) nh4 <- unique_ggplot(data, "NH3_uM", my_cols_treatment, substitute(mu * "mol " * a * " L"^-1, list(a = chemf("NH4+")))) gridExtra::grid.arrange(no2, no3, nh4, ncol = 3) } make_ed_fig_3 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, ed_fig_3(...), device = "pdf", width = 7, height = 6, units = "in", onefile = FALSE) } ed_fig_3 <- function(co2_data, my_cols_treatment) { ylims <- co2_data %>% dplyr::group_by(treatment, period) %>% dplyr::summarise(lims = boxplot.stats(influx)$stats[c(1, 5)]) %>% data.frame() ylims <- range(ylims$lims) ################### ## OULIERS EXCLUDED ################### ggplot(data = co2_data, mapping = aes(y = influx, x = period, fill = treatment, shape = treatment)) + geom_boxplot(colour = "black", show.legend = FALSE, width = 0.6, size = 0.3, outlier.shape = NA) + geom_point(position = position_jitterdodge(dodge.width = 0.6, jitter.width = 0.1), size = 2.5) + labs(x = "Period", y = substitute("Daytime CO"[2] * " influx (" * mu * "mol m"^-2 * " d"^-1 * ")")) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 20, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 20, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 17, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 17), panel.grid = element_blank(), legend.position = c(0.85, 0.12), legend.background = element_blank(), legend.title = element_text(size = 20), legend.text = element_text(size = 16)) + scale_x_discrete(labels = c("Before", "After"), limits = rev(sort(unique(co2_data$period)))) + scale_fill_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_shape_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = c(24, 25)) + coord_cartesian(ylim = ylims * 1.03) } make_ed_fig_5 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_5(...), device = "pdf", width = 10.5, height = 7.86, units = "in", onefile = FALSE) } ed_fig_5 <- function(data, posterior_predictions, my_cols_treatment, my_cols_group) { empty_void <- function() { ggplot() + theme_void() } ambient_panel <- empty_panel_w_txt(color = my_cols_treatment[2], label = "Ambient") warmed_panel <- empty_panel_w_txt(color = my_cols_treatment[1], label = "Warmed") ponds <- posterior_predictions$ponds preds <- posterior_predictions$preds out <- vector(mode = "list", length = nrow(ponds) + 4) out[[1]] <- ambient_panel out[[2]] <- warmed_panel out[[19]] <- empty_void() out[[20]] <- empty_void() for (i in ponds$pond) { grob1 <- grid::grobTree(grid::textGrob(paste0("Pond #", ponds$pond[i]), x = 0.95, y = 0.9, hjust = 1, gp = grid::gpar(col = "grey30", fontsize = 10))) p <- ggplot(data = preds[[i]], mapping = aes(x = x, y = y_conf, fill = sample)) + geom_polygon(alpha = 0.2) + labs(x = "", y = "") + scale_color_manual(values = c(my_cols_group[1], my_cols_group[2]), aesthetics = "fill") + theme_bw() + theme(plot.margin = margin(-0.2, 0, 0, 0, "in"), axis.text.y = element_text(size = 9), legend.position = "none") + geom_line(mapping = aes(x = x, y = y_mean, colour = sample), data = preds[[i]][complete.cases(preds[[i]]), ], linetype = 2, size = 0.6, inherit.aes = FALSE) + scale_color_manual(values = c("seagreen4", my_cols_group[2]), aesthetics = "colour") + geom_point(mapping = aes(x = day, y = add15N_perc, shape = sample, fill = sample), size = 2, data = data[data$pond == ponds$pond[i], ], inherit.aes = FALSE) + scale_shape_manual(values = c(21, 22)) + scale_fill_manual(values = c(my_cols_group[1], my_cols_group[2])) + annotation_custom(grob1) if (i %in% seq(4, 16, 4)) { out[[i + 2]] <- p + theme(axis.text.x = element_text(size = 9)) } else { out[[i + 2]] <- p + theme(axis.text.x = element_text(size = 9)) + 
scale_x_continuous(labels = rep("", 4), breaks = seq(0, 60, 20)) } if (i == 8) { out[[i + 2]] <- out[[i + 2]] + scale_y_continuous(labels = seq(0, 0.2, 0.1), breaks = seq(0, 0.2, 0.1)) } } lay_mat <- rbind(matrix(c(1, 1, 2, 2), 1, 4, byrow = TRUE), matrix(NA, 1, 4, byrow = TRUE), matrix(rep(3:18, each = 4), 16, 4), matrix(c(19, 19, 20, 20), 1, 4, byrow = TRUE)) x <- gridExtra::arrangeGrob(grobs = out, layout_matrix = lay_mat) ggpubr::annotate_figure(x, bottom = ggpubr::text_grob("Time (days)", hjust = 0.5, vjust = -1, size = 25, lineheight = grid::unit(2, "in")), left = ggpubr::text_grob(substitute("Excess "^15 * "N"["%"] * ", " * chi), hjust = 0.25, vjust = 0.5, size = 25, rot = 90)) + annotation_custom(grid::grid.points(x = grid::unit(1.4, "in"), y = grid::unit(0.7, "in"), size = grid::unit(0.15, "in"), pch = 21, gp = grid::gpar(col = "black", fill = my_cols_group[1]))) + annotation_custom(grid::grid.text("Phytoplankton", x = grid::unit(1.5, "in"), y = grid::unit(0.7, "in"), hjust = 0, vjust = 0.5, gp = grid::gpar(cex = 1.2))) + annotation_custom(grid::grid.points(x = grid::unit(1.4, "in"), y = grid::unit(0.4, "in"), size = grid::unit(0.15, "in"), pch = 22, gp = grid::gpar(col = "black", fill = my_cols_group[2]))) + annotation_custom(grid::grid.text("Zooplankton", x = grid::unit(1.5, "in"), y = grid::unit(0.4, "in"), hjust = 0, vjust = 0.5, gp = grid::gpar(cex = 1.2))) } make_ed_fig_6 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_6(...), device = "pdf", width = 8.2, height = 3.8, units = "in", onefile = FALSE) } ed_fig_6 <- function(post_effs, diff_biomass, phy_png, zoo_png) { diff_effs <- tapply(post_effs$param, list(post_effs$iter, post_effs$sample), function(x) 1 - (x[2] / x[1])) %>% as.data.frame %>% dplyr::mutate(iter = seq_len(nrow(.))) %>% tidyr::pivot_longer(c(phytoplankton, zooplankton), names_to = "sample", values_to = "diff") %>% dplyr::mutate(variable = "efficiency") %>% dplyr::arrange(sample, iter) %>% dplyr::select(sample, iter, diff, variable) %>% as.data.frame plot_data <- rbind(diff_effs, diff_biomass) %>% dplyr::rename(param = diff) %>% dplyr::mutate(param = param * 1e2) %>% plyr::ddply(c("sample", "variable"), clip_vec_hdi) tp <- plot_data %>% dplyr::distinct(sample) %>% dplyr::mutate(param = 1) ggplot(data = plot_data, mapping = aes(x = param)) + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + ggnewscale::new_scale("fill") + geom_density(data = plot_data, adjust = 2, trim = TRUE, mapping = aes(x = param, fill = variable), colour = NA) + geom_density(adjust = 2, alpha = 0.7, trim = TRUE, mapping = aes(fill = variable, colour = variable), show.legend = FALSE) + scale_fill_manual(values = c("black", "grey60"), labels = c("Biomass", "Efficiency")) + facet_wrap(~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(0.3, 0.1, 0.3, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.x = element_text(size = 14, margin = grid::unit(c(0.15, 0, 0, 0), "in")), axis.title.y = element_text(size = 14), legend.position = c(0.5, 1.1), legend.direction = "horizontal", legend.text = element_text(size = 12), legend.title = element_blank()) + ylab("Posterior density (99% C.I.)\n") + xlab("Percentage decline relative to ambient") + scale_color_manual(values = c("black", "grey60")) + 
scale_y_continuous(expand = expansion(mult = c(0, 0.05))) + coord_cartesian(clip = "off") + layer(data = plot_data %>% dplyr::filter(sample == "phytoplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 0.028, ymax = 0.033, xmin = -10, xmax = 10)) + layer(data = plot_data %>% dplyr::filter(sample == "zooplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 0.017, ymax = 0.024, xmin = -40, xmax = -15)) } make_ed_fig_7 <- function(dest, fig_out_folder, ...) { pdf(dest, width = 9, height = 2.8) ed_fig_7(...) dev.off() } ed_fig_7 <- function(control_data, my_cols_group) { ponds <- unique(control_data$pond) par(mfrow = c(1, 3), omi = c(0, 0.5, 0, 0), mai = c(1.02, 0.72, 0.2, 0.1), cex = 1, cex.lab = 1.2, cex.axis = 0.8, cex.main = 1.2, xpd = NA) for (i in seq_along(ponds)) { x <- control_data %>% dplyr::filter(pond == ponds[i]) if (i == 1) { ylab <- substitute("Excess "^15 * "N"["%"] * ", " * chi) } else { ylab <- "" } plot(NA, xlim = c(0, 60), ylim = c(0, 0.1), xlab = ifelse(i == 2, "Time (days)", ""), ylab = ylab, las = 1) x %>% dplyr::filter(sample == "phytoplankton") %>% { points(.$day, .$add15N_perc, pch = 21, bg = my_cols_group[1], cex = 1.3) } x %>% dplyr::filter(sample == "zooplankton") %>% { points(.$day, .$add15N_perc, pch = 22, bg = my_cols_group[2], cex = 1.3) } LoLinR::proportionalLabel(0.95, 0.9, paste0("Control pond ", i), adj = c(1, 0.5), cex = 0.9) } } make_ed_fig_8 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_8(...), device = "pdf", width = 3.87, height = 5.46, units = "in", onefile = FALSE) } ed_fig_8 <- function(data, phy_png, zoo_png, my_cols_treatment) { cols <- rep(c(my_cols_treatment[2], my_cols_treatment[1]), 2) data <- data %>% dplyr::group_by(sample, pond, treatment) %>% dplyr::summarise(av_N_ug_L = mean(av_N_ug_L), ln_av_N_ug_L = log(av_N_ug_L)) a <- ggplot(data = data, mapping = aes(y = ln_av_N_ug_L, x = treatment)) + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 0.5, ymax = 0.5 + (8.5 - 0.5) / 2), fill = "#F6F1EB") + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 0.5 + (8.5 - 0.5) / 2, ymax = 8.5), fill = "#CCECE6") + geom_boxplot(aes(colour = interaction(treatment, sample)), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 1, size = 0.3, outlier.shape = NA) + geom_point(mapping = aes(fill = interaction(treatment, sample), shape = sample), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(21, 22)) + labs(x = "Treatment", y = substitute("Total Biomass (" * mu * "g N L"^-1 * ")")) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_x_discrete(expand = c(1, -0.5), labels = c("Ambient", "Warmed")) + scale_y_continuous(limits = c(0.5, 8.5), expand = c(0, 0), breaks = c(log(8), log(50), log(400), log(3e3)), labels = c(8, 50, 400, 3e3)) a$coordinates$clip <- "off" a + layer(data = data, stat = StatIdentity, position = 
PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = phy_png, ymin = 7.4, ymax = 8.2, xmin = 0.3, xmax = 1.3)) + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = zoo_png, ymin = 3, ymax = 4.3, xmin = 2, xmax = 2.5)) } make_ed_fig_9 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_9(...), device = "pdf", width = 3.87, height = 5.46, units = "in", onefile = FALSE) } ed_fig_9 <- function(data, phy_png, zoo_png, my_cols_treatment) { cols <- rep(c(my_cols_treatment[2], my_cols_treatment[1]), 2) a <- ggplot(data = data, mapping = aes(y = mean, x = treatment)) + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 3, ymax = 7.5), fill = "#F6F1EB") + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 7.5, ymax = 25), fill = "#CCECE6") + geom_boxplot(mapping = aes(colour = interaction(treatment, sample)), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 1, size = 0.3, outlier.shape = NA) + geom_point(mapping = aes(fill = interaction(treatment, sample), shape = sample), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(21, 22)) + labs(x = "Treatment", y = "C:N ratio") + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_y_continuous(trans = "log10", limits = c(3, 25), expand = c(0, 0)) + scale_x_discrete(expand = c(1, -0.5), labels = c("Ambient", "Warmed")) a$coordinates$clip <- "off" a + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = phy_png, ymin = log10(20), ymax = log10(24), xmin = 2, xmax = 2.5)) + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = zoo_png, ymin = log10(3.2), ymax = log10(4.8), xmin = 0.5, xmax = 1)) } ####################### # SUPPLEMENTARY FIGURES ####################### make_sp_fig_inorganic <- function(dest, fig_out_folder, model_list, vrb, ...) { mod <- model_list[[vrb]] ggplot2::ggsave(dest, make_ba_plots(mod, ...), device = "pdf", width = 14, height = 7, units = "in", onefile = FALSE) } make_sp_fig_4 <- function(dest, fig_out_folder, mod, ...) 
{ names(mod$data)[1] <- "y" ggplot2::ggsave(dest, make_ba_plots(mod, ...), device = "pdf", width = 14, height = 7, units = "in", onefile = FALSE) } make_ba_plots <- function(mod, x_lab, my_cols_treatment) { a <- mod$data %>% dplyr::select(period, treatment) %>% dplyr::distinct() %>% tidybayes::add_fitted_draws(mod, dpar = c("mu", "sigma"), re_formula = ~ 0) %>% tidybayes::sample_draws(200) %>% ggplot(aes(y = interaction(period, treatment))) + ggdist::stat_dist_slab(mapping = aes(dist = "norm", arg1 = mu, arg2 = sigma), slab_color = "gray65", alpha = 5e-2, fill = NA, size = 0.2) + geom_point(data = mod$data, inherit.aes = FALSE, mapping = aes(x = y, y = interaction(period, treatment), fill = treatment, colour = treatment, shape = treatment), size = 3, alpha = 0.3) + labs(x = x_lab, y = "") + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 20, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 20, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 17, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 17), panel.grid = element_blank(), legend.position = c(0.85, 0.9), legend.background = element_blank(), legend.title = element_text(size = 20), legend.text = element_text(size = 16)) + scale_y_discrete(labels = c("After", "Before", "After", "Before")) + scale_colour_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_fill_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_shape_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = c(24, 25)) b <- plot(mod, N = 6, plot = FALSE) gridExtra::grid.arrange(a, b[[1]], ncol = 2) } make_sp_fig_5 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, sp_fig_5(...), device = "pdf", width = 10.2, height = 4.8, units = "in", onefile = FALSE) } sp_fig_5 <- function(cn_ratios, phy_png, zoo_png, my_cols_treatment) { targets <- names(cn_ratios) both <- plyr::llply(targets, function(x, models, my_cols_treatment) { my_cs <- my_cols_treatment my_shape <- c("phytoplankton" = 21, "zooplankton" = 22) data <- models[[x]]$data names(my_cs) <- c("H", "A") type <- c("mod_phy" = "phytoplankton", "mod_zoo" = "zooplankton") s_dat <- data.frame(y = data$CN_ratio, y_rep = colMeans(brms::posterior_epred(models[[x]])), group = data$treatment, stringsAsFactors = FALSE) %>% dplyr::mutate(cols = my_cs[group]) my_theme <- function() { theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.1, 0.4, 0.2), "in"), panel.grid = element_blank(), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10)) } a <- ggplot(data = s_dat, mapping = aes(x = y_rep, y = y, fill = group)) + geom_point(shape = my_shape[type[x]], size = 2, data = s_dat) + geom_smooth(mapping = aes(x = y_rep, y = y), method = "lm", se = FALSE, lty = 2, colour = "grey60", size = 0.5, inherit.aes = FALSE) + geom_smooth(mapping = aes(x = y, y = y), method = "lm", se = FALSE, lty = 1, colour = "black", size = 0.5, inherit.aes = FALSE) + scale_fill_manual(values = my_cs) + labs(x = "Predicted", y = "Observed") + my_theme() + theme(legend.position = "none") list(data = s_dat, plot = a) }, models = cn_ratios, my_cols_treatment) a <- both[[1]]$plot + layer(data = both[[1]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 27, ymax = 30, xmin = 6, xmax = 11)) b <- both[[2]]$plot + layer(data = both[[2]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 9, ymax = 12, xmin = 2, xmax = 4)) gridExtra::grid.arrange(a, b, ncol = 2) } make_sp_fig_6 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, sp_fig_6(...), device = "pdf", width = 14, height = 6, units = "in", onefile = FALSE) } sp_fig_6 <- function(...) 
{ pp_checks_set <- function(type, data, model_list, my_cols_treatment, my_shape) { fit <- model_list[[type]]$model y_rep <- sim_pp_mean(data, fit, type) sdat <- data %>% dplyr::filter(sample == type) y <- sdat$add15N_perc x <- sdat$day bayesplot::color_scheme_set("gray") # preamble my_theme <- function() { theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.1, 0.4, 0.2), "in"), panel.grid = element_blank(), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10)) } mean_y <- function(data, group) { mean(data %>% dplyr::filter(treatment == group) %>% dplyr::select(add15N_perc) %>% unlist) } # for p_b h_dat <- plyr::adply(y_rep, 1, function(x, treat) { data.frame(group = c("A", "H"), y_rep = tapply(x, treat, mean), stringsAsFactors = FALSE) }, treat = sdat$treatment) # for p_c my_cs <- my_cols_treatment names(my_cs) <- c("H", "A") s_dat <- data.frame(y = y, y_rep = colMeans(y_rep), group = sdat$treatment, cols = my_cs[sdat$treatment], stringsAsFactors = FALSE) p_a <- bayesplot::ppc_dens_overlay(y, y_rep[1:200, ]) + labs(x = substitute("Excess "^15 * "N"["%"] * ", " * chi), y = "Density") + my_theme() + theme(legend.text = element_blank(), legend.position = c(0.58, 0.79), legend.background = element_blank()) + scale_y_continuous(expand = expansion(mult = c(0, 0.05))) + annotate_textp("Observed", x = 0.9, y = 0.85, hjust = 0) + annotate_textp("Predicted", x = 0.9, y = 0.74, hjust = 0) p_b <- ggplot(h_dat, aes(x = y_rep)) + geom_histogram(data = dplyr::filter(h_dat, group == "A"), colour = "black", fill = my_cols_treatment[2], size = 0.2) + geom_vline(mapping = aes(xintercept = mean_y(sdat, "A")), color = "black", size = 0.5, lty = 2) + geom_histogram(data = dplyr::filter(h_dat, group == "H"), colour = "black", fill = my_cols_treatment[1], size = 0.2) + geom_vline(mapping = aes(xintercept = mean_y(sdat, "H")), color = "black", size = 0.5, lty = 2) + scale_y_continuous(expand = expansion(mult = c(0, 0.05)), breaks = seq(0, 6000, 2000), labels = c("0.0", "2.0", "4.0", "6.0")) + labs(x = substitute("Mean " * chi * " (%)"), y = "Frequency (thousands)") + my_theme() + theme(legend.title = element_blank(), legend.text = element_text(size = 18, hjust = 0), legend.position = c(0.8, 0.8), legend.background = element_blank()) p_c <- ggplot(data = s_dat, mapping = aes(x = y_rep, y = y, fill = group)) + geom_point(shape = my_shape[type], size = 2, data = s_dat) + geom_smooth(mapping = aes(x = y_rep, y = y), method = "lm", se = FALSE, lty = 2, colour = "grey60", size = 0.5, inherit.aes = FALSE) + geom_smooth(mapping = aes(x = y, y = y), method = "lm", se = FALSE, lty = 1, colour = "black", size = 0.5, inherit.aes = FALSE) + scale_fill_manual(values = my_cs) + labs(x = "Predicted", y = "Observed") + my_theme() + theme(legend.position = "none") p_d <- bayesplot::ppc_intervals(y = y, yrep = y_rep, x = x, prob = 0.5) + labs(x = "Time (days)", y = substitute("Excess "^15 * "N"["%"] * ", " * chi)) + my_theme() + theme(legend.text = element_blank(), legend.position = c(0.58, 0.79), legend.background = element_blank()) + annotate_textp("Observed", x = 0.92, y = 0.85, hjust = 0) + annotate_textp("Predicted", x = 0.92, y = 0.74, hjust = 0) list(p_a = p_a, p_b = p_b, p_c = p_c, p_d = p_d) } out <- vector(mode = "list", length = 2) names(out) <- c("phy", "zoo") my_shape <- c("phytoplankton" = 21, "zooplankton" = 22) for (i in seq_along(out)) { 
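# Run the four posterior-predictive-check panels (p_a-p_d) once per group
# ("phytoplankton", "zooplankton") and prefix their names with "phy_"/"zoo_"
# so they can be arranged as two rows of four panels further down.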
out[[i]] <- pp_checks_set(names(my_shape)[i], my_shape = my_shape, ...) names(out[[i]]) <- paste0(names(out)[i], "_", names(out[[i]])) } p <- append(out[["phy"]], out[["zoo"]]) p <- gridExtra::grid.arrange(p$phy_p_a, p$phy_p_b, p$phy_p_c, p$phy_p_d, p$zoo_p_a, p$zoo_p_b, p$zoo_p_c, p$zoo_p_d, ncol = 4) ggpubr::annotate_figure(p, left = ggpubr::text_grob("", hjust = 0.2, vjust = 0.4, size = 40, rot = 90)) + annotation_custom(grid::grid.text("Phytoplankton", x = grid::unit(0.015, "npc"), y = grid::unit(0.8, "npc"), gp = grid::gpar("fontsize" = 20), hjust = 0.5, vjust = 0.5, rot = 90)) + annotation_custom(grid::grid.text("Zooplankton", x = grid::unit(0.015, "npc"), y = grid::unit(0.3, "npc"), gp = grid::gpar("fontsize" = 20), hjust = 0.5, vjust = 0.5, rot = 90)) } make_sp_fig_7 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, sp_fig_7(...), device = "pdf", width = 10.2, height = 4.8, units = "in", onefile = FALSE) } sp_fig_7 <- function(hierarchical_biomass, phy_png, zoo_png, my_cols_treatment) { targets <- names(hierarchical_biomass) both <- plyr::llply(targets, function(x, models, my_cols_treatment) { my_cs <- my_cols_treatment my_shape <- c("phytoplankton" = 21, "zooplankton" = 22) names(my_cs) <- c("H", "A") data <- models[[x]]$data s_dat <- data.frame(y = data$ln_av_C_ug_L, y_rep = colMeans(brms::posterior_epred(models[[x]])), group = data$treatment, stringsAsFactors = FALSE) %>% dplyr::mutate(cols = my_cs[group]) my_theme <- function() { theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.1, 0.4, 0.2), "in"), panel.grid = element_blank(), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10)) } a <- ggplot(data = s_dat, mapping = aes(x = y_rep, y = y, fill = group)) + geom_point(shape = my_shape[x], size = 2, data = s_dat) + geom_smooth(mapping = aes(x = y_rep, y = y), method = "lm", se = FALSE, lty = 2, colour = "grey60", size = 0.5, inherit.aes = FALSE) + geom_smooth(mapping = aes(x = y, y = y), method = "lm", se = FALSE, lty = 1, colour = "black", size = 0.5, inherit.aes = FALSE) + scale_fill_manual(values = my_cs) + labs(x = "Predicted", y = "Observed") + my_theme() + theme(legend.position = "none") list(data = s_dat, plot = a) }, models = hierarchical_biomass, my_cols_treatment) a <- both[[1]]$plot + layer(data = both[[1]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 10, ymax = 10.6, xmin = 6.2, xmax = 7)) b <- both[[2]]$plot + layer(data = both[[2]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 4.5, ymax = 6.5, xmin = -1.3, xmax = 0.2)) gridExtra::grid.arrange(a, b, ncol = 2) } make_sp_fig_8 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, sp_fig_8(...), device = "pdf", width = 7, height = 7, units = "in", onefile = FALSE) } sp_fig_8 <- function(community_data_2012, community_data_2016, size_data_2012, size_data_2016, my_cols_treatment) { sp_fig_8_bxpl <- function(data, my_cols_treatment, title) { cols <- rev(my_cols_treatment) ggplot(data = data, mapping = aes(y = log_av_C_ug, x = treatment)) + geom_boxplot(mapping = aes(colour = treatment), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 0.7, size = 0.3, outlier.shape = NA) + geom_point(mapping = aes(fill = treatment, shape = treatment), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(24, 25)) + labs(x = "Treatment", y = expression(log[10]~Organism~mass~(µg~C))) + theme_classic() + theme(plot.title = element_text(face = "bold")) + ggtitle(title) + scale_x_discrete(labels = c("Ambient", "Warmed")) } sp_fig_8_nmds <- function(data, my_cols_treatment, tlab) { treat_rest <- data$treatment data <- data %>% dplyr::select(-pond, -treatment) bc_mds <- vegan::metaMDS(data, distance = "bray", trace = FALSE, autotransform = FALSE, k = 2) ponds <- data.frame(bc_mds$points) %>% dplyr::mutate(treat = treat_rest, shape = ifelse(treat == "H", 25, 24)) species <- data.frame(bc_mds$species) %>% dplyr::arrange(MDS1) %>% tidyr::drop_na() spp_h <- head(species, 4) spp_a <- tail(species, 4) spp <- rbind(spp_h, spp_a) %>% dplyr::mutate(treat = rep(c("H", "A"), each = 4), shape = rep(c(25, 24), each = 4)) treat_a <- ponds[ponds$treat == "A", ][chull(ponds[ponds$treat == "A", c("MDS1", "MDS2")]), ] treat_h <- ponds[ponds$treat == "H", ][chull(ponds[ponds$treat == "H", c("MDS1", "MDS2")]), ] hull_data <- rbind(treat_a, treat_h) ggplot() + geom_point(data = ponds, mapping = aes(x = MDS1, y = MDS2, fill = treat), size = 3, shape = ponds$shape) + geom_point(data = species, mapping = aes(x = MDS1, y = MDS2, alpha = 0.001), size = 1.5) + geom_point(data = spp, mapping = aes(x = MDS1, y = MDS2, alpha = 0.001), size = 5, shape = spp$shape) + geom_polygon(data = hull_data, mapping = aes(x = MDS1, y = MDS2, fill = treat, group = treat), alpha = 0.30) + scale_colour_manual(values = rev(my_cols_treatment)) + scale_fill_manual(values = rev(my_cols_treatment)) + scale_shape_manual(values = c(21, 21)) + labs(x = "NMDS1", y = "NMDS2") + theme_classic() + ggtitle(tlab) + theme(legend.position = "none", plot.title = element_text(face = "bold")) } nmds_2012 <- sp_fig_8_nmds(community_data_2012, my_cols_treatment, "a (2012)") + geom_text(mapping = aes(x = -1, y = -1, label = "Stress = 0.12", size = 5), hjust = 0) + scale_y_continuous(limits = c(-1, 1), breaks = c(-1, -0.5, 0, 0.5, 1)) nmds_2016 <- sp_fig_8_nmds(community_data_2016, my_cols_treatment, "b (2016)") + geom_text(mapping = aes(x = -1.1, y = -1.8, label = "Stress = 0.05", size = 5), hjust = 0) + scale_y_continuous(limits = c(-1.8, 2), breaks = c(-1.8, -0.9, 0, 0.9, 1.8)) bxpl_2012 <- sp_fig_8_bxpl(size_data_2012, my_cols_treatment, "c (2012)") + scale_y_continuous(limits = c(-4.8, -2.4), breaks = c(-4.8, -3.8, -2.8)) bxpl_2016 <- sp_fig_8_bxpl(size_data_2016, my_cols_treatment, "d (2016)") + scale_y_continuous(limits = c(-4.8, -3.5), breaks = c(-4.8, -4.2, -3.8)) gridExtra::grid.arrange(nmds_2012, nmds_2016, bxpl_2012, bxpl_2016, ncol = 2) } make_sp_fig_9 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, sp_fig_9(...), device = "pdf", width = 6.3, height = 3.76, units = "in", onefile = FALSE) } sp_fig_9 <- function(data, my_cols_treatment) { data <- data %>% dplyr::mutate(shape = ifelse(treat == "H", 25, 24)) ggplot(data = data) + geom_line(mapping = aes(x = doy, y = ratio_fits, colour = treat, linetype = treat)) + scale_colour_manual(values = rev(my_cols_treatment)) + scale_fill_manual(values = rev(my_cols_treatment)) + geom_point(mapping = aes(x = doy, y = ER.GPP, colour = treat, fill = treat), alpha = 0.3, shape = data$shape) + cowplot::theme_cowplot(font_size = 12) + theme(legend.position = "none") + facet_wrap(~ year, scales = "fixed") + theme(strip.background = element_blank()) + xlab("Day of the Year") + ylab(expression(R[eco] / GPP)) } make_sp_fig_10 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, sp_fig_10(...), device = "pdf", width = 5.82, height = 2.95, units = "in", onefile = FALSE) } sp_fig_10 <- function(pp_means, phy_png, zoo_png, my_cols_treatment) { make_curve <- function(days, ln_ka, logit_ke, logit_phi) { ka <- exp(ln_ka) ke <- 1 / (1 + exp(-logit_ke)) phi <- (1 / ke) / (1 + exp(-logit_phi)) calc_eqn_1(days, ka, ke, phi) } curve_df <- function(data, days) { c_amb <- make_curve(days, data$ln_ka_amb, data$logit_ke_amb, data$logit_phi_amb) c_war <- make_curve(days, data$ln_ka_war, data$logit_ke_war, data$logit_phi_war) # cap samples that yield predictions # that are within the reasonable range of # data if (max(c_amb) < 1 && max(c_war) < 1) { data.frame(sample = data$sample, days = days, values = c(c_amb, c_war), treatment = rep(c("amb", "war"), each = length(c_amb))) } } make_prior_lines <- function(ln_ka_amb, ln_ka_war, logit_ke_amb, logit_ke_war, logit_phi_amb, logit_phi_war, sample, iter) { plot_data <- data.frame(iter = seq_len(iter)) %>% dplyr::mutate(ln_ka_amb = rnorm(iter, ln_ka_amb, 1), logit_ke_amb = rnorm(iter, logit_ke_amb, 1), logit_phi_amb = rnorm(iter, logit_phi_amb, 1), ln_ka_war = rnorm(iter, ln_ka_war, 1), logit_ke_war = rnorm(iter, logit_ke_war, 1), logit_phi_war = rnorm(iter, logit_phi_war, 1), sample = sample) days <- seq(0.01, 60, length.out = 60) plyr::ddply(plot_data, .(iter), curve_df, days) } prior_bkg <- data.frame(sample = c("phytoplankton", "zooplankton")) %>% dplyr::mutate(ln_ka_amb = c(0, 0), ln_ka_war = c(0, 0), logit_ke_amb = c(0, 0), logit_ke_war = c(0, 0), logit_phi_amb = c(0, 0), logit_phi_war = c(0, 0)) set.seed(1) prior_lines <- plyr::ddply(prior_bkg, .(sample), function(data) { make_prior_lines(data$ln_ka_amb, data$ln_ka_war, data$logit_ke_amb, data$logit_ke_war, data$logit_phi_amb, data$logit_phi_war, data$sample, iter = 1000) }) chosen <- dplyr::distinct(prior_lines, iter, sample) %>% dplyr::group_by(sample) %>% dplyr::summarise(chosen = sample(iter, 300)) %>% data.frame() prior_lines <- prior_lines %>% dplyr::group_by(sample) %>% dplyr::filter(iter %in% chosen$chosen[chosen$sample %in% unique(sample)]) %>% data.frame() prior_means <- plyr::ddply(prior_bkg, .(sample), curve_df, days = seq(0.01, 60, length.out = 60)) %>% dplyr::filter(treatment == "amb") %>% dplyr::select(sample, days, values) linesd <- pp_means$lines %>% dplyr::mutate(col = ifelse(treatment == "amb", my_cols_treatment[2], my_cols_treatment[1])) tp <- linesd %>% dplyr::distinct(sample, treatment) %>% dplyr::mutate(x = 1) ggplot() + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + 
ggnewscale::new_scale("fill") + geom_line(data = prior_lines, mapping = aes(x = days, y = values, group = paste(iter, treatment)), colour = "grey40", lty = 1, size = 0.5, alpha = 0.07, show.legend = FALSE) + geom_line(data = linesd, mapping = aes(x = x, y = mean, group = treatment, linetype = treatment), col = "black", size = 1, show.legend = FALSE) + geom_line(data = linesd, mapping = aes(x = x, y = mean, group = treatment, linetype = treatment, colour = treatment), size = 1) + scale_colour_manual(labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_linetype_manual(labels = c("Ambient", "Warmed"), values = c(1, 3)) + facet_wrap(~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(0.4, 0.1, 0.1, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 14), legend.position = c(0.5, 1.2), legend.direction = "horizontal", legend.background = element_blank(), legend.text = element_text(size = 12), legend.title = element_blank()) + ylab(substitute("Excess "^15 * "N"["%"] * ", " * chi)) + xlab("Time (days)") + layer(data = linesd %>% dplyr::filter(sample == "phytoplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 0.55, ymax = 0.75, xmin = 50, xmax = 60)) + layer(data = linesd %>% dplyr::filter(sample == "zooplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 0.45, ymax = 0.65, xmin = 50, xmax = 63)) } make_sp_fig_11_13 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, supp_resid_figure(...), device = "pdf", width = 10.5, height = 7.86, units = "in", onefile = FALSE) } supp_resid_figure <- function(data, pred_tag, my_cols_group, lab) { names(my_cols_group) <- c("phytoplankton", "zooplankton") data <- data %>% dplyr::filter(predictor == pred_tag) ggplot(data = data) + geom_polygon(mapping = aes(x = x_conf, y = y_conf, fill = sample), alpha = 0.5, show.legend = FALSE) + geom_line(mapping = aes(x = x, y = y, linetype = sample), colour = "black", show.legend = FALSE) + geom_point(mapping = aes(x = raw_x, y = raw_y, shape = sample, fill = sample), show.legend = FALSE, size = 2) + labs(x = lab, y = "Model residuals") + scale_shape_manual(values = c("phytoplankton" = 21, "zooplankton" = 22)) + scale_fill_manual(values = my_cols_group) + theme_bw() + facet_wrap(~ pond, scales = "free", dir = "v") + theme(axis.title = element_text(size = 15)) } make_resid_plot_data <- function(resid_brm_model, data) { plot_data <- plyr::ldply(resid_brm_model, function(model) { df <- model$data %>% tidyr::pivot_longer(cols = c(NO2_uM, NO3_uM, NH3_uM), names_to = "predictor", values_to = "value") plyr::ddply(df, .(pond), function(df_i, ...) 
{ means <- tapply(df_i$value, df_i$predictor, mean) plyr::ddply(df_i, .(predictor), function(dd, model, means) { pred_name <- unique(dd$predictor) means <- means[names(means) != pred_name] nd <- data.frame(x = seq(min(dd$value), max(dd$value), length.out = 50), pond = unique(dd$pond), pred_1 = means[[1]], pred_2 = means[[2]]) names(nd) <- c(pred_name, "pond", names(means)) fits <- fitted(model, newdata = nd) %>% cbind(nd) fits %>% { data.frame(y = NA, x = NA, x_conf = c(.[[pred_name]], rev(.[[pred_name]])), y_conf = c(.$Q2.5, rev(.$Q97.5)), raw_x = NA, raw_y = NA) } %>% rbind(fits %>% { data.frame(y = .$Estimate, x = .[[pred_name]], x_conf = NA, y_conf = NA, raw_x = NA, raw_y = NA) }) %>% rbind(dd %>% { data.frame(y = NA, x = NA, x_conf = NA, y_conf = NA, raw_x = .$value, raw_y = .$residual_15N_perc) }) }, means = means, ...) }, model = model) }) plot_data$treatment <- data$treatment[match(plot_data$pond, data$pond)] plot_data$pond <- paste0("Pond = ", formatC(plot_data$pond, width = 2, format = "d", flag = "0"), "; Treatment = ", ifelse(plot_data$treatment == "H", "W", plot_data$treatment)) plot_data }
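#######################
# USAGE SKETCH (ILLUSTRATIVE)
#######################
# Minimal sketch of how the figure wrappers above are meant to be called,
# assuming the inputs have already been prepared upstream. All object names,
# file paths and hex colours below ("output/figures", "phyto.png", "zoo.png",
# the colour pair) are placeholders for illustration only, not objects or
# files defined in this script. The wrappers share the calling convention
# make_*(dest, fig_out_folder, ...), with `...` forwarded to the matching
# fig_*() / ed_fig_*() builder.
demo_make_fig_3 <- function(biomass_data,
                            fig_out_folder = "output/figures",
                            phy_png_path = "phyto.png",
                            zoo_png_path = "zoo.png") {
  # biomass_data is assumed to contain the columns used by fig_3():
  # sample, pond, treatment and av_C_ug_L
  my_cols_treatment <- c("#D55E00", "#0072B2")  # placeholder pair: c(warmed, ambient)
  # silhouettes are read and trimmed into raster grobs with the
  # make_grob_png() helper defined earlier in this file
  phy_png <- make_grob_png(phy_png_path)
  zoo_png <- make_grob_png(zoo_png_path)
  make_fig_3(dest = file.path(fig_out_folder, "fig_3.pdf"),
             fig_out_folder = fig_out_folder,
             data = biomass_data,
             phy_png = phy_png,
             zoo_png = zoo_png,
             my_cols_treatment = my_cols_treatment)
}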
print(p) grid::grid.text("a", x = grid::unit(0.11, "npc"), y = grid::unit(0.82, "npc"), gp = grid::gpar("fontsize" = 13, "fontface" = "bold")) grid::grid.text("b", x = grid::unit(0.58, "npc"), y = grid::unit(0.82, "npc"), gp = grid::gpar("fontsize" = 13, "fontface" = "bold")) dev.off() } fig_1 <- function(post_pred_means, my_cols_treatment, phy_png, zoo_png) { polygons <- post_pred_means$polygons linesd <- post_pred_means$lines tp <- polygons %>% dplyr::distinct(sample, treatment) %>% dplyr::mutate(x = 1) ggplot(data = polygons, mapping = aes(x = x, y = y_cred)) + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + ggnewscale::new_scale("fill") + geom_polygon(data = polygons, mapping = aes(x = x, y = y_cred, fill = treatment)) + geom_line(data = linesd, mapping = aes(x = x, y = mean, group = treatment), col = "black", lty = 1, size = 0.5) + geom_polygon(data = polygons, mapping = aes(x = x, y = y_cred, fill = treatment), alpha = 0.7, inherit.aes = FALSE) + scale_fill_manual(labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + facet_wrap(~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(0.4, 0.1, 0.1, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 14), legend.position = c(0.5, 1.2), legend.direction = "horizontal", legend.text = element_text(size = 12), legend.title = element_blank()) + ylab(substitute("Excess "^15 * "N"["%"] * ", " * chi)) + xlab("Time (days)") + layer(data = polygons %>% dplyr::filter(sample == "phytoplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 0.5, ymax = 0.65, xmin = 43, xmax = 63)) + layer(data = polygons %>% dplyr::filter(sample == "zooplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 0.3, ymax = 0.45, xmin = 45, xmax = 65)) } make_fig_2 <- function(dest, fig_out_folder, ...) { p <- fig_2(...) 
pdf(dest, width = 5.737556, height = 6.950226) print(p) grid.text(substitute(kappa["a"] * " (d"^-1 * ")"), x = grid::unit(0.56, "npc"), y = grid::unit(0.775, "npc"), gp = grid::gpar("fontsize" = 10)) grid.text(substitute(kappa["e"] * " (d"^-1 * ")"), x = grid::unit(0.56, "npc"), y = grid::unit(0.54, "npc"), gp = grid::gpar("fontsize" = 10)) grid.text(substitute(italic(phi) * " (% d)"), x = grid::unit(0.565, "npc"), y = grid::unit(0.31, "npc"), gp = grid::gpar("fontsize" = 10)) grid.text(substitute("Efficiency of N transfer (" * bar(epsilon) * ")"), x = grid::unit(0.54, "npc"), y = grid::unit(0.075, "npc"), gp = grid::gpar("fontsize" = 10)) # legends grid.text("a", x = grid::unit(0.13, "npc"), y = grid::unit(0.985, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) grid.text("b", x = grid::unit(0.13, "npc"), y = grid::unit(0.76, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) grid.text("c", x = grid::unit(0.13, "npc"), y = grid::unit(0.525, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) grid.text("d", x = grid::unit(0.13, "npc"), y = grid::unit(0.295, "npc"), gp = grid::gpar("fontsize" = 11, "fontface" = "bold")) dev.off() } fig_2 <- function(data_eff, data_ka, data_ke, data_phi, my_cols_treatment, phy_png, zoo_png) { nreps <- nrow(data_eff) plot_data <- do.call("rbind.data.frame", list(data_ka, data_ke, data_phi, data_eff)) %>% dplyr::mutate(name = rep(c("a_kas", "b_kes", "c_phi", "d_eff"), each = nreps)) %>% plyr::ddply(.(name, sample, treatment), clip_vec_hdi) tp <- plot_data %>% dplyr::distinct(sample, name) %>% dplyr::mutate(param = 1) ggplot(data = plot_data, mapping = aes(x = param)) + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + ggnewscale::new_scale("fill") + geom_density(data = plot_data, adjust = 2, trim = TRUE, mapping = aes(x = param, fill = treatment), colour = NA) + geom_density(adjust = 2, alpha = 0.7, trim = TRUE, mapping = aes(fill = treatment, colour = treatment), show.legend = FALSE) + scale_fill_manual(values = rev(my_cols_treatment), labels = c("Ambient", "Warmed")) + facet_wrap(name~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(-0.3, 0.1, 0.6, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.y = grid::unit(-1.2, "lines"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.x = element_blank(), axis.title.y = element_text(size = 14), legend.position = c(0.5, -0.1), legend.direction = "horizontal", legend.text = element_text(size = 12), legend.title = element_blank()) + ylab("Posterior density (99% C.I.)\n") + scale_color_manual(values = rev(my_cols_treatment)) + scale_y_continuous(expand = expansion(mult = c(0, 0.05))) + coord_cartesian(clip = "off") + layer(data = plot_data %>% dplyr::filter(sample == "phytoplankton" & name == "a_kas"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 2.2, ymax = 3, xmin = 1, xmax = 1.25)) + layer(data = plot_data %>% dplyr::filter(sample == "zooplankton" & name == "a_kas"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 6, ymax = 10.5, xmin = 0.62, xmax = 0.74)) } make_fig_3 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, fig_3(...), device = "pdf", width = 3.87, height = 5.46, units = "in", onefile = FALSE) } fig_3 <- function(data, phy_png, zoo_png, my_cols_treatment) { cols <- rep(c(my_cols_treatment[2], my_cols_treatment[1]), 2) data <- data %>% dplyr::group_by(sample, pond, treatment) %>% dplyr::summarise(av_C_ug_L = mean(av_C_ug_L), ln_av_C_ug_L = log(av_C_ug_L)) a <- ggplot(data = data, mapping = aes(y = ln_av_C_ug_L, x = treatment)) + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 1.8, ymax = 1.8 + (10.7 - 1.8) / 2), fill = "#F6F1EB") + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 1.8 + (10.7 - 1.8) / 2, ymax = 10.7), fill = "#CCECE6") + geom_boxplot(mapping = aes(colour = interaction(treatment, sample)), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 1, size = 0.3) + geom_point(mapping = aes(fill = interaction(treatment, sample), shape = sample), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(21, 22)) + labs(x = "Treatment", y = substitute("Total Biomass (" * mu * "g C L"^-1 * ")")) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_x_discrete(expand = c(1, -0.5), labels = c("Ambient", "Warmed")) + scale_y_continuous(limits = c(1.8, 10.7), expand = c(0, 0), breaks = c(log(8), log(50), log(400), log(3e3), log(22e3)), labels = c(8, 50, 400, 3e3, 22e3)) a$coordinates$clip <- "off" a + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = phy_png, ymin = 6.6, ymax = 7.4, xmin = 0.3, xmax = 1.3)) + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = zoo_png, ymin = 4.5, ymax = 6, xmin = 2, xmax = 2.5)) } ####################### # EXTENDED DATA FIGURES ####################### make_internal_ed_fig_1d <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, internal_ed_fig_1d(...), device = "pdf", width = 8.5, height = 3.5, units = "in", onefile = FALSE) } internal_ed_fig_1d <- function(physchem_data, my_cols_treatment) { make_bef_aft_data <- function(data, vrb) { data$y_var <- data[[vrb]] data <- data %>% dplyr::select(Date, pairs, treatment, pond, y_var) %>% tidyr::drop_na() bef <- data %>% dplyr::filter(Date < as.Date("2013-07-16")) %>% dplyr::group_by(pairs, treatment, pond) %>% dplyr::summarise(mean = mean(y_var)) %>% dplyr::mutate(period = "before") %>% data.frame() aft <- data %>% dplyr::filter(Date > as.Date("2013-07-16") & Date < as.Date("2013-07-20")) %>% dplyr::group_by(pairs, treatment, pond) %>% dplyr::summarise(mean = mean(y_var)) %>% dplyr::mutate(period = "after") %>% data.frame() rbind(bef, aft) } plot_bef_aft <- function(data, vrb, ylab, my_cols_treatment) { data$vrb <- data[[vrb]] ggplot(data = data, mapping = aes(y = vrb, x = period, fill = treatment, shape = treatment)) + geom_boxplot(colour = "black", show.legend = FALSE, width = 0.6, size = 0.3, outlier.shape = NA) + geom_point(position = position_dodge(0.4), size = 2.5) + labs(x = "Treatment", y = ylab) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_x_discrete(labels = c("Before", "After")) + scale_fill_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_shape_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = c(24, 25)) } pdosat <- make_bef_aft_data(physchem_data, "DOsat") %>% plot_bef_aft("mean", "DO saturation (%)", my_cols_treatment) + scale_y_continuous(trans = "log10") + theme(legend.position = "none") pph <- make_bef_aft_data(physchem_data, "pH") %>% plot_bef_aft("mean", "pH", my_cols_treatment) + theme(legend.position = "none") gridExtra::grid.arrange(pdosat, pph, ncol = 2) } make_ed_fig_2 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, ed_fig_2(...), device = "pdf", width = 12, height = 4, units = "in", onefile = FALSE) } ed_fig_2 <- function(data, my_cols_treatment) { my_theme <- function(leg = FALSE) { theme_bw() + theme(axis.title.y = element_text(size = 14, margin = margin(t = 0, r = 10, b = 0, l = 10)), axis.title.x = element_text(size = 14, margin = margin(t = 10, r = 0, b = 10, l = 0)), axis.text.x = element_text(size = 12, margin = margin(t = 4, r = 0, b = 0, l = 0)), axis.text.y = element_text(size = 12, margin = margin(t = 0, r = 4, b = 0, l = 0)), axis.ticks.length = grid::unit(5, "pt"), strip.background = element_blank(), legend.position = if (leg) c(0.8, 0.8) else "none") } unique_ggplot <- function(data, vrb, my_cols_treatment, my_ylab, leg = FALSE) { data <- data %>% dplyr::mutate(sample_date = as.Date(sample_date, format = "%Y-%m-%d")) %>% dplyr::select(vrb = tidyselect::all_of(vrb), "sample_date", "treatment") %>% tidyr::drop_na() %>% dplyr::group_by(sample_date, treatment) %>% dplyr::summarise(mean = mean(vrb), sd = sd(vrb), se = sd / sqrt(n()), conf = se * 1.96) ggplot(data = data, mapping = aes(x = sample_date, y = mean, fill = treatment, shape = treatment)) + geom_errorbar(mapping = aes(ymax = mean + conf, ymin = ifelse(mean - conf < 0, 0, mean - conf)), width = 0.2, color = "black", lwd = 0.3, position = position_dodge(width = 1)) + geom_point(size = 2, position = position_dodge(width = 1)) + geom_vline(mapping = aes(xintercept = as.Date("2013-07-16")), color = "black", size = 0.5, lty = 2) + scale_fill_manual(values = my_cols_treatment, labels = c("Ambient", "Control", "Warmed"), name = "Treatment") + scale_shape_manual(values = c(24, 23, 25), labels = c("Ambient", "Control", "Warmed"), name = "Treatment") + ylab(my_ylab) + xlab("Sampling date (2013)") + my_theme(leg) } my_cols_treatment <- c(my_cols_treatment, "white") names(my_cols_treatment) <- c("H", "A", "C") no2 <- unique_ggplot(data, "NO2_uM", my_cols_treatment, substitute(mu * "mol " * a * " L"^-1, list(a = chemf("NO2-"))), leg = TRUE) no3 <- unique_ggplot(data, "NO3_uM", my_cols_treatment, substitute(mu * "mol " * a * " L"^-1, list(a = chemf("NO3-")))) nh4 <- unique_ggplot(data, "NH3_uM", my_cols_treatment, substitute(mu * "mol " * a * " L"^-1, list(a = chemf("NH4+")))) gridExtra::grid.arrange(no2, no3, nh4, ncol = 3) } make_ed_fig_3 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, ed_fig_3(...), device = "pdf", width = 7, height = 6, units = "in", onefile = FALSE) } ed_fig_3 <- function(co2_data, my_cols_treatment) { ylims <- co2_data %>% dplyr::group_by(treatment, period) %>% dplyr::summarise(lims = boxplot.stats(influx)$stats[c(1, 5)]) %>% data.frame() ylims <- range(ylims$lims) ################### ## OULIERS EXCLUDED ################### ggplot(data = co2_data, mapping = aes(y = influx, x = period, fill = treatment, shape = treatment)) + geom_boxplot(colour = "black", show.legend = FALSE, width = 0.6, size = 0.3, outlier.shape = NA) + geom_point(position = position_jitterdodge(dodge.width = 0.6, jitter.width = 0.1), size = 2.5) + labs(x = "Period", y = substitute("Daytime CO"[2] * " influx (" * mu * "mol m"^-2 * " d"^-1 * ")")) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 20, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 20, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 17, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 17), panel.grid = element_blank(), legend.position = c(0.85, 0.12), legend.background = element_blank(), legend.title = element_text(size = 20), legend.text = element_text(size = 16)) + scale_x_discrete(labels = c("Before", "After"), limits = rev(sort(unique(co2_data$period)))) + scale_fill_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_shape_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = c(24, 25)) + coord_cartesian(ylim = ylims * 1.03) } make_ed_fig_5 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_5(...), device = "pdf", width = 10.5, height = 7.86, units = "in", onefile = FALSE) } ed_fig_5 <- function(data, posterior_predictions, my_cols_treatment, my_cols_group) { empty_void <- function() { ggplot() + theme_void() } ambient_panel <- empty_panel_w_txt(color = my_cols_treatment[2], label = "Ambient") warmed_panel <- empty_panel_w_txt(color = my_cols_treatment[1], label = "Warmed") ponds <- posterior_predictions$ponds preds <- posterior_predictions$preds out <- vector(mode = "list", length = nrow(ponds) + 4) out[[1]] <- ambient_panel out[[2]] <- warmed_panel out[[19]] <- empty_void() out[[20]] <- empty_void() for (i in ponds$pond) { grob1 <- grid::grobTree(grid::textGrob(paste0("Pond #", ponds$pond[i]), x = 0.95, y = 0.9, hjust = 1, gp = grid::gpar(col = "grey30", fontsize = 10))) p <- ggplot(data = preds[[i]], mapping = aes(x = x, y = y_conf, fill = sample)) + geom_polygon(alpha = 0.2) + labs(x = "", y = "") + scale_color_manual(values = c(my_cols_group[1], my_cols_group[2]), aesthetics = "fill") + theme_bw() + theme(plot.margin = margin(-0.2, 0, 0, 0, "in"), axis.text.y = element_text(size = 9), legend.position = "none") + geom_line(mapping = aes(x = x, y = y_mean, colour = sample), data = preds[[i]][complete.cases(preds[[i]]), ], linetype = 2, size = 0.6, inherit.aes = FALSE) + scale_color_manual(values = c("seagreen4", my_cols_group[2]), aesthetics = "colour") + geom_point(mapping = aes(x = day, y = add15N_perc, shape = sample, fill = sample), size = 2, data = data[data$pond == ponds$pond[i], ], inherit.aes = FALSE) + scale_shape_manual(values = c(21, 22)) + scale_fill_manual(values = c(my_cols_group[1], my_cols_group[2])) + annotation_custom(grob1) if (i %in% seq(4, 16, 4)) { out[[i + 2]] <- p + theme(axis.text.x = element_text(size = 9)) } else { out[[i + 2]] <- p + theme(axis.text.x = element_text(size = 9)) + 
scale_x_continuous(labels = rep("", 4), breaks = seq(0, 60, 20)) } if (i == 8) { out[[i + 2]] <- out[[i + 2]] + scale_y_continuous(labels = seq(0, 0.2, 0.1), breaks = seq(0, 0.2, 0.1)) } } lay_mat <- rbind(matrix(c(1, 1, 2, 2), 1, 4, byrow = TRUE), matrix(NA, 1, 4, byrow = TRUE), matrix(rep(3:18, each = 4), 16, 4), matrix(c(19, 19, 20, 20), 1, 4, byrow = TRUE)) x <- gridExtra::arrangeGrob(grobs = out, layout_matrix = lay_mat) ggpubr::annotate_figure(x, bottom = ggpubr::text_grob("Time (days)", hjust = 0.5, vjust = -1, size = 25, lineheight = grid::unit(2, "in")), left = ggpubr::text_grob(substitute("Excess "^15 * "N"["%"] * ", " * chi), hjust = 0.25, vjust = 0.5, size = 25, rot = 90)) + annotation_custom(grid::grid.points(x = grid::unit(1.4, "in"), y = grid::unit(0.7, "in"), size = grid::unit(0.15, "in"), pch = 21, gp = grid::gpar(col = "black", fill = my_cols_group[1]))) + annotation_custom(grid::grid.text("Phytoplankton", x = grid::unit(1.5, "in"), y = grid::unit(0.7, "in"), hjust = 0, vjust = 0.5, gp = grid::gpar(cex = 1.2))) + annotation_custom(grid::grid.points(x = grid::unit(1.4, "in"), y = grid::unit(0.4, "in"), size = grid::unit(0.15, "in"), pch = 22, gp = grid::gpar(col = "black", fill = my_cols_group[2]))) + annotation_custom(grid::grid.text("Zooplankton", x = grid::unit(1.5, "in"), y = grid::unit(0.4, "in"), hjust = 0, vjust = 0.5, gp = grid::gpar(cex = 1.2))) } make_ed_fig_6 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_6(...), device = "pdf", width = 8.2, height = 3.8, units = "in", onefile = FALSE) } ed_fig_6 <- function(post_effs, diff_biomass, phy_png, zoo_png) { diff_effs <- tapply(post_effs$param, list(post_effs$iter, post_effs$sample), function(x) 1 - (x[2] / x[1])) %>% as.data.frame %>% dplyr::mutate(iter = seq_len(nrow(.))) %>% tidyr::pivot_longer(c(phytoplankton, zooplankton), names_to = "sample", values_to = "diff") %>% dplyr::mutate(variable = "efficiency") %>% dplyr::arrange(sample, iter) %>% dplyr::select(sample, iter, diff, variable) %>% as.data.frame plot_data <- rbind(diff_effs, diff_biomass) %>% dplyr::rename(param = diff) %>% dplyr::mutate(param = param * 1e2) %>% plyr::ddply(c("sample", "variable"), clip_vec_hdi) tp <- plot_data %>% dplyr::distinct(sample) %>% dplyr::mutate(param = 1) ggplot(data = plot_data, mapping = aes(x = param)) + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + ggnewscale::new_scale("fill") + geom_density(data = plot_data, adjust = 2, trim = TRUE, mapping = aes(x = param, fill = variable), colour = NA) + geom_density(adjust = 2, alpha = 0.7, trim = TRUE, mapping = aes(fill = variable, colour = variable), show.legend = FALSE) + scale_fill_manual(values = c("black", "grey60"), labels = c("Biomass", "Efficiency")) + facet_wrap(~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(0.3, 0.1, 0.3, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.x = element_text(size = 14, margin = grid::unit(c(0.15, 0, 0, 0), "in")), axis.title.y = element_text(size = 14), legend.position = c(0.5, 1.1), legend.direction = "horizontal", legend.text = element_text(size = 12), legend.title = element_blank()) + ylab("Posterior density (99% C.I.)\n") + xlab("Percentage decline relative to ambient") + scale_color_manual(values = c("black", "grey60")) + 
scale_y_continuous(expand = expansion(mult = c(0, 0.05))) + coord_cartesian(clip = "off") + layer(data = plot_data %>% dplyr::filter(sample == "phytoplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 0.028, ymax = 0.033, xmin = -10, xmax = 10)) + layer(data = plot_data %>% dplyr::filter(sample == "zooplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 0.017, ymax = 0.024, xmin = -40, xmax = -15)) } make_ed_fig_7 <- function(dest, fig_out_folder, ...) { pdf(dest, width = 9, height = 2.8) ed_fig_7(...) dev.off() } ed_fig_7 <- function(control_data, my_cols_group) { ponds <- unique(control_data$pond) par(mfrow = c(1, 3), omi = c(0, 0.5, 0, 0), mai = c(1.02, 0.72, 0.2, 0.1), cex = 1, cex.lab = 1.2, cex.axis = 0.8, cex.main = 1.2, xpd = NA) for (i in seq_along(ponds)) { x <- control_data %>% dplyr::filter(pond == ponds[i]) if (i == 1) { ylab <- substitute("Excess "^15 * "N"["%"] * ", " * chi) } else { ylab <- "" } plot(NA, xlim = c(0, 60), ylim = c(0, 0.1), xlab = ifelse(i == 2, "Time (days)", ""), ylab = ylab, las = 1) x %>% dplyr::filter(sample == "phytoplankton") %>% { points(.$day, .$add15N_perc, pch = 21, bg = my_cols_group[1], cex = 1.3) } x %>% dplyr::filter(sample == "zooplankton") %>% { points(.$day, .$add15N_perc, pch = 22, bg = my_cols_group[2], cex = 1.3) } LoLinR::proportionalLabel(0.95, 0.9, paste0("Control pond ", i), adj = c(1, 0.5), cex = 0.9) } } make_ed_fig_8 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_8(...), device = "pdf", width = 3.87, height = 5.46, units = "in", onefile = FALSE) } ed_fig_8 <- function(data, phy_png, zoo_png, my_cols_treatment) { cols <- rep(c(my_cols_treatment[2], my_cols_treatment[1]), 2) data <- data %>% dplyr::group_by(sample, pond, treatment) %>% dplyr::summarise(av_N_ug_L = mean(av_N_ug_L), ln_av_N_ug_L = log(av_N_ug_L)) a <- ggplot(data = data, mapping = aes(y = ln_av_N_ug_L, x = treatment)) + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 0.5, ymax = 0.5 + (8.5 - 0.5) / 2), fill = "#F6F1EB") + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 0.5 + (8.5 - 0.5) / 2, ymax = 8.5), fill = "#CCECE6") + geom_boxplot(aes(colour = interaction(treatment, sample)), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 1, size = 0.3, outlier.shape = NA) + geom_point(mapping = aes(fill = interaction(treatment, sample), shape = sample), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(21, 22)) + labs(x = "Treatment", y = substitute("Total Biomass (" * mu * "g N L"^-1 * ")")) + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_x_discrete(expand = c(1, -0.5), labels = c("Ambient", "Warmed")) + scale_y_continuous(limits = c(0.5, 8.5), expand = c(0, 0), breaks = c(log(8), log(50), log(400), log(3e3)), labels = c(8, 50, 400, 3e3)) a$coordinates$clip <- "off" a + layer(data = data, stat = StatIdentity, position = 
PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = phy_png, ymin = 7.4, ymax = 8.2, xmin = 0.3, xmax = 1.3)) + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = zoo_png, ymin = 3, ymax = 4.3, xmin = 2, xmax = 2.5)) } make_ed_fig_9 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, ed_fig_9(...), device = "pdf", width = 3.87, height = 5.46, units = "in", onefile = FALSE) } ed_fig_9 <- function(data, phy_png, zoo_png, my_cols_treatment) { cols <- rep(c(my_cols_treatment[2], my_cols_treatment[1]), 2) a <- ggplot(data = data, mapping = aes(y = mean, x = treatment)) + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 3, ymax = 7.5), fill = "#F6F1EB") + geom_rect(mapping = aes(xmin = -Inf, xmax = Inf, ymin = 7.5, ymax = 25), fill = "#CCECE6") + geom_boxplot(mapping = aes(colour = interaction(treatment, sample)), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 1, size = 0.3, outlier.shape = NA) + geom_point(mapping = aes(fill = interaction(treatment, sample), shape = sample), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(21, 22)) + labs(x = "Treatment", y = "C:N ratio") + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10), panel.grid = element_blank()) + scale_y_continuous(trans = "log10", limits = c(3, 25), expand = c(0, 0)) + scale_x_discrete(expand = c(1, -0.5), labels = c("Ambient", "Warmed")) a$coordinates$clip <- "off" a + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = phy_png, ymin = log10(20), ymax = log10(24), xmin = 2, xmax = 2.5)) + layer(data = data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = TRUE, params = list(grob = zoo_png, ymin = log10(3.2), ymax = log10(4.8), xmin = 0.5, xmax = 1)) } ####################### # SUPPLEMENTARY FIGURES ####################### make_sp_fig_inorganic <- function(dest, fig_out_folder, model_list, vrb, ...) { mod <- model_list[[vrb]] ggplot2::ggsave(dest, make_ba_plots(mod, ...), device = "pdf", width = 14, height = 7, units = "in", onefile = FALSE) } make_sp_fig_4 <- function(dest, fig_out_folder, mod, ...) 
{ names(mod$data)[1] <- "y" ggplot2::ggsave(dest, make_ba_plots(mod, ...), device = "pdf", width = 14, height = 7, units = "in", onefile = FALSE) } make_ba_plots <- function(mod, x_lab, my_cols_treatment) { a <- mod$data %>% dplyr::select(period, treatment) %>% dplyr::distinct() %>% tidybayes::add_fitted_draws(mod, dpar = c("mu", "sigma"), re_formula = ~ 0) %>% tidybayes::sample_draws(200) %>% ggplot(aes(y = interaction(period, treatment))) + ggdist::stat_dist_slab(mapping = aes(dist = "norm", arg1 = mu, arg2 = sigma), slab_color = "gray65", alpha = 5e-2, fill = NA, size = 0.2) + geom_point(data = mod$data, inherit.aes = FALSE, mapping = aes(x = y, y = interaction(period, treatment), fill = treatment, colour = treatment, shape = treatment), size = 3, alpha = 0.3) + labs(x = x_lab, y = "") + theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.3, 0.2, 0.4), "in"), axis.title.x = element_text(size = 20, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 20, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 17, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 17), panel.grid = element_blank(), legend.position = c(0.85, 0.9), legend.background = element_blank(), legend.title = element_text(size = 20), legend.text = element_text(size = 16)) + scale_y_discrete(labels = c("After", "Before", "After", "Before")) + scale_colour_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_fill_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_shape_manual(name = "Treatment", labels = c("Ambient", "Warmed"), values = c(24, 25)) b <- plot(mod, N = 6, plot = FALSE) gridExtra::grid.arrange(a, b[[1]], ncol = 2) } make_sp_fig_5 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, sp_fig_5(...), device = "pdf", width = 10.2, height = 4.8, units = "in", onefile = FALSE) } sp_fig_5 <- function(cn_ratios, phy_png, zoo_png, my_cols_treatment) { targets <- names(cn_ratios) both <- plyr::llply(targets, function(x, models, my_cols_treatment) { my_cs <- my_cols_treatment my_shape <- c("phytoplankton" = 21, "zooplankton" = 22) data <- models[[x]]$data names(my_cs) <- c("H", "A") type <- c("mod_phy" = "phytoplankton", "mod_zoo" = "zooplankton") s_dat <- data.frame(y = data$CN_ratio, y_rep = colMeans(brms::posterior_epred(models[[x]])), group = data$treatment, stringsAsFactors = FALSE) %>% dplyr::mutate(cols = my_cs[group]) my_theme <- function() { theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.1, 0.4, 0.2), "in"), panel.grid = element_blank(), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10)) } a <- ggplot(data = s_dat, mapping = aes(x = y_rep, y = y, fill = group)) + geom_point(shape = my_shape[type[x]], size = 2, data = s_dat) + geom_smooth(mapping = aes(x = y_rep, y = y), method = "lm", se = FALSE, lty = 2, colour = "grey60", size = 0.5, inherit.aes = FALSE) + geom_smooth(mapping = aes(x = y, y = y), method = "lm", se = FALSE, lty = 1, colour = "black", size = 0.5, inherit.aes = FALSE) + scale_fill_manual(values = my_cs) + labs(x = "Predicted", y = "Observed") + my_theme() + theme(legend.position = "none") list(data = s_dat, plot = a) }, models = cn_ratios, my_cols_treatment) a <- both[[1]]$plot + layer(data = both[[1]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 27, ymax = 30, xmin = 6, xmax = 11)) b <- both[[2]]$plot + layer(data = both[[2]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 9, ymax = 12, xmin = 2, xmax = 4)) gridExtra::grid.arrange(a, b, ncol = 2) } make_sp_fig_6 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, sp_fig_6(...), device = "pdf", width = 14, height = 6, units = "in", onefile = FALSE) } sp_fig_6 <- function(...) 
{ pp_checks_set <- function(type, data, model_list, my_cols_treatment, my_shape) { fit <- model_list[[type]]$model y_rep <- sim_pp_mean(data, fit, type) sdat <- data %>% dplyr::filter(sample == type) y <- sdat$add15N_perc x <- sdat$day bayesplot::color_scheme_set("gray") # preamble my_theme <- function() { theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.1, 0.4, 0.2), "in"), panel.grid = element_blank(), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10)) } mean_y <- function(data, group) { mean(data %>% dplyr::filter(treatment == group) %>% dplyr::select(add15N_perc) %>% unlist) } # for p_b h_dat <- plyr::adply(y_rep, 1, function(x, treat) { data.frame(group = c("A", "H"), y_rep = tapply(x, treat, mean), stringsAsFactors = FALSE) }, treat = sdat$treatment) # for p_c my_cs <- my_cols_treatment names(my_cs) <- c("H", "A") s_dat <- data.frame(y = y, y_rep = colMeans(y_rep), group = sdat$treatment, cols = my_cs[sdat$treatment], stringsAsFactors = FALSE) p_a <- bayesplot::ppc_dens_overlay(y, y_rep[1:200, ]) + labs(x = substitute("Excess "^15 * "N"["%"] * ", " * chi), y = "Density") + my_theme() + theme(legend.text = element_blank(), legend.position = c(0.58, 0.79), legend.background = element_blank()) + scale_y_continuous(expand = expansion(mult = c(0, 0.05))) + annotate_textp("Observed", x = 0.9, y = 0.85, hjust = 0) + annotate_textp("Predicted", x = 0.9, y = 0.74, hjust = 0) p_b <- ggplot(h_dat, aes(x = y_rep)) + geom_histogram(data = dplyr::filter(h_dat, group == "A"), colour = "black", fill = my_cols_treatment[2], size = 0.2) + geom_vline(mapping = aes(xintercept = mean_y(sdat, "A")), color = "black", size = 0.5, lty = 2) + geom_histogram(data = dplyr::filter(h_dat, group == "H"), colour = "black", fill = my_cols_treatment[1], size = 0.2) + geom_vline(mapping = aes(xintercept = mean_y(sdat, "H")), color = "black", size = 0.5, lty = 2) + scale_y_continuous(expand = expansion(mult = c(0, 0.05)), breaks = seq(0, 6000, 2000), labels = c("0.0", "2.0", "4.0", "6.0")) + labs(x = substitute("Mean " * chi * " (%)"), y = "Frequency (thousands)") + my_theme() + theme(legend.title = element_blank(), legend.text = element_text(size = 18, hjust = 0), legend.position = c(0.8, 0.8), legend.background = element_blank()) p_c <- ggplot(data = s_dat, mapping = aes(x = y_rep, y = y, fill = group)) + geom_point(shape = my_shape[type], size = 2, data = s_dat) + geom_smooth(mapping = aes(x = y_rep, y = y), method = "lm", se = FALSE, lty = 2, colour = "grey60", size = 0.5, inherit.aes = FALSE) + geom_smooth(mapping = aes(x = y, y = y), method = "lm", se = FALSE, lty = 1, colour = "black", size = 0.5, inherit.aes = FALSE) + scale_fill_manual(values = my_cs) + labs(x = "Predicted", y = "Observed") + my_theme() + theme(legend.position = "none") p_d <- bayesplot::ppc_intervals(y = y, yrep = y_rep, x = x, prob = 0.5) + labs(x = "Time (days)", y = substitute("Excess "^15 * "N"["%"] * ", " * chi)) + my_theme() + theme(legend.text = element_blank(), legend.position = c(0.58, 0.79), legend.background = element_blank()) + annotate_textp("Observed", x = 0.92, y = 0.85, hjust = 0) + annotate_textp("Predicted", x = 0.92, y = 0.74, hjust = 0) list(p_a = p_a, p_b = p_b, p_c = p_c, p_d = p_d) } out <- vector(mode = "list", length = 2) names(out) <- c("phy", "zoo") my_shape <- c("phytoplankton" = 21, "zooplankton" = 22) for (i in seq_along(out)) { 
out[[i]] <- pp_checks_set(names(my_shape)[i], my_shape = my_shape, ...) names(out[[i]]) <- paste0(names(out)[i], "_", names(out[[i]])) } p <- append(out[["phy"]], out[["zoo"]]) p <- gridExtra::grid.arrange(p$phy_p_a, p$phy_p_b, p$phy_p_c, p$phy_p_d, p$zoo_p_a, p$zoo_p_b, p$zoo_p_c, p$zoo_p_d, ncol = 4) ggpubr::annotate_figure(p, left = ggpubr::text_grob("", hjust = 0.2, vjust = 0.4, size = 40, rot = 90)) + annotation_custom(grid::grid.text("Phytoplankton", x = grid::unit(0.015, "npc"), y = grid::unit(0.8, "npc"), gp = grid::gpar("fontsize" = 20), hjust = 0.5, vjust = 0.5, rot = 90)) + annotation_custom(grid::grid.text("Zooplankton", x = grid::unit(0.015, "npc"), y = grid::unit(0.3, "npc"), gp = grid::gpar("fontsize" = 20), hjust = 0.5, vjust = 0.5, rot = 90)) } make_sp_fig_7 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, sp_fig_7(...), device = "pdf", width = 10.2, height = 4.8, units = "in", onefile = FALSE) } sp_fig_7 <- function(hierarchical_biomass, phy_png, zoo_png, my_cols_treatment) { targets <- names(hierarchical_biomass) both <- plyr::llply(targets, function(x, models, my_cols_treatment) { my_cs <- my_cols_treatment my_shape <- c("phytoplankton" = 21, "zooplankton" = 22) names(my_cs) <- c("H", "A") data <- models[[x]]$data s_dat <- data.frame(y = data$ln_av_C_ug_L, y_rep = colMeans(brms::posterior_epred(models[[x]])), group = data$treatment, stringsAsFactors = FALSE) %>% dplyr::mutate(cols = my_cs[group]) my_theme <- function() { theme_bw() + theme(plot.margin = grid::unit(c(0.2, 0.1, 0.4, 0.2), "in"), panel.grid = element_blank(), axis.title.x = element_text(size = 15, vjust = -1, hjust = 0.5), axis.title.y = element_text(size = 15, vjust = 4, hjust = 0.5), axis.text.x = element_text(size = 10, vjust = -1, hjust = 0.5), axis.text.y = element_text(size = 10)) } a <- ggplot(data = s_dat, mapping = aes(x = y_rep, y = y, fill = group)) + geom_point(shape = my_shape[x], size = 2, data = s_dat) + geom_smooth(mapping = aes(x = y_rep, y = y), method = "lm", se = FALSE, lty = 2, colour = "grey60", size = 0.5, inherit.aes = FALSE) + geom_smooth(mapping = aes(x = y, y = y), method = "lm", se = FALSE, lty = 1, colour = "black", size = 0.5, inherit.aes = FALSE) + scale_fill_manual(values = my_cs) + labs(x = "Predicted", y = "Observed") + my_theme() + theme(legend.position = "none") list(data = s_dat, plot = a) }, models = hierarchical_biomass, my_cols_treatment) a <- both[[1]]$plot + layer(data = both[[1]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 10, ymax = 10.6, xmin = 6.2, xmax = 7)) b <- both[[2]]$plot + layer(data = both[[2]]$data, stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 4.5, ymax = 6.5, xmin = -1.3, xmax = 0.2)) gridExtra::grid.arrange(a, b, ncol = 2) } make_sp_fig_8 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, sp_fig_8(...), device = "pdf", width = 7, height = 7, units = "in", onefile = FALSE) } sp_fig_8 <- function(community_data_2012, community_data_2016, size_data_2012, size_data_2016, my_cols_treatment) { sp_fig_8_bxpl <- function(data, my_cols_treatment, title) { cols <- rev(my_cols_treatment) ggplot(data = data, mapping = aes(y = log_av_C_ug, x = treatment)) + geom_boxplot(mapping = aes(colour = treatment), position = position_dodge(0), fill = cols, show.legend = FALSE, width = 0.7, size = 0.3, outlier.shape = NA) + geom_point(mapping = aes(fill = treatment, shape = treatment), colour = "black", position = position_jitterdodge(jitter.width = 0.4, dodge.width = 0, seed = 1), size = 2.5, show.legend = FALSE) + scale_colour_manual(values = rep("black", 4)) + scale_fill_manual(values = cols) + scale_shape_manual(values = c(24, 25)) + labs(x = "Treatment", y = expression(log[10]~Organism~mass~(µg~C))) + theme_classic() + theme(plot.title = element_text(face = "bold")) + ggtitle(title) + scale_x_discrete(labels = c("Ambient", "Warmed")) } sp_fig_8_nmds <- function(data, my_cols_treatment, tlab) { treat_rest <- data$treatment data <- data %>% dplyr::select(-pond, -treatment) bc_mds <- vegan::metaMDS(data, distance = "bray", trace = FALSE, autotransform = FALSE, k = 2) ponds <- data.frame(bc_mds$points) %>% dplyr::mutate(treat = treat_rest, shape = ifelse(treat == "H", 25, 24)) species <- data.frame(bc_mds$species) %>% dplyr::arrange(MDS1) %>% tidyr::drop_na() spp_h <- head(species, 4) spp_a <- tail(species, 4) spp <- rbind(spp_h, spp_a) %>% dplyr::mutate(treat = rep(c("H", "A"), each = 4), shape = rep(c(25, 24), each = 4)) treat_a <- ponds[ponds$treat == "A", ][chull(ponds[ponds$treat == "A", c("MDS1", "MDS2")]), ] treat_h <- ponds[ponds$treat == "H", ][chull(ponds[ponds$treat == "H", c("MDS1", "MDS2")]), ] hull_data <- rbind(treat_a, treat_h) ggplot() + geom_point(data = ponds, mapping = aes(x = MDS1, y = MDS2, fill = treat), size = 3, shape = ponds$shape) + geom_point(data = species, mapping = aes(x = MDS1, y = MDS2, alpha = 0.001), size = 1.5) + geom_point(data = spp, mapping = aes(x = MDS1, y = MDS2, alpha = 0.001), size = 5, shape = spp$shape) + geom_polygon(data = hull_data, mapping = aes(x = MDS1, y = MDS2, fill = treat, group = treat), alpha = 0.30) + scale_colour_manual(values = rev(my_cols_treatment)) + scale_fill_manual(values = rev(my_cols_treatment)) + scale_shape_manual(values = c(21, 21)) + labs(x = "NMDS1", y = "NMDS2") + theme_classic() + ggtitle(tlab) + theme(legend.position = "none", plot.title = element_text(face = "bold")) } nmds_2012 <- sp_fig_8_nmds(community_data_2012, my_cols_treatment, "a (2012)") + geom_text(mapping = aes(x = -1, y = -1, label = "Stress = 0.12", size = 5), hjust = 0) + scale_y_continuous(limits = c(-1, 1), breaks = c(-1, -0.5, 0, 0.5, 1)) nmds_2016 <- sp_fig_8_nmds(community_data_2016, my_cols_treatment, "b (2016)") + geom_text(mapping = aes(x = -1.1, y = -1.8, label = "Stress = 0.05", size = 5), hjust = 0) + scale_y_continuous(limits = c(-1.8, 2), breaks = c(-1.8, -0.9, 0, 0.9, 1.8)) bxpl_2012 <- sp_fig_8_bxpl(size_data_2012, my_cols_treatment, "c (2012)") + scale_y_continuous(limits = c(-4.8, -2.4), breaks = c(-4.8, -3.8, -2.8)) bxpl_2016 <- sp_fig_8_bxpl(size_data_2016, my_cols_treatment, "d (2016)") + scale_y_continuous(limits = c(-4.8, -3.5), breaks = c(-4.8, -4.2, -3.8)) gridExtra::grid.arrange(nmds_2012, nmds_2016, bxpl_2012, bxpl_2016, ncol = 2) } make_sp_fig_9 <- function(dest, fig_out_folder, ...) 
{ ggplot2::ggsave(dest, sp_fig_9(...), device = "pdf", width = 6.3, height = 3.76, units = "in", onefile = FALSE) } sp_fig_9 <- function(data, my_cols_treatment) { data <- data %>% dplyr::mutate(shape = ifelse(treat == "H", 25, 24)) ggplot(data = data) + geom_line(mapping = aes(x = doy, y = ratio_fits, colour = treat, linetype = treat)) + scale_colour_manual(values = rev(my_cols_treatment)) + scale_fill_manual(values = rev(my_cols_treatment)) + geom_point(mapping = aes(x = doy, y = ER.GPP, colour = treat, fill = treat), alpha = 0.3, shape = data$shape) + cowplot::theme_cowplot(font_size = 12) + theme(legend.position = "none") + facet_wrap(~ year, scales = "fixed") + theme(strip.background = element_blank()) + xlab("Day of the Year") + ylab(expression(R[eco] / GPP)) } make_sp_fig_10 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, sp_fig_10(...), device = "pdf", width = 5.82, height = 2.95, units = "in", onefile = FALSE) } sp_fig_10 <- function(pp_means, phy_png, zoo_png, my_cols_treatment) { make_curve <- function(days, ln_ka, logit_ke, logit_phi) { ka <- exp(ln_ka) ke <- 1 / (1 + exp(-logit_ke)) phi <- (1 / ke) / (1 + exp(-logit_phi)) calc_eqn_1(days, ka, ke, phi) } curve_df <- function(data, days) { c_amb <- make_curve(days, data$ln_ka_amb, data$logit_ke_amb, data$logit_phi_amb) c_war <- make_curve(days, data$ln_ka_war, data$logit_ke_war, data$logit_phi_war) # cap samples that yield predictions # that are within the reasonable range of # data if (max(c_amb) < 1 && max(c_war) < 1) { data.frame(sample = data$sample, days = days, values = c(c_amb, c_war), treatment = rep(c("amb", "war"), each = length(c_amb))) } } make_prior_lines <- function(ln_ka_amb, ln_ka_war, logit_ke_amb, logit_ke_war, logit_phi_amb, logit_phi_war, sample, iter) { plot_data <- data.frame(iter = seq_len(iter)) %>% dplyr::mutate(ln_ka_amb = rnorm(iter, ln_ka_amb, 1), logit_ke_amb = rnorm(iter, logit_ke_amb, 1), logit_phi_amb = rnorm(iter, logit_phi_amb, 1), ln_ka_war = rnorm(iter, ln_ka_war, 1), logit_ke_war = rnorm(iter, logit_ke_war, 1), logit_phi_war = rnorm(iter, logit_phi_war, 1), sample = sample) days <- seq(0.01, 60, length.out = 60) plyr::ddply(plot_data, .(iter), curve_df, days) } prior_bkg <- data.frame(sample = c("phytoplankton", "zooplankton")) %>% dplyr::mutate(ln_ka_amb = c(0, 0), ln_ka_war = c(0, 0), logit_ke_amb = c(0, 0), logit_ke_war = c(0, 0), logit_phi_amb = c(0, 0), logit_phi_war = c(0, 0)) set.seed(1) prior_lines <- plyr::ddply(prior_bkg, .(sample), function(data) { make_prior_lines(data$ln_ka_amb, data$ln_ka_war, data$logit_ke_amb, data$logit_ke_war, data$logit_phi_amb, data$logit_phi_war, data$sample, iter = 1000) }) chosen <- dplyr::distinct(prior_lines, iter, sample) %>% dplyr::group_by(sample) %>% dplyr::summarise(chosen = sample(iter, 300)) %>% data.frame() prior_lines <- prior_lines %>% dplyr::group_by(sample) %>% dplyr::filter(iter %in% chosen$chosen[chosen$sample %in% unique(sample)]) %>% data.frame() prior_means <- plyr::ddply(prior_bkg, .(sample), curve_df, days = seq(0.01, 60, length.out = 60)) %>% dplyr::filter(treatment == "amb") %>% dplyr::select(sample, days, values) linesd <- pp_means$lines %>% dplyr::mutate(col = ifelse(treatment == "amb", my_cols_treatment[2], my_cols_treatment[1])) tp <- linesd %>% dplyr::distinct(sample, treatment) %>% dplyr::mutate(x = 1) ggplot() + geom_rect(data = tp, mapping = aes(fill = sample), xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf, inherit.aes = FALSE, show.legend = FALSE) + scale_fill_manual(values = c("#CCECE6", "#F6F1EB")) + 
ggnewscale::new_scale("fill") + geom_line(data = prior_lines, mapping = aes(x = days, y = values, group = paste(iter, treatment)), colour = "grey40", lty = 1, size = 0.5, alpha = 0.07, show.legend = FALSE) + geom_line(data = linesd, mapping = aes(x = x, y = mean, group = treatment, linetype = treatment), col = "black", size = 1, show.legend = FALSE) + geom_line(data = linesd, mapping = aes(x = x, y = mean, group = treatment, linetype = treatment, colour = treatment), size = 1) + scale_colour_manual(labels = c("Ambient", "Warmed"), values = rev(my_cols_treatment)) + scale_linetype_manual(labels = c("Ambient", "Warmed"), values = c(1, 3)) + facet_wrap(~sample, scales = "free", ncol = 2) + theme_bw() + theme(plot.margin = grid::unit(c(0.4, 0.1, 0.1, 0.2), "in"), strip.background = element_blank(), strip.text = element_text(color = "transparent"), panel.spacing.x = grid::unit(1.2, "lines"), axis.title.y = element_text(size = 14), axis.title.x = element_text(size = 14), legend.position = c(0.5, 1.2), legend.direction = "horizontal", legend.background = element_blank(), legend.text = element_text(size = 12), legend.title = element_blank()) + ylab(substitute("Excess "^15 * "N"["%"] * ", " * chi)) + xlab("Time (days)") + layer(data = linesd %>% dplyr::filter(sample == "phytoplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = phy_png, ymin = 0.55, ymax = 0.75, xmin = 50, xmax = 60)) + layer(data = linesd %>% dplyr::filter(sample == "zooplankton"), stat = StatIdentity, position = PositionIdentity, geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE, params = list(grob = zoo_png, ymin = 0.45, ymax = 0.65, xmin = 50, xmax = 63)) } make_sp_fig_11_13 <- function(dest, fig_out_folder, ...) { ggplot2::ggsave(dest, supp_resid_figure(...), device = "pdf", width = 10.5, height = 7.86, units = "in", onefile = FALSE) } supp_resid_figure <- function(data, pred_tag, my_cols_group, lab) { names(my_cols_group) <- c("phytoplankton", "zooplankton") data <- data %>% dplyr::filter(predictor == pred_tag) ggplot(data = data) + geom_polygon(mapping = aes(x = x_conf, y = y_conf, fill = sample), alpha = 0.5, show.legend = FALSE) + geom_line(mapping = aes(x = x, y = y, linetype = sample), colour = "black", show.legend = FALSE) + geom_point(mapping = aes(x = raw_x, y = raw_y, shape = sample, fill = sample), show.legend = FALSE, size = 2) + labs(x = lab, y = "Model residuals") + scale_shape_manual(values = c("phytoplankton" = 21, "zooplankton" = 22)) + scale_fill_manual(values = my_cols_group) + theme_bw() + facet_wrap(~ pond, scales = "free", dir = "v") + theme(axis.title = element_text(size = 15)) } make_resid_plot_data <- function(resid_brm_model, data) { plot_data <- plyr::ldply(resid_brm_model, function(model) { df <- model$data %>% tidyr::pivot_longer(cols = c(NO2_uM, NO3_uM, NH3_uM), names_to = "predictor", values_to = "value") plyr::ddply(df, .(pond), function(df_i, ...) 
{ means <- tapply(df_i$value, df_i$predictor, mean) plyr::ddply(df_i, .(predictor), function(dd, model, means) { pred_name <- unique(dd$predictor) means <- means[names(means) != pred_name] nd <- data.frame(x = seq(min(dd$value), max(dd$value), length.out = 50), pond = unique(dd$pond), pred_1 = means[[1]], pred_2 = means[[2]]) names(nd) <- c(pred_name, "pond", names(means)) fits <- fitted(model, newdata = nd) %>% cbind(nd) fits %>% { data.frame(y = NA, x = NA, x_conf = c(.[[pred_name]], rev(.[[pred_name]])), y_conf = c(.$Q2.5, rev(.$Q97.5)), raw_x = NA, raw_y = NA) } %>% rbind(fits %>% { data.frame(y = .$Estimate, x = .[[pred_name]], x_conf = NA, y_conf = NA, raw_x = NA, raw_y = NA) }) %>% rbind(dd %>% { data.frame(y = NA, x = NA, x_conf = NA, y_conf = NA, raw_x = .$value, raw_y = .$residual_15N_perc) }) }, means = means, ...) }, model = model) }) plot_data$treatment <- data$treatment[match(plot_data$pond, data$pond)] plot_data$pond <- paste0("Pond = ", formatC(plot_data$pond, width = 2, format = "d", flag = "0"), "; Treatment = ", ifelse(plot_data$treatment == "H", "W", plot_data$treatment)) plot_data }
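## ---------------------------------------------------------------------------
## Illustrative sketch (editor addition, not part of the original analysis):
## several figures above (e.g. ed_fig_5, ed_fig_8, sp_fig_10) place image grobs
## inside individual ggplot panels by calling layer() with the unexported
## ggplot2:::GeomCustomAnn geom rather than annotation_custom(). Passing a
## data frame that contains the faceting variable (filtered to one sample)
## restricts the annotation to that facet, whereas annotation_custom() would
## repeat it in every panel. The minimal, self-contained demo below shows the
## same pattern with a plain rectangle grob; the data and coordinates are made
## up for illustration, and ggplot2 is assumed to be attached, as elsewhere in
## this file.
demo_custom_ann <- function() {
  demo_grob <- grid::rectGrob(gp = grid::gpar(fill = "grey80", col = NA))
  ggplot(data = data.frame(x = 1:10, y = 1:10), mapping = aes(x = x, y = y)) +
    geom_point() +
    layer(data = data.frame(x = 1), stat = StatIdentity, position = PositionIdentity,
          geom = ggplot2:::GeomCustomAnn, inherit.aes = FALSE,
          params = list(grob = demo_grob, xmin = 6, xmax = 9, ymin = 1, ymax = 4))
}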
#' Lemmatization List #' #' A dataset based on Mechura's (2016) English lemmatization list. This #' data set can be useful for join style lemma replacement of inflected token #' forms to their root lemmas. While this is not a true morphological analysis #' this style of lemma replacement is fast and typically still robust. #' #' @details #' \itemize{ #' \item token. An inflected token with affixes #' \item lemma. A base form #' } #' #' ## ODC Open Database License (ODbL) #' #' ### Preamble #' #' The Open Database License (ODbL) is a license agreement intended to #' allow users to freely share, modify, and use this Database while #' maintaining this same freedom for others. Many databases are covered by #' copyright, and therefore this document licenses these rights. Some #' jurisdictions, mainly in the European Union, have specific rights that #' cover databases, and so the ODbL addresses these rights, too. Finally, #' the ODbL is also an agreement in contract for users of this Database to #' act in certain ways in return for accessing this Database. #' #' Databases can contain a wide variety of types of content (images, #' audiovisual material, and sounds all in the same database, for example), #' and so the ODbL only governs the rights over the Database, and not the #' contents of the Database individually. Licensors should use the ODbL #' together with another license for the contents, if the contents have a #' single set of rights that uniformly covers all of the contents. If the #' contents have multiple sets of different rights, Licensors should #' describe what rights govern what contents together in the individual #' record or in some other way that clarifies what rights apply. #' #' Sometimes the contents of a database, or the database itself, can be #' covered by other rights not addressed here (such as private contracts, #' trade mark over the name, or privacy rights / data protection rights #' over information in the contents), and so you are advised that you may #' have to consult other documents or clear other rights before doing #' activities not covered by this License. #' #' ------ #' #' The Licensor (as defined below) #' #' and #' #' You (as defined below) #' #' agree as follows: #' #' ### 1.0 Definitions of Capitalised Words #' #' "Collective Database" - Means this Database in unmodified form as part #' of a collection of independent databases in themselves that together are #' assembled into a collective whole. A work that constitutes a Collective #' Database will not be considered a Derivative Database. #' #' "Convey" - As a verb, means Using the Database, a Derivative Database, #' or the Database as part of a Collective Database in any way that enables #' a Person to make or receive copies of the Database or a Derivative #' Database. Conveying does not include interaction with a user through a #' computer network, or creating and Using a Produced Work, where no #' transfer of a copy of the Database or a Derivative Database occurs. #' "Contents" - The contents of this Database, which includes the #' information, independent works, or other material collected into the #' Database. For example, the contents of the Database could be factual #' data or works such as images, audiovisual material, text, or sounds. #' #' "Database" - A collection of material (the Contents) arranged in a #' systematic or methodical way and individually accessible by electronic #' or other means offered under the terms of this License. 
#' #' "Database Directive" - Means Directive 96/9/EC of the European #' Parliament and of the Council of 11 March 1996 on the legal protection #' of databases, as amended or succeeded. #' #' "Database Right" - Means rights resulting from the Chapter III ("sui #' generis") rights in the Database Directive (as amended and as transposed #' by member states), which includes the Extraction and Re-utilisation of #' the whole or a Substantial part of the Contents, as well as any similar #' rights available in the relevant jurisdiction under Section 10.4. #' #' "Derivative Database" - Means a database based upon the Database, and #' includes any translation, adaptation, arrangement, modification, or any #' other alteration of the Database or of a Substantial part of the #' Contents. This includes, but is not limited to, Extracting or #' Re-utilising the whole or a Substantial part of the Contents in a new #' Database. #' #' "Extraction" - Means the permanent or temporary transfer of all or a #' Substantial part of the Contents to another medium by any means or in #' any form. #' #' "License" - Means this license agreement and is both a license of rights #' such as copyright and Database Rights and an agreement in contract. #' #' "Licensor" - Means the Person that offers the Database under the terms #' of this License. #' #' "Person" - Means a natural or legal person or a body of persons #' corporate or incorporate. #' #' "Produced Work" - a work (such as an image, audiovisual material, text, #' or sounds) resulting from using the whole or a Substantial part of the #' Contents (via a search or other query) from this Database, a Derivative #' Database, or this Database as part of a Collective Database. #' #' "Publicly" - means to Persons other than You or under Your control by #' either more than 50% ownership or by the power to direct their #' activities (such as contracting with an independent consultant). #' #' "Re-utilisation" - means any form of making available to the public all #' or a Substantial part of the Contents by the distribution of copies, by #' renting, by online or other forms of transmission. #' #' "Substantial" - Means substantial in terms of quantity or quality or a #' combination of both. The repeated and systematic Extraction or #' Re-utilisation of insubstantial parts of the Contents may amount to the #' Extraction or Re-utilisation of a Substantial part of the Contents. #' #' "Use" - As a verb, means doing any act that is restricted by copyright #' or Database Rights whether in the original medium or any other; and #' includes without limitation distributing, copying, publicly performing, #' publicly displaying, and preparing derivative works of the Database, as #' well as modifying the Database as may be technically necessary to use it #' in a different mode or format. #' #' "You" - Means a Person exercising rights under this License who has not #' previously violated the terms of this License with respect to the #' Database, or who has received express permission from the Licensor to #' exercise rights under this License despite a previous violation. #' #' Words in the singular include the plural and vice versa. #' #' ### 2.0 What this License covers #' #' 2.1. Legal effect of this document. This License is: #' #' a. A license of applicable copyright and neighbouring rights; #' #' b. A license of the Database Right; and #' #' c. An agreement in contract between You and the Licensor. #' #' 2.2 Legal rights covered. 
This License covers the legal rights in the #' Database, including: #' #' a. Copyright. Any copyright or neighbouring rights in the Database. #' The copyright licensed includes any individual elements of the #' Database, but does not cover the copyright over the Contents #' independent of this Database. See Section 2.4 for details. Copyright #' law varies between jurisdictions, but is likely to cover: the Database #' model or schema, which is the structure, arrangement, and organisation #' of the Database, and can also include the Database tables and table #' indexes; the data entry and output sheets; and the Field names of #' Contents stored in the Database; #' #' b. Database Rights. Database Rights only extend to the Extraction and #' Re-utilisation of the whole or a Substantial part of the Contents. #' Database Rights can apply even when there is no copyright over the #' Database. Database Rights can also apply when the Contents are removed #' from the Database and are selected and arranged in a way that would #' not infringe any applicable copyright; and #' #' c. Contract. This is an agreement between You and the Licensor for #' access to the Database. In return you agree to certain conditions of #' use on this access as outlined in this License. #' #' 2.3 Rights not covered. #' #' a. This License does not apply to computer programs used in the making #' or operation of the Database; #' #' b. This License does not cover any patents over the Contents or the #' Database; and #' #' c. This License does not cover any trademarks associated with the #' Database. #' #' 2.4 Relationship to Contents in the Database. The individual items of #' the Contents contained in this Database may be covered by other rights, #' including copyright, patent, data protection, privacy, or personality #' rights, and this License does not cover any rights (other than Database #' Rights or in contract) in individual Contents contained in the Database. #' For example, if used on a Database of images (the Contents), this #' License would not apply to copyright over individual images, which could #' have their own separate licenses, or one single license covering all of #' the rights over the images. #' #' ### 3.0 Rights granted #' #' 3.1 Subject to the terms and conditions of this License, the Licensor #' grants to You a worldwide, royalty-free, non-exclusive, terminable (but #' only under Section 9) license to Use the Database for the duration of #' any applicable copyright and Database Rights. These rights explicitly #' include commercial use, and do not exclude any field of endeavour. To #' the extent possible in the relevant jurisdiction, these rights may be #' exercised in all media and formats whether now known or created in the #' future. #' #' The rights granted cover, for example: #' #' a. Extraction and Re-utilisation of the whole or a Substantial part of #' the Contents; #' #' b. Creation of Derivative Databases; #' #' c. Creation of Collective Databases; #' #' d. Creation of temporary or permanent reproductions by any means and #' in any form, in whole or in part, including of any Derivative #' Databases or as a part of Collective Databases; and #' #' e. Distribution, communication, display, lending, making available, or #' performance to the public by any means and in any form, in whole or in #' part, including of any Derivative Database or as a part of Collective #' Databases. #' #' 3.2 Compulsory license schemes. For the avoidance of doubt: #' #' a. Non-waivable compulsory license schemes. 
In those jurisdictions in #' which the right to collect royalties through any statutory or #' compulsory licensing scheme cannot be waived, the Licensor reserves #' the exclusive right to collect such royalties for any exercise by You #' of the rights granted under this License; #' #' b. Waivable compulsory license schemes. In those jurisdictions in #' which the right to collect royalties through any statutory or #' compulsory licensing scheme can be waived, the Licensor waives the #' exclusive right to collect such royalties for any exercise by You of #' the rights granted under this License; and, #' #' c. Voluntary license schemes. The Licensor waives the right to collect #' royalties, whether individually or, in the event that the Licensor is #' a member of a collecting society that administers voluntary licensing #' schemes, via that society, from any exercise by You of the rights #' granted under this License. #' #' 3.3 The right to release the Database under different terms, or to stop #' distributing or making available the Database, is reserved. Note that #' this Database may be multiple-licensed, and so You may have the choice #' of using alternative licenses for this Database. Subject to Section #' 10.4, all other rights not expressly granted by Licensor are reserved. #' #' ### 4.0 Conditions of Use #' #' 4.1 The rights granted in Section 3 above are expressly made subject to #' Your complying with the following conditions of use. These are important #' conditions of this License, and if You fail to follow them, You will be #' in material breach of its terms. #' #' 4.2 Notices. If You Publicly Convey this Database, any Derivative #' Database, or the Database as part of a Collective Database, then You #' must: #' #' a. Do so only under the terms of this License or another license #' permitted under Section 4.4; #' #' b. Include a copy of this License (or, as applicable, a license #' permitted under Section 4.4) or its Uniform Resource Identifier (URI) #' with the Database or Derivative Database, including both in the #' Database or Derivative Database and in any relevant documentation; and #' #' c. Keep intact any copyright or Database Right notices and notices #' that refer to this License. #' #' d. If it is not possible to put the required notices in a particular #' file due to its structure, then You must include the notices in a #' location (such as a relevant directory) where users would be likely to #' look for it. #' #' 4.3 Notice for using output (Contents). Creating and Using a Produced #' Work does not require the notice in Section 4.2. However, if you #' Publicly Use a Produced Work, You must include a notice associated with #' the Produced Work reasonably calculated to make any Person that uses, #' views, accesses, interacts with, or is otherwise exposed to the Produced #' Work aware that Content was obtained from the Database, Derivative #' Database, or the Database as part of a Collective Database, and that it #' is available under this License. #' #' a. Example notice. The following text will satisfy notice under #' Section 4.3: #' #' Contains information from DATABASE NAME, which is made available #' here under the Open Database License (ODbL). #' #' DATABASE NAME should be replaced with the name of the Database and a #' hyperlink to the URI of the Database. "Open Database License" should #' contain a hyperlink to the URI of the text of this License. If #' hyperlinks are not possible, You should include the plain text of the #' required URI's with the above notice. 
#' #' 4.4 Share alike. #' #' a. Any Derivative Database that You Publicly Use must be only under #' the terms of: #' #' i. This License; #' #' ii. A later version of this License similar in spirit to this #' License; or #' #' iii. A compatible license. #' #' If You license the Derivative Database under one of the licenses #' mentioned in (iii), You must comply with the terms of that license. #' #' b. For the avoidance of doubt, Extraction or Re-utilisation of the #' whole or a Substantial part of the Contents into a new database is a #' Derivative Database and must comply with Section 4.4. #' #' c. Derivative Databases and Produced Works. A Derivative Database is #' Publicly Used and so must comply with Section 4.4. if a Produced Work #' created from the Derivative Database is Publicly Used. #' #' d. Share Alike and additional Contents. For the avoidance of doubt, #' You must not add Contents to Derivative Databases under Section 4.4 a #' that are incompatible with the rights granted under this License. #' #' e. Compatible licenses. Licensors may authorise a proxy to determine #' compatible licenses under Section 4.4 a iii. If they do so, the #' authorised proxy's public statement of acceptance of a compatible #' license grants You permission to use the compatible license. #' #' #' 4.5 Limits of Share Alike. The requirements of Section 4.4 do not apply #' in the following: #' #' a. For the avoidance of doubt, You are not required to license #' Collective Databases under this License if You incorporate this #' Database or a Derivative Database in the collection, but this License #' still applies to this Database or a Derivative Database as a part of #' the Collective Database; #' #' b. Using this Database, a Derivative Database, or this Database as #' part of a Collective Database to create a Produced Work does not #' create a Derivative Database for purposes of Section 4.4; and #' #' c. Use of a Derivative Database internally within an organisation is #' not to the public and therefore does not fall under the requirements #' of Section 4.4. #' #' 4.6 Access to Derivative Databases. If You Publicly Use a Derivative #' Database or a Produced Work from a Derivative Database, You must also #' offer to recipients of the Derivative Database or Produced Work a copy #' in a machine readable form of: #' #' a. The entire Derivative Database; or #' #' b. A file containing all of the alterations made to the Database or #' the method of making the alterations to the Database (such as an #' algorithm), including any additional Contents, that make up all the #' differences between the Database and the Derivative Database. #' #' The Derivative Database (under a.) or alteration file (under b.) must be #' available at no more than a reasonable production cost for physical #' distributions and free of charge if distributed over the internet. #' #' 4.7 Technological measures and additional terms #' #' a. This License does not allow You to impose (except subject to #' Section 4.7 b.) any terms or any technological measures on the #' Database, a Derivative Database, or the whole or a Substantial part of #' the Contents that alter or restrict the terms of this License, or any #' rights granted under it, or have the effect or intent of restricting #' the ability of any person to exercise those rights. #' #' b. Parallel distribution. 
You may impose terms or technological #' measures on the Database, a Derivative Database, or the whole or a #' Substantial part of the Contents (a "Restricted Database") in #' contravention of Section 4.74 a. only if You also make a copy of the #' Database or a Derivative Database available to the recipient of the #' Restricted Database: #' #' i. That is available without additional fee; #' #' ii. That is available in a medium that does not alter or restrict #' the terms of this License, or any rights granted under it, or have #' the effect or intent of restricting the ability of any person to #' exercise those rights (an "Unrestricted Database"); and #' #' iii. The Unrestricted Database is at least as accessible to the #' recipient as a practical matter as the Restricted Database. #' #' c. For the avoidance of doubt, You may place this Database or a #' Derivative Database in an authenticated environment, behind a #' password, or within a similar access control scheme provided that You #' do not alter or restrict the terms of this License or any rights #' granted under it or have the effect or intent of restricting the #' ability of any person to exercise those rights. #' #' 4.8 Licensing of others. You may not sublicense the Database. Each time #' You communicate the Database, the whole or Substantial part of the #' Contents, or any Derivative Database to anyone else in any way, the #' Licensor offers to the recipient a license to the Database on the same #' terms and conditions as this License. You are not responsible for #' enforcing compliance by third parties with this License, but You may #' enforce any rights that You have over a Derivative Database. You are #' solely responsible for any modifications of a Derivative Database made #' by You or another Person at Your direction. You may not impose any #' further restrictions on the exercise of the rights granted or affirmed #' under this License. #' #' ### 5.0 Moral rights #' #' 5.1 Moral rights. This section covers moral rights, including any rights #' to be identified as the author of the Database or to object to treatment #' that would otherwise prejudice the author's honour and reputation, or #' any other derogatory treatment: #' #' a. For jurisdictions allowing waiver of moral rights, Licensor waives #' all moral rights that Licensor may have in the Database to the fullest #' extent possible by the law of the relevant jurisdiction under Section #' 10.4; #' #' b. If waiver of moral rights under Section 5.1 a in the relevant #' jurisdiction is not possible, Licensor agrees not to assert any moral #' rights over the Database and waives all claims in moral rights to the #' fullest extent possible by the law of the relevant jurisdiction under #' Section 10.4; and #' #' c. For jurisdictions not allowing waiver or an agreement not to assert #' moral rights under Section 5.1 a and b, the author may retain their #' moral rights over certain aspects of the Database. #' #' Please note that some jurisdictions do not allow for the waiver of moral #' rights, and so moral rights may still subsist over the Database in some #' jurisdictions. #' #' ### 6.0 Fair dealing, Database exceptions, and other rights not affected #' #' 6.1 This License does not affect any rights that You or anyone else may #' independently have under any applicable law to make any use of this #' Database, including without limitation: #' #' a. 
Exceptions to the Database Right including: Extraction of Contents #' from non-electronic Databases for private purposes, Extraction for #' purposes of illustration for teaching or scientific research, and #' Extraction or Re-utilisation for public security or an administrative #' or judicial procedure. #' #' b. Fair dealing, fair use, or any other legally recognised limitation #' or exception to infringement of copyright or other applicable laws. #' #' 6.2 This License does not affect any rights of lawful users to Extract #' and Re-utilise insubstantial parts of the Contents, evaluated #' quantitatively or qualitatively, for any purposes whatsoever, including #' creating a Derivative Database (subject to other rights over the #' Contents, see Section 2.4). The repeated and systematic Extraction or #' Re-utilisation of insubstantial parts of the Contents may however amount #' to the Extraction or Re-utilisation of a Substantial part of the #' Contents. #' #' ### 7.0 Warranties and Disclaimer #' #' 7.1 The Database is licensed by the Licensor "as is" and without any #' warranty of any kind, either express, implied, or arising by statute, #' custom, course of dealing, or trade usage. Licensor specifically #' disclaims any and all implied warranties or conditions of title, #' non-infringement, accuracy or completeness, the presence or absence of #' errors, fitness for a particular purpose, merchantability, or otherwise. #' Some jurisdictions do not allow the exclusion of implied warranties, so #' this exclusion may not apply to You. #' #' ### 8.0 Limitation of liability #' #' 8.1 Subject to any liability that may not be excluded or limited by law, #' the Licensor is not liable for, and expressly excludes, all liability #' for loss or damage however and whenever caused to anyone by any use #' under this License, whether by You or by anyone else, and whether caused #' by any fault on the part of the Licensor or not. This exclusion of #' liability includes, but is not limited to, any special, incidental, #' consequential, punitive, or exemplary damages such as loss of revenue, #' data, anticipated profits, and lost business. This exclusion applies #' even if the Licensor has been advised of the possibility of such #' damages. #' #' 8.2 If liability may not be excluded by law, it is limited to actual and #' direct financial loss to the extent it is caused by proved negligence on #' the part of the Licensor. #' #' ### 9.0 Termination of Your rights under this License #' #' 9.1 Any breach by You of the terms and conditions of this License #' automatically terminates this License with immediate effect and without #' notice to You. For the avoidance of doubt, Persons who have received the #' Database, the whole or a Substantial part of the Contents, Derivative #' Databases, or the Database as part of a Collective Database from You #' under this License will not have their licenses terminated provided #' their use is in full compliance with this License or a license granted #' under Section 4.8 of this License. Sections 1, 2, 7, 8, 9 and 10 will #' survive any termination of this License. #' #' 9.2 If You are not in breach of the terms of this License, the Licensor #' will not terminate Your rights under it. #' #' 9.3 Unless terminated under Section 9.1, this License is granted to You #' for the duration of applicable rights in the Database. #' #' 9.4 Reinstatement of rights. 
If you cease any breach of the terms and #' conditions of this License, then your full rights under this License #' will be reinstated: #' #' a. Provisionally and subject to permanent termination until the 60th #' day after cessation of breach; #' #' b. Permanently on the 60th day after cessation of breach unless #' otherwise reasonably notified by the Licensor; or #' #' c. Permanently if reasonably notified by the Licensor of the #' violation, this is the first time You have received notice of #' violation of this License from the Licensor, and You cure the #' violation prior to 30 days after your receipt of the notice. #' #' Persons subject to permanent termination of rights are not eligible to #' be a recipient and receive a license under Section 4.8. #' #' 9.5 Notwithstanding the above, Licensor reserves the right to release #' the Database under different license terms or to stop distributing or #' making available the Database. Releasing the Database under different #' license terms or stopping the distribution of the Database will not #' withdraw this License (or any other license that has been, or is #' required to be, granted under the terms of this License), and this #' License will continue in full force and effect unless terminated as #' stated above. #' #' ### 10.0 General #' #' 10.1 If any provision of this License is held to be invalid or #' unenforceable, that must not affect the validity or enforceability of #' the remainder of the terms and conditions of this License and each #' remaining provision of this License shall be valid and enforced to the #' fullest extent permitted by law. #' #' 10.2 This License is the entire agreement between the parties with #' respect to the rights granted here over the Database. It replaces any #' earlier understandings, agreements or representations with respect to #' the Database. #' #' 10.3 If You are in breach of the terms of this License, You will not be #' entitled to rely on the terms of this License or to complain of any #' breach by the Licensor. #' #' 10.4 Choice of law. This License takes effect in and will be governed by #' the laws of the relevant jurisdiction in which the License terms are #' sought to be enforced. If the standard suite of rights granted under #' applicable copyright law and Database Rights in the relevant #' jurisdiction includes additional rights not granted under this License, #' these additional rights are granted in this License in order to meet the #' terms of this License. #' @docType data #' @keywords datasets #' @name hash_lemmas #' @usage data(hash_lemmas) #' @format A data frame with 41,531 rows and 2 variables #' @references Mechura, M. B. (2016). \emph{Lemmatization list: English (en)} [Data file]. Retrieved from \url{http://www.lexiconista.com} NULL
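Editor's note: hash_lemmas is documented above as a two-column lookup table (token, lemma) intended for join-style lemma replacement. The sketch below illustrates that use; it assumes the lexicon package (which ships hash_lemmas) is installed, and the tokens vector is an invented example, not part of the original file.

# Hedged sketch: look up each token's lemma, falling back to the token itself when no lemma is listed.
library(lexicon)                                            # assumed to provide hash_lemmas
data(hash_lemmas)
tokens <- data.frame(token = c("studies", "ran", "mice"))   # invented input
out <- merge(tokens, hash_lemmas, by = "token", all.x = TRUE)
out$lemma <- ifelse(is.na(out$lemma), out$token, out$lemma)
out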
/trinker-lexicon-4c5e22b/R/hash_lemmas.R
no_license
pratyushaj/abusive-language-online
R
false
false
27,633
r
#' Example phenotype data file from Prefrontal Cortex (PFC) Methylation Data of Alzheimer's Disease subjects
#'
#' @description Subset of phenotype information for Alzheimer's methylation dataset.
#'
#' @format A data frame containing variables for Braak stage (stage), subject.id, Batch (slide), Sex,
#'   Sample, age of brain sample (age.brain)
#'
#' @source GEO accession: GSE59685
"pheno_df"
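A minimal usage sketch for the dataset documented above, assuming the coMethDMR package is installed; the column name follows the @format description.

library(coMethDMR)        # assumed source of pheno_df
data(pheno_df)
str(pheno_df)
table(pheno_df$stage)     # distribution of Braak stages, per the @format block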
/R/data_pheno_df.R
no_license
lissettegomez/coMethDMR
R
false
false
394
r
#' Elastic Net Weighting Method
#'
#' @param pred_train A list that contains multi-study datasets (can be generated from the simulation function)
#' @param dat_train A data frame that combines all multi-study datasets
#' @return The coefficients for each study using stacking regression with the intercept weighting method
#' @seealso [MultiStudySim()]
#' @seealso [get_input_data()]
#' @examples
#' lst_ori <- MultiStudySim(k, nk, p, m, mu_x, SIG, pii, mu_beta, sigma_beta)
#' a <- get_input_data(lst_ori)[[1]]
#' b <- get_input_data(lst_ori)[[2]]
#' fit <- lapply(a, function(a) lm(y ~ ., data = a))
#' b1 <- lapply(fit, function(fit)
#'   predict(fit, b[, -which(names(b) %in% c('y'))]))
#' elnet_weight_int(b1, b)
elnet_weight_int <- function(pred_train, dat_train){
  # Combine the per-study predictions and the observed outcome into one matrix
  train.data <- as.matrix.data.frame(cbind(do.call(cbind, pred_train), dat_train$y))
  colnames(train.data) <- c(1:(ncol(train.data) - 1), 'y')
  # Elastic net (glmnet via caret), regressing the outcome on the study-level predictions
  model <- caret::train(
    y ~ ., data = train.data, method = "glmnet",
    trControl = caret::trainControl("cv", number = 5),
    tuneLength = 10
  )
  # Coefficients (intercept plus one weight per study) at the best tuning parameter
  nl_elnet_int <- coef(model$finalModel, model$bestTune$lambda)
  return(nl_elnet_int)
}
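A hedged follow-up to the roxygen example above, showing how the returned stacking weights could be applied; here b1 simply stands in for a list of per-study predictions on new data, and nothing below is taken from the package itself.

w <- as.numeric(elnet_weight_int(b1, b))   # intercept first, then one weight per study
X_new <- cbind(1, do.call(cbind, b1))      # prepend a column of ones for the intercept
y_hat <- drop(X_new %*% w)                 # stacked ensemble prediction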
/MultiStudyPack/mssp/R/elnet_weight_int.R
no_license
xutao-wang/MultiStudySim
R
false
false
1,134
r
#' classify_layer
#'
#' @param rna_counts count matrix
#' @param cells_to_use vector indicating which cells to classify in layer
#' @param imputation whether to apply MAGIC imputation - recommended
#' @param genes_in_model genes used in the model
#' @param model model
#' @param ref_based whether to use ref based - currently deprecated
#' @param mc.cores number of cores
#' @param unimodal_nsd number of sd for setting threshold in unimodal distributions
#' @param bimodal_nsd number of sd for setting threshold in bimodal distributions
#' @param layer which layer is being classified
#' @param normalized_counts normalized count matrix
#'
#' @return returns a prediction matrix for a layer - used in run_scATOMIC
#' @export
classify_layer <- function(rna_counts, cells_to_use, imputation = TRUE, genes_in_model, model,
                           ref_based = F, mc.cores = (parallel::detectCores() - 1),
                           unimodal_nsd = 3, bimodal_nsd = 2, layer, normalized_counts){
  # Parallel forking is not available on Windows, so fall back to a single core
  if(.Platform$OS.type == "windows"){
    mc.cores = 1
  }
  # Score each requested cell against the layer's model
  layer_predictions <- classify_cells_RNA_no_scale(rna_counts = rna_counts, imputation = imputation,
                                                   genes_in_model = genes_in_model, model = model,
                                                   cells_to_use = cells_to_use, ref_based = ref_based,
                                                   normalized_counts = normalized_counts, mc.cores = mc.cores)
  layer_predictions <- add_class_per_layer(layer_predictions = layer_predictions, layer = layer)
  # Assign each cell the class with the highest score for this layer
  predicted_class <- unlist(parallel::mclapply(row.names(layer_predictions), score_class,
                                               predictions = layer_predictions, layer = layer,
                                               mc.cores = mc.cores), use.names = F)
  layer_predictions <- cbind(layer_predictions, predicted_class)
  layer_predictions$predicted_class <- as.character(predicted_class)
  # Derive per-class score cutoffs and flag low-confidence cells as unclassified
  threshold_per_class <- get_auto_threshold(layer_predictions, mc.cores = mc.cores)
  predicted_tissue_with_cutoff <- unlist(parallel::mclapply(row.names(layer_predictions),
                                                            add_unclassified_automatic_threshold,
                                                            predictions = layer_predictions, layer = layer,
                                                            threshold_use = threshold_per_class,
                                                            mc.cores = mc.cores), use.names = F)
  layer_predictions <- cbind(layer_predictions, predicted_tissue_with_cutoff)
  return(layer_predictions)
}
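A hedged usage sketch for the function above; counts, norm_counts, genes_layer1, model_layer1, and the "layer_1" label are placeholders for a raw count matrix, its normalized counterpart, and a bundled gene list and model, not objects shipped under those names.

layer1 <- classify_layer(rna_counts        = counts,
                         cells_to_use      = colnames(counts),
                         imputation        = TRUE,
                         genes_in_model    = genes_layer1,
                         model             = model_layer1,
                         layer             = "layer_1",
                         normalized_counts = norm_counts,
                         mc.cores          = 2)
table(layer1$predicted_tissue_with_cutoff)   # per-class counts, including unclassified cells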
/R/classify_layer.R
permissive
abelson-lab/scATOMIC
R
false
false
2,657
r
\name{ghq}
\docType{data}
\alias{ghq}
\title{Psychiatric diagnosis based on GHQ}
\description{
These data were published by Silvapulle, and come from a psychiatric study of
the relation between psychiatric diagnosis (as case or non-case) and the value
of the score on a 12-item General Health Questionnaire (\acronym{GHQ}), for
120 patients attending a general practitioner's surgery. Each patient was
administered the \acronym{GHQ}, resulting in a score between 0 and 12
(however there were no cases or non-cases with \acronym{GHQ} scores of 11 or
12), and was subsequently given a full psychiatric examination by a
psychiatrist who did not know the patient's \acronym{GHQ} score. The patient
was classified by the psychiatrist as either a ``case'', requiring
psychiatric treatment, or a ``non-case''.
}
\usage{data(ghq)}
\format{
  \tabular{rll}{
    [,1] \tab sex \tab Factor w/ 2 levels "men","women"\cr
    [,2] \tab ghq \tab integer, score from 0,...,12\cr
    [,3] \tab c   \tab integer, number of patients considered a "case"\cr
    [,4] \tab nc  \tab integer, number of patients considered a "non-case"\cr
  }
}
\source{Silvapulle, M. J. (1981), "On the existence of maximum likelihood
estimators for the binomial response model", \emph{J. Roy. Statist. Soc. B.},
\bold{43}, 310--13.
}
\note{See p.235 in SMIR}
\keyword{datasets}
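A hedged illustration of the kind of binomial model these grouped case/non-case counts support; the formula is illustrative only and is not taken from SMIR.

library(SMIR)     # assumed to provide the ghq data frame
data(ghq)
fit <- glm(cbind(c, nc) ~ ghq + sex, family = binomial, data = ghq)
summary(fit)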
/man/ghq.Rd
no_license
cran/SMIR
R
false
false
1,375
rd
##----------------------------------------------------------------
##
## fire.polygon.pcvalues.R
##
## Purpose: append principal component values (mean within fire perimeter)
## to fire polygon shp to use in analysis of high severity metrics
##
## Author: S. Haire, @HaireLab
##
## Date: 3 july 2020
##
##--------------------------------------------------------------
##
## Notes:
## Before running this script, download the fire perimeters
## < https://doi.org/10.5066/P9BB5TIO >
##
## PC1 and PC2 rasters are available in the repository data folder
##
##---------------------------------------------------------------

library(raster)
library(rgdal)
library(landscapemetrics)
library(plyr)
library(dplyr)
library(geosphere)

## Read in the data and project the fire perimeter polygons to match the principal component layers

## paths to input data
perimpath <- './data/sp'   # fire perimeters...put the shp with new attributes here too
pcpath <- './data/PCA/'    ## PCA rasters

## data
## pc's have bioclim data projection
bioclim.prj <- "+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs" # from metadata lambert conformal conic

## read in pc's and assign prj
pc1 <- raster(paste(pcpath, "PC1b.rot.tif", sep="")); crs(pc1) <- bioclim.prj
pc2 <- raster(paste(pcpath, "PC2b.rot.tif", sep="")); crs(pc2) <- bioclim.prj

## read in perimeters/sp polys and project the perimeters to match the pc's
perims <- readOGR(perimpath, "Sky_Island_Fire_Polys_1985_2017")
perims.lcc <- spTransform(perims, bioclim.prj)

## Extract pc1 & pc2 values and output the mean w/in polygon (fire perimeter). Save appended shp.

## stack the pc's and extract values within the polygons
s <- stack(pc1, pc2)
pc.ex <- extract(s, perims.lcc, method="bilinear", df=TRUE)

## calculate the mean values within the fire perimeters
mean.pc <- ddply(pc.ex, ~ID, summarise, mean.pc1 = mean(PC1b.rot), mean.pc2 = mean(PC2b.rot))

## add pc mean values to the spatial polygons
perims$pc1 <- mean.pc[,2]; perims$pc2 <- mean.pc[,3]
perims <- perims[, c(1:5, 15, 16)] ## just save the year, name & id, country, sky island and pc values

## save the perim shp
writeOGR(perims, "./data/sp", "fire.polys.pcvalues", driver="ESRI Shapefile", overwrite=TRUE)
/fire.pcvalues.R
no_license
HaireLab/Fuego-en-la-Frontera
R
false
false
2,293
r
# Example 5.3
n <- 10
nrep <- 1000
xbar <- 1.4   # the only value needed from the data
set.seed(6)
x <- matrix(rpois(nrep*n, xbar), ncol=10)
xmean <- x %*% rep(1, n)/n
xmean <- c(xmean)
par(mfrow=c(1,1))
hist(exp(-xmean), nclass=20,
     xlab=expression(paste(exp,'(',-bar(x),'*)')),
     main='')
title('Bootstrap distribution of exp(-xbar)', cex=.4)
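A short, hedged follow-up to the example above: the same bootstrap replicates also give a standard error and a 95% percentile interval for exp(-xbar).

sd(exp(-xmean))                           # bootstrap standard error
quantile(exp(-xmean), c(0.025, 0.975))    # 95% percentile interval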
/R/LKPACK/EX5-3.R
permissive
yanliangs/in-all-likelihood
R
false
false
359
r
\name{fractionation}
\alias{fractionation}
\docType{data}
\title{British Institute of Radiology Fractionation Studies}
\description{
  The British Institute of Radiology (BIR) conducted two large-scale
  randomized clinical trials to assess the effectiveness of different
  radiotherapy treatment schedules for cancer of the larynx and pharynx.
  The combined data come from 858 subjects with laryngeal squamous cell
  carcinomas and no involvement of adjacent organs. These data have been
  described and analyzed by Rezvani, Fowler, Hopewell, and Alcock (1993).
}
\usage{data(fractionation)}
\format{
  A data frame with 858 observations on the following 6 variables.
  \describe{
    \item{\code{response}}{Three-year local control 1 (0 no control).}
    \item{\code{dose}}{Total dose (grays).}
    \item{\code{df}}{Total dose x dose/fraction.}
    \item{\code{time}}{Total time of treatment (days).}
    \item{\code{kt2}}{Tumor status (indicators for 2nd level of factor).}
    \item{\code{kt3}}{Tumor status (indicators for 3rd level of factor).}
  }
}
\details{
  Three-year local control - meaning no detection of laryngeal carcinoma
  within three years after treatment - is the binary response, coded as 1 if
  local control is achieved and 0 otherwise. For this data set, three-year
  local control is achieved for 69\% of the cases.

  In a treatment schedule, a total dose of radiation is administered in
  fractions over a treatment period. The dose per fraction df is measured in
  grays (Gy), the length of the treatment period, time, is measured in days,
  and the number of fractions of the dose is nf.

  Tumors are classified by stage (i.e., the extent of invasion) into three
  groups. This categorical covariate is coded by two indicator variables kt2
  and kt3, which are defined by kt2=1 (kt3=1) if the tumor is stage II
  (stage III) and zero otherwise. Chappell, Nondahl and Fowler (1995) argued
  that the tumor stage, the total dose, the total time, and the interaction
  of the total dose per fraction are the relevant explanatory variables
  affecting the probability of local control.
}
\references{
  Chappell, R., Nondahl, D.M., and Fowler, J.F. (1995) Modelling Dose and
  Local Control in Radiotherapy. Journal of the American Statistical
  Association, 90: 829 - 838.

  Newton, M.A., Czado, C., and Chappell, R. (1996) Bayesian inference for
  semiparametric binary regression. Journal of the American Statistical
  Association, 91, 142-153.

  Rezvani, M., Fowler, J., Hopewell, J., and Alcock, C. (1993) Sensitivity of
  Human Squamous Cell Carcinoma of the Larynx to Fractionated Radiotherapy.
  British Journal of Radiology, 66: 245 - 255.
}
\examples{
data(fractionation)
## maybe str(fractionation) ; plot(fractionation) ...
}
\keyword{datasets}
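A hedged sketch translating the covariates named in the Details section into a simple parametric fit; the cited analyses use richer, semiparametric models, so this is illustrative only.

library(DPpackage)   # assumed source of the fractionation data frame
data(fractionation)
fit <- glm(response ~ dose + df + time + kt2 + kt3,
           family = binomial, data = fractionation)
summary(fit)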
/man/fractionation.Rd
no_license
cran/DPpackage
R
false
false
2,853
rd
" This function takes a specific data frame with numeric items in the 'rate' column and character items in the 'hospital' column. It creates a data frame that is ordered first by the rate column and second by the hospital name column. " orderData <- function(info) { orderedIndexes <- order(info$rate, info$hospital) rates <- info$rate[orderedIndexes] names <- info$hospital[orderedIndexes] return(data.frame(hospital=names, rate=rates)) }
/RCourse/assignments/assignment3/baseRWay/orderdata.R
no_license
statisticallyfit/R
R
false
false
463
r
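A small usage example for the orderData function above, with a made-up data frame in the shape it expects.

info <- data.frame(hospital = c("B", "A", "C"),
                   rate = c(12.1, 10.4, 12.1),
                   stringsAsFactors = FALSE)
orderData(info)   # ties on 'rate' are broken alphabetically by 'hospital'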
" This function takes a specific data frame with numeric items in the 'rate' column and character items in the 'hospital' column. It creates a data frame that is ordered first by the rate column and second by the hospital name column. " orderData <- function(info) { orderedIndexes <- order(info$rate, info$hospital) rates <- info$rate[orderedIndexes] names <- info$hospital[orderedIndexes] return(data.frame(hospital=names, rate=rates)) }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/room.R
\name{hipchat_delete_room}
\alias{hipchat_delete_room}
\title{Delete a Hipchat room.}
\usage{
hipchat_delete_room(room_name_or_id, confirm = TRUE)
}
\arguments{
\item{room_name_or_id}{character or integer.}

\item{confirm}{logical. Whether or not to ask for a confirmation message
before deleting the room. By default, \code{TRUE}. (Deleting rooms is
dangerous!)}
}
\value{
\code{TRUE} or \code{FALSE} according as the room was deleted.
}
\description{
Delete a Hipchat room.
}
\examples{
\dontrun{
hipchat_create_room('Example room')
hipchat_delete_room('Example room') # Will ask a confirmation message.
hipchat_delete_room('Example room', confirm = FALSE) # Dangerous! No confirmation.
}
}
/man/hipchat_delete_room.Rd
no_license
davidboren/hipchat
R
false
true
788
rd
testlist <- list(x = c(1.90986016056392e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) result <- do.call(diceR:::indicator_matrix,testlist) str(result)
/diceR/inst/testfiles/indicator_matrix/libFuzzer_indicator_matrix/indicator_matrix_valgrind_files/1609959638-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
282
r
#topics ----
#factors, env, import/export. package install
#rep, recode, split, partition, subset, loops, cast & melt
#missing values. duplicates, apply
#graphs - bar, multiple line, pie, box, corrgram

fit = lm(weight ~ height, data = women)
summary(fit)
(ndata = data.frame(height = c(60.5, 75.5)))
(predictedwt = predict(fit, newdata = ndata))
cbind(ndata, predictedwt)
resid(fit)
fitted(fit)
/Simple Linear Regression - Women.R
no_license
Nirgun32/analytics
R
false
false
396
r
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/NSCLC.csv", head=T, sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x, y, nfolds=10, type.measure="mae", alpha=0, family="gaussian", standardize=TRUE)
sink('./NSCLC_003.txt', append=TRUE)
print(glm$glmnet.fit)
sink()
/Model/EN/Classifier/NSCLC/NSCLC_003.R
no_license
esbgkannan/QSMART
R
false
false
344
r
library(magrittr)

#TODO: update for auth with webAPi
stop(1)

overwrite <- TRUE

atlasCohorts <- data.frame(atlasName = c("[EPI_851] New users of beta-blockers",
                                         "[EPI_851] New users of ACE-inhibitors",
                                         "[EPI_851] New users of thiazides or thiazide-like diuretics",
                                         "[EPI_851] Angioedema"),
                           entityType = c("C", "T", "C", "O"))

tryCatch({
  ROhdsiWebApi:::getWebApiVersion(baseUrl = Sys.getenv("WEBAPI_BASE_URL"))
}, error = function(e) {
  print(sprintf("Could not interface with WebAPI:\n\t%s", e))
  stop(1)
})

replaceExpr <- function(x, extras = NULL, extrasReplace = NULL) {
  x <- gsub("^\\[.*\\]\\s+", "", x)
  if(!is.null(extras) && !is.null(extrasReplace) && length(extras) == length(extrasReplace)) {
    for(i in 1:length(extras)) {
      x <- gsub(extras[i], extrasReplace[i], x)
    }
  }
  return(x)
}

cohortsToCreate <- NULL
for (id in 1:nrow(atlasCohorts)) {
  atlasCohort <- atlasCohorts[id, ]
  print(sprintf('checking if cohort %s exists', atlasCohort$atlasName))
  t <- ROhdsiWebApi::existsCohortName(baseUrl = Sys.getenv("WEBAPI_BASE_URL"),
                                      cohortName = atlasCohort$atlasName)
  if (tibble::is_tibble(t) && overwrite) {
    cohortDef <- ROhdsiWebApi::getCohortDefinition(baseUrl = Sys.getenv("WEBAPI_BASE_URL"),
                                                   cohortId = t$id)
    json <- RJSONIO::toJSON(cohortDef$expression, pretty = TRUE)
    normalizedName <- stringr::str_to_title(gsub("-", " ", t$name))
    normalizedName <- gsub("^\\[.*\\]\\s+|\\s+", "", normalizedName)
    if(grepl("Or", normalizedName)) {
      normalizedName <- strsplit(normalizedName, "Or")[[1]][1]
    }
    #normalizedName <- replaceExpr(stringr::str_to_title(replaceExpr(t$name)), extras = c("-", " "), extrasReplace = c(" ", ""))
    SqlRender::writeSql(json, file.path("inst", "cohorts", paste0(normalizedName, ".json")))
    if(is.null(cohortsToCreate)) {
      cohortsToCreate <- data.frame(atlasId = t$id, atlasName = t$name, cohortId = t$id,
                                    name = normalizedName, entityType = atlasCohort$entityType)
      next
    }
    cohortsToCreate <- rbind(cohortsToCreate, c(t$id, t$name, t$id, normalizedName, atlasCohort$entityType))
  } else {
    print(sprintf('cohort %s does not exist??', atlasCohort$atlasName))
    next
  }
}

readr::write_csv(cohortsToCreate, file.path("inst", "settings", "CohortsToCreate.csv"))

# something up with generating sql?
# ROhdsiWebApi::insertCohortDefinitionSetInPackage(baseUrl = Sys.getenv("WEBAPI_BASE_URL"), insertTableSql = FALSE, insertCohortCreationR = FALSE)

jsonFiles <- list.files(file.path("inst", "cohorts"), pattern = "\\.json$", full.names = TRUE)
options <- CirceR::createGenerateOptions()
for(file in jsonFiles) {
  print(file)
  fileData <- readChar(file, file.info(file)$size)
  j <- CirceR::cohortExpressionFromJson(fileData)
  sql <- CirceR::buildCohortQuery(j, options)
  print(sprintf("writing %s to disc", basename(file)))
  SqlRender::writeSql(sql, file.path("inst", "sql", "sql_server", paste0(gsub("json", "sql", basename(file)))))
}

targetIds <- cohortsToCreate %>% dplyr::filter(entityType == "T") %>% dplyr::pull(cohortId)
comparatorIds <- cohortsToCreate %>% dplyr::filter(entityType == "C") %>% dplyr::pull(cohortId)
outcomeIds <- cohortsToCreate %>% dplyr::filter(entityType == "O") %>% dplyr::pull(cohortId)

print("Creating TCOs of interest")
tcosOfInterest <- expand.grid(targetIds, comparatorIds)
colnames(tcosOfInterest) <- c("targetId", "comparatorId")
tcosOfInterest[, "outcomeIds"] = paste0(outcomeIds, collapse = ";")
readr::write_csv(tcosOfInterest, file.path("inst", "settings", "TcosOfInterest.csv"))

print("Harmonizing negative controls")
negativeControlInit <- readr::read_csv(file.path("inst", "settings", "NegativeControlsInit.csv"), col_types = readr::cols())
negativeControlConcepts <- unique(negativeControlInit$outcomeId)
# ncs <- expand.grid(combn(cohortsToCreate$cohortId, 2, simplify=FALSE), negativeControlConcepts)
# ncs <- cbind(do.call(rbind, ncs$Var1), ncs$Var2)
ncs <- expand.grid(targetIds, comparatorIds, negativeControlConcepts)
colnames(ncs) <- c("targetId", "comparatorId", "outcomeId")
ncs <- merge(ncs, unique(negativeControlInit[, c("outcomeId", "outcomeName", "type")]))
ncs <- ncs[, c(2, 3, 1, 4, 5)]
ncs <- ncs[order(ncs[, 1], ncs[, 2], ncs[, 3]), ]
readr::write_csv(ncs, file.path("inst", "settings", "NegativeControls.csv"))

source(file.path("extras", "CreateAnalyses.R"))
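For readers unfamiliar with the TcosOfInterest table written above, this self-contained sketch shows how expand.grid produces one row per target-comparator pair; the cohort IDs are placeholders, not values from the study:

targetIds <- c(101)             # hypothetical "T" cohort id
comparatorIds <- c(102, 103)    # hypothetical "C" cohort ids
outcomeIds <- c(201)            # hypothetical "O" cohort id
tcos <- expand.grid(targetIds, comparatorIds)
colnames(tcos) <- c("targetId", "comparatorId")
tcos[, "outcomeIds"] <- paste0(outcomeIds, collapse = ";")
tcos  # one row per target-comparator pair, outcome ids collapsed with ";"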
/extras/Setup.R
no_license
ohdsi-studies/CovariateImbalanceDiagnosticsEvaluation
R
false
false
4,641
r
library(e1071)
library(mlbench)
library(caret)
library(Amelia)

# uploading data
data <- read.table(file.choose(), header = T, sep = ",")

# To Check
is.na(data)
missmap(data)

# To Remove
na.omit(data)
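Two points worth noting about the script above: is.na(data) prints a full logical matrix, and na.omit(data) returns a new object rather than changing data in place. A small illustrative extension (assuming data has been read in as above):

colSums(is.na(data))              # missing values per column
data_complete <- na.omit(data)    # keep only complete rows
nrow(data) - nrow(data_complete)  # number of rows dropped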
/1. Missing.R
no_license
zeeshan-ramzan/Cervical-Cancer-Risk-Assessment
R
false
false
210
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Basic_EndoPathExtractors.R
\name{HistolTypeAndSite}
\alias{HistolTypeAndSite}
\title{HistolTypeAndSite}
\usage{
HistolTypeAndSite(inputString1, inputString2, procedureString)
}
\arguments{
\item{inputString1}{The first column to look in}

\item{inputString2}{The second column to look in}

\item{procedureString}{The column with the procedure in it}
}
\value{
a list with two columns: one is the type and site, and the other is the index
to be used for OPCS4 coding later if needed.
}
\description{
Extracts the histology type and site from the supplied report columns. Used in
the OPCS4 coding.
}
\examples{
Myendo2 <- Endomerge2(Myendo, 'Dateofprocedure', 'HospitalNumber',
                      Mypath, 'Dateofprocedure', 'HospitalNumber')
PathSiteAndType <- HistolTypeAndSite(Myendo2$PathReportWhole,
                                     Myendo2$Macroscopicdescription,
                                     Myendo2$ProcedurePerformed)
}
\keyword{Find}
\keyword{and}
\keyword{replace}
/man/HistolTypeAndSite.Rd
no_license
camcaan/enDo
R
false
true
928
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{users_delete_2fa}
\alias{users_delete_2fa}
\title{Wipes the user's current 2FA settings so that they must reset them upon next login}
\usage{
users_delete_2fa(id)
}
\arguments{
\item{id}{integer required. The ID of this user.}
}
\value{
A list containing the following elements:
\item{id}{integer, The ID of this user.}
\item{user}{string, The username of this user.}
\item{name}{string, The name of this user.}
\item{email}{string, The email of this user.}
\item{active}{boolean, Whether this user account is active or deactivated.}
\item{primaryGroupId}{integer, The ID of the primary group of this user.}
\item{groups}{array, An array containing the following fields:
\itemize{
\item id integer, The ID of this group.
\item name string, The name of this group.
\item slug string, The slug of this group.
\item organizationId integer, The ID of the organization associated with this group.
\item organizationName string, The name of the organization associated with this group.
}}
\item{city}{string, The city of this user.}
\item{state}{string, The state of this user.}
\item{timeZone}{string, The time zone of this user.}
\item{initials}{string, The initials of this user.}
\item{department}{string, The department of this user.}
\item{title}{string, The title of this user.}
\item{githubUsername}{string, The GitHub username of this user.}
\item{prefersSmsOtp}{boolean, The preference for phone authorization of this user}
\item{vpnEnabled}{boolean, The availability of vpn for this user.}
\item{ssoDisabled}{boolean, The availability of SSO for this user.}
\item{otpRequiredForLogin}{boolean, The two factor authentication requirement for this user.}
\item{exemptFromOrgSmsOtpDisabled}{boolean, Whether the user has SMS OTP enabled on an individual level. This field does not matter if the org does not have SMS OTP disabled.}
\item{smsOtpAllowed}{boolean, Whether the user is allowed to receive two factor authentication codes via SMS.}
\item{robot}{boolean, Whether the user is a robot.}
\item{phone}{string, The phone number of this user.}
\item{organizationSlug}{string, The slug of the organization the user belongs to.}
\item{organizationSSODisableCapable}{boolean, The user's organization's ability to disable sso for their users.}
\item{organizationLoginType}{string, The user's organization's login type.}
\item{organizationSmsOtpDisabled}{boolean, Whether the user's organization has SMS OTP disabled.}
\item{myPermissionLevel}{string, Your permission level on the object. One of "read", "write", or "manage".}
\item{createdAt}{string, The date and time when the user was created.}
\item{updatedAt}{string, The date and time when the user was last updated.}
\item{lastSeenAt}{string, The date and time when the user last visited Platform.}
\item{suspended}{boolean, Whether the user is suspended due to inactivity.}
\item{createdById}{integer, The ID of the user who created this user.}
\item{lastUpdatedById}{integer, The ID of the user who last updated this user.}
\item{unconfirmedEmail}{string, The new email address awaiting confirmation from the user.}
\item{accountStatus}{string, Account status of this user. One of: "Active", "Deactivated", "Suspended", "Unsuspended"}
}
\description{
Wipes the user's current 2FA settings so that they must reset them upon next login
}
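A minimal usage sketch (the user ID is a placeholder and a configured CIVIS_API_KEY is assumed; this is not part of the generated documentation):

library(civis)
u <- users_delete_2fa(id = 1234)  # 1234 is a hypothetical user ID
u$otpRequiredForLogin             # inspect the returned user record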
/man/users_delete_2fa.Rd
no_license
cran/civis
R
false
true
3,399
rd
get_exclusions <- function(wbd_gdb) {
  if("HUC12" %in% st_layers(wbd_gdb)$name) {
    wbd <- read_sf(wbd_gdb, "HUC12")
    wbd <- rename(wbd, HUC12 = HUC_12, TOHUC = HU_12_DS)
  } else {
    wbd <- read_sf(wbd_gdb, "WBDHU12")
  }

  wbd_type <- st_set_geometry(wbd, NULL)

  if("HUTYPE" %in% names(wbd_type)) {
    wbd_type <- distinct(select(wbd_type, HUC12, HUTYPE))
  } else {
    wbd_type <- distinct(select(wbd_type, HUC12, HUTYPE = HU_12_TYPE))
  }

  wbd <- group_by(wbd, HUC12) %>%
    summarise(TOHUC = TOHUC[1])

  # Exclusions where river-flow does not apply:
  exclude_type <- wbd_type$HUC12[wbd_type$HUTYPE %in% c("F", "I", "C", "U")]  # frontal closed or island

  exclude_first_order_toHUC <- wbd$HUC12[wbd$TOHUC %in% c("OCEAN", "CANADA", "GEATLAKES", "UNKNOWN") &
                                           !wbd$HUC12 %in% wbd$TOHUC]  # Unless it has something flowing to it.

  exclude <- unique(c(exclude_type, exclude_first_order_toHUC))

  return(exclude)
}
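A usage sketch with a hypothetical geodatabase path; the function assumes the sf and dplyr functions it calls (st_layers, read_sf, rename, distinct, select, group_by, summarise) are attached:

library(sf)
library(dplyr)
excluded_hucs <- get_exclusions("data/WBD_National_GDB.gdb")  # placeholder path
length(excluded_hucs)  # number of HUC12s where river flow does not apply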
/R/5_find_outlets.R
permissive
jsta/HU12_NHD
R
false
false
992
r
#' specify a general vecchia approximation
#'
#' specify the vecchia approximation for later use in likelihood evaluation or prediction.
#' This function does not depend on parameter values, and only has to be run once before
#' repeated likelihood evaluations.
#' @param locs nxd matrix of observed locs
#' @param m Number of nearby points to condition on
#' @param ordering options are 'coord' or 'maxmin'
#' @param cond.yz options are 'y', 'z', 'SGV', 'SGVT', 'RVP', 'LK', and 'zy'
#' @param locs.pred nxd matrix of locations at which to make predictions
#' @param ordering.pred options are 'obspred' or 'general'
#' @param pred.cond prediction conditioning, options are 'general' or 'independent'
#' @param conditioning conditioning on 'NN' (nearest neighbor) or 'firstm' (fixed set for low rank)
#' or 'mra'
#' @param mra.options Settings for number of levels and neighbors per level
#' @param verbose Provide more detail when using MRA calculations. Default is false.
#'
#' @return An object that specifies the vecchia approximation for later use in likelihood
#' evaluation or prediction.
#' @examples
#' locs=matrix(1:5,ncol=1); vecchia_specify(locs,m=2)
#' @export

# specify the vecchia approximation, prepare U
# this fct does not depend on data or parameter values
# only has to be run once before repeated likelihood evals
vecchia_specify = function(locs, m=-1, ordering, cond.yz, locs.pred, ordering.pred, pred.cond,
                           conditioning, mra.options=NULL, ic0=FALSE, verbose=FALSE) {

  if(!is.matrix(locs)) {
    warning("Locations must be in matrix form")
    return(NA)
  }

  if(m==-1 || is.null(m)) {
    if(conditioning=='mra' && !is.null(mra.options) && !is.null(mra.options$J) &&
       !is.null(mra.options$r) && !is.null(mra.options$J)) {
      warning("m not defined; using MRA parameters")
    } else if(is.null(mra.options$r)) stop("neither m nor r defined!")
  }

  spatial.dim=ncol(locs)
  n=nrow(locs)

  # check that locs.preds does not contain any locations in locs
  if(!missing(locs.pred)){
    locs.all = rbind(locs, locs.pred)
    if(anyDuplicated(locs.all)>0)
      stop("Prediction locations contain observed location(s), remove redundancies.")
  }

  if(m>n){
    warning("Conditioning set size m chosen to be larger than n. Changing to m=n-1")
    m=n-1
  }

  # The fully independent case with no conditioning
  if(m==0){
    if(!missing(locs.pred)) warning("Attempting to make predictions with m=0.
                                    Prediction ignored")
    ord = 1:n
    ord.z=ord
    locsord=locs[ord,,drop=FALSE]
    NNarray= matrix(cbind(ord, rep(NA, nrow(locs))), ncol=2)
    Cond=matrix(NA,n,2); Cond[,1]=T
    obs=rep(TRUE,n)

    ### determine the sparsity structure of U
    U.prep=U_sparsity( locsord, NNarray, obs, Cond )

    ### object that specifies the vecchia approximation
    vecchia.approx=list(locsord=locsord, obs=obs, ord=ord, ord.z=ord.z, ord.pred='general',
                        U.prep=U.prep, cond.yz='false', conditioning='NN')
    return(vecchia.approx)
  }

  # subsume firstm into mra
  if(!missing(conditioning) && conditioning == 'firstm'){
    conditioning = 'mra'
    mra.options = list(r=c(m,1))
  }

  # default options
  if(missing(ordering)){
    if(spatial.dim==1) {ordering = 'coord'} else ordering = 'maxmin'
  }
  if(missing(pred.cond)) pred.cond='general'
  if(missing(conditioning)) conditioning='NN'
  if(conditioning %in% c('mra', 'firstm')){
    if(ordering!='maxmin')
      warning("ordering for the selected conditioning scheme changed to required 'maxmin'")
    ordering='maxmin'
  }
  if(missing(cond.yz)){
    if (conditioning %in% c('mra', 'firstm')) {
      cond.yz='y'
    } else if(missing(locs.pred) | spatial.dim==1 ){
      cond.yz = 'SGV'
    } else cond.yz = 'zy'
  }

  ### order locs and z
  if(missing(locs.pred)){  # no prediction
    if(ordering=='coord') {
      ord=order_coordinate(locs)
    } else if(ordering=='maxmin'){
      ord = order_maxmin_exact(locs)
      cut = min(n, 9)
      ord = c(ord[1], ord[-seq(1,cut)], ord[2:cut])
    } else if(ordering=='outsidein'){
      ord = order_outsidein(locs)
    } else if(ordering=='none'){
      ord = seq(n)
    }
    ord.z=ord
    locsord=locs[ord,,drop=FALSE]
    obs=rep(TRUE,n)
    ordering.pred='general'
  } else {  # prediction is desired
    n.p=nrow(locs.pred)
    locs.all=rbind(locs,locs.pred)
    observed.obspred=c(rep(TRUE,n),rep(FALSE,n.p))
    if(missing(ordering.pred))
      if(spatial.dim==1 & ordering=='coord') ordering.pred='general' else ordering.pred='obspred'
    if(ordering.pred=='general'){
      if(ordering=='coord') ord=order_coordinate(locs.all) else {
        ord = order_maxmin_exact(locs.all)
      }
      ord.obs=ord[ord<=n]
    } else {
      if(ordering=='coord') {
        ord.obs=order_coordinate(locs)
        ord.pred=order_coordinate(locs.pred)
      } else if(ordering=='none') {
        ord.obs = seq(n)
        ord.pred = seq(n.p)
      } else {
        temp=order_maxmin_exact_obs_pred(locs,locs.pred)
        ord.obs=temp$ord
        ord.pred=temp$ord_pred
      }
      ord=c(ord.obs,ord.pred+n)
    }
    ord.z=ord.obs
    locsord=locs.all[ord,,drop=FALSE]
    obs=observed.obspred[ord]
  }

  ### obtain conditioning sets
  if( conditioning == 'mra' ){
    NNarray = findOrderedNN_mra(locsord, mra.options, m, verbose)
    if(!methods::hasArg(m)) m = ncol(NNarray)-1
  } else if( conditioning %in% c('firstm', 'NN')){
    if(spatial.dim==1) {
      NNarray=findOrderedNN_kdtree2(locsord,m)
    } else NNarray <- GpGp::find_ordered_nn(locsord,m)
    if(conditioning == 'firstm'){
      first_m = NNarray[m+1,2:(m+1)]
      n.all=nrow(NNarray)
      if (m < n.all-1){  # if m=n-1, nothing to replace
        NNarray[(m+2):n.all, 2:(m+1)] = matrix(rep(first_m, n.all-m-1), byrow = TRUE, ncol = m)
      }
    }
  } else stop(paste0("conditioning='",conditioning,"' not defined"))

  if(!missing(locs.pred) & pred.cond=='independent'){
    if(ordering.pred=='obspred'){
      NNarray.pred <- array(dim=c(n.p,m+1))
      for(j in 1:n.p){
        dists=fields::rdist(locsord[n+j,,drop=FALSE],locsord[1:n,,drop=FALSE])
        m.nearest.obs=sort(order(dists)[1:m],decreasing=TRUE)
        NNarray.pred[j,]=c(n+j,m.nearest.obs)
      }
      NNarray[n+(1:n.p),]=NNarray.pred
    } else warning('indep. conditioning currently only implemented for obspred ordering')
  }

  ### conditioning on y or z?
  if(cond.yz=='SGV'){
    Cond=whichCondOnLatent(NNarray,firstind.pred=n+1)
  } else if(cond.yz=='SGVT'){
    Cond=rbind(whichCondOnLatent(NNarray[1:n,]),matrix(TRUE,nrow=n.p,ncol=m+1))
  } else if(cond.yz=='y'){
    Cond=matrix(NA,nrow(NNarray),ncol(NNarray)); Cond[!is.na(NNarray)]=TRUE
    #Cond=matrix(NA,nrow(NNarray),m+1); Cond[!is.na(NNarray)]=FALSE; Cond[,1]=TRUE
  } else if(cond.yz=='z'){
    Cond=matrix(NA,nrow(NNarray),m+1); Cond[!is.na(NNarray)]=FALSE; Cond[,1]=TRUE
  } else if(cond.yz %in% c('RVP','LK','zy')){

    ### "trick" code into response-latent ('zy') ordering

    ## reset variables
    obs=c(rep(TRUE,n),rep(FALSE,nrow(locsord)))
    locsord=rbind(locsord[1:n,,drop=FALSE],locsord)

    ## specify neighbors
    NNs=FNN::get.knn(locsord[1:n,,drop=FALSE],m-1)$nn.index
    if(cond.yz %in% c('RVP','zy')){
      prev=(NNs<matrix(rep(1:n,m-1),nrow=n))
      NNs[prev]=NNs[prev]+n  # condition on latent y.obs if possible
    }

    ## create NN array
    NNarray.z=cbind(1:n,matrix(nrow=n,ncol=m))
    NNarray.y=cbind((1:n)+n,1:n,NNs)
    if(missing(locs.pred)){
      NNarray.yp=matrix(nrow=0,ncol=m+1)
      ordering.pred='obspred'
    } else {
      if(ordering.pred!='obspred') warning('ZY only implemented for obspred ordering')
      if(cond.yz=='zy'){
        NNarray.yp=NNarray[n+(1:n.p),]+n
      } else {
        NNarray.yp=NNarray[n+(1:n.p),]
        NNarray.yp[NNarray.yp>n]=NNarray.yp[NNarray.yp>n]+n
      }
    }
    NNarray=rbind(NNarray.z,NNarray.y,NNarray.yp)

    ## conditioning
    Cond=(NNarray>n); Cond[,1]=TRUE
    cond.yz='zy'

  } else stop(paste0("cond.yz='",cond.yz,"' not defined"))

  ### determine the sparsity structure of U
  U.prep=U_sparsity( locsord, NNarray, obs, Cond )

  ### object that specifies the vecchia approximation
  vecchia.approx=list(locsord=locsord, obs=obs, ord=ord, ord.z=ord.z, ord.pred=ordering.pred,
                      U.prep=U.prep, cond.yz=cond.yz, ic0=ic0, conditioning=conditioning)
  # U.prep has attributes: revNNarray,revCond,n.cores,size,rowpointers,colindices,y.ind)
  return(vecchia.approx)
}
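A slightly larger usage sketch than the one-line roxygen example, with illustrative sizes (assumes the GPvecchia package, which exports vecchia_specify, is installed):

library(GPvecchia)
set.seed(1)
locs <- matrix(runif(200), ncol = 2)       # 100 observed 2-D locations
locs.pred <- matrix(runif(20), ncol = 2)   # 10 prediction locations
v <- vecchia_specify(locs, m = 10, locs.pred = locs.pred)
names(v)  # locsord, obs, ord, ord.z, ord.pred, U.prep, cond.yz, ic0, conditioning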
/R/vecchia_specify.R
no_license
Sadra1999/GPvecchia
R
false
false
8,451
r
# Introduction to webpage
Intro <- function() {
  return("Using a dataset from FiveThirtyEight that has information on comic book characters, we were interested in the diversity of comic book characters. As the dataset contains information such as a character's physical attributes, the year of their first appearance, gender orientation, and intentions, we focused on comparing different variables and if minority groups are accurately represented for comic characters.")
}
/script/introduction.R
no_license
aysiab/final-test
R
false
false
490
r
## Put comments here that give an overall description of what
## your functions do

## creates a special "vector", which is a list containing a function to
## 1.set the value of the matrix
## 2.get the value of the matrix
## 3.set the value of the inverse
## 4.get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinverse <- function(solve) m <<- solve
  getinverse <- function() m
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## This function first checks to see if the inverse has already been calculated. If so, it gets the inverse
## from the cache and skips the computation. Otherwise, it calculates the inverse of the matrix and sets the
## value of the matrix in the cache via the setinverse function.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinverse()  # note: the original read x$getinverse without (), which returns the
                       # accessor function itself, so the cached value was never actually used
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
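A short illustrative session showing the caching behaviour with the corrected accessor call; the second cacheSolve() call should report the cache hit:

cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(cm)  # computes the inverse and stores it
cacheSolve(cm)  # prints "getting cached data" and returns the cached inverse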
/cachematrix.R
no_license
malikrocks/ProgrammingAssignment2
R
false
false
1,104
r
#' plotExternalCostTotalCost
#'
#' Plots the welfare loss measures for the voters of interest, in the round the proposal passed, for each of the kMajority rules.
#' @param outputDataList The output data list of summaries, that is generated by the iterations() function
#' @param plotMeanEC if TRUE plot the costs for the Mean Group
#' @param plotBestEC if TRUE plot the costs for the Best Group
#' @param plotWorstEC if TRUE plot the costs for the Worst Group
#' @return A plot of the mean, across all iterations, of the external cost incurred for each k-majority rule.
#' @export
plotExternalCostTotalCost <- function(outputDataList, plotMeanEC, plotBestEC, plotWorstEC){

  # Set the margins so the labels in the Top Margin will display.
  # par(mar=c(5.1, 4.1, 5.1, 2.1))

  #Extract External Cost
  meanExternalCost <- outputDataList$externalCost$meanOfMeanVotersExternalCostEachIteration
  bestExternalCost <- outputDataList$externalCost$meanOfbestOffGroupsMeanExternalCostEachIteration
  worstExternalCost <- outputDataList$externalCost$meanOfworstOffGroupsMeanExternalCostEachIteration

  # Calculate Decision Cost
  decisionCost <- outputDataList$rounds$meanNumberOfProposalsConsideredEachIteration * outputDataList$theInputParameters$perProposalDecisionCost

  # Calculate Total Cost for Mean, Highest and Lowest groups
  meanTotalCost <- meanExternalCost + decisionCost
  bestTotalCost <- bestExternalCost + decisionCost
  worstTotalCost <- worstExternalCost + decisionCost

  # Store a few parameters for use in plots
  parameters <- outputDataList$theInputParameters
  numK <- nrow(outputDataList$externalCost)
  K <- seq(1:numK)

  # PLOTS
  xRange <- c(0,numK)
  yRange <- c(0,1)
  ## full yaxis ## yRange <- (0,max(tc.best))
  x_axislabels <- seq(0,numK,by=10)
  y_axislabels <- seq(0,1,by=.1)
  ## full yaxis ## y_axislabels <- seq(0,max(tc.best),by=.1)

  plot(xRange, yRange, type="n", main="", xlab="K-Majority Rule", ylab="Expected Costs", ylim=c(0,1), font=2, axes=FALSE, frame=TRUE)
  ## full yaxis ## plot(K,ec.worst,type="n", main="", xlab="K-Majority Rule", ylab="Expected Costs", ylim=c(0,max(tc.best)), font=2, axes=FALSE, frame=TRUE)
  axis(side=1, at=x_axislabels)
  axis(side=2, at=y_axislabels)

  lines(K,decisionCost,lty=1,lwd=3,col='black')

  lineNames <- c("decision costs")  # For the Legend
  lineLty <- c(1)                   # For the Legend
  lineLwd <- c(3)                   # For the Legend
  lineCol <- c("black")

  if (plotMeanEC==TRUE){
    lines(K,meanExternalCost,lty=1,lwd=1,col='black')
    lines(K,meanTotalCost,lty=2,lwd=3,col='black')
    lineNames <- c(lineNames, "mean group e.c.", "mean group t.c.")
    lineLty <- c(lineLty,1,2)    # For the Legend
    lineLwd <- c(lineLwd,1,3)    # For the Legend
    lineCol <- c(lineCol,"black")
  }

  if (plotBestEC==TRUE){
    lines(K,bestExternalCost,lty=1,lwd=1,col='black')
    lines(K,bestTotalCost,lty=2,lwd=3,col='black')
    lineNames <- c(lineNames, "best group e.c.", "best group t.c.")
    lineLty <- c(lineLty,1,2)    # For the Legend
    lineLwd <- c(lineLwd,1,3)    # For the Legend
    lineCol <- c(lineCol,"black")
  }

  if (plotWorstEC==TRUE){
    lines(K,worstExternalCost,lty=1,lwd=1,col='black')
    lines(K,worstTotalCost,lty=2,lwd=3,col='black')
    lineNames <- c(lineNames, "worst group e.c.", "worst group t.c.")
    lineLty <- c(lineLty,1,2)    # For the Legend
    lineLwd <- c(lineLwd,1,3)    # For the Legend
    lineCol <- c(lineCol,"black")
  }

  mtext(text=paste("Group Size:", paste(outputDataList$theInputParameters$groupSize, collapse=", ")),
        side=3, line=1.50, adj=0, cex=1.1, font=2)
  mtext(text=paste("Initial Utility:",
                   paste(toupper(substring(outputDataList$allGroups[[1]]$utilityDistribution,1,1)), "(",
                         sub('^(-)?0[.]', '\\1.', outputDataList$allGroups[[1]]$utilityDistributionParam1), ",",
                         sub('^(-)?0[.]', '\\1.', outputDataList$allGroups[[1]]$utilityDistributionParam2), ")",
                         collapse=", ", sep="")),
        side=3, line=1.50, adj=1, cex=1.1, font=2)
  mtext(text=paste("Group Error:",
                   paste(toupper(substring(outputDataList$allGroups[[1]]$errorDistribution,1,1)), "(",
                         sub('^(-)?0[.]', '\\1.', outputDataList$allGroups[[1]]$errorDistributionParam1), ",",
                         sub('^(-)?0[.]', '\\1.', outputDataList$allGroups[[1]]$errorDistributionParam2), ")",
                         collapse=", ", sep="")),
        side=3, line=.25, adj=0, cex=1.1, font=2)
  mtext(text=paste("Change Mean Utility:", paste(outputDataList$theInputParameters$groupPostFailingProposalMeanUiIncrease, collapse=", ")),
        side=3, line=.25, adj=1, cex=1.1, font=2)
  mtext(text=paste("Per Round Decision Cost:", paste(outputDataList$theInputParameters$perProposalDecisionCost, collapse=", ")),
        side=1, line=2.00, adj=0, cex=1.1, font=2)

  legend(5,1,            # places a legend at the appropriate place
         lineNames,      # puts text in the legend
         lty=lineLty,    # gives the legend appropriate symbols (lines)
         lwd=lineLwd,    # gives the legend the appropriate weight
         col=lineCol     # gives the legend the appropriate color
  )
  #
}
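An illustrative call, kept commented because it depends on an output object produced elsewhere in the package; the object name is a placeholder:

# out <- iterations(...)  # summary list produced by the package's iterations() function
# plotExternalCostTotalCost(out, plotMeanEC = TRUE, plotBestEC = FALSE, plotWorstEC = TRUE)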
/R/plotExternalCostTotalCost.R
no_license
codeForReviewer/kMajorityRule
R
false
false
5,224
r
# SOMnn topology-based classifier
# Copyright (C) 2017 Andreas Dominik
#                    THM University of Applied Sciences
#                    Gießen, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

#' Plot method for S4 class \code{SOMnn}
#'
#' Creates a plot of the hexagonal som in the model of type \code{SOMnn}.
#'
#' In addition to the required parameters, many options can be
#' specified to plot predicted samples and to modify colours, legend and scaling.
#'
#' @rdname plot-methods
#' @aliases plot,SOMnn-method
#'
#' @param x trained som of type \code{SOMnn}.
#' @param title \code{logical}; if TRUE, slots name and date are used as main title.
#' @param col defines colours for the classes of the dataset. Possible values include:
#'            \code{NA}: default value; colours are generated with \code{rainbow},
#'            a \code{vector} of colour definitions or a
#'            \code{data.frame} with categories in the first and respective colours in the second column.
#' @param onlyDefCols \code{logical}; if TRUE, only categories are plotted, for which colours are defined.
#'            Default: FALSE.
#' @param edit.cols \code{logical}; if TRUE, colour definitions can be edited interactively before plotting.
#'            Default: FALSE.
#' @param show.legend \code{logical}; if TRUE, a legend is displayed. Default: TRUE.
#' @param legend.loc Legend position as specified for \code{\link{legend}}. Default is \code{"bottomright"}.
#' @param legend.width size of the legend.
#' @param window.width Manual setting of window width. Default is NA.
#' @param window.height Manual setting of window height. Default is NA.
#' @param show.box Show frame around the plot. Default is TRUE.
#' @param show.counter.border Percentile as limit for the display of labels in the pie charts. Default is 0.98.
#'            Higher counts are displayed as numbers in the neuron.
#' @param predict \code{data.frame} as returned by the \code{som.nn::predict} function
#'            or a \code{data.frame} or matrix that follows the specification:
#'            If columns \code{x} and \code{y} exist, these are used as coordinates
#'            for the target neuron; otherwise the first two columns are used.
#'            Default: NULL.
#' @param add \code{logical}; if TRUE, points are plotted on an existing plot. This can be used to
#'            stepwise plot
#'            points of different classes with different colours or symbols.
#' @param pch.col Colour of the markers for predicted samples.
#' @param pch Symbol of the markers for predicted samples.
#' @param ... More parameters as well as general
#'            plot parameters are allowed; see \code{\link{par}}.
#'
#' @import hexbin
#'
#' @example examples/example.train.R
#'
#' @export
setMethod(
  f = "plot",
  signature = "SOMnn",
  definition = function(x, title = TRUE,
                        col = NA, onlyDefCols = FALSE, edit.cols = FALSE,
                        show.legend = TRUE, legend.loc = "bottomright", legend.width = 4,
                        window.width = NA, window.height = NA, show.box = TRUE,
                        show.counter.border = 0.98,
                        predict = NULL, add = FALSE, pch.col = "black", pch = 19, ...){

    # make vis from prediction (cave: in somplot, indices start at 0):
    som <- x
    classes <- som@classes

    grid <- make.codes.grid(som@xdim, som@ydim, topo = "hexagonal")
    counts <- som@class.counts
    counts$i <- grid$i - 1
    counts$x <- grid$ix - 1
    counts$y <- grid$iy - 1

    # count class matches:
    vis <- data.frame(x = numeric(0), y = numeric(0), kat = character(0), stringsAsFactors = FALSE)
    for (code in seq_along(counts[,1])) {
      for (class in classes) {
        for (i.count in seq_len(counts[code, class])){
          vis <- rbind(vis, data.frame(x = counts[code, "x"], y = counts[code, "y"],
                                       kat = class, stringsAsFactors = FALSE))
        }
      }
    }
    # print(vis)

    if (!add) {
      makehexbinplot(data = vis,
                     col = col, show.legend = show.legend, legend.loc = legend.loc,
                     legend.width = legend.width,
                     window.width = window.width, window.height = window.height,
                     onlyDefCols = onlyDefCols, show.box = show.box,
                     edit.cols = edit.cols, show.counter.border = show.counter.border, ...)

      if (title) {title(paste(som@name, "-", som@date))}
    }

    # plot samples, if arg predict is given:
    if (!is.null(predict)){

      # make data.frame with columns i, x, y:
      if (("x" %in% names(predict) && ("y" %in% names(predict)))){
        predict <- data.frame(x = predict[,"x"], y = predict[,"y"])
      } else {
        predict <- data.frame(x = predict[,1], y = predict[,2])
      }

      predict$i <- (predict$y-1) * som@xdim + predict$x
      predict <- data.frame(i = predict$i, x = predict$x, y = predict$y)

      plot.predictions(grid, predict, pch.col = pch.col, pch = pch, ...)
    }
  })


#' Plots the hexagonals and pie charts.
#' Adapted code from package somplot.
#'
#' @keywords internal
hexbinpie <- function(x, y, kat, xbnds = range(x), ybnds = range(y),
                      hbc = NA, pal = NA, hex = "gray", circ = "gray50", cnt = "black",
                      show.counter.border, ...)
{
  hb <- hexbin(shape = (diff(ybnds) + 1) / (diff(xbnds) + 1),
               x, y, xbnds = xbnds, ybnds = ybnds, IDs = TRUE, xbins = diff(xbnds)*2)
  rx <- 0.5 -> ry
  hexC <- hexcoords(dx = rx, dy = ry / sqrt(3), n = 1)
  nl <- length(levels(as.factor(kat)))

  zbnds <- stats::quantile(hb@count, prob = c(0.05, 0.98, show.counter.border), na.rm = TRUE)  # quantile borders for circle diameter and display counter
  zz <- pmax(pmin(sqrt(hb@count / zbnds[2]), 0.85), 0.2)                                       # circle diameter from 20 to 85% of hexagon diameter
  tt <- unclass(table(kat, hb@cID))

  for (i in seq(along=zz))    # loop neurons
  {
    if (!is.na(hex)) {
      graphics::polygon(hbc$x[i] + hexC$x, hbc$y[i] + hexC$y, col = NA, border = hex)
    }

    tp <- pi / 2 - 2 * pi * c(0, cumsum(tt[,i]) / sum(tt[,i]))
    used = FALSE
    for (j in 1:nl)           # loop categories
    {
      if (tp[j+1] == tp[j]) { next }

      if (j >= 2) {
        used = TRUE
        pp <- seq(tp[j], tp[j+1], length = floor((tp[j] - tp[j + 1]) * 4) + 2)
        xi <- hbc$x[i] + c(0, zz[i] * rx * cos(pp))
        yi <- hbc$y[i] + c(0, zz[i] * ry * sin(pp))
        graphics::polygon(xi, yi, col = pal[j], border = NA, ...)
      }
      #print(j)
    }

    if (!is.na(circ) & used) {
      graphics::polygon(hbc$x[i] + rx * zz[i] * cos((1:18) * pi / 9),
                        hbc$y[i] + ry * zz[i] * sin((1:18) * pi / 9),
                        col = NA, border = circ)
    }
  }

  for (i in seq(along = zz)) {
    if ((!is.na(cnt)) & (hb@count[i] > zbnds[3])) {
      graphics::text(hbc$x[i], hbc$y[i], hb@count[i], col = cnt, cex = 0.5)
    }
  }
}


#' makes the actual hexagonal plot.
#' Adapted code from package somplot.
#'
#' @keywords internal
makehexbinplot <- function(data, col = NA, show.legend = TRUE, legend.loc = "bottomright",
                           legend.width = 4, window.width = NA, window.height = NA,
                           onlyDefCols = FALSE, show.box = TRUE, edit.cols = FALSE,
                           show.counter.border = 0.98, ...)
{
  if (!show.legend) { legend.width = 0 }

  # calc hbc and fill up empty coordinates with an "empty" element
  pos = 1
  range.x = max(data$x) - min(data$x) + 1
  range.y = max(data$y) - min(data$y) + 1
  hbc = data.frame(x = seq(1, (range.x) * (range.y), 1), y = NA)

  for (y in c(min(data$y) : max(data$y))) {
    for (x in c(min(data$x):max(data$x))) {
      hbc$x[pos] = ifelse(y %% 2 == 1, x - 0.5, x)
      hbc$y[pos] = y * 0.866
      pos = pos + 1

      if (nrow(data[data$x == x & data$y == y,]) == 0) {
        data = rbind(data, data.frame(x = x, y = y, kat = ""))
      }
    }
  }

  lvls = levels(as.factor(data$kat))
  lvls = lvls[lvls != ""]
  pal = grDevices::rainbow(length(lvls))

  if (!is.na(col[1])) {
    if (onlyDefCols) {
      tmp.pal = rep("white", length(lvls))
    } else {
      tmp.pal = vector("character", length = length(lvls))
    }

    if (is.data.frame(col)) {
      for (i in c(1 : nrow(col))) {
        tmp.pal[lvls == col[i,1]] = as.character(col[i,2])
      }
    } else {
      tmp.pal[c(1:length(col))] = col
    }

    # convert color names into hex values and fill up colors
    if(!onlyDefCols) {
      dbl.pal = sprintf("#%02X%02X%02XFF",
                        grDevices::col2rgb(tmp.pal[tmp.pal != ""])[1,],
                        grDevices::col2rgb(tmp.pal[tmp.pal != ""])[2,],
                        grDevices::col2rgb(tmp.pal[tmp.pal != ""])[3,])
      pal = setdiff(pal, dbl.pal)

      for (i in c(1 : length(lvls))) {
        if (is.na(tmp.pal[i]) | tmp.pal[i] == "") {
          tmp.pal[i] = pal[1]
          pal = pal[-1]
        }
      }
    }
    pal = tmp.pal
  }

  if(edit.cols) {
    pal = as.vector(utils::edit(data.frame(kat = lvls, col = pal))[,2])
  }

  lvls = append("empty", lvls)
  pal = c("white", pal)

  if(!is.na(window.width)) {
    window.height = ifelse(is.na(window.height),
                           window.width * (max(hbc$y) - min(hbc$y) - 1 + (range.x / range.y * 2)) /
                             (max(hbc$x) - min(hbc$x) + legend.width),
                           window.height)
    grDevices::dev.new(width = window.width, height = window.height)
  }

  graphics::plot.new()
  graphics::plot.window(c(min(hbc$x) - 0.5, max(hbc$x) + 0.5 + legend.width),
                        c(min(hbc$y) - 0.5, max(hbc$y) + 1), asp = 0.866)

  if(show.box) { graphics::box() }
  if (show.legend) {
    graphics::legend(legend.loc, lvls[-1], fill = pal[-1], x.intersp = 0.2)
  }

  hexbinpie(data$x, data$y, kat = data$kat, hbc = hbc, pal = pal,
            show.counter.border = show.counter.border, ...)
}


#' Plots predicted samples as points into a plotted som.
#'
#' @keywords internal
plot.predictions <- function(grid, predict, pch.col, pch, ...){

  # fit grid to plot coordinates (left-bottom is (0,0) in plot, but (1,5,0.8660254) in somgrid:
  grid$x <- grid$x - grid$x[1]
  grid$y <- grid$y - grid$y[1]

  # map indices to coors:
  coors <- grid[predict$i, c("x","y")]

  # function get.pattern
  # returns a pattern of points with relative coors-
  # n   : number of points to be organised
  # sep : separation between points
  #
  get.pattern <- function(n, sep = 0.2){

    sml <- sep * 0.65
    big <- sep * 1.2
    if (n == 1){
      return(data.frame(x=0, y=0))
    } else if (n == 2) {
      return(data.frame(x=c(-sml, sml), y=c(sml, -sml)))
    } else if (n == 3) {
      return(data.frame(x=c(-sml, 0, sml), y=c(-sml*0.87*2/3, sml*0.87*4/3, -sml*0.87*2/3)))
    } else if (n == 4) {
      return(data.frame(x=c(-sml, -sml, sml, sml), y=c(sml,-sml, -sml, sml)))
    } else if (n == 5) {
      return(data.frame(x=c(-sep, -sep, sep, sep, 0), y=c(sep,-sep, -sep, sep, 0)))
    } else if (n == 6) {
      return(data.frame(x=c(-sep, -sep, -sep, sep, sep, sep), y=c(sep, 0, -sep, -sep, 0, sep)))
    } else if (n == 7) {
      return(data.frame(x=c(-sep, -sep, -sep, sep, sep, sep, 0), y=c(sep, 0, -sep, -sep, 0, sep, 0)))
    } else if (n == 8) {
      return(data.frame(x=c(-sep, -sep, -sep, sep, sep, sep, 0, 0), y=c(sep, 0, -sep, -sep, 0, sep, sep, -sep)))
    } else if (n == 9) {
      return(data.frame(x=c(-sep, -sep, -sep, sep, sep, sep, 0, 0, 0), y=c(sep, 0, -sep, -sep, 0, sep, sep, -sep, 0)))
    } else {
      return(data.frame(x=stats::runif(n, min=-big, max=big), y=stats::runif(n, min=-big, max=big)))
    }
  }

  # group points in the same neuron:
  nums <- by(predict, predict$i, function(x){
    # process all points in same neuron as one group
    n <- nrow(x)
    coors <- grid[x$i, c("x","y")] + get.pattern(n)
    graphics::points(coors, pch = pch, col = pch.col)
  })
}
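An illustrative call, kept commented because it needs a trained SOMnn model and a prediction table produced elsewhere in the som.nn package; both object names are placeholders:

# plot(som_model)                                             # hexagonal map with class pie charts
# plot(som_model, predict = pred, pch = 4, pch.col = "red")   # overlay predicted samples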
/R/plot.SOMnn.R
no_license
cran/som.nn
R
false
false
12,955
r
# packages used below (dplyr verbs, sf helpers, ggplot2)
library(dplyr)
library(sf)
library(ggplot2)

norm = readr::read_csv("./outdir/2020/FIA_2020_no_mlbs_full.csv")
norm = sf::st_as_sf(norm, coords=c("LON", "LAT"), crs = 4326)
ecoregions = sf::read_sf("/Users/sergiomarconi/Documents/Data/Data_products/Chapter3_product/atlantic/atlantic_outer.shp")
ecoregions = sf::st_transform(ecoregions, st_crs(norm))
norm = sf::st_join(norm, ecoregions)
norm = norm %>% filter(!is.na(ECO_US_))

yep = norm #%>%
  #group_by(statecd, countycd, unitcd, plot) %>% filter(taxonID == "QULA3")

yep$N_LMA = yep$nitrogenPercent.Estimate / yep$leafMassPerArea.Estimate
crds = st_coordinates(yep)
yep = cbind.data.frame(crds, yep)
yep = yep %>% group_by(Y, taxonID) %>%
  summarize_if(is.numeric, mean)

# ggplot(data = world) +
#   xlim(-95,-65)+ ylim(24,50)+
#   theme_bw() +
#   geom_point(data = yep, aes(x = LON, y = LAT,
#                              fill = cut(yep$N_LMA,quantile(yep$N_LMA))), alpha = 0.6,
#              size = 0.7, shape = 23, stroke = 0) + scale_fill_viridis_d()+
#   geom_sf(alpha = 0)+
#   geom_point(data = ACSA, aes(x = LON_site, y = LAT_site, color = "red", alpha = 0.9),
#              size = 1, shape =4, stroke = 1)
#
#scale_colour_gradient("red")

model_95 = quantile(yep$N_LMA, probs=c(.025,.975))
#plot(yep$LAT, yep$N_LMA)

nLm = ggplot(yep, aes(x = Y, y = N_LMA)) +
  geom_density2d() +
  geom_smooth(method = "loess") +
  theme_bw() +
  facet_wrap(.~taxonID, scales = "free")
ggsave("n_lma_ratio.png", plot = nLm)
/src/plots/Figure S17.R
permissive
MarconiS/Disentangling-the-role-of-phylogeny-and-climate-on-joint-leaf-trait-distributions-across-Eastern-Uni
R
false
false
1,416
r
# load libraries
library(seqinr)
library(Biostrings)

# get parameters
get_param <- commandArgs(trailingOnly = TRUE)

# read RPF counts per nucleotide
rpf <- read.table(get_param[1], sep = "\t", header = F, stringsAsFactors = F)

# get genes
u_genes <- unique(rpf[,1:6])

# cutoff for selection of candidates: (FT at period 3) / (FT between 1.5 and 3)
cutoff <- 3

## function for fourier transform of input counts
# counts: input count vector
#   it will only take a vector whose length is divisible by 3, others are skipped
# output (in default mode): 4 values (FT at 1.5, FT at 3, mean of FT between 1.5 and 3, mean > 3)
# normalize: per default it normalizes the transformed values by length (which we have to do)
# full_output: if TRUE will output a list with all data, e.g. to plot
get_FT_signal = function(counts, normalize = T, full_output = F){

  # check if length divisible by 3
  if( length(counts) %% 3 == 0){

    # get frequency (x-axis) for FT transform
    # this is a bit complicated to explain and understand (we can try later)
    freq <- length(counts)/(0:(length(counts)-1))

    # perform fast FT
    ft <- abs(fft(counts))

    # normalize by length
    if(normalize){
      ft <- ft / length(counts)
    }

    # get the indices of period 3 and 1.5 together with the means of the inter-regions
    idx3 <- which(freq == 3)
    idx15 <- which(freq == 1.5)
    res <- c(
      ft[idx15],
      ft[idx3],
      mean(ft[(idx3+1):(idx15-1)]),
      mean(ft[(idx15+1):(length(ft))])
    )

    # return
    if(!full_output){
      res
    } else {
      list(
        res = res,
        ft = ft,
        freq = freq
      )
    }

  # else skip with message
  } else {
    cat('skipped\n')
    NULL
  }
}

### calculate FT for RPFs

# store FT results
storeFT <- matrix(NA, nrow=nrow(u_genes), ncol=4)

# for each unique gene
for(i in 1:nrow(u_genes)){

  # print status every 100 genes / ORFs
  if(i == 1){
    cat('Start obtaining FT signals\n')
  }
  if(i %% 100 == 0){
    cat(paste0(i, ' genes/ORFs processed \n'))
  }

  # get counts for selected gene only
  rpf_sel <- rpf[rpf[,4]==u_genes[i,4],]

  # check if divisible by 3
  if(nrow(rpf_sel) %% 3 == 0){
    # do FT transform (remove 50nt at start and end)
    ft <- get_FT_signal(rpf_sel[,8])
    storeFT[i,1:4] <- ft
  } else {
    # if not divisible by 3 print skipped
    cat(paste0(' ',u_genes[i,4],' skipped\n'))
  }
}
cat('DONE\n')

# select candidates with high FT at period 3
idx <- which(storeFT[,2]/storeFT[,3] > cutoff)  # these are the genes we would be interested in

# create table
verified <- cbind(u_genes[idx,], round(storeFT[idx,2]/storeFT[idx,3], digits=6))
colnames(verified)[7] <- 'FT_ratio'

# write candidates
write.table(verified, paste0(get_param[2],'/output/RPF_3nt_translated.txt'),
            sep = "\t", col.names = F, row.names = F, quote = F)
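## Illustrative check only (not part of the pipeline): the FT-based score defined above should
## separate a strongly 3-nt periodic count profile from a flat one. All data here are synthetic.
# set.seed(1)
# periodic <- rep(c(10, 1, 1), 50) + rpois(150, 1)   # spike every 3rd position
# flat     <- rpois(150, 4)                          # no periodicity
# ratio <- function(x) { s <- get_FT_signal(x); s[2] / s[3] }
# ratio(periodic)   # should be well above the cutoff of 3
# ratio(flat)       # should be close to 1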
/modulB_RPF_analysis/5_FT_RPF/FT_RPF.R
no_license
AlexanderBartholomaeus/smORFer
R
false
false
2,817
r
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/central_nervous_system.csv", head=T, sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x, y, nfolds=10, type.measure="mse", alpha=0.8, family="gaussian", standardize=TRUE)
sink('./Model/EN/Correlation/central_nervous_system/central_nervous_system_081.txt', append=TRUE)
print(glm$glmnet.fit)
sink()
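## Optional follow-up sketch (not in the original script): standard glmnet accessors for
## inspecting the cross-validated elastic-net fit produced above.
# best_lambda <- glm$lambda.min                  # lambda minimizing CV mean-squared error
# cm <- as.matrix(coef(glm, s = "lambda.min"))   # coefficient vector at that lambda
# selected <- rownames(cm)[cm != 0]              # predictors kept by the elastic net
# plot(glm)                                      # CV curve: MSE vs log(lambda)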
/Model/EN/Correlation/central_nervous_system/central_nervous_system_081.R
no_license
leon1003/QSMART
R
false
false
407
r
source("./tirSettings.R") setwd(dir.tif) ## very important tips for use rLandsat8 l8.lst <- lapply(dir(dir.tif), ReadLandsat8) bandnames <-c("tirs1", "tirs2") sceneList <- list.files(dir.toaTbk, full.names = TRUE) for (i in sceneList) { bandList <- list.files(sceneList, full.names = TRUE) emiName <- paste0(basename(i), ".tif") pngName <- paste0(emiName,".png") Tb10 <- raster::raster(bandList[1]) Tb11 <- raster::raster(bandList[2]) TbS <- raster::stack(Tb10, Tb11) a <- 1.438*10^-2 L10 <- 10.9 L11 <- 12.0 TbE <- exp((a*(Tb10 - Tb11))/(Tb10*Tb11*(L10-L11))) Ts10 <- Tb10/(1 + (L10*Tb10/a)*log(TbE) writeRaster(TbE, filename = file.path(dir.toaTe,emiName), overwrite = T) png(file.path(dir.toaTe,pngName)) plot(TbE) dev.off() raster::removeTmpFiles(h = 1) ## Improtant tips for save hardisk } }
/demo/tir14_mergeTOABrightTemperature.R
permissive
bwtian/TIR
R
false
false
939
r
source("./tirSettings.R") setwd(dir.tif) ## very important tips for use rLandsat8 l8.lst <- lapply(dir(dir.tif), ReadLandsat8) bandnames <-c("tirs1", "tirs2") sceneList <- list.files(dir.toaTbk, full.names = TRUE) for (i in sceneList) { bandList <- list.files(sceneList, full.names = TRUE) emiName <- paste0(basename(i), ".tif") pngName <- paste0(emiName,".png") Tb10 <- raster::raster(bandList[1]) Tb11 <- raster::raster(bandList[2]) TbS <- raster::stack(Tb10, Tb11) a <- 1.438*10^-2 L10 <- 10.9 L11 <- 12.0 TbE <- exp((a*(Tb10 - Tb11))/(Tb10*Tb11*(L10-L11))) Ts10 <- Tb10/(1 + (L10*Tb10/a)*log(TbE) writeRaster(TbE, filename = file.path(dir.toaTe,emiName), overwrite = T) png(file.path(dir.toaTe,pngName)) plot(TbE) dev.off() raster::removeTmpFiles(h = 1) ## Improtant tips for save hardisk } }
# load utility functions
source("UtilityFunctions.R")
library(dplyr)

# reduce data function
# keep data that their value >= threshold
reduceData <- function(df, threshold=1) {
  df <- df %>% group_by(name) %>%
    summarise(value = sum(value)) %>%
    arrange(desc(value)) %>%
    filter(value >= threshold)
}

# compute number of lines to be sampled
samplePct <- 1   # 1% sample rate
nIter <- 100 / samplePct

# output file name
fout <- "./sampledData/AllData.RData"

filect <- 0   # file read counter
ng1df <- data.frame()
ng2df <- data.frame()
ng3df <- data.frame()
ng4df <- data.frame()

for (i in c(35,75)) #seq(1,nIter))
{
  # file name
  fname <- sprintf('./sampledData/sampleData%d.RData', i)
  if (!file.exists(fname)) next

  sout <- sprintf('Reading Iter %d of %d...', i, nIter)
  print(sout)

  # get sampling now
  filect <- filect + 1
  load(fname)

  # remove non english and number character
  ind <- grep("[^a-zA-Z0-9[:punct:][:space:]]", names(ng1))
  ng1a <- ng1[-ind]
  ind <- grep("[^a-zA-Z0-9[:punct:][:space:]]", names(ng2))
  ng2a <- ng2[-ind]
  ind <- grep("[^a-zA-Z0-9[:punct:][:space:]]", names(ng3))
  ng3a <- ng3[-ind]
  ind <- grep("[^a-zA-Z0-9[:punct:][:space:]]", names(ng4))
  ng4a <- ng4[-ind]

  # combine data now
  df1 <- data.frame(name = names(ng1a), value = ng1a)
  df2 <- data.frame(name = names(ng2a), value = ng2a)
  df3 <- data.frame(name = names(ng3a), value = ng3a)
  df4 <- data.frame(name = names(ng4a), value = ng4a)
  ng1df <- rbind(ng1df, df1)
  ng2df <- rbind(ng2df, df2)
  ng3df <- rbind(ng3df, df3)
  ng4df <- rbind(ng4df, df4)

  # make names unique
  if ((filect %% 5 == 0) || (filect == 98)) {
    sout <- sprintf('Combine data now...')
    print(sout)
    th <- round(filect / 5)
    ng1df <- reduceData(ng1df, th)
    print("unigram...")
    ng2df <- reduceData(ng2df, th)
    print("2-gram...")
    ng3df <- reduceData(ng3df, th)
    print("3-gram...")
    ng4df <- reduceData(ng4df, th)
    print("4-gram...")

    # ======== save data to files ==========
    print("save file now...")
    save(ng1df, ng2df, ng3df, ng4df, file = fout)
  }
}
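## Toy illustration only (not part of the sampling pipeline): reduceData merges identical
## n-grams, sums their counts, sorts them, and drops counts below the threshold.
# toy <- data.frame(name  = c("of the", "of the", "in a", "rare one"),
#                   value = c(3, 2, 4, 1),
#                   stringsAsFactors = FALSE)
# merged <- reduceData(toy, threshold = 2)
# merged   # "of the" 5, "in a" 4; "rare one" is dropped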
/NGramModel_CollectData.R
no_license
AndresSotoA/DataScienceCapstone
R
false
false
2,230
r
#' DatasetsList
#'
#' List datasets
#'
#' @param page integer, Page number. Defaults to 1. Retrieve datasets via
#'   page, search, or (ownerSlug and datasetSlug)
#' @param search string, Search terms. Defaults to "". Retrieve datasets via
#'   page, search, or (ownerSlug and datasetSlug)
#' @param owner_dataset Alternative to page/search. The owner and dataset
#'   slug as it appears in the URL, i.e.,
#'   \code{"mathan/fifa-2018-match-statistics"}.
#' @export
kgl_datasets_list <- function(page = 1, search = "", owner_dataset = NULL) {
  if (!is.null(owner_dataset)) {
    owner_dataset <- strsplit(owner_dataset, "/")[[1]]
    ownerSlug <- owner_dataset[1]
    datasetSlug <- owner_dataset[2]
    kgl_api_get(glue::glue("datasets/list/{ownerSlug}/{datasetSlug}"))
  } else {
    kgl_api_get("datasets/list", page = page, search = search)
  }
}

#' DatasetsView
#'
#' Show details about a dataset
#'
#' @param owner_dataset The owner and data set slug as it appears in the URL,
#'   i.e., \code{"mathan/fifa-2018-match-statistics"}.
#' @export
kgl_datasets_view <- function(owner_dataset) {
  owner_dataset <- strsplit(owner_dataset, "/")[[1]]
  ownerSlug <- owner_dataset[1]
  datasetSlug <- owner_dataset[2]
  kgl_api_get(glue::glue("datasets/view/{ownerSlug}/{datasetSlug}"))
}

#' DatasetsDownloadFile
#'
#' Download dataset file
#'
#' @param owner_dataset The owner and data set slug as it appears in the URL,
#'   i.e., \code{"mathan/fifa-2018-match-statistics"}.
#' @param fileName string, File name. Required: TRUE.
#' @param datasetVersionNumber string, Dataset version number. Required: FALSE.
#' @param type string, Forcing dataset type. Required: FALSE.
#' @export
kgl_datasets_download <- function(owner_dataset, fileName,
                                  datasetVersionNumber = NULL, type = NULL) {
  owner_dataset <- strsplit(owner_dataset, "/")[[1]]
  ownerSlug <- owner_dataset[1]
  datasetSlug <- owner_dataset[2]
  kgl_api_get(glue::glue(
    "datasets/download/{ownerSlug}/{datasetSlug}/{fileName}"),
    datasetVersionNumber = datasetVersionNumber, type = type)
}

#' DatasetsUploadFile
#'
#' Get URL and token to start uploading a data file
#'
#' @param fileName string, Dataset file name. Required: TRUE.
#' @param contentLength integer, Content length of file in bytes. Required: TRUE.
#' @param lastModifiedDateUtc integer, Last modified date of file in milliseconds
#'   since epoch in UTC. Required: TRUE.
#' @export
kgl_datasets_upload_file <- function(fileName, contentLength, lastModifiedDateUtc) {
  contentLength <- file.size(fileName)
  lastModifiedDateUtc <- format(file.info(fileName)$mtime,
                                format = "%Y-%m-%d %H-%M-%S", tz = "UTC")
  kgl_api_post(glue::glue(
    "datasets/upload/file/{contentLength}/{lastModifiedDateUtc}"),
    fileName = fileName)
}

#' DatasetsCreateVersion
#'
#' Create a new dataset version
#'
#' @param owner_dataset The owner and data set slug as it appears in the URL,
#'   i.e., \code{"mathan/fifa-2018-match-statistics"}.
#' @param datasetNewVersionRequest Information for creating a new dataset version.
#'   Required: TRUE.
#' @export
kgl_datasets_create_version <- function(owner_dataset, datasetNewVersionRequest) {
  owner_dataset <- strsplit(owner_dataset, "/")[[1]]
  ownerSlug <- owner_dataset[1]
  datasetSlug <- owner_dataset[2]
  kgl_api_post(glue::glue(
    "datasets/create/version/{ownerSlug}/{datasetSlug}"),
    datasetNewVersionRequest = datasetNewVersionRequest)
}

#' DatasetsCreateNew
#'
#' Create a new dataset
#'
#' @param datasetNewRequest Information for creating a new dataset. Required: TRUE.
#' @export
kgl_datasets_create_new <- function(datasetNewRequest) {
  kgl_api_post("datasets/create/new", datasetNewRequest = datasetNewRequest)
}
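## Illustrative usage sketch only (not part of the package source). It assumes Kaggle API
## credentials are already configured for the package; the file name passed to the download
## wrapper is hypothetical and depends on the chosen dataset.
# recent <- kgl_datasets_list(search = "fifa")                       # search public datasets
# info   <- kgl_datasets_view("mathan/fifa-2018-match-statistics")   # metadata for one dataset
# stats  <- kgl_datasets_download("mathan/fifa-2018-match-statistics",
#                                 fileName = "match_statistics.csv") # hypothetical file name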
/R/datasets.R
permissive
bernardo-dauria/kaggler
R
false
false
3,889
r
#' @examples
#'
#' ## Example data
#' sliFile <- system.file("external/vegSpec.sli", package="RStoolbox")
#' sliTmpFile <- paste0(tempdir(),"/vegetationSpectra.sli")
#'
#' ## Read spectral library
#' sli <- readSLI(sliFile)
#' head(sli)
#' plot(sli[,1:2], col = "orange", type = "l")
#' lines(sli[,c(1,3)], col = "green")
#'
#' ## Write to binary spectral library
#' writeSLI(sli, path = sliTmpFile)
/man-roxygen/examples_SLI.R
no_license
bleutner/RStoolbox
R
false
false
406
r
/plugins/MacAU/UltrasonicMed/UltrasonicMed.r
permissive
themucha/airwindows
R
false
false
3,270
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/libraryDepth.R
\name{estimateDepthFactors}
\alias{estimateDepthFactors}
\title{estimate library size correction factors}
\usage{
estimateDepthFactors(
  obj,
  lib.factor = NULL,
  which.lib = "both",
  depth.estimator = "uq"
)
}
\arguments{
\item{obj}{the MpraObject}

\item{lib.factor}{the factor associating each sample to a library. Can be a
factor or the name of a column in the object's colAnnot. If not provided,
the data is assumed to have been generated from a single library, and
constant library depth is set.}

\item{which.lib}{which library to compute the depth factors for. Options
are "both" (default), "dna" or "rna". If the DNA and RNA counts have
different library factors, this function should be called twice: once with
"dna" and once with "rna"}

\item{depth.estimator}{a character indicating which depth estimation to
use, or a function to perform the estimation. Currently supported values
are "uq" for upper quantile of non-zero values (default), "rle" for RLE
(uses geometric mean, and is therefore not recommended if libraries have 0
counts), or "totsum" for total sum. For a function input: function should
take a numeric vector and return a single numeric, and preferably handle
NA values. See examples.}
}
\value{
the MpraObject with estimated values for sequencing depth factors
}
\description{
estimate library size correction factors
}
\note{
since in most MPRA experiments multiple barcodes exist within a single
library, each column in the matrix is usually not a separate library. For
this reason, it is recommended to supply this function with the appropriate
partitioning of the data matrix columns into libraries, see lib.factor
}
\examples{
data <- simulateMPRA(tr = rep(2,10), da=NULL, nbatch=2, nbc=20)
obj <- MpraObject(dnaCounts = data$obs.dna, rnaCounts = data$obs.rna,
                  colAnnot = data$annot)
obj <- estimateDepthFactors(obj, lib.factor = "batch", which.lib = "both")

## Upper quantile, using a higher quantile than 0.75:
obj <- estimateDepthFactors(obj, lib.factor = "batch", which.lib = "both",
                            depth.estimator = function(x) quantile(x, .95, na.rm=TRUE))
}
/man/estimateDepthFactors.Rd
no_license
YosefLab/MPRAnalyze
R
false
true
2,286
rd
source("get_dataset.R") make_plot3 <- function() { # read dataset, prefiltering so only early february dates are imported # get_dataset downloads and unpacks, then returns name of the raw data file data <- read.csv(pipe(paste('grep -E \"Date|^[12]/2/2007\"', get_dataset())), sep = ";", na.strings = "?") data$Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S") data$Date <- as.Date(data$Time) png("plot3.png") with(data, { plot(Time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering" ) points(Time, Sub_metering_2, type = "l", col = "red") points(Time, Sub_metering_3, type = "l", col = "blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue")) }) dev.off() }
/plot3.R
no_license
cdcooley/ExData_Plotting1
R
false
false
889
r
source("get_dataset.R") make_plot3 <- function() { # read dataset, prefiltering so only early february dates are imported # get_dataset downloads and unpacks, then returns name of the raw data file data <- read.csv(pipe(paste('grep -E \"Date|^[12]/2/2007\"', get_dataset())), sep = ";", na.strings = "?") data$Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S") data$Date <- as.Date(data$Time) png("plot3.png") with(data, { plot(Time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering" ) points(Time, Sub_metering_2, type = "l", col = "red") points(Time, Sub_metering_3, type = "l", col = "blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue")) }) dev.off() }
library('affy')
library('gplots')
library('lattice')
library('biomaRt')

convertEnsemblToEntrez1 = function (ensemblListFile, ensemblToEntrezFile) {
    mitocarta = read.table(ensemblListFile)
    ensemblHuman = useMart('ensembl', dataset='hsapiens_gene_ensembl')
    ensemblMouse = useMart('ensembl', dataset='mmusculus_gene_ensembl')

    mapmitocarta1 = getBM(attributes = c('ensembl_gene_id','hsapiens_homolog_ensembl_gene'),
                          filters = c('ensembl_gene_id','with_homolog_hsap'),
                          values = list( as.vector(mitocarta[,1]), TRUE ),
                          mart = ensemblMouse)
    mapmitocarta2 = getBM(attributes = c('ensembl_gene_id','entrezgene'),
                          filters = c('ensembl_gene_id','with_entrezgene'),
                          values = list( as.vector(mapmitocarta1[,2]), TRUE ),
                          mart = ensemblHuman)

    matchIdxs1 = match(intersect( as.vector(mapmitocarta1[,2]), as.vector(mapmitocarta2[,1]) ), mapmitocarta1[,2])
    matchIdxs2 = match(intersect( as.vector(mapmitocarta1[,2]), as.vector(mapmitocarta2[,1]) ), mapmitocarta2[,1])
    mapmitocarta3 = cbind(mapmitocarta1[matchIdxs1,1], mapmitocarta2[matchIdxs2,2])

    write.table(mapmitocarta3, file=ensemblToEntrezFile, quote=FALSE, sep=",", row.names=FALSE)
}

convertEnsemblToEntrez2 = function (ensemblListFile, ensemblToEntrezFile, entrezFile) {
    mitocarta = read.table(ensemblListFile)
    ensemblMouse = useMart('ensembl', dataset='mmusculus_gene_ensembl')

    mapmitocarta = getBM(attributes = c('ensembl_gene_id','entrezgene'),
                         filters = c('ensembl_gene_id','with_entrezgene'),
                         values = list( as.vector(mitocarta[,1]), TRUE ),
                         mart = ensemblMouse)

    write.table(mapmitocarta, file=ensemblToEntrezFile, quote=FALSE, sep=",", row.names=FALSE)
    write.table(mapmitocarta[,2], file=entrezFile, quote=FALSE, sep=",", row.names=FALSE, col.names=FALSE)
}

convertEnsemblToEntrez1("input/MitoCarta Mouse Ensembl List.txt", "input/MitoCarta Mouse Ensembl To Human Entrez.csv")
convertEnsemblToEntrez1("input/MitoMiner Mouse Ensembl List.txt", "input/MitoMiner Mouse Ensembl To Human Entrez.csv")
convertEnsemblToEntrez1("input/All Mouse Ensembl List.txt", "input/All Mouse Ensembl To Human Entrez.csv")
convertEnsemblToEntrez2("input/MitoCarta Mouse Ensembl List.txt", "input/MitoCarta Mouse Ensembl To Mouse Entrez.csv", "input/MitoCarta Mouse Entrez List.txt")
convertEnsemblToEntrez2("input/All Mouse Ensembl List.txt", "input/All Mouse Ensembl To Mouse Entrez.csv", "input/All Mouse Entrez List.txt")
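## Optional sketch (not in the original script): if the attribute or filter names used above
## ever fail (Ensembl occasionally renames them, e.g. newer marts may expose 'entrezgene_id'
## rather than 'entrezgene'), the valid names can be listed with standard biomaRt helpers.
# ensemblMouse <- useMart('ensembl', dataset = 'mmusculus_gene_ensembl')
# attrs <- listAttributes(ensemblMouse)
# filts <- listFilters(ensemblMouse)
# grep('entrez', attrs$name, value = TRUE)        # e.g. "entrezgene" or "entrezgene_id"
# grep('homolog_hsap', filts$name, value = TRUE)  # homolog filter names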
/Bioinformatics/biomaRtCommandsOrigHD.R
no_license
yw595/AllUtils
R
false
false
2,346
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catboost.R
\name{catboost.from_data_frame}
\alias{catboost.from_data_frame}
\title{catboost.from_data_frame}
\usage{
catboost.from_data_frame(data, target = NULL, pairs = NULL, weight = NULL,
  query_id = NULL, pairs_weight = NULL, baseline = NULL, feature_names = NULL)
}
\arguments{
\item{data}{A data.frame with features.
The following column types are supported:
\itemize{
  \item double
  \item factor. It is assumed that categorical features are given in this type of columns.
  A standard CatBoost processing procedure is applied to this type of columns:
  \describe{
    \item{1.}{The values are converted to strings.}
    \item{2.}{The ConvertCatFeatureToFloat function is applied to the resulting string.}
  }
}
Default value: Required argument}

\item{target}{The target vector.}

\item{pairs}{A data.frame that contains the pairs descriptions. The shape
should be Nx2, where N is the pairs' count. The first element of pair is
the index of winner document in training set. The second element of pair
is the index of loser document in training set.}

\item{weight}{The weights of the target vector.}

\item{query_id}{The query_id of the target vector.}

\item{pairs_weight}{The weights of the pairs.}

\item{baseline}{Vector of initial (raw) values of the target function for
the object. Used in the calculation of final values of trees.}

\item{feature_names}{A list of names for each feature in the dataset.}
}
\value{
catboost.Pool
}
\description{
Create a dataset from the given data.frame. Only numeric features are
supported (their type should be double or factor). Categorical features
must be converted to numerical first. For example, use \code{as.factor()}
or the \code{colClasses} argument of the \code{read.table} method. The
target type should be double.
}
\examples{
pool_path <- 'train_full3'
cd_vector <- c('numeric', rep('numeric',2), rep('factor',7))
data <- read.table(pool_path, head = F, sep = "\\t", colClasses = cd_vector)
target <- c(1)
learn_size <- floor(0.8 * nrow(data))
learn_ind <- sample(nrow(data), learn_size)
learn <- data[learn_ind,]
test <- data[-learn_ind,]
learn_pool <- catboost.from_data_frame(data = learn[,-target], target = learn[,target])
test_pool <- catboost.from_data_frame(data = test[,-target], target = test[,target])
}
/catboost/R-package/man/catboost.from_data_frame.Rd
permissive
exprmntr/test
R
false
true
2,386
rd
## Downloading the file, unzipping and saving it. Commented as reading data is asked in the assignment.
##fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
##temp <- tempfile()
##download.file(fileUrl, destfile=temp)
##unzip(temp)
##unlink(temp)

colnames <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage",
              "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
feb_1_2_Data_temp <- read.table(pipe('grep "^[12]/2/2007" household_power_consumption.txt'),
                                sep=";", col.names=colnames)

## If grep is not available (e.g. on a Windows machine), please use the following method instead.
## x <- read.table("household_power_consumption.txt",sep=";",header=TRUE)
## feb_1_2_Data_temp <- subset(x, Date %in% c("01/02/2007","02/02/2007"))

feb_1_2_Data_temp <- transform(feb_1_2_Data_temp,
                               DateTime = as.POSIXlt(paste(Date, Time, sep=" "),
                                                     format="%d/%m/%Y %H:%M:%S"))
feb_1_2_Data <- feb_1_2_Data_temp[,c(length(feb_1_2_Data_temp), 3:(length(feb_1_2_Data_temp)-1))]

png(filename="plot2.png", width=480, height=480)
with(feb_1_2_Data, plot(DateTime, Global_active_power, type="l",
                        ylab="Global Active Power (kilowatts)", xlab=""))
dev.off()
/plot2.R
no_license
sidj-14/ExData_Plotting1
R
false
false
1,179
r
load("C:/Users/Kevin Caref/Google Drive/RScripts/Functions/unineuralhist.rFunc") load("C:/Users/Kevin Caref/Google Drive/RScripts/Functions/KC.sigbins_inhib_uni.Rfunc") side = "ipsi" prefiring = unineuralhist.rFunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt = 0 , endt = 2000, binw = 50, psthmin = 5, psthmax = 1, event = 4, cueexonly = T, side) cueexidx = prefiring[[2]] prefiring = unineuralhist.rFunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt = 0 , endt = 2000, binw = 50, psthmin = 5, psthmax = 1, event = 1, cueexonly = F, side) postfiring = unineuralhist.rFunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt = 2720 , endt = 4720, binw = 50, psthmin = 5, psthmax = 1, event = 1, cueexonly = F, side) #cueexidx = prefiring[[2]] #prefiring = prefiring[[1]] cuein = KC.sigbins_inhib_uni.Rfunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt=0, endt=2000, event=1, side) cueinidx = which(colSums(cuein) > 0) prebaseline = sapply(seq(1, ncol(prefiring)), function(x) mean(as.numeric(prefiring[1:100,x]))) postbaseline = sapply(seq(1, ncol(postfiring)), function(x) mean(as.numeric(postfiring[1:100,x]))) #peaksort = order(sapply(seq(1,ncol(prefiring)), function(x) mean(prefiring[c(51:56),x]))) #cueencoding = peaksort[21:53] otheridx = which(seq(1:length(prebaseline)) %in% c(cueexidx, cueinidx)== F) unipre = prebaseline unipost = postbaseline bipre = prebaseline bipost = postbaseline bl.lm = lm(c(unipost, bipost) ~ c(unipre, bipre)) confint(bl.lm, level = .95) #bl.lm = lm(postbaseline[-16]~prebaseline[-16]) #remove contra outlier #confint(bl.lm, level = .95) cueex.lm = lm(postbaseline[cueexidx]~prebaseline[cueexidx]) confint(cueex.lm, level = .95) cuein.lm = lm(postbaseline[cueinidx]~prebaseline[cueinidx]) confint(cuein.lm, level = .95) other.lm = lm(postbaseline[otheridx]~prebaseline[otheridx]) confint(other.lm, level = .95) #remove contra outlier #cueinidx = cueinidx[-1] par(pty = "s") plot.new() plot.window(xlim = c(0,10), ylim = c(0,10)) #uni points points(prebaseline[otheridx], postbaseline[otheridx], pch = 19, cex = 2, col = "black") points(prebaseline[cueexidx], postbaseline[cueexidx], pch = 19, cex = 2, col = "firebrick1") points(prebaseline[cueinidx], postbaseline[cueinidx], pch = 19, cex = 2, col = "blue") #bi points points(prebaseline[otheridx], postbaseline[otheridx], pch = 18, cex = 2, col = "black") points(prebaseline[cueexidx], postbaseline[cueexidx], pch = 18, cex = 2, col = "firebrick1") points(prebaseline[cueinidx], postbaseline[cueinidx], pch = 18, cex = 2, col = "blue") #abline(a = 0, b = 1) segments(0,0,10,10, lwd = 2,col = "black", lty=2) #abline(cue.lm, col = "gray", lwd = 2) #abline(other.lm, col = "black", lwd = 2) abline(bl.lm, col = "gray", lwd = 2) axis(1, at = seq(0,10, 2), cex.axis = 1.75) axis(2, at = seq(0,10, 2), cex.axis = 1.75, las = 2, tcl = -.8) mtext("Pre-injection Baseline (Hz)", side = 1, cex = 1.75, line=3) mtext("Post-injection Baseline (Hz)", side = 2, cex = 1.75, line=3) mtext("Cue-excited", side = 3, cex = 1.5, line = -2, at = 2, col = "firebrick1") mtext("Cue-inhibited", side = 3, cex = 1.5, line = -3, at = 2, col = "blue") mtext("Other", side = 3, cex = 1.5, line = -4, at = 2, col = "black") mtext("Red = unity line, gray = regression for all points", side = 3, cex = 2, line=4)
/scripts/baseline firing comparison_pooled uni and bi.R
permissive
kcaref/neural-analysis
R
false
false
3,617
r
load("C:/Users/Kevin Caref/Google Drive/RScripts/Functions/unineuralhist.rFunc") load("C:/Users/Kevin Caref/Google Drive/RScripts/Functions/KC.sigbins_inhib_uni.Rfunc") side = "ipsi" prefiring = unineuralhist.rFunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt = 0 , endt = 2000, binw = 50, psthmin = 5, psthmax = 1, event = 4, cueexonly = T, side) cueexidx = prefiring[[2]] prefiring = unineuralhist.rFunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt = 0 , endt = 2000, binw = 50, psthmin = 5, psthmax = 1, event = 1, cueexonly = F, side) postfiring = unineuralhist.rFunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt = 2720 , endt = 4720, binw = 50, psthmin = 5, psthmax = 1, event = 1, cueexonly = F, side) #cueexidx = prefiring[[2]] #prefiring = prefiring[[1]] cuein = KC.sigbins_inhib_uni.Rfunc(path = "C:/Users/Kevin Caref/Google Drive/Opioid Pilot/Neural Data/sorted/unilateral ctap/", startt=0, endt=2000, event=1, side) cueinidx = which(colSums(cuein) > 0) prebaseline = sapply(seq(1, ncol(prefiring)), function(x) mean(as.numeric(prefiring[1:100,x]))) postbaseline = sapply(seq(1, ncol(postfiring)), function(x) mean(as.numeric(postfiring[1:100,x]))) #peaksort = order(sapply(seq(1,ncol(prefiring)), function(x) mean(prefiring[c(51:56),x]))) #cueencoding = peaksort[21:53] otheridx = which(seq(1:length(prebaseline)) %in% c(cueexidx, cueinidx)== F) unipre = prebaseline unipost = postbaseline bipre = prebaseline bipost = postbaseline bl.lm = lm(c(unipost, bipost) ~ c(unipre, bipre)) confint(bl.lm, level = .95) #bl.lm = lm(postbaseline[-16]~prebaseline[-16]) #remove contra outlier #confint(bl.lm, level = .95) cueex.lm = lm(postbaseline[cueexidx]~prebaseline[cueexidx]) confint(cueex.lm, level = .95) cuein.lm = lm(postbaseline[cueinidx]~prebaseline[cueinidx]) confint(cuein.lm, level = .95) other.lm = lm(postbaseline[otheridx]~prebaseline[otheridx]) confint(other.lm, level = .95) #remove contra outlier #cueinidx = cueinidx[-1] par(pty = "s") plot.new() plot.window(xlim = c(0,10), ylim = c(0,10)) #uni points points(prebaseline[otheridx], postbaseline[otheridx], pch = 19, cex = 2, col = "black") points(prebaseline[cueexidx], postbaseline[cueexidx], pch = 19, cex = 2, col = "firebrick1") points(prebaseline[cueinidx], postbaseline[cueinidx], pch = 19, cex = 2, col = "blue") #bi points points(prebaseline[otheridx], postbaseline[otheridx], pch = 18, cex = 2, col = "black") points(prebaseline[cueexidx], postbaseline[cueexidx], pch = 18, cex = 2, col = "firebrick1") points(prebaseline[cueinidx], postbaseline[cueinidx], pch = 18, cex = 2, col = "blue") #abline(a = 0, b = 1) segments(0,0,10,10, lwd = 2,col = "black", lty=2) #abline(cue.lm, col = "gray", lwd = 2) #abline(other.lm, col = "black", lwd = 2) abline(bl.lm, col = "gray", lwd = 2) axis(1, at = seq(0,10, 2), cex.axis = 1.75) axis(2, at = seq(0,10, 2), cex.axis = 1.75, las = 2, tcl = -.8) mtext("Pre-injection Baseline (Hz)", side = 1, cex = 1.75, line=3) mtext("Post-injection Baseline (Hz)", side = 2, cex = 1.75, line=3) mtext("Cue-excited", side = 3, cex = 1.5, line = -2, at = 2, col = "firebrick1") mtext("Cue-inhibited", side = 3, cex = 1.5, line = -3, at = 2, col = "blue") mtext("Other", side = 3, cex = 1.5, line = -4, at = 2, col = "black") mtext("Red = unity line, gray = regression for all points", side = 3, cex = 2, line=4)
## The functions herein create a special "matrix" and calculate its inverse

## The first function, makeCacheMatrix, creates a special "matrix", which is really a list containing a function to
#   set the value of the matrix
#   get the value of the matrix
#   set the inverse of the matrix
#   get the inverse of the matrix

makeCacheMatrix <- function(x = matrix()) {
        m <- NULL
        set <- function(y) {
                x <<- y
                m <<- NULL
        }
        get <- function() x
        # cache the supplied inverse matrix (not the solve function itself)
        setInverseMatrix <- function(InvMatrix) m <<- InvMatrix
        getInverseMatrix <- function() m
        list(set = set, get = get,
             setInverseMatrix = setInverseMatrix,
             getInverseMatrix = getInverseMatrix)
}

## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve retrieves the inverse from the cache.

cacheSolve <- function(x, ...) {
        m <- x$getInverseMatrix()
        if(!is.null(m)) {
                message("getting cached data")
                return(m)
        }
        data <- x$get()
        # compute the inverse with solve() and cache it for subsequent calls
        m <- solve(data, ...)
        x$setInverseMatrix(m)
        m
}
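## Short usage sketch (not part of the assignment file): a toy 2x2 matrix; the second
## cacheSolve call should hit the cache instead of recomputing the inverse.
# cm <- makeCacheMatrix(matrix(c(2, 0, 0, 4), nrow = 2))
# cacheSolve(cm)                 # computes and caches the inverse
# cacheSolve(cm)                 # prints "getting cached data" and returns the cached inverse
# cm$get() %*% cacheSolve(cm)    # ~ identity matrix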
/cachematrix.R
no_license
shumakerro/ProgrammingAssignment2
R
false
false
1,278
r
## sQTL. # Maps splice eQTLs. sQTL <- function () { ### Pre-processing ## Load data. # Set working directory. #setwd("~/Dropbox/Erik\ Schutte\ Internship\ 2016/") cat("Starting splice QTL analysis..\n") # Load in the data files. trans.exp.f = "/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/gsTcell_TranscriptExpression.csv" cat("Loading gene bed file..\n") gene.bed.f = "/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genes.bed" ## Order data. # Ordered genotype file should be compressed and indexed if not done before. cat("Indexing genotype file..\n") if ( file.exists("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genotype.indexed.f.Rdata") ) { load("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genotype.indexed.f.Rdata") } else{ # Load the genotype Information without the samples, they are generated in this script so we are going to do it on the fly. genoTypeInformation.unfinished = read.table("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genoTypeInformation.tsv",header=T,sep="\t") # There are less snps in the positions file than in my genotype file, find out which are not there and exclude them. snps_present = which(genoTypeInformation.unfinished[,4] %in% rownames(snps.t)) # Create a tmp variable to store the new matrix in. tmp = genoTypeInformation.unfinished[snps_present,] # Save an order for the chromosomes. chrOrder<-c(paste(1:22,sep=""),"chrX") # Order the chromosomes factors on the new order. tmp[,1] <-factor(tmp[,1], levels=chrOrder) # Order the data on the new chromosome order. tmp <- tmp[order(tmp[,1],tmp[,2]),] # We can just cbind here, because the rs from snps.t and tmp are orderd the same. genoTypeInf <- cbind(tmp, snps.t) # Remove the Missing ones, or rather replace them with -1. genoTypeInf[is.na(genoTypeInf)] <- -1 # Change the factor level so we don't get an NA as chr. levels(genoTypeInf[,1])[23] <- 23 # Change the X chromosome to 23. genoTypeInf[is.na(genoTypeInf), 1] <- 23 #colnames(genoTypeInf) <- gsub("\"","",colnames(genoTypeInf)) cat("Loading genotype file..\n") # Load the genotype file, we have to do this, because sQTLseeker's logic only accepts a file path instead of a file/matrix or anythings else. genotype.f = "/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genoTypeinf.tsv" # Write the now finished genotype information matrix to a file, since sqtlseeker only accepts file input. write.table( genoTypeInf, file="/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genoTypeinf.tsv", sep="\t", row.names = F, quote = F) # Index the genotype file. genotype.indexed.f = index.genotype(genotype.f) # Save an image of the genotype file. save.image(file="/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genotype.indexed.f.Rdata") save(genotype.indexed.f, file="/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/genotype.indexed.f.Rdata") } ## Read data. # Import transcript expression, cleaned. 
if ( file.exists("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/tre.df.Rdata") ) { load("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/tre.df.Rdata") } else{ cat("Reading transcript expression..\n") te.df = read.table(trans.exp.f, as.is=T, header=T, sep="\t") cat("Prepare transcript expression..\n") tre.df = prepare.trans.exp(te.df, min.transcript.exp = 0, min.gene.exp = 0, min.dispersion = 0, verbose = T ) save.image(file="/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/tre.df.Rdata") save(tre.df, file="/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/data/tre.df.Rdata") } ### Test for gene/SNP associations ## Tests. # Run test with transcript expression and genotype file and gene coordinates. cat("Read gene bed file...\n") gene.bed = read.table(gene.bed.f, as.is=T, sep="\t") # Set column names. cat("Set column gene bed file..\n") colnames(gene.bed) = c("chr","start","end","geneId") # Change col names format for tre.df to _ instead of . colnames(tre.df) <- sub("\\.","_",colnames(tre.df)) colnames(tre.df) <- sub("\\.","_",colnames(tre.df)) # Filter Tre.df for samples that don't exist in the genotype file. ### DYNAMIC SQTL ### # Save index for each time point. #sort(colnames(tre.df[3:length(colnames(tre.df))])) timepoints = list(t0 = grep("_t0",colnames(tre.df)), t10 = grep("_t10",colnames(tre.df)), t30 = grep("_t30",colnames(tre.df)), t180 = grep("_t180",colnames(tre.df))) times <- c("t0","t10","t30","t180") iteration = 1 for ( timepoint in timepoints) { cat("Do sqtlseeker dynamic..\n") # For every timepoint create a new data frame, set the first two columns to trId and geneId. tre.df.tp <- tre.df[,1:2] # Add the timepoints according to a grepped index to the new table. tre.df.tp <- cbind(tre.df.tp, tre.df[,timepoint]) # Change the column names, so that they match with the sample names from the genotype file. colnames(tre.df.tp) <- sub("batch[0-9]+_T","T",colnames(tre.df.tp)) colnames(tre.df.tp) <- sub("_t[0-9]+","",colnames(tre.df.tp)) # Filter the samples from the transcript expression file that don't exist in the genotype file. noGenoTypeSampleIndex <- which(!colnames(tre.df.tp) %in% colnames(snps.t)) tre.df.tp <- tre.df.tp[,-noGenoTypeSampleIndex[3:length(noGenoTypeSampleIndex)]] # There is apperantly a sample in batch 2 that is also in batch 4. That only comes to the front if I execute this # twice, I don't know why this happens but this solves this. noGenoTypeSampleIndex <- which(!colnames(tre.df.tp) %in% colnames(snps.t)) tre.df.tp <- tre.df.tp[,-noGenoTypeSampleIndex[3:length(noGenoTypeSampleIndex)]] # Start sqtl.seeker. res.df.dynamic = sqtl.seeker.modified(tre.df.tp, genotype.indexed.f, gene.bed, svQTL=F, genic.window=250000, verbose=F) cat("write to table..\n") # Write the output to a tsv file. write.table(res.df.dynamic, file=paste("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/R/Splice/sQTLs-dynamic-",times[iteration],"all.tsv",sep=""), quote=FALSE, row.names=FALSE, sep="\t") # Save an image so we can work with the data later if we want to. save.image(file=paste("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/R/Splice/res.df.dynamic-",times[iteration],"-.Rdata",sep="")) save(res.df.dynamic, file=paste("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/R/Splice/res.df.dynamic-",times[iteration],"-.Rdata",sep="")) # Execute sqtls, a function that checks the qvalue of ech sqtl. 
sqtls.df = sqtls(res.df = res.df.dynamic, FDR = 1,
                     out.pdf=paste("/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/R/Splice/sQTLs-dynamic-",times[iteration],"-FDR1.pdf", sep="") )
    cat("\nProcessed: ",times[iteration])

    # Increase the iteration counter, mainly used to name the output files.
    iteration <- iteration + 1
  }

  ### STATIC SQTL ###
  tre.df.static <- tre.df
  cat("Do sqtlseeker static..\n")
  colnames(tre.df.static) <- sub("batch[0-9]+_T","T",colnames(tre.df.static))
  colnames(tre.df.static) <- sub("_t[0-9]+","",colnames(tre.df.static))

  res.df.static = sqtl.seeker.modified(tre.df.static, genotype.indexed.f, gene.bed, svQTL=F,
                                       verbose=F, genic.window=250000)

  cat("write to table..\n")
  write.table(res.df.static,
              file="/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/R/Splice/sQTLs-static-all.tsv",
              quote=FALSE, row.names=FALSE, sep="\t")

  sqtls.df = sqtls(res.df.static, FDR = 1,
                   out.pdf="/groups/umcg-wijmenga/tmp04/umcg-eschutte/projects/gluten_specific_Tcells/R/Splice/sQTLs-static-FDR01.pdf")
}
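# Illustration of the sample-name normalisation applied above; the column names
# here are invented examples, only the two regular expressions come from the script.
cn <- c("batch2_TCC123_t0", "batch4_TCC456_t30")
cn <- sub("batch[0-9]+_T", "T", cn)   # strip the batch prefix
cn <- sub("_t[0-9]+", "", cn)         # strip the time-point suffix
cn                                    # "TCC123" "TCC456"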
/mapping/archive/spliceQTL-Mapping-calculon.R
no_license
ErikSchutte/QTL-Mapping
R
false
false
8,367
r
#' \pkg{rdhs}: the DHS database through R
#'
#' Provides a client for (1) querying the DHS API for survey indicators
#' and metadata, (2) identifying surveys and datasets for analysis,
#' (3) downloading survey datasets from the DHS website, (4) loading
#' datasets and associated metadata into R, and (5) extracting variables
#' and combining datasets for pooled analysis.
#'
#' @docType package
#' @name rdhs
#'
#' @importFrom stats setNames
#' @importFrom utils tail type.convert packageVersion unzip str capture.output
#' @importFrom rappdirs user_cache_dir
#' @importFrom R6 R6Class
#' @importFrom storr storr_rds
#'
"_PACKAGE"

globalVariables(c("x"))
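# A hedged usage sketch of the five-step workflow described above. The function
# names and arguments are recalled from the rdhs documentation and may differ
# between package versions; the email, project, country code and variable names
# are placeholders.
# library(rdhs)
# set_rdhs_config(email = "user@example.com", project = "My DHS project")
# surveys   <- dhs_surveys(countryIds = "SN", surveyYearStart = 2010)        # (1)-(2)
# datasets  <- dhs_datasets(surveyIds = surveys$SurveyId, fileFormat = "flat")
# paths     <- get_datasets(datasets$FileName)                               # (3)-(4)
# questions <- search_variables(datasets$FileName, variables = c("v012", "v025"))
# extracted <- extract_dhs(questions, add_geo = FALSE)                       # (5)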
/R/rdhs.R
no_license
LucyMcGowan/rdhs
R
false
false
652
r
#' Gower factor Kernel R6 class #' #' For a factor that has been converted to its indices. #' Each factor will need a separate kernel. #' #' #' @docType class #' @importFrom R6 R6Class #' @export #' @useDynLib GauPro, .registration = TRUE #' @importFrom Rcpp evalCpp #' @importFrom stats optim # @keywords data, kriging, Gaussian process, regression #' @return Object of \code{\link{R6Class}} with methods for fitting GP model. #' @format \code{\link{R6Class}} object. #' @field p Parameter for correlation #' @field p_est Should p be estimated? #' @field p_lower Lower bound of p #' @field p_upper Upper bound of p #' @field s2 variance #' @field s2_est Is s2 estimated? #' @field logs2 Log of s2 #' @field logs2_lower Lower bound of logs2 #' @field logs2_upper Upper bound of logs2 #' @field xindex Index of the factor (which column of X) #' @field nlevels Number of levels for the factor #' @field offdiagequal What should offdiagonal values be set to when the #' indices are the same? Use to avoid decomposition errors, similar to #' adding a nugget. #' @examples #' kk <- GowerFactorKernel$new(D=1, nlevels=5, xindex=1, p=.2) #' kmat <- outer(1:5, 1:5, Vectorize(kk$k)) #' kmat #' kk$plot() #' #' #' # 2D, Gaussian on 1D, index on 2nd dim #' library(dplyr) #' n <- 20 #' X <- cbind(matrix(runif(n,2,6), ncol=1), #' matrix(sample(1:2, size=n, replace=TRUE), ncol=1)) #' X <- rbind(X, c(3.3,3)) #' n <- nrow(X) #' Z <- X[,1] - (X[,2]-1.8)^2 + rnorm(n,0,.1) #' tibble(X=X, Z) %>% arrange(X,Z) #' k2a <- IgnoreIndsKernel$new(k=Gaussian$new(D=1), ignoreinds = 2) #' k2b <- GowerFactorKernel$new(D=2, nlevels=3, xind=2) #' k2 <- k2a * k2b #' k2b$p_upper <- .65*k2b$p_upper #' gp <- GauPro_kernel_model$new(X=X, Z=Z, kernel = k2, verbose = 5, #' nug.min=1e-2, restarts=0) #' gp$kernel$k1$kernel$beta #' gp$kernel$k2$p #' gp$kernel$k(x = gp$X) #' tibble(X=X, Z=Z, pred=gp$predict(X)) %>% arrange(X, Z) #' tibble(X=X[,2], Z) %>% group_by(X) %>% summarize(n=n(), mean(Z)) #' curve(gp$pred(cbind(matrix(x,ncol=1),1)),2,6, ylim=c(min(Z), max(Z))) #' points(X[X[,2]==1,1], Z[X[,2]==1]) #' curve(gp$pred(cbind(matrix(x,ncol=1),2)), add=TRUE, col=2) #' points(X[X[,2]==2,1], Z[X[,2]==2], col=2) #' curve(gp$pred(cbind(matrix(x,ncol=1),3)), add=TRUE, col=3) #' points(X[X[,2]==3,1], Z[X[,2]==3], col=3) #' legend(legend=1:3, fill=1:3, x="topleft") #' # See which points affect (5.5, 3 themost) #' data.frame(X, cov=gp$kernel$k(X, c(5.5,3))) %>% arrange(-cov) #' plot(k2b) #' #' # GowerFactorKernel ---- GowerFactorKernel <- R6::R6Class( classname = "GauPro_kernel_GowerFactorKernel", inherit = GauPro_kernel, public = list( p = NULL, # vector of correlations p_est = NULL, p_lower = NULL, p_upper = NULL, s2 = NULL, # variance coefficient to scale correlation matrix to covariance s2_est = NULL, logs2 = NULL, logs2_lower = NULL, logs2_upper = NULL, nlevels = NULL, xindex = NULL, offdiagequal = NULL, #' @description Initialize kernel object #' @param s2 Initial variance #' @param D Number of input dimensions of data #' @param p_lower Lower bound for p #' @param p_upper Upper bound for p #' @param p_est Should p be estimated? #' @param p Vector of correlations #' @param s2_lower Lower bound for s2 #' @param s2_upper Upper bound for s2 #' @param s2_est Should s2 be estimated? #' @param xindex Index of the factor (which column of X) #' @param nlevels Number of levels for the factor #' @param useC Should C code used? Not implemented for FactorKernel yet. #' @param offdiagequal What should offdiagonal values be set to when the #' indices are the same? 
Use to avoid decomposition errors, similar to #' adding a nugget. initialize = function(s2=1, D, nlevels, xindex, p_lower=0, p_upper=.9, p_est=TRUE, s2_lower=1e-8, s2_upper=1e8, s2_est=TRUE, p, useC=TRUE, offdiagequal=1-1e-6 ) { # Must give in D if (missing(D)) {stop("Must give Index kernel D")} self$D <- D self$nlevels <- nlevels self$xindex <- xindex if (missing(p)) { p <- 0 } else { stopifnot(is.numeric(p), length(p) == 1, p>=0, p<=1) } self$p <- p stopifnot(is.numeric(p_lower), length(p_lower) == 1, p_lower>=0, p_lower<=1) self$p_lower <- p_lower stopifnot(is.numeric(p_upper), length(p_upper) == 1, p_upper>=0, p_upper<=1, p_lower <= p_upper) # Don't give upper 1 since it will give optimization error self$p_upper <-p_upper self$p_est <- p_est self$s2 <- s2 self$logs2 <- log(s2, 10) self$logs2_lower <- log(s2_lower, 10) self$logs2_upper <- log(s2_upper, 10) self$s2_est <- s2_est self$useC <- useC self$offdiagequal <- offdiagequal }, #' @description Calculate covariance between two points #' @param x vector. #' @param y vector, optional. If excluded, find correlation #' of x with itself. #' @param p Correlation parameters. #' @param s2 Variance parameter. #' @param params parameters to use instead of beta and s2. k = function(x, y=NULL, p=self$p, s2=self$s2, params=NULL) { if (!is.null(params)) { lenparams <- length(params) if (self$p_est) { p <- params[1] } else { p <- self$p } # if (self$alpha_est) { # logalpha <- params[1 + as.integer(self$p_est) * self$p_length] # } else { # logalpha <- self$logalpha # } if (self$s2_est) { logs2 <- params[lenparams] } else { logs2 <- self$logs2 } s2 <- 10^logs2 } else { if (is.null(p)) {p <- self$p} if (is.null(s2)) {s2 <- self$s2} } if (is.null(y)) { if (is.matrix(x)) { val <- outer(1:nrow(x), 1:nrow(x), Vectorize(function(i,j){ self$kone(x[i,],x[j,],p=p, s2=s2, isdiag=i==j) })) return(val) } else { return(s2 * 1) } } if (is.matrix(x) & is.matrix(y)) { outer(1:nrow(x), 1:nrow(y), Vectorize(function(i,j){self$kone(x[i,],y[j,],p=p, s2=s2)})) } else if (is.matrix(x) & !is.matrix(y)) { apply(x, 1, function(xx) {self$kone(xx, y, p=p, s2=s2)}) } else if (is.matrix(y)) { apply(y, 1, function(yy) {self$kone(yy, x, p=p, s2=s2)}) } else { self$kone(x, y, p=p, s2=s2) } }, #' @description Find covariance of two points #' @param x vector #' @param y vector #' @param p correlation parameters on regular scale #' @param s2 Variance parameter #' @param isdiag Is this on the diagonal of the covariance? #' @param offdiagequal What should offdiagonal values be set to when the #' indices are the same? Use to avoid decomposition errors, similar to #' adding a nugget. 
kone = function(x, y, p, s2, isdiag=TRUE, offdiagequal=self$offdiagequal) { x <- x[self$xindex] y <- y[self$xindex] stopifnot(x>=1, y>=1, x<=self$nlevels, y<=self$nlevels, abs(x-as.integer(x)) < 1e-8, abs(y-as.integer(y)) < 1e-8) if (x==y) { # out <- s2 * 1 # Trying to avoid singular values if (isdiag) { out <- s2 * 1 } else { out <- s2 * offdiagequal } } else { out <- s2 * p } if (any(is.nan(out))) {stop("Error #9228878341")} out }, #' @description Derivative of covariance with respect to parameters #' @param params Kernel parameters #' @param X matrix of points in rows #' @param C_nonug Covariance without nugget added to diagonal #' @param C Covariance with nugget #' @param nug Value of nugget dC_dparams = function(params=NULL, X, C_nonug, C, nug) { # stop("not implemented, kernel index, dC_dp") n <- nrow(X) lenparams <- length(params) if (lenparams > 0) { if (self$p_est) { p <- params[1] } else { p <- self$p } if (self$s2_est) { logs2 <- params[lenparams] } else { logs2 <- self$logs2 } } else { p <- self$p logs2 <- self$logs2 } log10 <- log(10) s2 <- 10 ^ logs2 if (missing(C_nonug)) { # Assume C missing too, must have nug C_nonug <- self$k(x=X, params=params) C <- C_nonug + diag(nug*s2, nrow(C_nonug)) } lenparams_D <- as.integer(self$p_est + self$s2_est) dC_dparams <- array(dim=c(lenparams_D, n, n), data=0) if (self$s2_est) { dC_dparams[lenparams_D,,] <- C * log10 } if (self$p_est) { for (k in 1:length(p)) { # k is index of parameter for (i in seq(1, n-1, 1)) { xx <- X[i, self$xindex] for (j in seq(i+1, n, 1)) { yy <- X[j, self$xindex] if (xx == yy) { # Corr is just 1, parameter has no effect } else { dC_dparams[k,i,j] <- 1 * s2 dC_dparams[k,j,i] <- dC_dparams[k,i,j] } # # r2 <- sum(p * (X[i,]-X[j,])^2) # dC_dparams[k,i,j] <- -C_nonug[i,j] * alpha * # dC_dparams[k,j,i] <- dC_dparams[k,i,j] } } for (i in seq(1, n, 1)) { # Get diagonal set to zero dC_dparams[k,i,i] <- 0 } } } return(dC_dparams) }, #' @description Calculate covariance matrix and its derivative #' with respect to parameters #' @param params Kernel parameters #' @param X matrix of points in rows #' @param nug Value of nugget C_dC_dparams = function(params=NULL, X, nug) { s2 <- self$s2_from_params(params) C_nonug <- self$k(x=X, params=params) C <- C_nonug + diag(s2*nug, nrow(X)) dC_dparams <- self$dC_dparams(params=params, X=X, C_nonug=C_nonug, C=C, nug=nug) list(C=C, dC_dparams=dC_dparams) }, #' @description Derivative of covariance with respect to X #' @param XX matrix of points #' @param X matrix of points to take derivative with respect to #' @param ... Additional args, not used dC_dx = function(XX, X, ...) { if (!is.matrix(XX)) {stop()} d <- ncol(XX) if (ncol(X) != d) {stop()} n <- nrow(X) nn <- nrow(XX) dC_dx <- array(0, dim=c(nn, d, n)) dC_dx[, self$xindex, ] <- NA dC_dx }, #' @description Starting point for parameters for optimization #' @param jitter Should there be a jitter? #' @param y Output #' @param p_est Is p being estimated? #' @param alpha_est Is alpha being estimated? #' @param s2_est Is s2 being estimated? param_optim_start = function(jitter=F, y, p_est=self$p_est, s2_est=self$s2_est) { if (p_est) { vec <- min(max(self$p + jitter*rnorm(1, 0, .1), self$p_lower), self$p_upper) } else { vec <- c() } if (s2_est) { vec <- c(vec, self$logs2 + jitter*rnorm(1)) } vec }, #' @description Starting point for parameters for optimization #' @param jitter Should there be a jitter? #' @param y Output #' @param p_est Is p being estimated? #' @param alpha_est Is alpha being estimated? #' @param s2_est Is s2 being estimated? 
param_optim_start0 = function(jitter=F, y, p_est=self$p_est, s2_est=self$s2_est) { if (p_est) { vec <- min(max(0 + jitter*rnorm(1, 0, .1), self$p_lower), self$p_upper) } else { vec <- c() } if (s2_est) { vec <- c(vec, self$logs2 + jitter*rnorm(1)) } vec }, #' @description Lower bounds of parameters for optimization #' @param p_est Is p being estimated? #' @param alpha_est Is alpha being estimated? #' @param s2_est Is s2 being estimated? param_optim_lower = function(p_est=self$p_est, s2_est=self$s2_est) { if (p_est) {vec <- c(self$p_lower)} else {vec <- c()} if (s2_est) {vec <- c(vec, self$logs2_lower)} else {} vec }, #' @description Upper bounds of parameters for optimization #' @param p_est Is p being estimated? #' @param alpha_est Is alpha being estimated? #' @param s2_est Is s2 being estimated? param_optim_upper = function(p_est=self$p_est, # alpha_est=self$alpha_est, s2_est=self$s2_est) { if (p_est) {vec <- c(self$p_upper)} else {vec <- c()} if (s2_est) {vec <- c(vec, self$logs2_upper)} else {} vec }, #' @description Set parameters from optimization output #' @param optim_out Output from optimization #' @param p_est Is p being estimated? #' @param alpha_est Is alpha being estimated? #' @param s2_est Is s2 being estimated? set_params_from_optim = function(optim_out, p_est=self$p_est, s2_est=self$s2_est) { loo <- length(optim_out) if (p_est) { self$p <- optim_out[1] } if (s2_est) { self$logs2 <- optim_out[loo] self$s2 <- 10 ^ self$logs2 } }, #' @description Get s2 from params vector #' @param params parameter vector #' @param s2_est Is s2 being estimated? s2_from_params = function(params, s2_est=self$s2_est) { # 10 ^ params[length(params)] if (s2_est && !is.null(params)) { # Is last if in params 10 ^ params[length(params)] } else { # Else it is just using set value, not being estimated self$s2 } }, #' @description Print this object print = function() { cat('GauPro kernel: Gower Factor\n') cat('\tD =', self$D, '\n') cat('\ts2 =', self$s2, '\n') cat('\ton x-index', self$xindex, 'with', self$nlevels, 'levels\n') } ) )
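# Sketch of the covariance structure this kernel encodes for one factor with
# three levels; p and s2 are arbitrary illustrative values, and the offdiagequal
# adjustment used for repeated observations of the same level is ignored here.
p <- 0.2; s2 <- 1.5
K <- matrix(s2 * p, nrow = 3, ncol = 3)  # s2 * p whenever the two levels differ
diag(K) <- s2                            # s2 on the diagonal (same level)
K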
/R/kernel_GowerFactor.R
no_license
CollinErickson/GauPro
R
false
false
14,246
r
# 03/11/2016 # # # Get experiment-level estimates for the Overlap cases # of the Ribosome pathway. # The experiments are identified by its GSE id, # and we get the estimates for every random split of the # ribosome pathway into gene sets with a predetermined overlap # # The job array index is used to select the GSE series with the script, # and another command line argument to select the overlap case # RUN ON ODYSSEY (pcxn_over_estimates01.sh) [1-863] # # In an improved version of the SLURM script, the # job array index is used to select the GSE series and the # argument to select the overlap case is passed through a # shell script # (pcxn_over_estimates01a_shell.sh) # (pcxn_over_estimates01.R) rm(list=ls()) library(parallel) library(matrixStats) options(stringsAsFactors = F) pcxn_dir = "/net/hsphfs1/srv/export/hsphfs1/share_root/hide_lab/PCxN/" # ==== PCxN Functions ==== OverlapCoefficient <- function(x,y){ # function to calculate the overlap coefficient between x and y # which is defined as the size of the intersection divided by the # size of the smaller set # # Args # x: a vector # y: a vector # # Returns # the overlap coefficient, a number between 0 and 1 length(intersect(x,y))/min(length(unique(x)),length(unique(y))) } GetSummary = function(dat,gs,sum_fun){ # function to calculate the summary statistic for the pathway # # Args. # dat: genes by samples matrix # gs: vector with the names of the genes in the gene set # sum_fun: function to calculate the summary # # Returns # a 1 by samples vector with the summary statistic for the pathway if(length(gs) > 1){ # calculate summary for pathways with more than 1 element return(sum_fun(dat[rownames(dat) %in% gs,])) }else{ # return actual value for pathways with a single element return(dat[rownames(dat) %in% gs,]) } } GetCorEstimates = function(x,y,METHOD = "spearman"){ # function to estimate the correlation coefficient between x and y, # the corresponding t-statistic and p-value # # Args # x: a vector with n observations # y: a vector with n observations # method: character to pick either the Pearson or Spearman correlation coefficient # # Returns # a named vector with the correlation estimate, the sample size n, the t-statistic # and its corresponding p-value # function to estimate t-statistic GetStatistic = function(r,n){r*sqrt((n-2)/(1-r^2))} # get sample size if(length(x) == length(y)){ n <- length(x) }else{ cat("x and y have different lengths! 
>=( \n") return(NA) } # get correlation estimate estimate <- cor(x,y,method = METHOD) # get t-statistic statistic <- GetStatistic(estimate,n) # get p-value for the two-sided test p.value <- 2*pt(-abs(statistic),n-2) res <- c(estimate,n,statistic,p.value) names(res) <- c("estimate","n","statistic","p.value") return(res) } # ==== GSE annotation ==== gse_annot = readRDS(paste0(pcxn_dir,"data/GSE_annotation.RDS")) # get sample size per GSE series gse_count = table(gse_annot$GSE) gse_count = sort(gse_count,decreasing=T) # keep series with at least 5 samples gse_ids = names(gse_count[gse_count >= 15]) # ==== Expression Background ==== exprs_rnk = readRDS(paste0(pcxn_dir,"output/Benchmark/GPL570_Rb_mat.RDS")) # read command line arguments args = as.numeric(commandArgs(trailingOnly = T)) # the first argument is the overlap case [1-10] # the second argument is the GSE series [1-863] # ==== Ribosome Overlap Case ==== cat("Overlap Case:",args[1],"\n") gs_lst = readRDS(paste0(pcxn_dir,"output/Benchmark/ribosome_over",args[1],"_gs.RDS")) # argument to pick GSE series cat("GSE Series:", gse_ids[args[2]],"\n") (nc = detectCores()) # loop through all no overlap cases number_of_pathways = length(gs_lst) input = 1:number_of_pathways # select GSE series gse = gse_ids[args[2]] gsm_targets = gse_annot$GSM[gse_annot$GSE == gse] gsm_ind = colnames(exprs_rnk) %in% gsm_targets # subset expression ranks exprs_rnk = exprs_rnk[,gsm_ind] # R garbage collection gc() # function to process elements ProcessElement = function(ic,METHOD,sum_fun){ # pathway gene sets gsA = gs_lst[[ic]]$gs01 gsB = gs_lst[[ic]]$gs02 # split into three disjoint sets: # 1. genes shared by pathwatys A and B gsAB = intersect(gsA,gsB) # 2. genes unique to pathway A gsAu = setdiff(gsA,gsB) # 3. genes unique to pathway B gsBu = setdiff(gsB,gsA) # get pathway summaries for unique genes in each pathway summaryAu = GetSummary(dat=exprs_rnk,gs=gsAu,sum_fun=sum_fun) summaryBu = GetSummary(dat=exprs_rnk,gs=gsBu,sum_fun=sum_fun) # get correlation between the summaries for the unique genes tmp = data.frame(Pathway.A=names(gs_lst)[ic],Pathway.B=names(gs_lst)[ic]) tmp = c(tmp,GetCorEstimates(x=summaryAu,y=summaryBu,METHOD = METHOD)) # get overlap coefficient tmp$Overlap.Coeff= OverlapCoefficient(gsA,gsB) setTxtProgressBar(pb,ic) return(tmp) } # ==== PCxN Estimates: Mean/Pearson ==== ProcessElementMnPn = function(ic){ProcessElement(ic,METHOD="pearson",sum_fun=colMeans)} pb = txtProgressBar(min=0,max=number_of_pathways,style=3,initial=0) cat("\n") resMnPn = mclapply(input,ProcessElementMnPn,mc.cores=nc) close(pb) # save results saveRDS(resMnPn,paste0(pcxn_dir,"output/Benchmark/Overlap/Mean/Pearson/Case",args[1],"/GSE/",gse,"_ribosome_over",args[1],".RDS")) rm(resMnPn) # ==== PCxN Estimates: Mean/Spearman ==== ProcessElementMnSp = function(ic){ProcessElement(ic,METHOD="spearman",sum_fun=colMeans)} pb = txtProgressBar(min=0,max=number_of_pathways,style=3,initial=0) cat("\n") resMnSp = mclapply(input,ProcessElementMnSp,mc.cores=nc) close(pb) # save results saveRDS(resMnSp,paste0(pcxn_dir,"output/Benchmark/Overlap/Mean/Spearman/Case",args[1],"/GSE/",gse,"_ribosome_over",args[1],".RDS")) rm(resMnSp) # ==== PCxN Estimates: Median/Pearson ==== ProcessElementMdPn = function(ic){ProcessElement(ic,METHOD="pearson",sum_fun=colMedians)} pb = txtProgressBar(min=0,max=number_of_pathways,style=3,initial=0) cat("\n") resMdPn = mclapply(input,ProcessElementMdPn,mc.cores=nc) close(pb) # save results 
saveRDS(resMdPn,paste0(pcxn_dir,"output/Benchmark/Overlap/Median/Pearson/Case",args[1],"/GSE/",gse,"_ribosome_over",args[1],".RDS")) rm(resMdPn) # ==== PCxN Estimates: Median/Spearman ==== ProcessElementMdSp = function(ic){ProcessElement(ic,METHOD="spearman",sum_fun=colMedians)} pb = txtProgressBar(min=0,max=number_of_pathways,style=3,initial=0) cat("\n") resMdSp = mclapply(input,ProcessElementMdSp,mc.cores=nc) close(pb) # save results saveRDS(resMdSp,paste0(pcxn_dir,"output/Benchmark/Overlap/Median/Spearman/Case",args[1],"/GSE/",gse,"_ribosome_over",args[1],".RDS")) rm(list=ls())
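# Worked example of the two helper statistics defined above; the gene sets and
# the correlation value are toy inputs, not taken from the ribosome pathway data.
gsA <- c("geneA", "geneB", "geneC", "geneD")
gsB <- c("geneC", "geneD", "geneE")
length(intersect(gsA, gsB)) / min(length(unique(gsA)), length(unique(gsB)))  # 2/3
# t-statistic for a correlation estimate r from n samples, as in GetStatistic():
r <- 0.4; n <- 20
tstat <- r * sqrt((n - 2) / (1 - r^2))   # ~1.85
2 * pt(-abs(tstat), n - 2)               # two-sided p-value, as in GetCorEstimates()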
/src/pcxn_over_estimates01.R
no_license
yeredh/PCxN_benchmark
R
false
false
6,759
r
plot.DMFA <- function (x, axes = c(1, 2), choix = "ind", label = "all", lim.cos2.var = 0., xlim=NULL, ylim=NULL, title = NULL,palette = NULL, new.plot = FALSE, autoLab = c("auto","yes","no"), ...) { res.dmfa = x autoLab <- match.arg(autoLab,c("auto","yes","no")) if (autoLab=="yes") autoLab=TRUE if (autoLab=="no") autoLab=FALSE class(res.dmfa) <- c("PCA", "list ") if (choix == "ind"){ if(is.null(title)) titre = "Individuals factor map (PCA)" else titre = title plot.PCA(res.dmfa, habillage = 1, axes = axes, label = label, xlim = xlim, ylim = ylim, autoLab = autoLab, title = titre,...) } if (choix == "quali") { if (length(res.dmfa$call$quali.sup$modalite) == 1) stop("There is no supplementary qualitative variable") lev = levels(res.dmfa$call$X[, 1]) ng = length(lev) nb.quali = (length(res.dmfa$call$quali.sup$modalite) - 1)/2 xlim = 1.1 * c(min(res.dmfa$quali.sup$coord[, axes[1]]), max(res.dmfa$quali.sup$coord[, axes[1]])) ylim = 1.1 * c(min(res.dmfa$quali.sup$coord[, axes[2]]), max(res.dmfa$quali.sup$coord[, axes[2]])) lab.x <- paste("Dim ", axes[1], " (", signif(res.dmfa$eig[axes[1], 2], 4), " %)", sep = "") lab.y <- paste("Dim ", axes[2], " (", signif(res.dmfa$eig[axes[2], 2], 4), " %)", sep = "") if(is.null(title)) titre = "Qualitative representation" else titre = title if ((new.plot)&!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new() if (is.null(palette)) palette(c("black", "red", "green3", "blue", "cyan", "magenta","darkgray", "darkgoldenrod", "darkgreen", "violet","turquoise", "orange", "lightpink", "lavender", "yellow","lightgreen", "lightgrey", "lightblue", "darkkhaki","darkmagenta", "darkolivegreen", "lightcyan", "darkorange","darkorchid", "darkred", "darksalmon", "darkseagreen","darkslateblue", "darkslategray", "darkslategrey","darkturquoise", "darkviolet", "lightgray", "lightsalmon","lightyellow", "maroon")) plot(0, 0, main = titre, xlab = lab.x, ylab = lab.y, xlim = xlim, ylim = ylim, col = "white", asp = 1, ...) abline(v = 0, lty = 2,...) abline(h = 0, lty = 2,...) for (i in 1:sum(res.dmfa$call$quali.sup$modalite[2:(1 + nb.quali)])) { points(res.dmfa$quali.sup$coord[i, axes[1]], res.dmfa$quali.sup$coord[i, axes[2]], pch = 15,...) text(res.dmfa$quali.sup$coord[i, axes[1]], res.dmfa$quali.sup$coord[i, axes[2]], rownames(res.dmfa$quali.sup$coord)[i], pos = 3,...) for (j in 1:ng) { points(res.dmfa$quali.sup$coord[sum(res.dmfa$call$quali.sup$modalite[2:(1 + nb.quali)]) + ng * (i - 1) + j, axes[1]], res.dmfa$quali.sup$coord[sum(res.dmfa$call$quali.sup$modalite[2:(1 + nb.quali)]) + ng * (i - 1) + j, axes[2]], col = j + 1, pch = 20,...) lines(c(res.dmfa$quali.sup$coord[i, axes[1]], res.dmfa$quali.sup$coord[sum(res.dmfa$call$quali.sup$modalite[2:(1 + nb.quali)]) + ng * (i - 1) + j, axes[1]]), c(res.dmfa$quali.sup$coord[i, axes[2]], res.dmfa$quali.sup$coord[sum(res.dmfa$call$quali.sup$modalite[2:(1 + nb.quali)]) + ng * (i - 1) + j, axes[2]]), col = j + 1,...) } } legend("topleft", legend = rownames(res.dmfa$group$coord), text.col = 2:(1 + ng), cex = 0.8, bg = "white") } if (choix == "var") { lev = levels(res.dmfa$call$X[, 1]) ng = length(lev) if(is.null(title)) titre = "Variables factor map (PCA)" else titre = title plot.PCA(res.dmfa, choix = "var", axes = axes, col.var = ng + 1, lim.cos2.var = lim.cos2.var, label = label, autoLab = autoLab, title = titre,...) 
for (j in 1:ng) { cor.partiel = res.dmfa$var.partiel[[j]][, axes] cor.cos2 = res.dmfa$cor.dim.gr[[j]][axes[1], axes[2]] for (v in 1:nrow(cor.partiel)) { qualite = (cor.partiel[v, 1]^2 + cor.partiel[v, 2]^2)/sqrt(cor.partiel[v, 1]^2 + cor.partiel[v, 2]^2 + 2 * cos(cor.cos2) * (cor.partiel[v, 1] * cor.partiel[v, 2])) arrows(0, 0, cor.partiel[v, 1], cor.partiel[v, 2], length = 0.1 * qualite, angle = 15, code = 2, col = j,...) if (abs(cor.partiel[v, 1]) > abs(cor.partiel[v, 2])) { if (cor.partiel[v, 1] >= 0) pos <- 4 else pos <- 2 } else { if (cor.partiel[v, 2] >= 0) pos <- 3 else pos <- 1 } if (label == "all") text(cor.partiel[v, 1], y = cor.partiel[v, 2], labels = rownames(cor.partiel)[v], pos = pos, col = j,...) } } legend("bottomleft", legend = c(lev, "var"), text.col = 1:(ng + 1), cex = 0.8, bg = "white") Xc = res.dmfa$Xc for (j in 1:ng) { auxil = res.dmfa$ind$coord[res.dmfa$call$X[, 1] == lev[j], axes] prefpls(cbind(auxil, Xc[[j]][rownames(auxil), ]), title = paste("Biplot between axes ", axes[1], " and ", axes[2], " for group ", lev[j], sep = "")) } } if (choix == "group") { if ((new.plot)&!nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY"))) dev.new() if (is.null(palette)) palette(c("black", "red", "green3", "blue", "cyan", "magenta","darkgray", "darkgoldenrod", "darkgreen", "violet","turquoise", "orange", "lightpink", "lavender", "yellow","lightgreen", "lightgrey", "lightblue", "darkkhaki","darkmagenta", "darkolivegreen", "lightcyan", "darkorange","darkorchid", "darkred", "darksalmon", "darkseagreen","darkslateblue", "darkslategray", "darkslategrey","darkturquoise", "darkviolet", "lightgray", "lightsalmon","lightyellow", "maroon")) coord.gr = res.dmfa$group$coord.n lev = levels(res.dmfa$call$X[, 1]) ng = length(lev) xlim = 1.1 * c(0, max(1, max(coord.gr[, axes[1]]))) ylim = 1.1 * c(0, max(1, max(coord.gr[, axes[2]]))) if(is.null(title)) titre = "Projection of the groups" else titre = title plot(0, 0, xlab = paste("Dim", axes[1]), ylab = paste("Dim", axes[2]), xlim = xlim, ylim = ylim, col = "white", asp = 1, main = titre,...) for (j in 1:ng) { points(coord.gr[j, axes[1]], coord.gr[j, axes[2]], col = j, pch = 15,...) if (label == "all") text(coord.gr[j, axes[1]], coord.gr[j, axes[2]], labels = lev[j], pos = 3,...) } abline(v = 0, lty = 2,...) abline(h = 0, lty = 2,...) } }
/FactoMineR/R/plot.DMFA.R
no_license
Etjean/M1stuff
R
false
false
7,202
r
library(ggplot2)
install.packages('XLConnect', dependencies = T)
library(XLConnect)
install.packages('tidyr', dependencies = T)
library(tidyr)

jobRaw <- readWorksheet(loadWorkbook("indicator_t above 15 employ.xlsx"),sheet=1)
colnames(jobRaw)[1] <- "country"

# Treating
job = gather(jobRaw, "year", "percentage", 2:18)
str(substr(job$year,2,5))
job$year <- as.numeric(substr(job$year,2,5))
str(job$year)
str(job$percentage)

qplot(x=year, y=percentage, data=job, color=country, geom="line")
# That's a complete mess!

# Let's analyze this.
by(job$percentage, job$country, summary)

# Country with highest job percentage.
countryHigh = job[which.max(job$percentage),1]
# Country with lowest job percentage.
countryLow = job[which.min(job$percentage),1]

# Reducing countries to a manageable number.
jobCountries = subset(job, country %in% c("Brazil","Argentina","Germany", countryHigh, countryLow))

# Line
qplot(x=year, y=percentage, data=jobCountries, color=country, geom="line")
ggsave('jobContriesLinePercentageYear.png')

# Frequency
qplot(x = percentage, y = ..count.., data = jobCountries, binwidth = 11, geom="freqpoly", color=country)
ggsave('jobContriesFreqPercentageContry.png')
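# --- Illustrative addition (not part of the original script) ---
# The same line chart written with ggplot() rather than qplot(); qplot() is deprecated
# in current ggplot2 releases, so this is one possible drop-in equivalent. It assumes
# jobCountries exists exactly as created above.
ggplot(jobCountries, aes(x = year, y = percentage, color = country)) +
  geom_line()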
/lesson3/dataFromGapminder.R
no_license
pcontop/EDA-Udacity
R
false
false
1,222
r
library(glmnet)

# read the training set; column 1 is the response, columns 4 onward are the predictors
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])

# 10-fold cross-validated elastic net (alpha = 0.03, i.e. close to ridge) with MAE loss
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.03,family="gaussian",standardize=FALSE)

# append the fitted glmnet path to a text log
sink('./bone_016.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
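# --- Illustrative addition (not part of the original script) ---
# A few standard ways to inspect the cross-validated fit above; only stock glmnet
# accessors are used, and the choice of lambda rule (lambda.min / lambda.1se) is a
# suggestion rather than something the original analysis specifies.
plot(glm)                                       # CV curve: MAE versus log(lambda)
coef(glm, s = "lambda.min")                     # coefficients at the lambda minimizing CV error
head(predict(glm, newx = x, s = "lambda.1se"))  # predictions at the more conservative lambda.1se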
/Model/EN/Classifier/bone/bone_016.R
no_license
esbgkannan/QSMART
R
false
false
346
r
# Yige Wu @WashU Apr 2020 ## make barcode to cell type mapping table for the integrated dataset ## 2020-10-27 added Alla's latest immune cell types # set up libraries and output directory ----------------------------------- ## set working directory dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/" setwd(dir_base) source("./ccRCC_snRNA_analysis/load_pkgs.R") source("./ccRCC_snRNA_analysis/functions.R") source("./ccRCC_snRNA_analysis/variables.R") ## set run id version_tmp <- 1 run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp) ## set output directory dir_out <- paste0(makeOutDir(), run_id, "/") dir.create(dir_out) # input dependencies ------------------------------------------------------ ## input barcode to cluster mapping table from all_integrated_barcode2cluster_df <- fread(input = "./Resources/Analysis_Results/integration/30_aliquot_integration/fetch_data/20200212.v3/30_aliquot_integration.20200212.v3.umap_data.tsv", data.table = F) ## input cluster to cell type mapping table for all clusters in the integrated dataset all_integrated_cluster2celltype_df <- fread(input = "./Resources/snRNA_Processed_Data/Cell_Type_Assignment/Integration_AllClusters/integration.allcluster2celltype.20200213.v3.tsv") ## input Alla's immune cell type assignment ### should be 33004 # immune_barcode2celltype_df <- fread(data.table = F, input = "./Resources/Analysis_Results/annotate_barcode/map_celltype_to_immune_cells/20200626.v1/Barcode2ImmuneCellType.20200626.v1.tsv") immune_barcode2celltype_df <- fread(data.table = F, input = "./Resources/Analysis_Results/annotate_barcode/map_celltype_to_immune_cells_with_patch/20201027.v1/Barcode2ImmuneCellType.20201027.v1.tsv") ## input tumor subclustering cell type assignment: 93277 cells tumor_barcode2celltype_df <- fread(input = "./Resources/Analysis_Results/annotate_barcode/map_barcode_with_manual_tumorsubcluster_id/20200616.v1/Barcode2TumorSubclusterId.20200616.v1.tsv", data.table = F) ## input barcode-to-cell-type table normal_epithelial_barcode2celltype_df <- fread(input = "./Resources/Analysis_Results/annotate_barcode/map_celltype_to_normal_epithelial_cells/20200720.v1/normal_epithelial_reclustered.barcode2celltype.20200720.v1.tsv", data.table = F) ## input patch barcode2cell type table bc2celltype_patch_df <- fread(data.table = F, input = "./Resources/Analysis_Results/annotate_barcode/map_celltype_for_C3L-00088-N/20200904.v1/Barcode2CellType.C3L-00088-N.20200904.v1.tsv") # get barcode2celltype from integrated data ------------------------------- nrow(all_integrated_barcode2cluster_df) ## merge all_integrated_barcode2celltype_df <- merge(x = all_integrated_barcode2cluster_df, y = all_integrated_cluster2celltype_df, by.x = c("ident"), by.y = c("Cluster"), all.x = T) nrow(all_integrated_barcode2celltype_df) ## add individual barcode all_integrated_barcode2celltype_df <- all_integrated_barcode2celltype_df %>% rename(integrated_barcode = barcode) %>% mutate(individual_barcode = str_split_fixed(string = integrated_barcode, pattern = "_", n = 2)[,1]) # format normal epithelial cells ------------------------------------------ normal_epithelial_barcode2celltype_df <- normal_epithelial_barcode2celltype_df %>% mutate(Id_TumorManualCluster = NA) %>% select(orig.ident, individual_barcode, integrated_barcode, Most_Enriched_Cell_Group, Cell_type.shorter, Cell_type.detailed, Most_Enriched_Cell_Type1, Most_Enriched_Cell_Type2, Most_Enriched_Cell_Type3, Most_Enriched_Cell_Type4, Id_TumorManualCluster) # map stromal cells 
------------------------------------------------------- stroma_barcode2celltype_df <- all_integrated_barcode2celltype_df %>% filter(Most_Enriched_Cell_Group == "Stroma") %>% mutate(Cell_type.shorter = Most_Enriched_Cell_Type1) %>% mutate(Cell_type.detailed = Most_Enriched_Cell_Type1) ## see what needs to be corrected table(stroma_barcode2celltype_df$Most_Enriched_Cell_Type1) table(stroma_barcode2celltype_df$Most_Enriched_Cell_Type2) ## make short cell type names for myofibroblasts stroma_barcode2celltype_df$Cell_type.shorter[stroma_barcode2celltype_df$Most_Enriched_Cell_Type2 == "Myofibroblasts"] <- "Myofibroblasts" stroma_barcode2celltype_df$Cell_type.detailed[stroma_barcode2celltype_df$Most_Enriched_Cell_Type2 == "Myofibroblasts"] <- "Myofibroblasts" ## add columns stroma_barcode2celltype_df <- stroma_barcode2celltype_df %>% mutate(Id_TumorManualCluster = NA) %>% select(orig.ident, individual_barcode, integrated_barcode, Most_Enriched_Cell_Group, Cell_type.shorter, Cell_type.detailed, Most_Enriched_Cell_Type1, Most_Enriched_Cell_Type2, Most_Enriched_Cell_Type3, Most_Enriched_Cell_Type4, Id_TumorManualCluster) # map unknown cells ------------------------------------------------------- assigned_integrated_barcodes <- c(immune_barcode2celltype_df$integrated_barcode, tumor_barcode2celltype_df$integrated_barcode, stroma_barcode2celltype_df$integrated_barcode, normal_epithelial_barcode2celltype_df$integrated_barcode) length(assigned_integrated_barcodes) nrow(all_integrated_barcode2celltype_df) unknown_barcode2celltype_df <- all_integrated_barcode2celltype_df %>% filter(!(integrated_barcode %in% assigned_integrated_barcodes)) %>% mutate(Id_TumorManualCluster = NA) %>% mutate(Cell_type.shorter = "Unknown") %>% mutate(Cell_type.detailed = "Unknown") unknown_barcode2celltype_df$Most_Enriched_Cell_Group <- "Unknown" unknown_barcode2celltype_df$Most_Enriched_Cell_Type1 <- "" unknown_barcode2celltype_df$Most_Enriched_Cell_Type2 <- "" unknown_barcode2celltype_df$Most_Enriched_Cell_Type3 <- "" unknown_barcode2celltype_df$Most_Enriched_Cell_Type4 <- "" unknown_barcode2celltype_df <- unknown_barcode2celltype_df %>% select(orig.ident, individual_barcode, integrated_barcode, Most_Enriched_Cell_Group, Cell_type.shorter, Cell_type.detailed, Most_Enriched_Cell_Type1, Most_Enriched_Cell_Type2, Most_Enriched_Cell_Type3, Most_Enriched_Cell_Type4, Id_TumorManualCluster) # merge the immune and tumor cells and other info ------------------------------------ barcode2celltype_df <- rbind(immune_barcode2celltype_df %>% mutate(Cell_group = ifelse(Most_Enriched_Cell_Group == "Immune", "Immune", "Unknown")), tumor_barcode2celltype_df %>% mutate(Cell_group = ifelse(Cell_type.shorter == "Tumor cells", "Tumor cells", "Unknown")), stroma_barcode2celltype_df %>% mutate(Cell_group = "Stroma"), normal_epithelial_barcode2celltype_df %>% mutate(Cell_group = ifelse(Most_Enriched_Cell_Group == "Nephron_Epithelium", "Normal epithelial cells", "Unknown")), unknown_barcode2celltype_df %>% mutate(Cell_group = "Unknown")) nrow(barcode2celltype_df) table(barcode2celltype_df$Cell_type.shorter) # add patch --------------------------------------------------------------- barcode2celltype_df <- rbind(barcode2celltype_df, bc2celltype_patch_df) nrow(barcode2celltype_df) ## 138547 table(barcode2celltype_df$Cell_type.shorter) table(barcode2celltype_df$Cell_type.detailed) # write output ------------------------------------------------------------ write.table(x = barcode2celltype_df, file = paste0(dir_out, 
"31AliquotIntegration.Barcode2CellType.TumorManualCluster.", run_id, ".tsv"), quote = F, sep = "\t", row.names = F)
/annotate_barcode/map_celltype_to_all_cells_with_patch.R
no_license
ding-lab/ccRCC_snRNA_analysis
R
false
false
7,649
r
# prefix the unique seasons in a data frame with "seas_"
get_seasons <- function(df) {
  paste0('seas_', unique(df$seas))
}

# keep only the rows whose point spread (ps) equals .pt_sprd
ps_filter_df <- function(df, .pt_sprd) {
  dplyr::filter(df, ps == .pt_sprd)
}

# floor of the .n-th quantile (default the 98th percentile) of a chosen column
get_n_percentile <- function(df, .ps_col, .n=.98){
  df %>%
    dplyr::pull({{.ps_col}}) %>%
    quantile(., .n) %>%
    floor()
}
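# --- Illustrative usage sketch (not part of the original file) ---
# The toy data frame below is hypothetical; its column names follow what the helpers
# expect (seas, ps) plus an arbitrary numeric column to summarise. dplyr is assumed
# to be attached for the pipe used inside get_n_percentile().
library(dplyr)
toy <- data.frame(seas = c(2019, 2019, 2020), ps = c(-3, 2.5, -3), margin = c(10, 3, 27))
get_seasons(toy)                          # "seas_2019" "seas_2020"
ps_filter_df(toy, -3)                     # rows where the point spread equals -3
get_n_percentile(toy, margin, .n = 0.9)   # floor of the 90th percentile of margin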
/R/utils_helpers.R
permissive
cswaters/bayesPushchartNFL
R
false
false
285
r
#' @export
doPadRows <- function(dataToBePadded, dataThatSuppliesRowCount) {
  #####################
  # This function adds NA rows to the bottom of data.frame dataToBePadded
  # to ensure that it has the same number of observations (rows) as
  # dataThatSuppliesRowCount. The number of columns comes from dataToBePadded.
  # Execution halts if nrow(dataThatSuppliesRowCount) < nrow(dataToBePadded)
  # Returns dataToBePadded with NA rows at the bottom.
  ##
  nRowsToAdd <- nrow(dataThatSuppliesRowCount) - nrow(dataToBePadded)
  if (nRowsToAdd < 0){
    stop(paste("Model data frame has", abs(nRowsToAdd), "fewer rows than target."))
  }
  dfToAppend <- as.data.frame(matrix(NA, ncol=ncol(dataToBePadded), nrow=nRowsToAdd))
  colnames(dfToAppend) <- colnames(dataToBePadded)
  return(rbind(dataToBePadded, dfToAppend))
}

#' @export
padRows <- function(countryAbbrev, df, baseHistorical){
  #####################
  # This function adds NA rows to the bottom of data.frame df to ensure that it has
  # the same number of observations (rows) as the country data set for countryAbbrev.
  # This function is a convenience wrapper function that simply loads
  # the data.frame for countryAbbrev and then calls doPadRows.
  # Returns a modified version of df that includes the padded rows filled with "NA".
  ##
  countryData <- loadData(countryAbbrev=countryAbbrev, baseHistorical=baseHistorical)
  return(doPadRows(dataToBePadded=df, dataThatSuppliesRowCount=countryData))
}

#' @export
columnIndex <- function(data, factor){
  ##############################
  # Returns an integer representing the column index for some data
  # data: the data.frame in which you want to change column names
  # factor: should be a string and one of Year, Y, K, L, Q, X, or U
  ##
  if (factor == "Year"){
    colName <- "iYear"
  } else if (factor == "Y"){
    colName <- "iGDP"
  } else if (factor == "K"){
    colName <- "iK"
  } else if (factor == "L"){
    colName <- "iL"
  } else if (factor == "Q"){
    colName <- "iQ"
  } else if (factor == "X"){
    colName <- "iX"
  } else if (factor == "U"){
    colName <- "iU"
  } else {
    print(paste("Unknown factor:", factor, "in colIndex. Terminating execution."))
    quit()
  }
  # Get the desired column index.
  colIndex <- which(names(data) %in% colName) #Find index of desired column
  return(colIndex)
}

#' @export
replaceColName <- function(data, factor, newName){
  ##############################
  # Replaces a column name with the given string
  # data: the data.frame that you're working with
  # factor: should be a string and one of Year, Y, K, L, Q, X, or U
  # newName: should be a string and the desired new name of the column
  # Returns a data.frame with a new name for one of its factor columns.
  ##
  colIndex <- columnIndex(data=data, factor=factor)
  # colnames(data)[colIndex] <- newName #Change desired column name to newName
  data[,newName] <- data[,colIndex]
  return(data)
}
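# --- Illustrative usage sketch (not part of the original file) ---
# doPadRows() is defined above; the two toy data frames are hypothetical and only
# exist to show the padding behaviour.
short <- data.frame(iYear = 2000:2002, iGDP = c(1.0, 1.1, 1.2))
long  <- data.frame(iYear = 2000:2005, iGDP = rnorm(6))
doPadRows(dataToBePadded = short, dataThatSuppliesRowCount = long)  # 6 rows; the last 3 are NA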
/Packages/EconModels0/R/huh.R
no_license
anhnguyendepocen/MacroGrowth
R
false
false
2,934
r
# Yige Wu @WashU Aug 2020 # set up libraries and output directory ----------------------------------- ## getting the path to the current script thisFile <- function() { cmdArgs <- commandArgs(trailingOnly = FALSE) needle <- "--file=" match <- grep(needle, cmdArgs) if (length(match) > 0) { # Rscript return(normalizePath(sub(needle, "", cmdArgs[match]))) } else { # 'source'd via R console return(normalizePath(sys.frames()[[1]]$ofile)) } } path_this_script <- thisFile() ## set working directory dir_base = "/diskmnt/Projects/ccRCC_scratch/ccRCC_snRNA/" # dir_base = "~/Box/Ding_Lab/Projects_Current/RCC/ccRCC_snRNA/" setwd(dir_base) source("./ccRCC_snRNA_analysis/load_pkgs.R") source("./ccRCC_snRNA_analysis/functions.R") source("./ccRCC_snRNA_analysis/variables.R") library(tibble) ## set run id version_tmp <- 1 run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp) ## set output directory dir_out <- paste0(makeOutDir_katmai(path_this_script), run_id, "/") dir.create(dir_out) # input dependencies ------------------------------------------------------ ## input the integrated data path_rds <- "./Resources/Analysis_Results/integration/30_aliquot_integration/docker_run_integration/20200212.v3/30_aliquot_integration.20200212.v3.RDS" srat <- readRDS(file = path_rds) print("Finish reading RDS file") ## input the barcode-cell-type table barcode2celltype_df <- fread(input = "./Resources/Analysis_Results/annotate_barcode/map_celltype_corrected_by_individual_sample_inspection/20200811.v1/31Aliquot.Barcode2CellType.20200811.v1.tsv", data.table = F) barcode2celltype_df <- as.data.frame(barcode2celltype_df) cat("finish reading the barcode-to-cell type table!\n") ## specify the cell group cellgroup2process <- "Stroma" # subset seurat object ---------------------------------------------------- BC <- srat@meta.data %>% rownames #### get original barcode srat@meta.data$original_barcode <- BC %>% strsplit("_") %>% lapply("[[",1) %>% unlist srat@meta.data <- srat@meta.data %>% rownames_to_column srat@meta.data <- merge(srat@meta.data,barcode2celltype_df, by.x=c("orig.ident","original_barcode"), by.y=c("orig.ident","individual_barcode")) srat@meta.data <- column_to_rownames(srat@meta.data,"rowname")[BC,] Idents(srat) <- "Cell_group.shorter" ## subset data cat("Start subsetting\n") srat.new <- subset(srat, idents = "Stroma") cat(dim(srat.new)) rm(srat) ## set default assay DefaultAssay(object = srat.new) <- "RNA" cat("###########################################\n") # find variable genes ----------------------------------------------------- cat("Start running FindVariableFeatures\n") srat.new <- FindVariableFeatures(srat.new, selection.method = "vst", nfeatures = 3000, verbose = T) cat("###########################################\n") # write output ------------------------------------------------------------ file2write <- paste0(dir_out, "findvariablefeatures.", "cellgroup.", cellgroup2process, ".", run_id, ".tsv") var_features_df <- data.frame(gene = srat.new@assays$RNA@var.features) write.table(var_features_df, file = file2write, quote = F, sep = "\t", row.names = F) cat("Finished saving the output\n") cat("###########################################\n")
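# --- Illustrative addition (not part of the original script) ---
# The var.features slot written out above can also be read through Seurat's accessor,
# which is usually more robust across Seurat versions; srat.new is the object built above.
head(VariableFeatures(srat.new), 10)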
/findvariablefeatures/findvariablefeatures_cellgroup_stroma.R
no_license
ding-lab/ccRCC_snRNA_analysis
R
false
false
3,260
r
library(tswge)

# Randomly generating signal plus noise
gen.sigplusnoise.wge(100, b0 = 2, b1 = 4, vara = 100)
gen.sigplusnoise.wge(100, b0 = 0, b1 = 0, vara = 10)
gen.sigplusnoise.wge(100, b0 = 0, b1 = 0, phi = 0.975, vara = 10)

# periodic signal
gen.sigplusnoise.wge(100, coef = c(5,0), freq = c(.1,0), psi = c(0,0.25), phi = 0.975, vara = 10)

# AR(4) from the slides
parms = mult.wge(c(.975), c(.2, -.45), c(-.53))
parms$model.coef
gen.arma.wge(160, phi = parms$model.coef, vara = 1)

# (1-B)X_t = a_t
x = gen.arima.wge(200, phi = 0, var = 1, d = 1, sn = 31)
acf(x)

# The following command differences the data in x
y = artrans.wge(x, phi.tr = 1)
# This simply means that y(i) = x(i) - x(i-1)
# y has n-1 observations because x(1) has no x(0) before it.

# example
x = c(1,3,6,10,25)
y = artrans.wge(x, phi.tr = 1)
y # shows the 4 differences

# ARIMA with d = 2
data = gen.arima.wge(200, phi = c(1.5,-.8), d = 2, theta = c(-.8), sn = 31)
FirstDif = artrans.wge(data, phi.tr = 1)
SecondDif = artrans.wge(FirstDif, phi.tr = 1)
aic5.wge(SecondDif)

### ARUMA Models (seasonal)
# (1-B^4)X_t = a_t
x1 = gen.aruma.wge(n = 80, s = 4, sn = 6)
plotts.sample.wge(x1)

# (1-B^4) with ARMA(2,1)
x2 = gen.aruma.wge(n = 80, phi = c(1,-.6), s = 4, theta = -.5, sn = 6, d = 1)
plotts.sample.wge(x2)
factor.wge(phi = c(1,-.6))
factor.wge(phi = -.5)

# (1-B^12) with ARMA(2,1)
x3 = gen.aruma.wge(n = 180, phi = c(1,-.6), s = 12, theta = -.5, sn = 6)
plotts.sample.wge(x3, lag.max = 48)

x2 = gen.aruma.wge(n = 180, phi = c(.6,-.94), s = 7, theta = -.3, sn = 19)
plotts.sample.wge(x2)
factor.wge(phi = c(.6,-.94))
factor.wge(phi = -.3)

### Stationarize the quarterly data (1-B^4)X_t = a_t
x = gen.aruma.wge(n = 80, s = 4, sn = 81)
dif = artrans.wge(x, c(0,0,0,1)) # Take out the (1-B^4)
aic5.wge(dif)

## (1-.4B - .6B^2 + .74B^3)(1-B^12)X_t = (1 + .7B)a_t
x = gen.aruma.wge(n = 80, phi = c(.4,.6,-.74), theta = -.7, s = 12, sn = 31)
dif = artrans.wge(x, c(rep(0,11),1)) # take out the (1-B^12)
aic5.wge(dif)

x = gen.aruma.wge(n = 500, phi = c(.6,-.8), theta = c(.3,-.7), s = 12, sn = 37)
dif = artrans.wge(x, c(rep(0,11),1)) # take out the (1-B^12)
dif = artrans.wge(dif, c(rep(0,11),1)) # take out the (1-B^12)
aic5.wge(dif)

# factor ARUMA models
# factor.wge(phi = c(0,0,0,1)) or factor.wge(phi = c(rep(0,11),1)) for (1-B^12)
# (.2B - .4B^2 -.49B^3 -B^12 -.2B^13 + .4B^14 + .49B^15)
factor.wge(c(-.2, .4, .49, rep(0,5), 1, .2, -.4, -.49))
factor.wge(c(-.5, .2, 0, -1, .5, -.2))
factor.wge(c(-.3, 1.2, .4, 0, .5, rep(0,6), 1, .3, -1.2, -.4))
factor.wge(c(-.3, 1.2, .4, 0, .5, rep(0,6), 1, .3, -1.2, -.4))

# Pre-live
energy = read.csv("~/Documents/datascience/DS6373/Dataset/Appliance_Energy/energydata_complete.csv", header = TRUE)
plotts.wge(energy$Appliances)
aic5.wge(energy$Appliances)
factor.wge(c(rep(0,11),1))
factor.wge(c(0.6996, 0.2599, 0.0079, -0.0646, 0.1381, -0.0953, 0.0235, -0.0969, 0.1770, -0.1191, 0.1030, 0.7754, -0.4590, -0.4099, 0.0501))
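# --- Illustrative check (not part of the original script) ---
# The first difference produced by artrans.wge(x, phi.tr = 1) is the same transform as
# base R's diff(), which gives a quick sanity check on the small example above.
x_check <- c(1, 3, 6, 10, 25)
diff(x_check)   # 2 3 4 15 -- matches the four differences returned by artrans.wge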
/Unit 6/unit6.r
no_license
nwaoguIkenna/DS6373_Time_Series
R
false
false
2,929
r
# This script returns the plots shown in Figure 1. # # Author: KLEY ############################################################################### source("../../R/indecomposablePartitions.R") source("../../R/comp_bound.R") ## Case 1: eps_t ~ N(0,1) kappa_s_1 <- c(0, 1, 0, 0, 0, 0, 0, 0) ## Case 2: eps_t ~ nu^{-1/2} (nu - 2)^{1/2} t_nu nu <- 14 kappa_s_2 <- c(0, 1, 0, 6 / (nu - 4), 0, 240 / ((nu - 4) * (nu - 6)), 0, 5040 * (5*nu - 22) / ((nu - 4)^2 * (nu - 6) * (nu - 8))) nu <- 9 kappa_s_3 <- c(0, 1, 0, 6 / (nu - 4), 0, 240 / ((nu - 4) * (nu - 6)), 0, 5040 * (5*nu - 22) / ((nu - 4)^2 * (nu - 6) * (nu - 8))) # Fig 1 k <- 0 a <- 0.7 n_s <- c(25, 500, 2000) b <- comp_bound_AR1(n_s, a, k, kappa_s_1, m_incr = 30, m_max = 30) for (i in 1:3) { pdf(paste("bound_fct_N_a07_n", n_s[i], ".pdf", sep = ""), width = 6, height = 4.8) par(mar = c(4, 5, 6, 1) + 0.1) plot(b$m_s, b$results[[i]]$bound_s, type = "l", cex.main = 2, cex.axis = 1.5, ylim = c(0, max(b$results[[i]]$bound_s)), xlab = "", ylab = "", main = paste("n =", n_s[i])) mtext(substitute(paste(B[n](m^symbol("\052"))," = ",b), list(b = round(b$results[[i]]$bound_star, 3))), side = 3, line = 0, outer = FALSE, cex = 1.5) abline(v = b$results[[i]]$m_star, lty = 2) abline(h = b$results[[i]]$bound_star, col = "gray") points(b$m_s, b$results[[i]]$bound_s) dev.off() } # Fig 2 k <- 0 a <- 0.7 n_s <- c(25, 500, 2000) b <- comp_bound_AR1(n_s, a, k, kappa_s_3, m_incr = 30, m_max = 30) for (i in 1:3) { pdf(paste("bound_fct_t9_a07_n", n_s[i], ".pdf", sep = ""), width = 6, height = 4) # plot(b$m_s, b$results[[i]]$bound_s, type = "l", # ylim = c(0, max(b$results[[i]]$bound_s)), # xlab = expression(m), ylab = "value of the bound", # main = paste("n =", n_s[i], "B_n(m*) =", round(b$results[[i]]$bound_star, 3))) # abline(v = b$results[[i]]$m_star, lty = 2) # abline(h = b$results[[i]]$bound_star, col = "gray") # points(b$m_s, b$results[[i]]$bound_s) par(mar = c(4, 5, 2, 1) + 0.1) plot(b$m_s, b$results[[i]]$bound_s, type = "l", cex.main = 2, cex.axis = 1.5, ylim = c(0, max(b$results[[i]]$bound_s)), xlab = "", ylab = "", main = "") if (i == 2) { mtext(expression(m), side = 1, line = 3, cex = 2) } mtext(substitute(paste(B[n](m^symbol("\052"))," = ",b), list(b = round(b$results[[i]]$bound_star, 3))), side = 3, line = 0, outer = FALSE, cex = 1.5) abline(v = b$results[[i]]$m_star, lty = 2) abline(h = b$results[[i]]$bound_star, col = "gray") points(b$m_s, b$results[[i]]$bound_s) dev.off() } # Fig 3 k <- 0 a <- 0.7 n_s <- c(25, 500, 2000) b <- comp_bound_AR1(n_s, a, k, kappa_s_2, m_incr = 30, m_max = 30) for (i in 1:3) { pdf(paste("bound_fct_t14_a07_n", n_s[i], ".pdf", sep = ""), width = 6, height = 4) # plot(b$m_s, b$results[[i]]$bound_s, type = "l", # ylim = c(0, max(b$results[[i]]$bound_s)), # xlab = expression(m), ylab = "value of the bound", # main = paste("n =", n_s[i], "B_n(m*) =", round(b$results[[i]]$bound_star, 3))) # abline(v = b$results[[i]]$m_star, lty = 2) # abline(h = b$results[[i]]$bound_star, col = "gray") # points(b$m_s, b$results[[i]]$bound_s) par(mar = c(4, 5, 2, 1) + 0.1) plot(b$m_s, b$results[[i]]$bound_s, type = "l", cex.main = 2, cex.axis = 1.5, ylim = c(0, max(b$results[[i]]$bound_s)), xlab = "", ylab = "", main = "") if (i == 1) { mtext(expression(B[n](m)), side = 2, line = 3, cex = 2) } mtext(substitute(paste(B[n](m^symbol("\052"))," = ",b), list(b = round(b$results[[i]]$bound_star, 3))), side = 3, line = 0, outer = FALSE, cex = 1.5) abline(v = b$results[[i]]$m_star, lty = 2) abline(h = b$results[[i]]$bound_star, col = "gray") points(b$m_s, 
b$results[[i]]$bound_s) dev.off() }
/sim/3_tables_figures/figure_1.R
no_license
tobiaskley/ccf_bounds_replication_package
R
false
false
4,106
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/score_forecasts.R \name{score_forecasts} \alias{score_forecasts} \title{Score forecasts} \usage{ score_forecasts( forecasts, truth, return_format = "wide", metrics = c("abs_error", "wis", "wis_components", "interval_coverage", "quantile_coverage"), use_median_as_point = FALSE ) } \arguments{ \item{forecasts}{required data.frame with forecasts in the format returned by \code{\link[=load_forecasts]{load_forecasts()}}} \item{truth}{required data.frame with forecasts in the format returned by \code{\link[=load_truth]{load_truth()}}} \item{return_format}{string: \code{"long"} returns long format with a column for \code{"score_name"} and a column for \code{"score_value"}; \code{"wide"} returns wide format with a separate column for each score. Defaults to \code{"wide"}.} \item{metrics}{character vector of the metrics to be returned with options \code{"abs_error"}, \code{"wis"}, \code{"wis_components"},\code{"interval_coverage"}, and \code{"quantile_coverage"}} \item{use_median_as_point}{logical: \code{TRUE} uses the median as the point forecast when scoring; \code{FALSE} uses the point forecasts from the data when scoring. Defaults to \code{FALSE}} } \value{ data.frame with scores. The result will have some columns that define the observation, namely, \code{model}, \code{forecast_date}, \code{location}, \code{horizon}, \code{temporal_resolution}, \code{target_variable}, \code{horizon}, and \code{target_end_date}. Other columns will contain scores dependent on metrics parameter: \itemize{ \item \code{true_value} is the observed truth at that \code{location} and \code{target_end_date} (always returned) \item \code{abs_error} is the absolute error based on median estimate if use_median_as_point is TRUE, and absolute error based on point forecast if use_median_as_point is FALSE \item \code{wis} is the weighted interval score \item \code{sharpness} the component of WIS made up of interval widths \item \code{overprediction} the component of WIS made up of overprediction of intervals \item \code{underprediction} the component of WIS made up of underprediction of intervals \item \code{coverage_X} are prediction interval coverage at alpha level X \item \code{quantile_coverage_0.X} are one-sided quantile coverage at quantile X } If return_format is \code{"long"}, also contains columns score_name and score_value where \code{score_name} is the type of score calculated and \code{score_value} has the numeric value of the score. If return_format is \code{"wide"}, each calculated score is in its own column. } \description{ Score forecasts } \examples{ forecasts <- load_latest_forecasts( models = c("COVIDhub-ensemble", "UMass-MechBayes"), last_forecast_date = "2020-12-14", forecast_date_window_size = 7, locations = c("US"), targets = paste(1:4, "wk ahead inc death"), source = "zoltar" ) truth <- load_truth("JHU", target_variable = "inc death", locations = "US") score_forecasts(forecasts, truth) forecasts <- load_latest_forecasts( models = c("ILM-EKF"), hub = c("ECDC", "US"), last_forecast_date = "2021-03-08", forecast_date_window_size = 0, locations = c("GB"), targets = paste(1:4, "wk ahead inc death"), source = "zoltar" ) truth <- load_truth("JHU", hub = c("ECDC", "US"), target_variable = "inc death", locations = "GB" ) score_forecasts(forecasts, truth) } \references{ Bracher J, Ray EL, Gneiting T, Reich NG. (2020) Evaluating epidemic forecasts in an interval format. arXiv:2005.12881. \url{https://arxiv.org/abs/2005.12881}. }
/man/score_forecasts.Rd
no_license
Serena-Wang/covidHubUtils
R
false
true
3,595
rd
########
## MLE #
########

## Jiaming Mao (jiaming.mao@gmail.com)
## https://jiamingmao.github.io/data-analysis/

library(bbmle)
rm(list=ls())
set.seed(5)

## generate data
n <- 500
e <- rnorm(n)
x1 <- runif(n)
x2 <- 0.5*x1 + 0.5*runif(n)
y <- 1 - 2.5*x1 + 5*x2 + e

# define the negative log likelihood function
nll <- function(theta){
  beta0 <- theta[1]
  beta1 <- theta[2]
  beta2 <- theta[3]
  sigma <- theta[4]
  N <- length(y)
  z <- (y - beta0 - beta1*x1 - beta2*x2)/sigma
  logL <- -1*N*log(sigma) - 0.5*sum(z^2)
  return(-logL)
}

# perform optimization
mlefit <- optim(c(0,0,0,1), nll)
mlefit$par

# alternatively, use the mle2 function from the bbmle package
parnames(nll) <- c("beta0","beta1","beta2","sigma")
result <- mle2(nll, start=c(beta0=0, beta1=0, beta2=0, sigma=1))
summary(result)
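A quick sanity check that could be appended here (my addition, not part of the original script): with Gaussian errors the ML estimates of the regression coefficients should essentially reproduce ordinary least squares, so comparing against lm() is a cheap correctness test.

# added sketch: compare the ML estimates with OLS from lm()
ols <- lm(y ~ x1 + x2)
coef(ols)                      # should be close to mlefit$par[1:3]
sqrt(mean(residuals(ols)^2))   # MLE of sigma (no df correction); compare with mlefit$par[4]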
/Materials/Estimation Methods/codes/R/mle.R
permissive
snowdj/data-analysis-2
R
false
false
797
r
#Formatting pitching database

#Install/load in packages
library(baseballr)
library(tidyverse)
library(rvest)
library(stringr)
library(Hmisc)

#load in pitching data from "Making stat database.R"
#Set to your own directory
#D3Pitch <- D3Pitch1920

#Add column for WHIP
D3Pitch1 <- mutate(D3Pitch, WHIP = (BB+H)/(IP))

#Win-Loss column
D3Pitch1$WL = paste(D3Pitch1$W, D3Pitch1$L, sep="-")

#Opponents batting average
D3Pitch1 <- mutate(D3Pitch1, POAB = BF-(BB+HB+SHA+SFA+IBB))
D3Pitch1 <- mutate(D3Pitch1, BAA = H/POAB)
D3Pitch1$P.OAB <- NULL

#Now to calculate FIP
#Since FIP depends on the run scoring environment in each year, we have to calculate the years separately
#Break the database in two for the FIP calculation
D3Pitch19 <- D3Pitch1 %>% filter(year == "2019")
D3Pitch20 <- D3Pitch1 %>% filter(year == "2020")

#Run each database through the FIP calculations and then rbind them

#FIP for 2019
D3Pitch19 <- D3Pitch19 %>%
  mutate(addl_outs = ((IP %% 1)*10),
         ip_outs = ((floor(IP)*3 + addl_outs)),
         Ip1 = ip_outs / 3)

#Get subset for BF >= 25
BF2519 <- D3Pitch19 %>% filter(BF >= 25)

#League ERA
lgER = sum(BF2519$ER)
lgIp1 = sum(BF2519$Ip1)
lgERA = (lgER/lgIp1)*9

#League HR
lgHR = sum(BF2519$HR)
lgBB = sum(BF2519$BB)
lgHBP = sum(BF2519$HB)
lgK = sum(BF2519$SO)

#Actual Constant
FIPC19 = lgERA-(((13*lgHR)+(3*(lgBB+lgHBP))-(2*lgK))/lgIp1)

#Calculate FIP per player
D3Pitch19 <- D3Pitch19 %>%
  mutate(FIP = ifelse(BF >= 25, (((13*HR.A)+(3*(IBB+HB))-(2*SO))/Ip1) + FIPC19, NA))

#FIP for 2020
D3Pitch20 <- D3Pitch20 %>%
  mutate(addl_outs = ((IP %% 1)*10),
         ip_outs = ((floor(IP)*3 + addl_outs)),
         Ip1 = ip_outs / 3)

#Get subset for BF >= 25
BF2520 <- D3Pitch20 %>% filter(BF >= 25)

#League ERA
lgER = sum(BF2520$ER)
lgIp1 = sum(BF2520$Ip1)
lgERA = (lgER/lgIp1)*9

#League HR
lgHR = sum(BF2520$HR)
lgBB = sum(BF2520$BB)
lgHBP = sum(BF2520$HB)
lgK = sum(BF2520$SO)

#Actual Constant
FIPC20 = lgERA-(((13*lgHR)+(3*(lgBB+lgHBP))-(2*lgK))/lgIp1)

#Calculate FIP per player
D3Pitch20 <- D3Pitch20 %>%
  mutate(FIP1 = ifelse(BF >= 25, (((13*HR.A)+(3*(IBB+HB))-(2*SO))/Ip1) + FIPC20, NA)) %>%
  mutate(FIP = ifelse(FIP1 <= 0, 0, FIP1))
D3Pitch20$FIP1 <- NULL

#combine both databases with their respective FIP calculations
FipPitch20 <- rbind(D3Pitch19, D3Pitch20)

#Delete the 4 unnamed players, they seem to be copies of other, already accounted for players
FipPitch20 <- FipPitch20 %>% arrange(Player)
FipPitch20 <- FipPitch20[-c(1:4),]

#Fix CSE column
FipPitch20$school = str_replace(FipPitch20$school, "0", "St. Elizabeth")
FipPitch20$conference = str_replace(FipPitch20$conference, "0", "CSAC")

#Replace N/A school years with ""
FipPitch20$Yr <- str_replace(FipPitch20$Yr, "N/A", "")

#Clean up unused columns
FipPitch21 <- FipPitch20 %>%
  select(Player, school, Yr, ERA, WHIP, WL, SV, IP, H, R, ER, BB, SO, X2B.A, X3B.A,
         HR.A, BF, BAA, WP, HB, GO, FO, IBB, FIP, conference, year)

#Round columns correctly
FipPitch21$WHIP <- round(FipPitch21$WHIP, digits = 2)
FipPitch21$BAA <- round(FipPitch21$BAA, digits = 3)
FipPitch21$FIP <- round(FipPitch21$FIP, digits = 2)

#Name columns correctly
colnames(FipPitch21)[colnames(FipPitch21) == "school"] <- "School"
colnames(FipPitch21)[colnames(FipPitch21) == "X2B.A"] <- "x2B"
colnames(FipPitch21)[colnames(FipPitch21) == "X3B.A"] <- "x3B"
colnames(FipPitch21)[colnames(FipPitch21) == "HR.A"] <- "HR"
colnames(FipPitch21)[colnames(FipPitch21) == "HB"] <- "HBP"
colnames(FipPitch21)[colnames(FipPitch21) == "conference"] <- "Conference"
colnames(FipPitch21)[colnames(FipPitch21) == "year"] <- "Year"
colnames(FipPitch21)[colnames(FipPitch21) == "Yr"] <- "Class"

FinalPitchAll1 <- FipPitch21 %>% arrange(desc(IP))
#This is the pitching stats table as seen on the application
/Player Stats/Cleaning Pitching Database.R
no_license
dantebozzuti/D3-Baseball-Graphics
R
false
false
4,308
r
\name{world.stretch.time}
\alias{world.stretch.time}
\docType{package}
\title{
Change the tempo of the speech signal
}
\description{
\code{world.stretch.time} changes the tempo of the speech without changing the F0.
}
\usage{
world.stretch.time(worldobj, rate)
}
\arguments{
  \item{worldobj}{A list calculated by \code{world.analysis()}.}
  \item{rate}{A float value for changing the tempo.}
}
\details{
\code{world.stretch.time()} stretches/compresses the speech signal.
The argument \code{rate} is the tempo change factor: if \code{rate=2.0}, the speech signal
becomes two times as quick as the original signal.
The return value is a list of parameters analyzed by World.
}
\seealso{
\code{\link{world.analysis}},
\code{\link{world.synthesis}},
\code{\link{world.stretch.time}},
\code{\link{world.stretch.spectrum}}
}
\examples{
\dontrun{
}
}
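A usage sketch that could fill the empty \examples block above (my addition; the exact signatures of world.analysis() and world.synthesis() are assumptions inferred from the \seealso list, not verified against the package):

\dontrun{
  ## assumed workflow: analyze a waveform, stretch the tempo, resynthesize
  w  <- world.analysis(x, fs)        # x: waveform, fs: sampling rate (assumed signature)
  w2 <- world.stretch.time(w, 2.0)   # twice as fast, F0 unchanged
  y  <- world.synthesis(w2)          # resynthesis (assumed signature)
}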
/man/world.stretch.time.Rd
no_license
akinori-ito/WorldR
R
false
false
828
rd
df <- read.csv('datalog_thursdays.csv')

#Objectives
#Filter Inverter,
#string combiner box,
#data,
#time,
#series of dates and times

my.date <- as.Date(df$DATE, format = "%d-%b-%y")
sorted <- order(my.date, df$HOUR, df$INVERTER)

df.date <- df$DATE
df.date

df2 <- df[sorted, ]
df1 <- df[sorted, ]
/Oscar/python/datalog_thursdays.R
no_license
docligot/web-bootcamp-1
R
false
false
307
r
##monty hall problem
nrun <- 10000 #how many times to run sim
n_door_start = 3 #how many doors do we have to begin with

montyhallfx <- function(n_door_start = 3){ #how many doors to start w/ - default 3
  door_vector <- vector(length=n_door_start)
  door_vector[1] <- TRUE #create a 'winning' door, place it in slot 1 for convenience
  door_vector <- sample(door_vector) #shuffle doors around randomly
  door_pick <- sample(length(door_vector),1) #pick a position for your door choice
  false_doors <- which(!door_vector) #identify the false doors' locations
  true_door <- which(door_vector)
  is_original_pick_correct <- door_pick==true_door
  is_switch_pick_correct <- door_pick!=true_door
  #return door pick answer (T or F) and switch pick answer (T or F) as named DT row
  return(cbind(is_original_pick_correct,is_switch_pick_correct))
}

#create answer DT for n runs
dt <- data.frame("original_pick_correct"=vector(length=nrun),
                 "switching_is_correct"=vector(length=nrun))

#run n times
for(i in 1:nrun){
  dt[i,] <- rbind(montyhallfx(n_door_start))
}

#stats on dtable
#first: proof it works -- there is a correct answer in every row
which(dt$original_pick_correct) == which(!dt$switching_is_correct) #will resolve to true
#translation: if the original door is correct, then the switch option will be incorrect
#and implicitly, vice versa

hist(as.numeric(dt[,1]),
     main=paste0("Was the original door the winner? \n In an empirical simulation of ",
                 n_door_start," initial doors"),
     xlab='No<-..................................................................->Yes',
     ylab=paste0('Frequency (x/',nrun,')'))

#check against hypothesis!
expected_odds_original <- 1/n_door_start #the expected odds of the original door being right
outcome_original <- length(which(dt$original_pick_correct))/nrun
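A natural follow-up one could add here (my addition): compare the empirical win rate when switching against the value implied by the simulation's own definition, (n_door_start - 1)/n_door_start.

#added sketch: empirical vs. implied win rate when switching
expected_odds_switch <- (n_door_start - 1)/n_door_start
outcome_switch <- mean(dt$switching_is_correct)
c(expected = expected_odds_switch, observed = outcome_switch)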
/montyhall.R
no_license
kaabre/montyhall
R
false
false
1,909
r
\name{l0approximatorR-package}
\alias{l0approximatorR-package}
\alias{l0approximatorR}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length of about
one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.

Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
/man/l0approximatorR-package.Rd
no_license
boooooogey/l0approximatorR
R
false
false
845
rd
library(tidyverse)
##many bits referenced from gmoney's moth code. ty, gmoney.

datadir='/uru/Data/Nanopore/projects/nivar/paperfigs/annotation'

gff=tibble(
    liftcer=file.path(datadir, 'liftoff', 'nivar_cer_lifted.gff'),
    ##liftalb=file.path(datadir, 'liftoff', 'nivar_alb_lifted.gff'),
    liftgla=file.path(datadir, 'liftoff', 'nivar_gla_lifted.gff'),
    braker=file.path(datadir, 'braker', 'braker.gff3'),
    drna=file.path(datadir, 'stringtie', 'denovo_drna.gff'),
    rnaseq=file.path(datadir, 'stringtie', 'denovo_rnaseq.gff'))

cols=c('seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute')
liftcergff=read_tsv(gff$liftcer, col_names=cols)
##liftalbgff=read_tsv(gff$liftalb, col_names=cols)
liftglagff=read_tsv(gff$liftgla, col_names=cols)
brakergff=read_tsv(gff$braker, col_names=cols)
drnagff=read_tsv(gff$drna, col_names=cols, skip=2)

##find annots included in liftoff albicans that weren't found in liftoff cerevisiae
##liftalbfile=file.path(datadir, 'liftoff', 'liftcer_liftalb.nivar_alb_lifted.gff.tmap')
##liftalb=read_tsv(liftalbfile) %>%
##    filter(class_code=='u')
##fromalb=liftalbgff %>%
##    rowwise() %>%
##    mutate(geneid=substring(strsplit(attribute, ';', fixed=TRUE)[[1]][1], 4)) %>%
##    filter(geneid %in% liftalb$qry_gene_id | geneid %in% liftalb$qry_id) %>%
##    select(-geneid)
##lifted_annots=rbind(liftcergff, fromalb)
##lifted_tsv=file.path(datadir, 'combined', 'lifted_all.gff')
##write_tsv(lifted_annots, lifted_tsv, col_names=FALSE)

liftcerfile=file.path(datadir, 'liftoff', 'liftgla_liftcer.nivar_cer_lifted.gff.tmap')
liftcer=read_tsv(liftcerfile) %>%
    filter(class_code=='u')

fromcer=liftcergff %>%
    rowwise() %>%
    mutate(geneid=substring(strsplit(attribute, ';', fixed=TRUE)[[1]][1], 4)) %>%
    filter(geneid %in% liftcer$qry_gene_id | geneid %in% liftcer$qry_id) %>%
    select(-geneid)

lifted_annots=rbind(liftcergff, fromcer)
##lifted_tsv=file.path(datadir, 'combined', 'lifted_all.gff')
lifted_tsv=file.path(datadir, 'combined', 'lifted_all_gla.gff')
write_tsv(lifted_annots, lifted_tsv, col_names=FALSE)

##find common ones between stringtie and braker
drnabrakerfile=file.path(datadir, 'braker', 'drna_braker.braker.gff3.tmap')
drnabraker=read_tsv(drnabrakerfile) %>% #keep these from drna annot
    filter(class_code=='=' | class_code=='c')
drnabraker_rev=read_tsv(drnabrakerfile) %>% #keep these from braker annot
    filter(class_code=='k')

fromdrna=drnagff %>%
    rowwise() %>%
    mutate(geneid=strsplit(attribute, '"', fixed=TRUE)[[1]][2]) %>%
    filter(geneid %in% drnabraker$ref_gene_id | geneid %in% drnabraker$ref_id) %>%
    select(-geneid)

frombraker=brakergff %>%
    rowwise() %>%
    mutate(geneidraw=substring(strsplit(attribute, ';', fixed=TRUE)[[1]][1], 4)) %>%
    mutate(geneid=paste0(strsplit(geneidraw, '.', fixed=TRUE)[[1]][1], '.',
                         strsplit(geneidraw, '.', fixed=TRUE)[[1]][2])) %>%
    filter(geneid %in% drnabraker_rev$qry_gene_id | geneid %in% drnabraker_rev$qry_id) %>%
    #mutate(attribute=paste0('"', attribute, '"')) %>%
    select(-geneid, -geneidraw)

data_annots=rbind(frombraker, fromdrna)
data_tsv=file.path(datadir, 'combined', 'data_all.gff')
write.table(data_annots, data_tsv, quote=FALSE, sep='\t', col.names=FALSE, row.names=FALSE)
/paperfigs/annot_compare.R
no_license
timplab/nivar
R
false
false
3,274
r
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/wrappers.R
\name{getStyles}
\alias{getStyles}
\title{Returns a list of all styles in the workbook}
\usage{
getStyles(wb)
}
\arguments{
\item{wb}{A workbook object}
}
\description{
Returns a list of all styles in the workbook
}
\examples{
## load a workbook
wb <- loadWorkbook(file = file.path(path.package("openxlsx"), "loadExample.xlsx"))
getStyles(wb)
}
\author{
Alexander Walker
}
\seealso{
\code{\link{replaceStyle}}
}
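A small follow-on to the example above (my addition; it relies only on the documented behavior that getStyles() returns a list of style objects):

## inspect the returned styles
styles <- getStyles(wb)
length(styles)   # number of styles defined in the workbook
styles[[1]]      # print the first style object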
/man/getStyles.Rd
no_license
petersteiner/openxlsx
R
false
false
510
rd
\name{genhistogram}
\alias{genhistogram}
\title{
genhistogram
}
\description{
Function \code{genhistogram} creates a histogram of (usually) concentrations with equidistant
breaks placed in "good-looking" numbers. If plot=TRUE, the resulting histogram is plotted with
several possibilities of graphical options.
}
\usage{
genhistogram(x, breaks=7, input="openair", pollutant=NA, delta=0, plot=TRUE,
             distr="norm", gap=0.05, columns=2, col="#A52375", emboss=0,
             xlab="Concentration", ylab="Number of samples", main=NA)
}
\arguments{
  \item{x}{ vector of concentration values or data frame of genasis/openair type. See 'Details' for a more detailed description of both data types.}
  \item{breaks}{ a number of breaks between individual histogram cells.}
  \item{input}{ type of data.frame in the case of data.frame input. The allowed values are "openair" (default) and "genasis". In case of vector input, this argument is meaningless.}
  \item{pollutant}{ name of the pollutant for which the plot is plotted. Not necessary if only data for one pollutant is available in x. If not specified, plots for all pollutants are drawn in a multi-plot arrangement.}
  \item{delta}{ a delta value before the lowest and after the highest value of x. Positive number or 0.}
  \item{plot}{ logical. Should the result be plotted?}
  \item{distr}{ a name of distribution to interlace. Values "norm" and "lnorm" are possible.}
  \item{gap}{ the size of a gap between columns in a relative width of the column (number between 0 and 1).}
  \item{columns}{ number of columns in the multi-plot arrangement.}
  \item{col}{ a specification for the default plotting color. See section 'Color Specification' in \link{par}.}
  \item{emboss}{ 0, 1, 2 or 3 for different emboss effects of the genhistogram columns.}
  \item{xlab}{ a label for the x axis, defaults to \code{"Concentration"}.}
  \item{ylab}{ a label for the y axis, defaults to \code{"Number of samples"}.}
  \item{main}{ overall title for the histogram.}
}
\details{
Function \code{genhistogram} creates a histogram, i.e. a distribution of the given data values
into several cells with equidistant breaks placed in "good-looking" numbers. The function
considers both the magnitude of the data values and their precision (number of decimals) and
chooses breaks which are easy to interpret (without many decimals). Some empty range is added
before the first and behind the last data value in order to smooth the break numbers. The
minimal size of this range can be set by the delta parameter.

The function recognises three different input formats. Option \code{input="openair"} uses the
openair format of data frame with a first column of name 'date' and type Date, optional columns
of names \code{"date_end"}, \code{"temp"}, \code{"wind"} and \code{"note"} and other columns of
class \code{"numeric"} containing concentration values and named by the names of the compounds.
\code{input="genasis"} is used for the data frame with six columns \code{"valu"}, \code{"comp"},
\code{"date_start"}, \code{"date_end"}, \code{"temp"} and \code{"wind"}, where the first, fifth
and sixth are of class \code{"numeric"}, the second of type character and the third and fourth
columns could be of both \code{"character"} or \code{"Date"} class. The names of columns in the
"genasis" type data frame are not rigid, only their order is assumed. There is also a
possibility to specify \code{x} as a numeric vector.

If \code{plot=TRUE}, the resulting histogram is plotted with several graphical possibilities.
There are two options for plotting a curve of an idealised distribution of concentration
values: \code{distr="norm"} (the default) draws a curve of the normal distribution defined by
the central tendency and variance of the concentration data, while \code{distr="lnorm"} draws a
curve of the lognormal distribution.

The argument \code{emboss} allows inducing an impression of plasticity of the histogram columns
by two different graphical effects or their combination. \code{emboss=0} implies flat columns
without any effect, \code{emboss=1} makes an impression of shading, \code{emboss=2} bevels the
edges of the columns and \code{emboss=3} combines the last two effects into one.
}
\value{
a list containing:
  \item{borders }{numerical vector of column cell borders }
  \item{distr }{numerical vector of counts of values in the individual column cells }
}
\author{
Jiri Kalina
\cr
\email{kalina@mail.muni.cz}}
\seealso{
\code{\link{genloq}, \link{genoutlier}, \link{genpastoact}, \link{genanaggr}, \link{genplot},
\link{genstatistic}, \link{gentransform}, \link{genwhisker}}
}
\examples{
## Vector input.
genhistogram(rnorm(60))

## Use of example data from the package:
data(kosetice.pas.openair)
genhistogram(kosetice.pas.openair[,1:8],col="orange",emboss=3)

data(kosetice.pas.genasis)
genhistogram(kosetice.pas.genasis[1:208,],input="genasis",
             distr="lnorm",col="orange",emboss=2)
}
\keyword{ genhistogram }
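A small additional illustration (my addition; it uses only arguments documented above, with simulated lognormal data):

## vector input with a lognormal overlay and embossed columns
x <- rlnorm(100, meanlog = 0, sdlog = 0.5)
genhistogram(x, breaks = 9, distr = "lnorm", emboss = 3, col = "#A52375",
             xlab = "Concentration", ylab = "Number of samples")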
/man/genhistogram.Rd
no_license
cran/genasis
R
false
false
5,056
rd
#* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# Error Scaling with Mousetracking - Estimation Code #
#* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *

library(gtools)
library(bayesm)

#Set working drive
setwd("/Users/rogerbailey/Desktop/Projects/ESM/Code")
set.seed(77)

#Load data
load("Simdata_BasicErrScaling.RData")

#Set MCMC variables
R=10000 #number of iterations
keep=5 #thinning
accup=100 #interval size for updating RW step size
space=10

#number of betas
nbeta=natt*nlvl+1

#Set priors
nubeta=nbeta+3
Vb=nubeta*diag(nbeta)
Abeta=matrix(.01)
betadoublebar=matrix(double(nbeta),nc=1)
Omegabar=double(nM)
AOmega=1

#Set initial values
oldbetabar=matrix(double(nbeta),nc=1) #initial betabar
oldbetamat=matrix(double(nbeta*nresp),nr=nresp) #initial betas
stepsizeb=.2 #beta stepsize
stepsizeO=.025 #Omega stepsize
oldVbeta=.5*diag(nbeta) #initial Vbeta
oldVbetai=backsolve(chol(oldVbeta),diag(nbeta)) #initial inv chol Vbeta
oldOmega=tOmega #matrix(double(nM),nr=1) #Initial omega vector
oldVOmega=.01*diag(nM)
oldVOmegai=backsolve(chol(oldVOmega),diag(nM))
acceptpropb=double(accup)+.23 #proportion of beta draws accepted
acceptpropO=double(accup)+.23 #proportion of Omega draws accepted
llike=matrix(double(nresp),nc=1) #log likelihood

#Setup functions
#function that returns the value of the prior for betas
logprior=function(beta,betabar,Vbi){
  return((beta-betabar)%*%Vbi%*%t(beta-betabar)*(-.5))
}

#loglike function
loglike=function(xb,y){
  probs=log(exp(xb))-log(matrix(rep(colSums(exp(xb)),length(xb[,1])),byrow=T,nc=length(xb[1,])))
  loc=cbind(y,c(1:(ncol(xb))))
  return(sum(probs[loc]))
}

#function for determining the change in the step size
stepupdate=function(accprop){
  step=1
  if(is.na(accprop)){return(step)}else{
    if(accprop<.21) {step=.99}
    if(accprop<.19) {step=.95}
    if(accprop<.15) {step=.85}
    if(accprop<.10) {step=.7}
    if(accprop>.25) {step=1.01}
    if(accprop>.27) {step=1.05}
    if(accprop>.3) {step=1.15}
    if(accprop>.4) {step=1.35}
    return(step)}
}

#Setup storage
betadraws=array(double(nbeta*nresp*R/keep),dim=c(R/keep,nresp,nbeta))
betabardraws=matrix(double(nbeta*R/keep),nr=R/keep)
Omegadraws=matrix(double(nM*R/keep),nr=R/keep)
llikes=matrix(double(R/keep),nr=R/keep)
acceptpropbs=matrix(double(R/keep),nr=R/keep)
acceptpropOs=matrix(double(R/keep),nr=R/keep)
stepsizebdraws=matrix(double(R/keep),nr=R/keep)
stepsizeOdraws=matrix(double(R/keep),nr=R/keep)

#timer
itime = proc.time()[3]

for(r in 1:R){
  accept=matrix(double(nresp),nc=nresp) #iter accept storage
  oldgammas=oldOmega%*%M
  olduts=NULL

  #Draw new betas and screen cutoffs for each respondent
  for(i in 1:nresp){
    #get new and old parameter vectors for RW
    oldbeta=matrix(oldbetamat[i,],nc=1)
    newbeta=oldbeta+t(chol(oldVbeta))%*%rnorm(nbeta)*stepsizeb
    scaler=matrix((1+oldgammas[i]*(c(0:(ntask-1))%x%rep(1,nalt)))^(-1),nr=nalt)
    oldutmat=matrix(data[[i]]$X%*%oldbeta,nr=nalt)
    newutmat=matrix(data[[i]]$X%*%newbeta,nr=nalt)

    #get likelihood and prior values
    oldllikeb=loglike(oldutmat*scaler,as.matrix(data[[i]]$y))
    newllikeb=loglike(newutmat*scaler,as.matrix(data[[i]]$y))
    oldlpriorb=logprior(t(oldbeta),t(oldbetabar),oldVbetai)
    newlpriorb=logprior(t(newbeta),t(oldbetabar),oldVbetai)
    diffvecb=newllikeb+newlpriorb-(oldllikeb+oldlpriorb)
    alphab=min(exp(diffvecb), 1)

    #accept or reject
    draw=runif(1)
    acceptb=0
    if(alphab>draw){acceptb=1}
    accept[i]=acceptb
    if(acceptb==1){
      oldbetamat[i,]=newbeta
      oldutmat=newutmat
    }
    olduts=cbind(olduts,oldutmat) #save unscaled utilities for later use
  }

  #Draw new Omega
  #create new omega matrix
  newOmega=oldOmega#+rnorm(nM)*stepsizeO
  newgammas=newOmega%*%M
  while((min(newgammas))*ntask<(-1)){
    newOmega=oldOmega+rnorm(nM)*stepsizeO
    newgammas=newOmega%*%M
  }
  scalerold=matrix((1+oldgammas%x%(c(0:(ntask-1))%x%rep(1,nalt)))^(-1),nr=nalt)
  scalernew=matrix((1+newgammas%x%(c(0:(ntask-1))%x%rep(1,nalt)))^(-1),nr=nalt)

  #get likelihood and prior values
  oldllikeO=loglike(olduts*scalerold,ycomb)
  newllikeO=loglike(olduts*scalernew,ycomb)
  oldlpriorO=logprior(oldOmega,Omegabar,oldVOmegai)
  newlpriorO=logprior(newOmega,Omegabar,oldVOmegai)
  diffvecO=newllikeO+newlpriorO-(oldllikeO+oldlpriorO)
  alphaO=min(exp(diffvecO), 1)

  #accept or reject
  draw=runif(1)
  acceptO=0
  if(alphaO>draw){acceptO=1}
  llike=oldllikeO
  if(acceptO==1){
    oldOmega=newOmega
    llike=newllikeO
  }

  #Store acceptance proportions
  acceptpropb=c(acceptpropb[2:accup],mean(accept))
  acceptpropO=c(acceptpropO[2:accup],acceptO)

  #Draw new values for betabar
  outbetaup=rmultireg(oldbetamat,matrix(1,nr=nresp,nc=1),matrix(betadoublebar,nr=1),Abeta,nubeta,Vb)
  oldbetabar=matrix(outbetaup$B)
  oldVbeta=outbetaup$Sigma
  oldVbetai=chol2inv(chol(oldVbeta))

  #Store values
  if(r%%keep==0){
    betadraws[r/keep,,]=oldbetamat
    betabardraws[r/keep,]=oldbetabar
    Omegadraws[r/keep,]=oldOmega
    acceptpropbs[r/keep]=mean(acceptpropb)
    acceptpropOs[r/keep]=mean(acceptpropO)
    llikes[r/keep]=llike
    stepsizebdraws[r/keep]=stepsizeb
    stepsizeOdraws[r/keep]=stepsizeO
  }

  #print progress
  #print time-to-end and chart current draw progress
  if(r%%(keep*space)==0){
    par(mfrow=c(3,1))
    ctime = proc.time()[3]
    tuntilend = ((ctime - itime)/r) * (R + 1 - r)
    cat(" ", r, " (", round(tuntilend/60, 1), ")", fill = TRUE)
    plot(llikes,type="l",ylab="Log Likelihood")
    matplot(betabardraws,type="l",ylab="Betabar Draws")
    matplot(Omegadraws,type="l",ylab="Omega Draws")
    fsh()
  }

  #update stepsizes
  if(r%%accup==0&&r<(.3*R)){
    stepsizeb=stepsizeb*stepupdate(mean(acceptpropb))
    stepsizeO=stepsizeO*stepupdate(mean(acceptpropO))
  }
}
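A tiny illustration of the adaptive step-size rule used above (my addition): stepupdate() returns a multiplier that shrinks the random-walk step when the recent acceptance rate is low and inflates it when acceptance is high.

#added sketch: how the step-size multiplier responds to different acceptance rates
sapply(c(0.05, 0.12, 0.18, 0.23, 0.35), stepupdate)
#low acceptance -> multiplier < 1 (smaller steps); high acceptance -> multiplier > 1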
/Code/Proxy Model/Code/EstCode_ErrorScalingwithTracking.R
permissive
jeff-dotson/mouse-tracking
R
false
false
5,829
r
# Dustin Roten 11/05/2017
# This script is responsible for metrics for the shifted dispersion scenario ONLY.
SetAlpha <- 0.3

# Load required libraries
library(ggplot2)
library(reshape)
library(ggmap)
source("TEST-DEMOFunctions.R")

# The required information for the functions used includes the (Lat, Lon) of the plant
# as well as the desired gridding resolution.
PlantLAT <- 39.28682
PlantLON <- -96.1172
Resolution <- 0.1

Dispersion <- read.delim("JEC-10000m2.txt", header = TRUE, sep = "")
Origin_Dispersion <- ShiftToOrigin("S", Dispersion, PlantLAT, PlantLON)
Shifted_Dispersion <- ShiftToOrigin("U", ShiftDispersion(Origin_Dispersion, 10), PlantLAT, PlantLON)
Rotated_Dispersion <- ShiftToOrigin("U", RotateDispersion(Origin_Dispersion, 15), PlantLAT, PlantLON)
Radial_Dispersion <- ShiftToOrigin("U", RadialDilation(Origin_Dispersion, 24), PlantLAT, PlantLON)
Angular_Dispersion <- ShiftToOrigin("U", AngularStretch(Origin_Dispersion, 55), PlantLAT, PlantLON)

map <- get_map(location = c(PlantLON + 2, PlantLAT + 4), zoom = 6, maptype = "terrain", color = "bw")

Ex1 <- ggmap(map) +
  geom_point(data = Shifted_Dispersion, aes(x = LON, y = LAT, color = CO2), alpha = SetAlpha) +
  geom_point(data = Dispersion, aes(x = LON, y = LAT, color = CO2)) +
  coord_cartesian() + theme_bw() +
  theme(legend.position="none") +
  theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
  theme(axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank()) +
  scale_colour_gradientn(colours = c("green", "darkgreen", "yellow", "darkorange", "orange", "red"),
                         values = c(0, 0.0012, 0.002, 0.004, 0.009, 1))
ggsave("ShiftExample.jpg", Ex1, device = "jpg", width = 10, height = 10, units = "in")

Ex2 <- ggmap(map) +
  geom_point(data = Angular_Dispersion, aes(x = LON, y = LAT, color = CO2), alpha = SetAlpha) +
  geom_point(data = Dispersion, aes(x = LON, y = LAT, color = CO2)) +
  coord_cartesian() + theme_bw() +
  theme(legend.position="none") +
  theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
  theme(axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank()) +
  scale_colour_gradientn(colours = c("green", "darkgreen", "yellow", "darkorange", "orange", "red"),
                         values = c(0, 0.0012, 0.002, 0.004, 0.009, 1))
ggsave("AngularExample.jpg", Ex2, device = "jpg", width = 10, height = 10, units = "in")

Ex3 <- ggmap(map) +
  geom_point(data = Radial_Dispersion, aes(x = LON, y = LAT, color = CO2), alpha = SetAlpha) +
  geom_point(data = Dispersion, aes(x = LON, y = LAT, color = CO2)) +
  coord_cartesian() + theme_bw() +
  theme(legend.position="none") +
  theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
  theme(axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank()) +
  scale_colour_gradientn(colours = c("green", "darkgreen", "yellow", "darkorange", "orange", "red"),
                         values = c(0, 0.0012, 0.002, 0.004, 0.009, 1))
ggsave("RadialExample.jpg", Ex3, device = "jpg", width = 10, height = 10, units = "in")

Ex4 <- ggmap(map) +
  geom_point(data = Rotated_Dispersion, aes(x = LON, y = LAT, color = CO2), alpha = SetAlpha) +
  geom_point(data = Dispersion, aes(x = LON, y = LAT, color = CO2)) +
  coord_cartesian() + theme_bw() +
  theme(legend.position="none") +
  theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
  theme(axis.title.y=element_blank(), axis.text.y=element_blank(), axis.ticks.y=element_blank()) +
  scale_colour_gradientn(colours = c("green", "darkgreen", "yellow", "darkorange", "orange", "red"),
                         values = c(0, 0.0012, 0.002, 0.004, 0.009, 1))
ggsave("RotatedExample.jpg", Ex4, device = "jpg", width = 10, height = 10, units = "in")

##### Moving files #####
NAME <- c("ShiftExample.jpg", "AngularExample.jpg", "RadialExample.jpg", "RotatedExample.jpg")
TEST = NULL
for(i in 1:length(NAME)) {
  TEST[i] <- file.exists(paste(NAME[i]))
  file.copy(from = paste(NAME[i]),
            to = paste("C:/Users/dusti/Google Drive/NASA/HysplitPaper1/images/", NAME[i], sep = ""),
            overwrite = TRUE)
  file.remove(paste(NAME[i]))
  TEST
}
/Figures/POC_Images.R
no_license
DustinRoten/STACK-project
R
false
false
4,629
r
library(tree)

dataset_name <- "iris"
k_parameter <- 10

values <- c("one",2,"three",4,5)
values[2]
values[-1]
values[c(2,3)]
# minus means exclude
values[-c(2,3)]
values
# [1] means identifying which entry you are looking at
rep(values,10)

which(values == "three")
which(values == "2")
which(values == 2)

a <- 5 + 4
b <- 5 + 4
c <- 5 + 4

sum_two_numbers <- function(a = 4, b){
  return (a + b)
}
sum_two_numbers(4,5)
sum_two_numbers(,3)

A <- matrix(1:12, nrow = 3, ncol = 4, byrow = TRUE)
matrix(1:12, nrow = 3, ncol = 2, byrow = TRUE)
# it repeats the set, but n
matrix(1:12, nrow = 3, ncol = 8, byrow = TRUE)

A[1,]
A[,1]
A[,c(1,2)]
A[c(2,3),c(1,2)]

colnames(A) <- c("C1","C2","C3","C4")
rownames(A) <- c("R1","R2","R3")

write.csv(A,"data.csv")
# read.csv returns a dataframe by default
B <- read.csv(file = "data.csv")
B <- read.csv(file = "data.csv", header = TRUE, row.names = 1)

B <- matrix(1:20, nrow = 4, ncol = 5, byrow = TRUE)
A %*% B

# transpose of a matrix
t(A)

# if we want the result as a matrix use drop = FALSE
A[,2,drop = FALSE]
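A small extension in the same spirit (my addition, not part of the original tutorial): row and column summaries of the matrix A built above, using base R only.

# added sketch: simple row and column summaries of A
rowSums(A)
colSums(A)
apply(A, 1, mean)   # row means via apply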
/lab-demo.R
no_license
limoneren/R-Tutorial
R
false
false
1,079
r
library(IC2)
library("rineq")
library(convey)

pof_design_moradores_mg <- readRDS("./outputs/moradores_impostos_mg_design.rds")

pof_design_moradores_prep <- convey_prep(pof_design_moradores_mg)

gini_renda_total <- coef(
  svygini(
    ~ RENDA_ANUAL_PC,
    pof_design_moradores_prep
  )
) * 100

gini_pos_ipva <- coef(
  svygini(
    ~ renda_pos_ipva_pc,
    pof_design_moradores_prep
  )) * 100

gini_pos_icms <- coef(
  svygini(
    ~ renda_pos_icms_pc,
    pof_design_moradores_prep
  )) * 100

gasto_icms_pc <- pof_design_moradores_mg$variables$ICMS_ANUAL_PC
renda_pos_icms_pc <- pof_design_moradores_mg$variables$renda_pos_icms_pc
gasto_ipva_pc <- pof_design_moradores_mg$variables$IPVA_ANUAL_pc
renda_pos_ipva_pc <- pof_design_moradores_mg$variables$renda_pos_ipva_pc
renda_pc <- pof_design_moradores_mg$variables$RENDA_ANUAL_PC
pesos <- pof_design_moradores_mg$variables$peso

renda_pos_icms_pc <- ifelse(renda_pos_icms_pc < 0, 0, renda_pos_icms_pc)

curveConcent(y = renda_pos_icms_pc, x = gasto_icms_pc, w = pesos,
             col = "red", xlab = "Ranking pela renda pós ICMS")
curveConcent(y = renda_pos_ipva_pc, x = renda_pos_ipva_pc, w = pesos,
             col = "black", add = TRUE)
curveConcent(y = renda_pos_icms_pc, x = renda_pos_icms_pc, w = pesos,
             col = "grey", add = TRUE)
curveConcent(y = renda_pos_ipva_pc, x = gasto_ipva_pc, w = pesos,
             col = "green", add = TRUE)

legend("topleft",
       c("Renda pós ICMS","Renda pós IPVA","ICMS","IPVA"),
       pch = c(16),
       col = c("grey","black","red","green"),
       bty = "n")

con_index_icms <- ci(gasto_icms_pc, renda_pos_icms_pc, wt = pesos, type = c("CI"))
con_index_ipva <- ci(gasto_ipva_pc, renda_pos_ipva_pc, wt = pesos, type = c("CI"))
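A minimal follow-up (my addition): print the two concentration indices computed above. As a standard interpretation, comparing the concentration index of a tax with the Gini of income (Kakwani's progressivity index, CI of the tax minus the income Gini) indicates whether the tax is progressive (positive) or regressive (negative); the exact slot holding the numeric index inside the ci object is not assumed here.

# added sketch: inspect the concentration indices of the two taxes
con_index_icms
con_index_ipva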
/scripts/5.1 Índice de Lerman Yitzhaki.R
permissive
LucasLBrandao/Efeitos-redistributivos-dos-impostos-estaduais-MG
R
false
false
1,638
r
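The script above relies on packaged concentration-index routines. A minimal, self-contained sketch (toy vectors and weights, not the POF survey data above) of what a weighted concentration index is: twice the weighted covariance between the variable of interest and the fractional rank in the ranking variable, divided by the weighted mean.

# Minimal sketch: weighted concentration index by hand
tax    <- c(10, 20, 35, 50, 90)        # hypothetical per-capita tax burden
income <- c(100, 200, 400, 800, 1600)  # hypothetical ranking variable
w      <- c(1, 2, 1, 3, 1)             # hypothetical survey weights

ord <- order(income)
tax <- tax[ord]; w <- w[ord]
wn    <- w / sum(w)
frank <- cumsum(wn) - wn / 2           # weighted fractional rank (mean is 0.5)
mu    <- sum(wn * tax)
CI    <- 2 * sum(wn * (tax - mu) * (frank - 0.5)) / mu
CI  # positive when the burden is concentrated among higher-income households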
preparePathways <- function(db = c("kegg", "MSigDB"),
                            type = c("H","C1","C2","C3","C4","C5","C6","C7"),
                            genename = c("EntrezID", "symbol")) {
  this.call <- match.call()
  db     <- match.arg(db)
  m.type <- match.arg(type)
  g.id   <- match.arg(genename)

  if (db == "kegg"   && is.null(g.id))   { g.id <- "EntrezID" }
  if (db == "MSigDB" && is.null(m.type)) { m.type <- "C2"; g.id <- "symbol" }  # fixed: original had `g.id=="symbol"`, a comparison with no effect

  if (db == "kegg") {
    paths <- graphite::pathways('hsapiens', 'kegg')
    if (g.id == "symbol") {
      paths <- lapply(paths, function(p) convertIdentifiers(p, g.id))
    }
    genesets <- lapply(paths, nodes)
    genesets <- lapply(genesets, function(a) gsub("ENTREZID:", "", a))
  } else {
    m_df  <- msigdbr::msigdbr(species = "Homo sapiens", category = m.type)
    m_t2g <- m_df %>% dplyr::select(.data$gs_name, .data$entrez_gene) %>% as.data.frame()
    if (g.id == "symbol") {
      # renamed to entrez_gene so the summarise() below finds the column
      m_t2g <- m_df %>% dplyr::select(.data$gs_name, entrez_gene = .data$gene_symbol) %>% as.data.frame()
    }
    gs_df <- m_t2g %>%
      dplyr::group_by(.data$gs_name) %>%
      dplyr::summarise(entrez_gene = paste0(.data$entrez_gene, collapse = ","))
    genesets <- lapply(gs_df$entrez_gene, function(s) unlist(strsplit(s, ",")))
    names(genesets) <- gs_df$gs_name
  }
  return(genesets)
}
/R/preparePathways.R
no_license
mikehellstern/netgsa
R
false
false
1,306
r
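A short usage sketch for the function above; it assumes the graphite, msigdbr and dplyr packages are installed, and the set names in the comment are illustrative only.

# Hypothetical usage sketch
kegg_sets <- preparePathways(db = "kegg", genename = "EntrezID")
msig_sets <- preparePathways(db = "MSigDB", type = "H", genename = "symbol")
str(head(msig_sets, 2))  # a named list of character vectors, one per gene set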
library(scales)
# raster(), values(), and writeRaster() below come from the raster package
# (assumed; it is not loaded in the original script)
library(raster)

# `packages` and `dynamic_require` are expected to be defined elsewhere
# (e.g., a sourced helper script in this repository)
for (p in packages) {
  dynamic_require(p)
}

#path = "/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/OUTPUTS/3.enms/USA_ONLY/"
#path = "/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/OUTPUTS/3.enms/WITH_MEXICO/"
path = "/Users/kprovost/Dropbox (AMNH)/Dissertation/CHAPTER3_TRAITS/ECOLOGY/OUTPUTS/3.enms/WORLDCLIM/"
setwd(path)

#files = list.files(pattern="AddedPredThin.+USA.asc$",recursive = T)
#files = list.files(pattern="AddedPredThin.+zoomedin.asc$",recursive = T)
#files = list.files(pattern="AddedPredThin.+addedLAyers.asc$",recursive = T)
#files = list.files(pattern="AddedPredThin.+LGM.asc$",recursive = T)
#files = list.files(pattern="AddedPredThin.+MID.asc$",recursive = T)
files = list.files(pattern="AddedPredThin.+worldclim.asc$",recursive = T)

doThresh=T

for (asc in files) {
  print(asc)
  print("--reading")
  ras = raster(asc)
  print("--calculating")
  maximum = max(values(ras),na.rm=T)
  minimum = min(values(ras),na.rm=T)
  print("--scaling raster")
  values(ras) = scales::rescale(values(ras),to=c(0,1))
  newasc = paste(asc,"_",maximum,"_",minimum,".rescaled",sep="")
  print("--writing raster")
  writeRaster(ras,newasc,format="ascii",overwrite=T)
  if(doThresh==T) {
    print("--reading thresh")
    tab = sub(pattern="BestModel",replacement="ThreshTable",x=asc)
    tab = sub(pattern="asc",replacement="csv",x=tab)
    newtab = paste(tab,".rescaled.csv",sep="")
    read = read.csv(tab)
    print("--scaling thresh")
    newread = c("threshold",as.vector(sapply(read[,2:7],FUN=function(x){(x-minimum)/(maximum-minimum)})))
    names(newread) = names(read)
    names(newread)[1] = ""
    newread = rbind(newread)
    print("--writing thresh")
    write.csv(newread,newtab,row.names=F)
  }
}
/scripts/Niche_Models/convert_raster_to_range01.R
no_license
kaiyaprovost/GDM_pipeline
R
false
false
1,765
r
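The loop above applies a min-max rescaling to the raster values and the threshold table. A minimal sketch (toy vector, not the ENM rasters above) of the same transformation and its equivalence to scales::rescale():

# Minimal sketch: min-max rescaling to [0, 1]
x   <- c(3, 7.5, 10, 22)
x01 <- (x - min(x)) / (max(x) - min(x))
x01
all.equal(x01, scales::rescale(x, to = c(0, 1)))  # TRUE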
### Functions to facilitate fitting the CCl4 inhalation model

initparms <- function(...) {
  arglist <- list(...)
  Pm <- numeric(36)
  ## The Changeable parameters are ones that can be modified on input
  Changeable <- c("BW", "QP", "QC", "VFC", "VLC", "VMC", "QFC", "QLC", "QMC",
                  "PLA", "PFA", "PMA", "PTA", "PB", "MW", "VMAX", "KM",
                  "CONC", "KL", "RATS", "VCHC")
  ## Computed parameters are strictly functions of the Changeable ones.
  Computed <- c("VCH", "AI0", "PL", "PF", "PT", "PM", "VTC",
                "VT", "VF", "VL", "VM", "QF", "QL", "QM", "QT")
  names(Pm) <- c(Changeable, Computed)

  ### Physiological parameters
  Pm["BW"]  <- 0.182  # Body weight (kg)
  Pm["QP"]  <- 4.0    # Alveolar ventilation rate (hr^-1)
  Pm["QC"]  <- 4.0    # Cardiac output (hr^-1)
  Pm["VFC"] <- 0.08   # Fraction fat tissue (kg/(kg/BW))
  Pm["VLC"] <- 0.04   # Fraction liver tissue (kg/(kg/BW))
  Pm["VMC"] <- 0.74   # Fraction of muscle tissue (kg/(kg/BW))
  Pm["QFC"] <- 0.05   # Fractional blood flow to fat ((hr^-1)/QC
  Pm["QLC"] <- 0.15   # Fractional blood flow to liver ((hr^-1)/QC)
  Pm["QMC"] <- 0.32   # Fractional blood flow to muscle ((hr^-1)/QC)

  ## Chemical specific parameters for chemical
  Pm["PLA"]  <- 16.17   # Liver/air partition coefficient
  Pm["PFA"]  <- 281.48  # Fat/air partition coefficient
  Pm["PMA"]  <- 13.3    # Muscle/air partition coefficient
  Pm["PTA"]  <- 16.17   # Viscera/air partition coefficient
  Pm["PB"]   <- 5.487   # Blood/air partition coefficient
  Pm["MW"]   <- 153.8   # Molecular weight (g/mol)
  Pm["VMAX"] <- 0.11    # Maximum velocity of metabolism (mg/hr)
  Pm["KM"]   <- 1.3     # Michaelis-Menten constant (mg/l)

  ## Parameters for simulated experiment
  Pm["CONC"] <- 1000  # Inhaled concentration
  Pm["KL"]   <- 0.02  # Loss rate from empty chamber /hr
  Pm["RATS"] <- 1.0   # Number of rats enclosed in chamber
  Pm["VCHC"] <- 3.8   # Volume of closed chamber (l)

  ## Now, change anything from the argument list
  ## First, delete anything in arglist that is not in Changeable
  whichdel <- which(! names(arglist) %in% Changeable)
  if (length(whichdel)) {
    warning(paste("Parameters", paste(names(arglist)[whichdel], collapse=", "),
                  "are not in this model\n"))
  }
  arglist[whichdel] <- NULL
  ## Is there anything else
  if (length(arglist)) {
    Pm[names(arglist)] <- as.vector(unlist(arglist))
  }

  ## Computed parameter values
  Pm["VCH"] <- Pm["VCHC"] - Pm["RATS"]*Pm["BW"]     # Net chamber volume
  Pm["AI0"] <- Pm["CONC"]*Pm["VCH"]*Pm["MW"]/24450  # Initial amt. in chamber (mg)
  Pm[c("PL", "PF", "PT", "PM")] <- Pm[c("PLA", "PFA", "PTA", "PMA")]/Pm["PB"]
  ## Fraction viscera (kg/(kg BW))
  Pm["VTC"] <- 0.91 - sum(Pm[c("VLC", "VFC", "VMC")])
  Pm[c("VT", "VF", "VL", "VM")] <- Pm[c("VTC", "VFC", "VLC", "VMC")]*Pm["BW"]
  Pm[c("QF", "QL", "QM")] <- Pm[c("QFC", "QLC", "QMC")]*Pm["QC"]
  Pm["QT"] <- Pm["QC"] - sum(Pm[c("QF", "QL", "QM")])
  Pm
}

### We don't actually use these functions (though they work)
### They exist because cclmodel.orig is easier to read than ccl4modelG
### The model function also computes some values that are of interest in
### checking the model and for calculating a dose metric:
###   the amount metabolized (AM)
###   the area under the concentration-time curve in the liver (CLT)
###   and the mass balance (MASS), which should be constant if everything
###   worked right.

## State variable, y, assignments.
##  CI  CM  CT  CF  CL
##  AI AAM  AT  AF  AL CLT  AM
##   1   2   3   4   5   6   7
initstate.orig <- function(Pm) {
  y <- rep(0, 7)
  names(y) <- c("AI", "AAM", "AT", "AF", "AL", "CLT", "AM")
  y["AI"] <- Pm["AI0"]
  y
}

parms <- initparms()

ccl4model.orig <- with(as.list(parms), function(t, y, parms) {
  conc <- y[c("AI", "AAM", "AT", "AF", "AL")]/c(VCH, VM, VT, VF, VL)
  ## Vconc[1] is conc in mixed venous blood
  Vconc <- c(0, conc[2:5]/parms[c("PM", "PT", "PF", "PL")]) # '0' is a placeholder
  Vconc[1] <- sum(Vconc[2:5]*c(QM, QT, QF, QL))/QC
  ## CA is conc in arterial blood
  CA <- (QC * Vconc[1] + QP * conc[1])/ (QC + QP/PB)
  ## Exhaled chemical
  CX <- CA/PB
  ## return the derivatives and other computed items
  list(c(RATS*QP*(CX - conc[1]) - KL*y["AI"],
         QM*(CA - Vconc[2]),
         QT*(CA - Vconc[3]),
         QF*(CA - Vconc[4]),
         QL*(CA - Vconc[5]) - (RAM <- VMAX*Vconc[5]/(KM + Vconc[5])),
         conc[5],
         RAM),
       c(DOSE = as.vector(AI0 - y["AI"]),
         MASS = as.vector(sum(y[c("AAM","AT", "AF", "AL", "AM")])*RATS),
         CP = as.vector(conc[1]*24450.0/MW)
       ))
})

### Versions that only calculate what is needed for parameter estimation
initparmmx <- function(parms) {
  mx <- matrix(nrow=5, ncol=7)
  mx[1, 6] <- parms["VCH"]
  mx[1, 7] <- parms["MW"]
  mx[4, 6] <- parms["VL"]*parms["PL"]
  mx[5, 6] <- parms["VMAX"]
  mx[5, 7] <- parms["KM"]
  mxx <- matrix(parms[c("QP", "QM", "QT", "QF", "QL")], nrow=5, ncol=5, byrow=TRUE)
  mxx <- sweep(mxx, 2, parms[c("VCH", "VM", "VT", "VF", "VL")], "/")
  mxx <- sweep(mxx, 2, c(1, parms[c("PM", "PT", "PF", "PL")]), "/")
  mxx <- mxx/(parms["QC"] + parms["QP"]/parms["PB"])
  mxx <- sweep(mxx, 1, c(parms["RATS"]*parms["QP"]/parms["PB"],
                         parms[c("QM", "QT", "QF", "QL")]), "*")
  dg <- diag(c(parms["RATS"]*parms["QP"]/parms["VCH"] + parms["KL"],
               parms[c("QM", "QT", "QF", "QL")]/
                 (parms[c("PM", "PT", "PF", "PL")]*parms[c("VM", "VT", "VF", "VL")])))
  mxx <- mxx - dg
  mx[1:5, 1:5] <- mxx
  mx
}

### Now, include the gradients wrt Vmax, Km, and initial chamber concentration
initstateG <- function(Pm) {
  y <- rep(0, 20)
  names(y) <- c("AI", "AAM", "AT", "AF", "AL",
                "dAIdVm", "dAAMdVm", "dATdVm", "dAFdVm", "dALdVm",
                "dAIdK", "dAAMdK", "dATdK", "dAFdK", "dALdK",
                "dAIdy0", "dAAMdy0", "dATdy0", "dAFdy0", "dALdy0")
  y["AI"] <- Pm["AI0"]
  y["dAIdy0"] <- Pm["VCH"] * Pm["MW"]/24450.0
  y
}

ccl4modelG <- function(t, y, parms) {
  list(c(parms[,1:5] %*% y[1:5] -
           c(0, 0, 0, 0, parms[5, 6]*y[5] /
               ((Kms <- parms[5, 7]*parms[4, 6]) + y[5])),
         parms[, 1:5] %*% y[6:10] -
           c(0, 0, 0, 0, y[5]/(Kms + y[5]) +
               parms[5, 6]*Kms*y[10]/ (Kms + y[5])^2),
         parms[, 1:5] %*% y[11:15] -
           c(0, 0, 0, 0, parms[5, 6]*(y[15]*Kms - parms[4, 6]*y[5])/
               (Kms + y[5])^2),
         parms[,1:5] %*% y[16:20] -
           c(0, 0, 0, 0, parms[5, 6]*Kms*y[20]/(Kms + y[5])^2)
       ),
       c(CP     = as.vector(y[1]*(zz <- 24450.0/parms[1, 6]/parms[1, 7])),
         dCPdVm = as.vector(y[6]*zz),
         dCPdK  = as.vector(y[11]*zz),
         dCPdy0 = as.vector(y[16]*zz)
       )
  )
}

### Function to use in gnls.  This is more complicated than usual for such
### functions, because each value for each animal depends on the previous
### value for that animal.  Normal vectorization doesn't work.  Work with
### log(Vmax) and log(Km)
ccl4gnls <- function(time, initconc, lVmax, lKm, lconc) {
  Vmax <- if(length(lVmax) == 1) rep(exp(lVmax), length(time)) else exp(lVmax)
  Km   <- if (length(lKm) == 1) rep(exp(lKm), length(time)) else exp(lKm)
  conc <- if (length(lconc) == 1) rep(exp(lconc), length(time)) else exp(lconc)
  Concs <- levels(initconc)
  CP <- numeric(length(time))
  .grad <- matrix(nrow=length(time), ncol=3,
                  dimnames=list(NULL, c("lVmax", "lKm", "lconc")))
  ### Run the model once for each unique initial concentration
  for (Conc in Concs) {
    sel <- initconc == Conc
    parms <- initparms(CONC=conc[sel][1], VMAX=Vmax[sel][1], KM=Km[sel][1])
    parmmx <- initparmmx(parms)
    y <- initstateG(parms)
    TTime <- sort(unique(time[sel]))
    if (! 0 %in% TTime) TTime <- c(0, TTime)
    out <- lsoda(y, TTime, ccl4modelG, parmmx, rtol=1e-12, atol=1e-12)
    CP[sel] <- out[match(time[sel], out[,"time"]),"CP"]
    .grad[sel, "lVmax"] <- out[match(time[sel], out[, "time"]), "dCPdVm"]
    .grad[sel, "lKm"]   <- out[match(time[sel], out[, "time"]), "dCPdK"]
    .grad[sel, "lconc"] <- out[match(time[sel], out[, "time"]), "dCPdy0"]
  }
  .grad <- .grad * cbind(Vmax, Km, conc)
  attr(CP, "gradient") <- .grad
  CP
}

if (require(nlme, quietly=TRUE)) {
  start <- log(c(lVmax = 0.11, lKm=1.3, 25, 100, 250, 1000))
  ### Data are from:
  ### Evans, et al. (1994) Applications of sensitivity analysis to a
  ### physiologically based pharmacokinetic model for carbon tetrachloride in rats.
  ### Toxicology and Applied Pharmacology 128: 36--44.
  data(ccl4data)
  ccl4data.avg <- aggregate(ccl4data$ChamberConc,
                            by=ccl4data[c("time", "initconc")], mean)
  names(ccl4data.avg)[3] <- "ChamberConc"

  ### Estimate log(Vmax), log(Km), and the logs of the initial
  ### concentrations with gnls
  cat("\nThis may take a little while ... \n")
  ccl4.gnls <- gnls(ChamberConc ~ ccl4gnls(time, factor(initconc), lVmax, lKm, lconc),
                    params = list(lVmax + lKm ~ 1, lconc ~ factor(initconc)-1),
                    data=ccl4data.avg, start=start,
                    weights=varPower(fixed=1), verbose=TRUE)
  start <- coef(ccl4.gnls)
  ccl4.gnls2 <- gnls(ChamberConc ~ ccl4gnls(time, factor(initconc), lVmax, lKm, lconc),
                     params = list(lVmax + lKm ~ 1, lconc ~ factor(initconc)-1),
                     data=ccl4data, start=start,
                     weights=varPower(fixed=1), verbose=TRUE)
  print(summary(ccl4.gnls2))

  ### Now fit a separate initial concentration for each animal
  start <- c(coef(ccl4.gnls))
  cat("\nApprox. 95% Confidence Intervals for Metabolic Parameters:\n")
  tmp <- exp(intervals(ccl4.gnls2)[[1]][1:2,])
  row.names(tmp) <- c("Vmax", "Km")
  print(tmp)
  cat("\nOf course, the statistical model is inappropriate, since\nthe concentrations within animal are pretty highly autocorrelated:\nsee the graph.\n")
  opar <- par(ask=TRUE, no.readonly=TRUE)
  plot(ChamberConc ~ time, data=ccl4data, xlab="Time (hours)",
       xlim=range(c(0, ccl4data$time)),
       ylab="Chamber Concentration (ppm)", log="y")
  out <- predict(ccl4.gnls2, newdata=ccl4data.avg)
  concentrations <- sort(unique(ccl4data$initconc))
  for (conc in concentrations) {
    times <- ccl4data.avg$time[sel <- ccl4data.avg$initconc == conc]
    CP <- out[sel]
    lines(CP ~ times)
  }
  par(opar)
} else {
  cat("This example requires the package nlme\n")
}
/deSolve/demo/CCL4model.R
permissive
solgenomics/R_libs
R
false
false
11,157
r
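The demo above repeatedly calls deSolve's lsoda() inside ccl4gnls(). A minimal, self-contained sketch (a toy exponential-decay ODE, not the PBPK model above) of the same interface: an initial state vector, a time grid, a derivative function returning a list, and a parameter vector.

# Minimal sketch: the lsoda() calling convention
library(deSolve)
decay <- function(t, y, parms) list(-parms["k"] * y)  # dA/dt = -k * A
out <- lsoda(y = c(A = 1), times = seq(0, 5, by = 0.5), func = decay, parms = c(k = 0.7))
head(out)  # columns: time and the state A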
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AFROC.R
\name{AFROC}
\alias{AFROC}
\title{AF\strong{ROC} curve (alternative free-response \strong{ROC} curve)}
\usage{
AFROC(t, a = 0.14, b = 0.19, x.coordinate.also = FALSE)
}
\arguments{
\item{t}{A real number which moves in the domain of the FROC curve}

\item{a, b}{Parameters of the model which characterize the AFROC curve}

\item{x.coordinate.also}{Logical, whether a vector of \code{1-exp(-t)} is included in the return value.}
}
\value{
If \code{x.coordinate.also = TRUE}, then a list containing two vectors, the x and y coordinates of the AFROC curve, for drawing curves.

If \code{x.coordinate.also = FALSE}, then the return value is a vector of the y coordinates of the AFROC curve only (the x coordinates are omitted).
}
\description{
An AFROC curve is a plane curve whose area under the curve (AUC) indicates an observer's performance ability.

In the following, \eqn{\Phi()} denotes the cumulative distribution function of the standard Gaussian distribution.

The so-called \emph{AFROC} curve is defined by

\deqn{ (\xi(t),\eta(t) ) =(1-e^{-t}, \Phi( b\Phi^{-1}(\exp(-t) )- a ) )}

for all \eqn{t >0} and some fixed real numbers \eqn{a,b}.

Specifying two real numbers \eqn{a} and \eqn{b}, we can plot an AFROC curve.

The area under the AFROC curve, or briefly AUC, is calculated as follows, and is used to evaluate how physicians detect lesions in radiographs.

\deqn{ AUC = \int \eta(t) d\xi(t) = \frac{ a }{ \sqrt{1+ b^2} }. }

Note that the so-called FROC curve can be interpreted as the curve of expectations of data points. On the other hand, the AFROC curve cannot be interpreted as a fitted curve, but its AUC is finite. Because the AFROC curve is obtained by modifying the FROC curve, it reflects observer performance.
}
\examples{

#========================================================================================
# Plot AFROC curve
#========================================================================================

tt <- seq(0, 1, length.out = 111)
ttt <- stats::runif(1000,0.001,100)
t <- c(tt,ttt)
a <- AFROC(t, x.coordinate.also = TRUE)

plot(a$x, a$y)

# We note that the x-coordinates of the AFROC curve are not t but x = 1 - exp(-t).
# To emphasize that the x-coordinate is not t, we prepare another example

#========================================================================================
# Plot AFROC curve
#========================================================================================

tt <- seq(0, 1, length.out = 1111)
#plot(1:length(tt),tt)
ttt <- stats::runif(1000,0.001,100)
t <- c(tt,ttt)
t <- c(0,tt,ttt,1)
t <- sort(t, method = "shell", index.return = FALSE)

y <- AFROC(t, x.coordinate.also = FALSE)

plot(1-exp(-t), y, type="l")

Close_all_graphic_devices()

# 2020 August; Revised 2022 Jan 6
}
/man/AFROC.Rd
no_license
cran/BayesianFROC
R
false
true
2,872
rd
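The help page above states the closed-form AUC. A minimal sketch evaluating it at the default parameters from the usage line (a = 0.14, b = 0.19):

# Minimal sketch: AUC = a / sqrt(1 + b^2) at the documented defaults
a <- 0.14; b <- 0.19
AUC <- a / sqrt(1 + b^2)
AUC  # approximately 0.138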
\name{milk}
\alias{milk}
\docType{data}
\title{Monthly Milk Production}
\description{
Average monthly milk production per cow in the US, 01/1994 - 12/2005}
\usage{data(milk)}
\format{
  The format is:
 'ts' int [1:144, 1] 1343 1236 1401 1396 1457 1388 1389 1369 1318 1354 ...
 - attr(*, "dimnames")=List of 2
  ..$ : NULL
  ..$ : chr "milk"
 - attr(*, "tsp")= num [1:3] 1994 2006 12
}
\examples{
data(milk)
str(milk)
plot(milk)
}
\keyword{datasets}
/man/milk.Rd
no_license
cran/TSA
R
false
false
475
rd
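The format block above describes a monthly ts object. A minimal sketch (random numbers, not the real milk data) constructing an object with the same shape, which makes the tsp attribute in the help page easier to read:

# Minimal sketch: a 144-observation monthly ts starting January 1994
x <- ts(round(runif(144, 1200, 1500)), start = c(1994, 1), frequency = 12)
str(x)  # comparable structure: 'ts' with tsp attribute c(1994, 2005.917, 12)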
#' @title plotRatioDensity Function to plot density of ratios between two
#' treatments (using groupings from an annotation column) within an se
#' object.
#' @description This function plots the expression of the supplied se
#' object, using ratios between a pair of selected treatments.
#' @usage plotRatioDensity(se, groupings= NULL, treatment1=NULL,
#' treatment2=NULL, mode_mean=TRUE, LOG2=TRUE,...)
#' @param se An se object.
#' @param groupings A grouping (annotation); groupings="annotation.ZA"
#' @param treatment1 Symbol, treatment 1.
#' @param treatment2 Symbol, treatment 2.
#' @param mode_mean Boolean, Calculate RowMeans or RowMedians.
#' @param LOG2 Boolean, Calculate LOG2.
#' @param ... Passthrough arguments to boxplot (additional arguments affecting
#' the summary produced).
#' @details This function plots expression of the supplied se object
#' using ratios of treatment1/treatment2.
#' @return Returns an invisible data frame containing the x-values and
#' corresponding density for each applicable annotation column entry.
#' @examples
#' data(hmel.se)
#' plotRatioDensity(se, groupings='annotation.ZA', treatment1 = 'Male',
#' treatment2 = 'Female',lty=1,type="l")
#' @author AJ Vaestermark, JR Walters.
#' @references The "doseR" package, 2018 (in press).

plotRatioDensity <- function (se, groupings= NULL, treatment1=NULL,
                              treatment2=NULL, mode_mean=TRUE, LOG2=TRUE, ...) {

  MyGroups <- rowData(se)[[groupings]]

  if(is.null(groupings)) {
    stop ('No groupings, e.g. groupings="something"...')
    return (NULL)
  }

  if(length(assays(se)$rpkm) == 0) {
    stop ('No RPKM data saved in se object... cancelling...')
    return (NULL)
  }

  if(is.null(treatment1) | is.null(treatment2) ) {
    stop ('Indicate treatments, such as treatment1="A", treatment2="B"')
    return (NULL)
  }

  if ( is.factor(MyGroups) ) { MyGroups <- droplevels(MyGroups) }

  RM <- ( if(mode_mean) rowMeans(assays(se)$rpkm[,colData(se)$Treatment==treatment1])
          else matrixStats::rowMedians( assays(se)$rpkm[,colData(se)$Treatment==treatment1]) ) /
        ( if(mode_mean) rowMeans(assays(se)$rpkm[,colData(se)$Treatment==treatment2])
          else matrixStats::rowMedians( assays(se)$rpkm[,colData(se)$Treatment==treatment2]) )

  if (LOG2) { RM <- log2(RM) }

  RM[is.infinite(RM)] <- NA

  val <- split(RM, MyGroups)

  xrge <- range(unlist(val), na.rm = TRUE, finite=TRUE)

  myX <- density(na.omit(val[[1]]), from = xrge[1], to = xrge[2])$x

  myDens <- vapply(val, FUN = function(x) {
    density(na.omit(x), from = xrge[1], to = xrge[2])$y
  }, numeric(512) )

  cat("Current order of levels: ", paste(unique(MyGroups), sep="\t") )

  matplot(x = myX, y = myDens, ...)

  invisible(data.frame(myX, myDens))

} # plotRatioDensity
/R/plotRatioDensity.R
no_license
avastermark19/doseR_0.99.8
R
false
false
2,719
r
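Stripped of the SummarizedExperiment accessors, the core of the function above is a per-row ratio on the log2 scale, split by an annotation, with one density per group. A minimal sketch (simulated matrix, hypothetical column treatments and row annotation, not the doseR objects):

# Minimal sketch: ratio densities by group
set.seed(1)
expr  <- matrix(rexp(400, rate = 0.1), ncol = 4)          # toy expression values
treat <- c("Male", "Male", "Female", "Female")            # hypothetical column treatments
grp   <- sample(c("Z", "A"), nrow(expr), replace = TRUE)  # hypothetical row annotation
ratio <- log2(rowMeans(expr[, treat == "Male"]) / rowMeans(expr[, treat == "Female"]))
dens  <- lapply(split(ratio, grp), density)
matplot(sapply(dens, `[[`, "x"), sapply(dens, `[[`, "y"), type = "l")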
# A function that takes in a dataset and returns a list of info about it:
setwd("~/project-ianlindelI")

get_summary_info <- function(dataset) {
  ret <- list()
  ret$length <- length(dataset)
  ret$rows <- nrow(dataset)
  ret$col <- ncol(dataset)
  return(ret)
}
/project-ianlindelI/scripts/summary-info.R
permissive
getachew67/info201-5
R
false
false
277
r
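A hypothetical usage sketch with a built-in data frame (not part of the original script), showing what the returned list contains:

get_summary_info(mtcars)
# $length : 11 (length() of a data frame is its column count)
# $rows   : 32
# $col    : 11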
install.packages("readstata13")

rm(list=ls())
library(readstata13)
dat <- read.dta13("~/Downloads/rEA1.dta")

### 2018 Welcoming analysis
dat$wel = factor(dat$welcome,
                 levels = c("Very unwelcoming", "Unwelcoming", "Neither", "Welcoming", "Very welcoming"),
                 ordered = TRUE)

dat <- read.dta13("~/Downloads/birth1.dta")

dat$topp[dat$topp==1]  <- "Animal welfare"
dat$topp[dat$topp==2]  <- "Cause prioritisation"
dat$topp[dat$topp==3]  <- "Biosecurity"
dat$topp[dat$topp==4]  <- "Climate change"
dat$topp[dat$topp==5]  <- "Nuclear security"
dat$topp[dat$topp==6]  <- "AI Risk"
dat$topp[dat$topp==7]  <- "Mental health"
dat$topp[dat$topp==8]  <- "Global poverty"
dat$topp[dat$topp==9]  <- "Improving rationality"
dat$topp[dat$topp==10] <- "Meta charities"
dat$topp[dat$topp==11] <- "Other X-Risk"
dat$topp <- as.factor(dat$topp)

dat <- read.dta13("~/Downloads/rEA4.dta")
dat <- read.dta13("~/Downloads/rEA5.dta")

dat$agegroup1[dat$agegroup1==0] <- "18-22"
dat$agegroup1[dat$agegroup1==1] <- "23-25"
dat$agegroup1[dat$agegroup1==2] <- "26-29"
dat$agegroup1[dat$agegroup1==3] <- "30-34"
dat$agegroup1[dat$agegroup1==4] <- "35+"
dat$agegroup1 <- as.factor(dat$agegroup1)

dat <- read.dta13("~/Downloads/rEA6.dta")
dat$first_heard_ea <- as.factor(dat$first_heard_ea)

dat <- read.dta13("~/Downloads/rEA7.dta")
dat$wel = factor(dat$welcome,
                 levels = c("Very unwelcoming", "Unwelcoming", "Neither", "Welcoming", "Very welcoming"),
                 ordered = TRUE)
dat$first_heard_ea <- as.factor(dat$first_heard_ea)

require(likert)
attach(dat)

mylevels <- dat$wel

# Create a dummy data frame.  Note that "Item 1" has only four levels
items <- data.frame(dat$wel)
str(items)

groups <- dat$first_heard_ea

sapply(items, class) # Verify that all the columns are indeed factors
sapply(items, function(x) { length(levels(x)) } ) # The number of levels in each factor

for(i in seq_along(items)) {
  items[,i] <- factor(items[,i], levels=mylevels)
}

rm(list=ls())
library(readstata13)
dat <- read.dta13("~/Downloads/rEA1.dta")

dat$wel = factor(dat$welcome,
                 levels = c("Very unwelcoming", "Unwelcoming", "Neither", "Welcoming", "Very welcoming"),
                 ordered = TRUE)
dat$wel = factor(dat$welcome,
                 levels = c("Very unwelcoming", "Unwelcoming", "Neither", "Welcoming", "Very welcoming"),
                 ordered = TRUE)
dat$first_heard_ea <- as.factor(dat$first_heard_ea)

require(likert)
attach(dat)

mylevels <- dat$wel

# Create a dummy data frame.  Note that "Item 1" has only four levels
items <- data.frame(dat$wel)
str(items)

lgood <- likert(items)
lgood
summary(lgood)

title <- "EA Welcomeness"
plot(lgood, ylab="") + ggtitle(title)

dat$topprioritycause <- as.factor(dat$topprioritycause)
dat$topprioritycause = factor(dat$topprioritycause,
                              levels = c("Other","Unemployed","Direct Charity/Non-profit Work","Self-Employed",
                                         "Retired", "Research", "Earning to Give", "Homemaker"),
                              ordered = TRUE)

# Plot the bar chart
title <- "How Welcoming is EA, by Top Priority Cause"
ggplot(data=dat, aes(x=topprioritycause, y=meanwelcoming, fill=topprioritycause)) +
  geom_bar(colour="black", stat="identity") +
  xlab("Top Priority Cause") + ylab("Mean Welcomeness") +
  ggtitle(title) +
  guides(fill=FALSE) +
  theme_tufte() + ylim(3, 4.5)

### install.packages("ggthemes") # Install
library(ggthemes) # Load

require(likert)
lgr <- likert(items)
summary(lgr)

tp = as.data.frame(lgr$results)
tp = tp[,-2]
names(tp)[1] = "Item"
lgr2 = (likert(summary=tp))

title <- "EA Welcomingness by Top Priority Cause"
plot(lgr2) + ggtitle(title)

## plot(lgood) + ggtitle(title)

lgr <- likert(items, grouping=groups)
summary(lgr)

scale_height = knitr::opts_chunk$get('fig.height')*0.5
scale_width  = knitr::opts_chunk$get('fig.width')*1.25
knitr::opts_chunk$set(fig.height = scale_height, fig.width = scale_width)

theme_update(legend.text = element_text(size = rel(0.7)))

title <- "EA Welcomingness by Country"
plot(lgr, ordered=TRUE) + ggtitle(title)

plot(lgr, ordered=FALSE, group.order=names(first_heard_ea))
plot(lgr, ordered = TRUE)

desired.order <- c("Very unwelcoming", "Unwelcoming", "Neither", "Welcoming", "Very welcoming")

install.packages("Epi")
require(Epi)
require(likert)

for (i in seq_along(dat)) {
  dat[,i] = Relevel(dat[,i], mylevels) # desired.order must be specified beforehand
}

# Now it plots.
lgr <- likert(items, grouping=groups)
plot(lgr)
/likert_2018_welcomingness.R
no_license
NeiloDull/2019-EAS
R
false
false
4,438
r
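The script above builds likert objects from the survey data frames it loads. A minimal, self-contained sketch (simulated responses, not the EA survey data) of the same calling pattern: a data frame whose columns are factors sharing one ordered level set, passed to likert():

# Minimal sketch: the likert() call pattern
library(likert)
lv   <- c("Very unwelcoming", "Unwelcoming", "Neither", "Welcoming", "Very welcoming")
resp <- data.frame(welcome = factor(sample(lv, 200, replace = TRUE), levels = lv, ordered = TRUE))
lk   <- likert(resp)
summary(lk)
plot(lk)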
testlist <- list(n = c(-555819298L, -555819298L, -555819298L))
result <- do.call(gdalcubes:::libgdalcubes_set_threads, testlist)
str(result)
/gdalcubes/inst/testfiles/libgdalcubes_set_threads/libFuzzer_libgdalcubes_set_threads/libgdalcubes_set_threads_valgrind_files/1609874833-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
139
r
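These generated fuzz tests replay a saved argument list against an internal function via do.call(). A minimal sketch of the same pattern with a base-R function (not the gdalcubes internals):

testlist <- list(x = c(1, NA, 3), na.rm = TRUE)
do.call(mean, testlist)  # same as mean(x = c(1, NA, 3), na.rm = TRUE), i.e. 2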
# Load all required packages
source("Helper_Functions.R")
list.of.packages = c("rpart", "lubridate", "outliers", "rpart.plot", "xgboost", "caret",
                     "caretEnsemble", "randomForest", "e1071", "pROC", "tidyr", "klaR", "car",
                     "devtools", "yamldebugger", "mlbench", "Hmisc", "ggvis", "relaimpo",
                     "formatR", "data.table", "zoo", "ggplot2", "forecast", "reshape2", "pdp")
sapply(list.of.packages, load.packages)

# Loading the full dataset
FullSet = readRDS("FullSet")

# Generating an additional date variable
FullSet$NewDate = FullSet$Date

# Adding features with mean sales
FullSet$Store = as.factor(FullSet$Store)
FullSet$Date = as.Date(paste(FullSet$Date, sep = "-"), format = "%Y-%m-%d")
FullSet = separate(FullSet, Date, into = c("Year", "Month", "Day"), sep = "-")

features_avg = setNames(aggregate(FullSet$Sales, list(FullSet$Store), mean),
                        c("Store", "AvgSalesPerStore"))
features_avg$AvgVisitsPerStore = aggregate(FullSet$Customers, list(FullSet$Store), mean)[, 2]

features_dow = setNames(aggregate(FullSet$Sales, list(FullSet$Store, FullSet$DayOfWeek), mean),
                        c("Store", "DayOfWeek", "AvgSalesPerStorePerDayOfWeek"))
features_dow$AvgVisitsPerStorePerDayOfWeek = aggregate(FullSet$Customers,
                                                       list(FullSet$Store, FullSet$DayOfWeek), mean)[, 3]

features_year = setNames(aggregate(FullSet$Sales, list(FullSet$Store, FullSet$Year), mean),
                         c("Store", "Year", "AvgSalesPerStorePerYear"))
features_year$AvgVisitsPerStorePerYear = aggregate(FullSet$Customers,
                                                   list(FullSet$Store, FullSet$Year), mean)[, 3]

features_mon = setNames(aggregate(FullSet$Sales, list(FullSet$Store, FullSet$Year, FullSet$Month), mean),
                        c("Store", "Year", "Month", "AvgSalesPerStorePerMonth"))
features_mon$AvgVisitsPerStorePerMonth = aggregate(FullSet$Customers,
                                                   list(FullSet$Store, FullSet$Year, FullSet$Month), mean)[, 4]

# Merging new features with the dataset
FullSet = merge(FullSet, features_avg, by = "Store")
FullSet = merge(FullSet, features_dow, by = c("Store", "DayOfWeek"))
FullSet = merge(FullSet, features_year, by = c("Store", "Year"))
FullSet = merge(FullSet, features_mon, by = c("Store", "Year", "Month"))

FullSet$NewDate = as.Date(paste(FullSet$NewDate, sep = "-"), format = "%Y-%m-%d")
varlist = c("Year", "Month", "Day")
FullSet[, varlist] = lapply(FullSet[, varlist], factor)

# Plotting average sales per store by promotion
ggplot(FullSet, aes(x = Store, y = AvgSalesPerStore, color = Promo, shape = Promo)) +
  geom_point() +
  scale_color_brewer(palette = "Set2") +
  ggtitle("Average Sales Per Store by Promo") +
  labs(x = "Store", y = "Average Sales Per Store") +
  theme_classic()
ggsave("Average_Sales_Per_Store_by_Promo.png")

# Plotting average sales per store by competition distance
FullSet.sub = subset(FullSet, Sales != 0 & !is.na(CompetitionDistance), drop = TRUE)
SalesByDist = aggregate(FullSet.sub$AvgSalesPerStore, by = list(FullSet.sub$CompetitionDistance), mean)
colnames(SalesByDist) = c("CompetitionDistance", "AvgSalesPerStore")
ggplot(SalesByDist, aes(x = CompetitionDistance, y = AvgSalesPerStore)) +
  geom_point() +
  scale_color_brewer(palette = "Set2") +
  geom_smooth() +
  ggtitle("Average Sales Per Store by Competition Distance") +
  labs(x = "Competition Distance", y = "Average Sales Per Store") +
  theme_bw()
ggsave("Average_Sales_by_Competition_Distance.png")

# Plotting the log of average sales
ggplot(SalesByDist, aes(x = log(CompetitionDistance), y = log(AvgSalesPerStore))) +
  geom_point() +
  scale_color_brewer(palette = "Set2") +
  geom_smooth() +
  ggtitle("Log of Average Sales per Store by Log of Competition Distance") +
  labs(x = "Log (Competition Distance)", y = "Log (Average Sales Per Store)") +
  theme_bw()
ggsave("Log_of_Sales_by_Competition Distance.png")

# Plotting dynamics of sales per store per month
ggplot(FullSet, aes(x = as.Date(NewDate), y = AvgSalesPerStorePerMonth)) +
  geom_smooth(size = 2) +
  ggtitle("Average Sales Per Store Per Month over Time") +
  labs(x = "Date", y = "Average Sales Per Store Per Month") +
  theme_bw()
ggsave("Average_Sales_per_Store_Per_Month.png")

# Plotting dynamics of customers per store per month
ggplot(FullSet, aes(x = as.Date(NewDate), y = AvgVisitsPerStorePerMonth)) +
  geom_smooth(size = 2) +
  ggtitle("Average Customers Per Store Per Month over Time") +
  labs(x = "Date", y = "Average Customers Per Store Per Month") +
  theme_bw()
ggsave("Average_Customers_per_Store_Per_Month.png")

# Loading xgb model for further plotting
xgb = readRDS("xgb")

# Calculating partial dependence
xgb.partialPlots = list() # empty result list
imp.var.xgb = c("Open", "CompetitionDistance", "Store", "Promo", "CompetitionSinceDate", "Date",
                "Promo2SinceDate", "StoreType", "Assortment", "Promo2", "DayOfWeek", "StoreAssortmentMatch")
for (var in imp.var.xgb) {
  message("Now calculating for variable ", var)
  xgb.partialPlots[[var]] = do.call(partial, list(xgb, pred.var = var, type = "auto", plot = FALSE))
}

# Creating the partial dependence plots and saving as png
par(mfrow = c(1, 2))
for (var in names(xgb.partialPlots)) {
  png(paste("PDPSales", var, ".png", sep = "_"))
  plot(x = xgb.partialPlots[[var]][, 1], y = xgb.partialPlots[[var]][, 2], type = "l",
       xlab = var, ylab = "Sales", ylim = c(0, 8000),
       main = paste("Partial dependence of Sales on", var),
       bg = "transparent", lwd = 2)
  dev.off()
}
/Removed/SPL_Data_Exploration.R
no_license
maggytheirish/SPL-SS17
R
false
false
5,357
r
# Load all required packages source("Helper_Functions.R") list.of.packages = c("rpart", "lubridate", "outliers", "rpart.plot", "xgboost", "caret", "caretEnsemble", "randomForest", "e1071", "pROC", "tidyr", "klaR", "car", "devtools", "yamldebugger", "mlbench", "Hmisc", "ggvis", "relaimpo", "formatR", "data.table", "zoo", "ggplot2", "forecast", "reshape2", "pdp") sapply(list.of.packages, load.packages) # Loading the full dataset FullSet = readRDS("FullSet") # Generating an additional date variable FullSet$NewDate = FullSet$Date # Adding features with mean sales FullSet$Store = as.factor(FullSet$Store) FullSet$Date = as.Date(paste(FullSet$Date, sep = "-"), format = "%Y-%m-%d") FullSet = separate(FullSet, Date, into = c("Year", "Month", "Day"), sep = "-") features_avg = setNames(aggregate(FullSet$Sales, list(FullSet$Store), mean), c("Store", "AvgSalesPerStore")) features_avg$AvgVisitsPerStore = aggregate(FullSet$Customers, list(FullSet$Store), mean)[, 2] features_dow = setNames(aggregate(FullSet$Sales, list(FullSet$Store, FullSet$DayOfWeek), mean), c("Store", "DayOfWeek", "AvgSalesPerStorePerDayOfWeek")) features_dow$AvgVisitsPerStorePerDayOfWeek = aggregate(FullSet$Customers, list(FullSet$Store, FullSet$DayOfWeek), mean)[, 3] features_year = setNames(aggregate(FullSet$Sales, list(FullSet$Store, FullSet$Year), mean), c("Store", "Year", "AvgSalesPerStorePerYear")) features_year$AvgVisitsPerStorePerYear = aggregate(FullSet$Customers, list(FullSet$Store, FullSet$Year), mean)[, 3] features_mon = setNames(aggregate(FullSet$Sales, list(FullSet$Store, FullSet$Year, FullSet$Month), mean), c("Store", "Year", "Month", "AvgSalesPerStorePerMonth")) features_mon$AvgVisitsPerStorePerMonth = aggregate(FullSet$Customers, list(FullSet$Store, FullSet$Year, FullSet$Month), mean)[, 4] # Merging new features with the dataset FullSet = merge(FullSet, features_avg, by = "Store") FullSet = merge(FullSet, features_dow, by = c("Store", "DayOfWeek")) FullSet = merge(FullSet, features_year, by = c("Store", "Year")) FullSet = merge(FullSet, features_mon, by = c("Store", "Year", "Month")) FullSet$NewDate = as.Date(paste(FullSet$NewDate, sep = "-"), format = "%Y-%m-%d") varlist = c("Year", "Month", "Day") FullSet[, varlist] = lapply(FullSet[, varlist], factor) # Plotting average sales per store by promotion ggplot(FullSet, aes(x = Store, y = AvgSalesPerStore, color = Promo, shape = Promo)) + geom_point() + scale_color_brewer(palette = "Set2") + ggtitle("Average Sales Per Store by Promo") + labs(x = "Store", y = "Average Sales Per Store") + theme_classic() ggsave("Average_Sales_Per_Store_by_Promo.png") # Plotting average sales per store by competition distance FullSet.sub = subset(FullSet, Sales != 0 & !is.na(CompetitionDistance), drop = TRUE) SalesByDist = aggregate(FullSet.sub$AvgSalesPerStore, by = list(FullSet.sub$CompetitionDistance), mean) colnames(SalesByDist) = c("CompetitionDistance", "AvgSalesPerStore") ggplot(SalesByDist, aes(x = CompetitionDistance, y = AvgSalesPerStore)) + geom_point() + scale_color_brewer(palette = "Set2") + geom_smooth() + ggtitle("Average Sales Per Store by Competition Distance") + labs(x = "Competition Distance", y = "Average Sales Per Store") + theme_bw() ggsave("Average_Sales_by_Competition_Distance.png") # Plotting the log of average sales ggplot(SalesByDist, aes(x = log(CompetitionDistance), y = log(AvgSalesPerStore))) + geom_point() + scale_color_brewer(palette = "Set2") + geom_smooth() + ggtitle("Log of Average Sales per Store by Log of Competition Distance") + labs(x = "Log 
(Competition Distance)", y = "Log (Average Sales Per Store)") + theme_bw() ggsave("Log_of_Sales_by_Competition Distance.png") # Plotting dynamics of sales per store per month ggplot(FullSet, aes(x = as.Date(NewDate), y = AvgSalesPerStorePerMonth)) + geom_smooth(size = 2) + ggtitle("Average Sales Per Store Per Month over Time") + labs(x = "Date", y = "Average Sales Per Store Per Month") + theme_bw() ggsave("Average_Sales_per_Store_Per_Month.png") # Plotting dynamics of customers per store per month ggplot(FullSet, aes(x = as.Date(NewDate), y = AvgVisitsPerStorePerMonth)) + geom_smooth(size = 2) + ggtitle("Average Customers Per Store Per Month over Time") + labs(x = "Date", y = "Average Customers Per Store Per Month") + theme_bw() ggsave("Average_Customers_per_Store_Per_Month.png") # Loading xgb model for further plotting xgb = readRDS("xgb") # Calculating partial dependence xgb.partialPlots = list() # empty result list imp.var.xgb = c("Open", "CompetitionDistance", "Store", "Promo", "CompetitionSinceDate", "Date", "Promo2SinceDate", "StoreType", "Assortment", "Promo2", "DayOfWeek", "StoreAssortmentMatch") for (var in imp.var.xgb) { message("Now calculating for variable ", var) xgb.partialPlots[[var]] = do.call(partial, list(xgb, pred.var = var, type = "auto", plot = FALSE)) } # Creating the partial dependence plots and saving as pdf par(mfrow = c(1, 2)) for (var in names(xgb.partialPlots)) { png(paste("PDPSales", var, ".png", sep = "_")) plot(x = xgb.partialPlots[[var]][, 1], y = xgb.partialPlots[[var]][, 2], type = "l", xlab = var, ylab = "Sales", ylim = c(0, 8000), main = paste("Partial dependence of Sales on", var), bg = "transparent", lwd = 2) dev.off() }
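Editorial note on the partial-dependence loop in the script above: pdp::partial() estimates, for one chosen feature, the model's average prediction with that feature held fixed and all remaining features left at their observed values. In generic notation (these symbols are not taken from the script), the quantity plotted for a feature x_j is

\[ \bar{f}_j(x_j) = \frac{1}{n} \sum_{i=1}^{n} \hat{f}\bigl(x_j, \mathbf{x}_{i,C}\bigr), \]

where \(\hat{f}\) is the fitted xgboost model, \(n\) the number of training rows, and \(\mathbf{x}_{i,C}\) the observed values of all other features for row \(i\).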
# Record start time time_start <- Sys.time() # Remove not needed variables rm(list=c('MC_Z','MC_h','MC_stock_log')) # Precalculate some numbers stock_n <- length(stockList) stock_days <- nrow(stock_log) pf_days <- length(pf_ret_nday) # Get mean returns of the stocks stock_log_mean <- apply(stock_log,2,mean) # Deduct the mean from every stock, for standardized news process in GARCH process stock_log_mean0 <- sweep(stock_log,2,stock_log_mean) #================================================================================================= ## GARCH model: Fit GARCH model on all marginal returns # Store all relevant parameters in variables. Preallocate all variables first: # GARCH model parameters: GARCH_mu <- vector(mode = "list", length = stock_n) GARCH_omega <- vector(mode = "list", length = stock_n) GARCH_alpha <- vector(mode = "list", length = stock_n) GARCH_beta <- vector(mode = "list", length = stock_n) GARCH_gamma <- vector(mode = "list", length = stock_n) # GARCH residuals, condtional volatility and standardized residuals GARCH_residuals <- matrix(nrow = stock_days,ncol = stock_n) GARCH_h.t <- matrix(nrow = stock_days,ncol = stock_n) GARCH_sigma.t <- matrix(nrow = stock_days,ncol = stock_n) GARCH_Z <- matrix(nrow = stock_days,ncol = stock_n) # For GARCH model if(GARCH_model == 'GARCH'){ for(s in 1:stock_n){ # Estimate GARCH for every individual stocks returns, adjusted for mean 0 # Use fGARCH package. Normal GARCH model is a special case of fGARCH model. GARCH <- garchFit(formula = ~ garch(1, 1), data=stock_log_mean0[,s], cond.dist=GARCHcondDist) # Store estiated parameters in lists GARCH_mu[s] <- coef(GARCH)[1] GARCH_omega[s] <- coef(GARCH)[2] GARCH_alpha[s] <- coef(GARCH)[3] GARCH_beta[s] <- coef(GARCH)[4] GARCH_residuals[,s] <- GARCH@residuals GARCH_h.t[,s] <- GARCH@h.t GARCH_sigma.t[,s] <- GARCH@sigma.t } # Standardized residuals are residuals divided by conditional volatility GARCH_Z <- GARCH_residuals/GARCH_sigma.t } # For TGARCH model if(GARCH_model == 'TGARCH'){ for(s in 1:stock_n){ # Estimate TGARCH for every individual stocks returns, adjusted for mean 0 # Use fGARCH package. GJR-GARCH model is a special case of fGARCH model with delta = 2 and leverage GARCH <- garchFit(formula = ~ garch(1, 1), delta = 2, leverage = TRUE, data=stock_log_mean0[,s], cond.dist=GARCHcondDist) # Store estiated parameters in lists GARCH_mu[s] <- coef(GARCH)[1] GARCH_omega[s] <- coef(GARCH)[2] GARCH_alpha[s] <- coef(GARCH)[3] GARCH_gamma[s] <- coef(GARCH)[4] GARCH_beta[s] <- coef(GARCH)[5] GARCH_residuals[,s] <- GARCH@residuals GARCH_h.t[,s] <- GARCH@h.t GARCH_sigma.t[,s] <- GARCH@sigma.t } # Standardized residuals are residuals divided by conditional volatility GARCH_Z <- GARCH_residuals/GARCH_sigma.t } # Delete not needed variables to free working memory rm(list='GARCH') #================================================================================================= ## Copula # This section creates a copula model in order to get the multivariate distribution of the marginal # returns. The copula package is used. 
# Define the used copula copula_obj <- claytonCopula(dim=length(stockList)) # Matrix of marginal returns as pseudo observations [0,1] copula_pobs <- pobs(GARCH_Z) # Fit the copula copula_fit <- fitCopula(copula_obj,copula_pobs,method='ml') # Get the fitted copula parameter copula_theta <- coef(copula_fit) # Fit a t distribution on standardised redisuals of marginal returns and store parameters in vectors copula_Z_mu <- vector(mode = "numeric", length = stock_n) copula_Z_s <- vector(mode = "numeric", length = stock_n) copula_Z_df <- vector(mode = "numeric", length = stock_n) for(s in 1:stock_n){ copula_Z_fit <- fitdistr(GARCH_Z[,s],"t") copula_Z_mu[s] <- copula_Z_fit$estimate[["m"]] copula_Z_s[s] <- copula_Z_fit$estimate[["s"]] copula_Z_df[s] <- copula_Z_fit$estimate[["df"]] } # Delete not needed variables rm(list='copula_Z_fit') ## Create lists with parameters for use of copula function # T distribution for all standardized residuals copula_margins_list <- matrix(data='t',nrow=1,ncol=stock_n)[1,] # Make a list with the degrees of freedom of the standardized residuals copula_paramMargins_list <- vector(mode="list", length = stock_n) copula_paramMargins_list_names <- matrix(data='df',nrow=1,ncol=stock_n)[1,] names(copula_paramMargins_list) <- copula_paramMargins_list_names for(s in 1:stock_n){ copula_paramMargins_list[s] <- copula_Z_df[s] } # Create a copula object with all correct parameters, that can later be used to generate an array with returns copula_dist <- mvdc(copula=claytonCopula(copula_theta, dim = length(stockList)), margins=copula_margins_list, paramMargins = copula_paramMargins_list) #================================================================================================= ## GARCH functions to simulate conditional variance h(t) as function of h(t-1) and z(t-1) and the estimated parameters # GARCH GARCH_ht_function <- function(omega,alpha,beta,hPrevious,zPrevious){ epsilon <- sqrt(hPrevious)*zPrevious h <- omega + alpha*epsilon^2 + beta*hPrevious return(h) } # GJR TGARCH as special version of fGARCH. Formula from introduction to the rugarch package, p.9 TGARCH_ht_function <- function(omega,alpha,beta,gamma,hPrevious,zPrevious){ h <- omega + alpha*hPrevious*(abs(zPrevious)-gamma*zPrevious)^2 + beta*hPrevious return(h) } #================================================================================================= ## Monte Carlo simulation # For high calculating performance 4 dimensional arrays are used, to reduce the number of loops # Dimensions: 1: the different stocks, 2: the total days of the model, 3: days of VaR, 4: Monte Carlo simulations for each day #============================================== ## Standardized residuals # Draw all random residuals at once with correct dependence between stocks. Use the copula object generated before. # In array the dimensions are filled consecutively with data. In the MC_Z array the first dimension are # the different stocks, so that they can be filled with the correct dependence. The residuals in other 3 dimensions are i.i.d. 
# For memory limitation reasons this part of the program is split up depending on working memory of the pc VaR_part_function <- function(h.t){ VaR_part_trading_days <- nrow(h.t) # Normal that this line takes a lot of time MC_Z <- array(as.vector(t(rMvdc(copula_dist, n=MC_n*VaR_days*VaR_part_trading_days))), dim=c(stock_n,VaR_part_trading_days,VaR_days,MC_n)) # Scale back all standardized residuals for(s in 1:stock_n){ MC_Z[s,,,] <- MC_Z[s,,,]*copula_Z_s[s]+copula_Z_mu[s] } #============================================== ## Conditional variance # Create array to store h(t) MC_h <- array(dim=c(stock_n,VaR_part_trading_days,VaR_days,MC_n)) # The variance of the first simulated day is taken from GARCH model and is the same for all simulated days for(s in 1:stock_n){ MC_h[s,,1,] <- matrix(h.t[,s],nrow=VaR_part_trading_days,ncol=MC_n) } # Apply GARCH or TGARCH function. Function works over the 2 dimensions all days of the model # and all simulated days at the same time if(GARCH_model == 'GARCH'){ for(s in 1:stock_n){ for(i in 2:VaR_days){ MC_h[s,,i,] <- GARCH_ht_function(GARCH_omega[[s]],GARCH_alpha[[s]],GARCH_beta[[s]],MC_h[s,,i-1,],MC_Z[s,,i-1,]) } } } if(GARCH_model == 'TGARCH'){ for(s in 1:stock_n){ for(i in 2:VaR_days){ MC_h[s,,i,] <- TGARCH_ht_function(GARCH_omega[[s]],GARCH_alpha[[s]],GARCH_beta[[s]],GARCH_gamma[[s]],MC_h[s,,i-1,],MC_Z[s,,i-1,]) } } } #================================================================================================= ## Simulate returns and get VaR # Multiply sampled standardized returns with conditional volatility. Works over all 4 dimensions MC_stock_log <- MC_Z*sqrt(MC_h) # Add back the mean for(s in 1:stock_n){ MC_stock_log[s,,,] <- MC_stock_log[s,,,] + stock_log_mean[s] } # Get cumulative returns of the n days. 
Apply function on the other 3 dimensions MC_log <- apply(MC_stock_log,c(1,2,4),sum) # Change to simple returns because they aggregate over assets MC_ret <- exp(MC_log)-1 # Get portfolio returns as mean of the returns of the different stocks MC_ret <- apply(MC_ret,c(2,3),mean) # Change back to log returns MC_log <- log(MC_ret+1) # Apply quantile function to get VaR VaR_part <- apply(MC_log,1,quantile,VaR_alpha) # Return several variables ret <- list("VaR" = VaR_part,"MC_Z" = MC_Z,"MC_stock_log" = MC_stock_log,'MC_log' = MC_log) return(ret) # Remove to free working memory rm(list=c('ret','MC_Z','MC_h','MC_stock_log','MC_log')) } ## In order to use the working memory efficiently, the simulation is split up into several parts # This tries to estimate the optimal number of parts VaR_part_n <- round(((MC_n*VaR_days*stock_days*stock_n*4)/(memory_mb*50000)),0) # Loop needs at least two parts to run if(VaR_part_n < 2){ VaR_part_n <- 2 } # Determine the start and the end day of the parts and save them in a matrix VaR_part_start_end <- matrix(nrow = VaR_part_n,ncol = 2) VaR_part_length <- round(nrow(GARCH_h.t)/VaR_part_n,0) VaR_part_start_end[1,1] <- 1 for(i in 1:(VaR_part_n-1)){ VaR_part_start_end[i,2] <- VaR_part_start_end[i,1] + VaR_part_length VaR_part_start_end[(i+1),1] <- VaR_part_start_end[i,2] + 1 } # Variables to store the resulting VaR VaR_part_start_end[VaR_part_n,2] <- nrow(GARCH_h.t) VaR <- vector(mode='numeric',length = nrow(GARCH_h.t)) # Store for plots MC_stock_log <- matrix(nrow=nrow(GARCH_h.t),ncol=stock_n) MC_log <- matrix(nrow=nrow(GARCH_h.t),ncol=MC_n) # For every part calculate the VaR for(i in 1:VaR_part_n){ # To see how far the sim is print('Model 2:') print(i/VaR_part_n) # Use the function to get VaR of this part VaR_part_ret <- VaR_part_function(GARCH_h.t[VaR_part_start_end[i,1]:VaR_part_start_end[i,2],]) # Get the data from the returned list VaR_part <- VaR_part_ret$VaR VaR[VaR_part_start_end[i,1]:VaR_part_start_end[i,2]] <- VaR_part # Store for plots MC_Z <- VaR_part_ret$MC_Z MC_stock_log[VaR_part_start_end[i,1]:VaR_part_start_end[i,2],] <- t(VaR_part_ret$MC_stock_log[,,1,1]) MC_log[VaR_part_start_end[i,1]:VaR_part_start_end[i,2],] <- VaR_part_ret$MC_log } #================================================================================================= ## Check model # Exceedance ratio, code as shown in class hitSeq <- pf_ret_nday < VaR[1:length(pf_ret_nday)] numberOfHits <- sum(hitSeq) exRatio <- numberOfHits/length(pf_ret_nday) ## Kupiec test # Higher precision is needed, otherwise numerator and denumerator are treated as 0 N <- mpfr(length(pf_ret_nday),precBits= 128) exRatio <- mpfr(exRatio,precBits = 128) numberOfHits <- mpfr(numberOfHits,precBits = 128) VaR_alpha <- mpfr(VaR_alpha,precBits = 128) num <- (exRatio^numberOfHits)*(1-exRatio)^(N-numberOfHits) den <- (VaR_alpha^numberOfHits )*(1-VaR_alpha)^(N-numberOfHits) K <- as.numeric(2*log(num/den)) VaR_alpha <- as.numeric(VaR_alpha) exRatio <- as.numeric(exRatio) p <- 0.99 if(K < qchisq(p,1)){ print("VaR model is accurate at 99% level") }else{ print("VaR model is not accurate at 99% level") } #================================================================================================= ## Plots ## Plots related to GARCH. 
Made for one stock of the portfolio plot_stock_n <- 1 # Stock returns plot_name <- paste('plots/',stockList[plot_stock_n],'_1_day_log_returns.pdf',sep='') plot_xlab <- 'Year' plot_ylab <- '1 day log returns' plot_main <- paste(stockList[plot_stock_n],'returns') pdf(plot_name) plot(index,stock_log[,plot_stock_n][1:length(index)],type='l',main = plot_main,xlab = plot_xlab,ylab=plot_ylab) dev.off() # Conditional volatility plot_name <- paste('plots/',stockList[plot_stock_n],'_volatility.pdf',sep='') plot_xlab <- 'Year' plot_ylab <- 'Std' plot_main <- paste(stockList[plot_stock_n],'annual stdev.') pdf(plot_name) plot(index,sqrt(252)*GARCH_sigma.t[,plot_stock_n][1:length(index)],type='l',main = plot_main,xlab = plot_xlab,ylab=plot_ylab) plot_stock_uncond_vol <- vector(mode='numeric',length = length(index)) # Unconditional variance as mean of conditional variance plot_stock_uncond_vol[] <- mean(sqrt(252)*GARCH_sigma.t[,plot_stock_n][1:length(index)]) lines(index,plot_stock_uncond_vol,col='green',lwd = 2) legend('topright',c(expression(paste(sqrt('h'['t']),': GARCH cond. stdev.')), expression(paste(sigma,': uncond. stdev.'))),col=c('black', 'green'), lwd=2) dev.off() # Standardized residuals plot_name <- paste('plots/',stockList[plot_stock_n],'_standardized_residuals.pdf',sep='') plot_xlab <- 'Year' plot_ylab <- 'Std' plot_main <- paste(stockList[plot_stock_n],'standardized residuals') pdf(plot_name) plot(index,GARCH_Z[,plot_stock_n][1:length(index)],type='l',main = plot_main,xlab = plot_xlab,ylab=plot_ylab) dev.off() #============================================== ## Plots related to copula # Fitting of t dist on standardized residuals plot_name <- paste('plots/',stockList[plot_stock_n],'_hist_standardized_residuals.pdf',sep='') plot_xlab <- 'Std' plot_ylab <- 'Density' plot_main <- paste(stockList[plot_stock_n],'histogram standardized residuals') hist(sample(MC_Z[plot_stock_n,,,],10000),breaks=80,freq=F,col='blue',xlim=c(-6,6),ylim=c(0,0.6),main=plot_main,xlab=plot_xlab,ylab=plot_ylab) pdf(plot_name) hist(sample(MC_Z[plot_stock_n,,,],10000),breaks=80,freq=F,col='blue',xlim=c(-6,6),ylim=c(0,0.6),main=plot_main,xlab=plot_xlab,ylab=plot_ylab) lines(seq(-6,6,0.01),dt((seq(-6,6,0.01)-copula_Z_mu[plot_stock_n])/copula_Z_s[plot_stock_n],copula_Z_df[plot_stock_n])/copula_Z_s[plot_stock_n],col='red',lwd=2) legend('topright',c('10000 standardized \nresiduals','Fitted t-distribution'),col=c('blue','red'), lwd=2) dev.off() # Plot dependence between two stocks returns, observed and simulated with copula plot_stock_n <- 1 plot_stock_n2 <- 2 plot_name <- paste('plots/',stockList[plot_stock_n],'-',stockList[plot_stock_n2],'_observed_vs_simulated_returns.pdf',sep='') plot_main <- 'Observed vs. 
simulated returns' plot_xlab <- paste(stockList[plot_stock_n],'log returns') plot_ylab <- paste(stockList[plot_stock_n2],'log returns') plot_stock_dependence <- matrix(nrow = nrow(GARCH_h.t),ncol=2) plot_stock_dependence[,plot_stock_n] <- MC_stock_log[,plot_stock_n] plot_stock_dependence[,plot_stock_n2] <- MC_stock_log[,plot_stock_n2] #plot_stock_dependence[,plot_stock_n] <- as.vector(MC_stock_log[plot_stock_n,1,,]) #plot_stock_dependence[,plot_stock_n2] <- as.vector(MC_stock_log[plot_stock_n2,1,,]) #plot_stock_dependence <- plot_stock_dependence[1:nrow(stock_log),] plot_xmin <- min(min(stock_log[,plot_stock_n]),min(plot_stock_dependence[,plot_stock_n])) plot_xmax <- max(max(stock_log[,plot_stock_n]),max(plot_stock_dependence[,plot_stock_n])) plot_ymin <- min(min(stock_log[,plot_stock_n2]),min(plot_stock_dependence[,plot_stock_n2])) plot_ymax <- max(max(stock_log[,plot_stock_n2]),max(plot_stock_dependence[,plot_stock_n2])) pdf(plot_name) plot(stock_log[,plot_stock_n],stock_log[,plot_stock_n2],xlim=c(plot_xmin,plot_xmax),ylim=c(plot_ymin,plot_ymax),xlab=plot_xlab,ylab=plot_ylab, main=plot_main,pch=16,cex=0.5) points(plot_stock_dependence[,plot_stock_n],plot_stock_dependence[,plot_stock_n2],col='red',pch=16,cex=0.5) legend('topleft',c('Observed','Simulated'),col=c('black','red'),pch=16,cex=1) dev.off() # Check correct sampling with dependence between modeled standardised residuals plot_name <- paste('plots/pf',toString(pf_n),'_copula_simulated_returns_spearmans_rho.pdf',sep='') plot_main <- expression(paste("Spearman's ",rho, " simulated returns")) MC_residuals_plot <- t(MC_stock_log) pdf(plot_name) pairs.panels(t(MC_Z[,1,1,]),method='spearman',main=plot_main) dev.off() # VaR and portfolio returns plot_name <- paste('plots/VaR_model2_pf',toString(pf_n),'_',toString(VaR_days),'day_',toString(VaR_alpha),'VaR_',MC_n,'simulations_',strftime(Sys.time(),format = "%Y-%m-%d--%H-%M-%S"),'.pdf',sep='') plot_main <- paste('Portfolio ',toString(pf_n),' - ',toString(VaR_days),' day ','- ',toString(VaR_alpha*100),'% VaR',sep='') plot_ylab <- paste(toString(VaR_days),'day log returns') plot_xlab <- 'Year' index = index[1:length(pf_log_nday)] pdf(plot_name) plot(index, pf_log_nday, main = plot_main,xlab = plot_xlab,ylab = plot_ylab) lines(index, VaR[1:length(pf_log_nday)], col="red" ) points(index[hitSeq], pf_log_nday[hitSeq], pch="+", col="green") legend('topright',c('VaR Model 2','Exceedances'),col=c('red','green'), pch=c('-','+')) dev.off() #================================================================================================= ## Calculate some unconditional summary statistics for simulated portfolio returns # For simulated n day portfolio returns MC_log_mean <- mean(MC_log) MC_log_std <- sqrt(var(as.vector(MC_log))) MC_log_skewness <- skewness(as.vector(MC_log)) MC_log_kurtosis <- kurtosis(as.vector(MC_log),method = 'moment') hist(as.vector(MC_log)) #================================================================================================= ## Output file time_end <- Sys.time() time_model <- time_end- time_start writeFile(2) results <- fillData(2)
/model2.R
no_license
HugoCasa/daf-project
R
false
false
17,476
r
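Two pieces of model2.R above, written out as formulas for clarity, in the script's own notation: \(h_t\) is the conditional variance, \(z_t\) the standardized residual, \(x\) the number of VaR exceedances (numberOfHits), \(N\) the number of n-day portfolio returns, and \(\alpha\) the VaR level (VaR_alpha); \(\omega, \alpha_1, \gamma, \beta\) are the fitted GARCH coefficients. The recursion implemented in TGARCH_ht_function is the GJR-type update

\[ h_t = \omega + \alpha_1\, h_{t-1}\bigl(|z_{t-1}| - \gamma z_{t-1}\bigr)^2 + \beta\, h_{t-1}, \]

and the Kupiec unconditional-coverage statistic computed as K <- as.numeric(2*log(num/den)) is

\[ K = 2 \ln \frac{\hat{\pi}^{\,x}\,(1-\hat{\pi})^{\,N-x}}{\alpha^{\,x}\,(1-\alpha)^{\,N-x}}, \qquad \hat{\pi} = \frac{x}{N}, \]

which the script compares against the \(\chi^2_1\) quantile qchisq(p, 1).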
source("tmsRunner.R")

targetGene <- "CTBP2"
chromosome <- "chr10"
start.generous <- 124715246
end.generous <- 125430101

viz <- FALSE
if(viz){
   # determine start.focused, end.focused
   igv <- start.igv(targetGene, "hg38")
   showGenomicRegion(igv, sprintf("%s:%d-%d", chromosome, start.generous, end.generous))
   ghdb <- GeneHancerDB()
   tbl.gh <- retrieveEnhancersFromDatabase(ghdb, targetGene, tissues="all")  # "Common myeloid progenitor CD34+")
   track <- DataFrameQuantitativeTrack("gh", tbl.gh[, c("chrom", "start", "end", "combinedscore")],
                                       autoscale=TRUE, color="brown")
   displayTrack(igv, track)
   tbl.atac.merged <- get(load("~/github/TrenaProjectErythropoiesis/inst/extdata/genomicRegions/tbl.atacMerged.RData"))
   loc <- getGenomicRegion(igv)
   tbl.atac.sub <- subset(tbl.atac.merged, chrom==chromosome & start >= loc$start & end <= loc$end)
   dim(tbl.atac.sub)
   track <- DataFrameAnnotationTrack("atac", tbl.atac.sub, color="red")
   displayTrack(igv, track)
   }

# first model, just atac in gh promoter: 12.5kb
start.focused <- 125145702
end.focused <- 125163569

# second model, many extra atac regions added: 71kb
start.focused <- 124979054
end.focused <- 125211343

printf("width: %5.2f", (1 + end.focused - start.focused)/1000)

mtx.rna <- getExpressionMatrix(trenaProject, "brandLabDifferentiationTimeCourse-27171x28")
#mtx <- getExpressionMatrix(trenaProject, "brandLabDifferentiationTimeCourse-27171x28-namesCorrected")

x <- runTMS(targetGene, chromosome, start.focused, end.focused, mtx.rna)
lm.tables <- x$get.lm.tables()
lm.rsquareds <- x$get.lm.Rsquareds()

filename <- sprintf("%s-model-%s:%d-%d.RData", targetGene, chromosome, start.focused, end.focused)
save(x, file=filename)

lm.tables <- x$get.lm.tables()
lapply(names(lm.tables), function(name) {
   printf("-------- %s", name)
   print(subset(lm.tables[[name]]))  # , p.value < 0.25))
   })

print(x$get.lm.Rsquareds())
/tools/runner/v1/ctbp2.R
permissive
PriceLab/TrenaMultiScore
R
false
false
1,963
r
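A small follow-up sketch, not part of the original runner script above: one way to pull the strongest candidate regulators out of lm.tables. The column name "p.value" is an assumption inferred from the commented-out subset(..., p.value < 0.25) filter in the script, not something confirmed by the TrenaMultiScore API.

# Hypothetical helper: show the five smallest-p-value rows of each lm table.
# Assumes each element of lm.tables is a data.frame with a "p.value" column.
top.hits <- lapply(lm.tables, function(tbl) {
   if(!("p.value" %in% colnames(tbl))) return(tbl)   # skip tables without that column
   head(tbl[order(tbl$p.value), ], 5)
   })
print(top.hits)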
library(pinnacle.API)


### Name: showOddsDF
### Title: showOddsDF - Takes a GetOdds JSON response and combines with
###   Fixtures and Inrunning
### Aliases: showOddsDF

### ** Examples

## No test: 
SetCredentials("TESTAPI","APITEST")
AcceptTermsAndConditions(accepted=TRUE)
# Run without arguments, it will prompt you for the sport
showOddsDF()
## End(No test)
/data/genthat_extracted_code/pinnacle.API/examples/showOddsDF.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
367
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proceduralnames-package.R
\docType{package}
\name{proceduralnames-package}
\alias{proceduralnames}
\alias{proceduralnames-package}
\title{proceduralnames: Several Methods for Procedural Name Generation}
\description{
\if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}}

A small, dependency-free way to generate random names. Methods provided include the adjective-surname approach of Docker containers ('\url{https://github.com/moby/moby/blob/master/pkg/namesgenerator/names-generator.go}'), and combinations of common English or Spanish words.
}
\seealso{
Useful links:
\itemize{
  \item \url{https://mikemahoney218.github.io/proceduralnames/}
  \item \url{https://github.com/mikemahoney218/proceduralnames}
  \item Report bugs at \url{https://github.com/mikemahoney218/proceduralnames/issues}
}
}
\author{
\strong{Maintainer}: Michael Mahoney \email{mike.mahoney.218@gmail.com} (\href{https://orcid.org/0000-0003-2402-304X}{ORCID})
}
\keyword{internal}
/man/proceduralnames-package.Rd
permissive
mikemahoney218/proceduralnames
R
false
true
1,066
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotMA.R
\name{plotAllelicCounts}
\alias{plotAllelicCounts}
\title{plotAllelicCounts}
\usage{
plotAllelicCounts(
  data,
  title = NULL,
  padjCutoff = 0.05,
  allelicRatioCutoff = 0.8,
  rare_column = NULL
)
}
\arguments{
\item{data}{A data.frame containing the counts or results table from the \code{DESeq4MAE} function}

\item{title}{The plot's title}

\item{padjCutoff}{The significance level}

\item{allelicRatioCutoff}{The minimum allelic ratio ALT/(ALT+REF) for a variant to be considered significant}

\item{rare_column}{The name of the column that indicates whether a variant is rare. Default is \code{NULL}, which means rarity will not be plotted.}
}
\value{
A ggplot object containing the allelic counts plot.
}
\description{
Creates an allelic counts plot, i.e., counts of the alternative allele vs counts of the reference allele on the log10 scale. If significance and minor allele frequency columns are present, dots will be highlighted accordingly.
}
\examples{
library(data.table)
file <- system.file("extdata", "allelic_counts_HG00187.csv", package = "tMAE", mustWork = TRUE)
maeCounts <- fread(file)
res <- DESeq4MAE(maeCounts)
plotAllelicCounts(res)
}
\author{
Vicente Yepez
}
/man/plotAllelicCounts.Rd
permissive
KalinNonchev/tMAE
R
false
true
1,209
rd
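Illustrative sketch related to the help page above: the allelicRatioCutoff argument is defined in terms of ALT/(ALT+REF), so a caller could compute that ratio directly from a counts table before plotting. The column names refCount and altCount are assumptions made for the example, not names documented on this page.

# Hypothetical columns refCount / altCount; flags sites that look mono-allelic
# in either direction relative to the cutoff.
flag_mae <- function(counts, ratioCutoff = 0.8) {
  ratio <- counts$altCount / (counts$altCount + counts$refCount)  # ALT / (ALT + REF)
  counts$allelicRatio <- ratio
  counts$candidateMAE <- ratio >= ratioCutoff | ratio <= (1 - ratioCutoff)
  counts
}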
## Load required packages
library(data.table)
library(lubridate)
library(ggplot2)

## Set working directory and load the data
setwd("D:/SVN - BIDs/branches/sandpit/flintan/Data Science Coursera/Course Project 1 - DataViz")
EPC_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                       colClasses = c("character", "character", rep("numeric", 7)), na.strings = "?")

# Subset data to use observations from 2007-02-01 and 2007-02-02
EPC_data$Date = as.Date(EPC_data$Date, format = "%d/%m/%Y")
EPC_data_subset <- EPC_data[EPC_data$Date >= "2007-02-01" & EPC_data$Date <= "2007-02-02",]
EPC_data_subset$Date.Time <- ymd_hms(paste0(EPC_data_subset$Date, " ", EPC_data_subset$Time))
EPC_data_subset$Day <- wday(EPC_data_subset$Date.Time)
EPC_data_subset$Global_active_power <- as.numeric(as.character(EPC_data_subset$Global_active_power))
rm(EPC_data)

## Plot 1
png(filename = "plot1.png", width = 480, height = 480)
hist(EPC_data_subset$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
/Plot1.R
no_license
nicoflinta/ExData_Plotting1
R
false
false
1,197
r
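Side note on Plot1.R above: data.table is already attached, so the same file can be read with fread(), which is usually faster than read.table() on a file of this size and handles na.strings the same way. This is an optional alternative, not a change to the original plotting code.

# Optional alternative to read.table(); types can be converted afterwards as in the script.
EPC_dt <- fread("household_power_consumption.txt", sep = ";", na.strings = "?")
EPC_dt[, Date := as.Date(Date, format = "%d/%m/%Y")]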
library(testthat)
library(queryr)

test_check("queryr")
/tests/testthat.R
no_license
paulhendricks/queryr
R
false
false
56
r
# Example data
library(sva)

i <- "/labs/mpsnyder/slancast/fiber/rnaseq/fall2019/batch_correction/rna_raw_counts.csv"
rna_df = read.csv(i, sep=",", header=TRUE, row.names = 1)
print(i)

# Full #
rna_metadata_rows = 5
rna_metadata = head(rna_df, rna_metadata_rows)
rna_df2 <- tail(rna_df, -rna_metadata_rows)  # Now getting rid of the metadata
rna_df2 <- data.frame(lapply(rna_df2, as.character), stringsAsFactors=FALSE, row.names = rownames(rna_df2))  # To change to a double, this needs to go through character first
rna_df2 <- data.frame(lapply(rna_df2, as.numeric), stringsAsFactors=FALSE, row.names = rownames(rna_df2))  # then numeric

rna_metadata = data.frame(t(rna_metadata))

rna_df2 <- as.matrix(rna_df2)
rna_df2 <- rna_df2[apply(rna_df2, 1, var) != 0,]
rna_df2 <- log(rna_df2+1, 2)
rna_df2 <- na.omit(rna_df2)

batch <- rna_metadata$batch
modcombat <- cbind(as.numeric(rna_metadata$condition), as.numeric(rna_metadata$week), as.numeric(rna_metadata$participant))
rownames(modcombat) <- rownames(rna_metadata)

combat_edata = ComBat(dat=rna_df2, batch=batch, mod=modcombat, par.prior=TRUE, prior.plots=FALSE)
class(combat_edata) <- "character"

rna_metadata = data.frame(t(rna_metadata))
combat_edata2 <- rbind(rna_metadata, combat_edata)

write.table(combat_edata2, file="/labs/mpsnyder/slancast/fiber/rnaseq/fall2019/batch_correction/rna-final-combat.txt", sep="\t")
/Batch_Effects_rna_scg.R
no_license
linlin0026/fiber_-_10.1016-j.chom.2022.03.036
R
false
false
1,364
r
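A quick sanity check that is sometimes added after ComBat but is not in the script above: compare how samples cluster by batch before and after correction using PCA on the log counts. This sketch only uses objects the script already defines (rna_df2, combat_edata, batch).

# Not part of the original pipeline -- visual check of batch-effect removal.
pca_before <- prcomp(t(rna_df2))
pca_after  <- prcomp(t(apply(combat_edata, 2, as.numeric)))  # combat_edata was cast to character above
par(mfrow = c(1, 2))
plot(pca_before$x[, 1:2], col = as.factor(batch), pch = 19, main = "Before ComBat")
plot(pca_after$x[, 1:2],  col = as.factor(batch), pch = 19, main = "After ComBat")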
# Exercise 2: working with `dplyr`
# Note that this exercise repeats the analysis from Exercise 1, but should be
# performed using `dplyr` (do not directly access or manipulate the data frames)

# Install and load the "fueleconomy" package
#install.packages("devtools")
#devtools::install_github("hadley/fueleconomy")
library(fueleconomy)

# Install and load the "dplyr" library
install.packages("dplyr")
library("dplyr")

# Select the different manufacturers (makes) of the cars in this data set.
# Save this vector in a variable
makes <- select(vehicles, make)

# Use the `distinct()` function to determine how many different car manufacturers
# are represented by the data set
nrow(distinct(vehicles, make))
length(unique(makes$make))

# Filter the data set for vehicles manufactured in 1997
cars_1997 <- filter(vehicles, year == 1997)

# Arrange the 1997 cars by highway (`hwy`) gas mileage
cars_1997 <- arrange(cars_1997, hwy)

# Mutate the 1997 cars data frame to add a column `average` that has the average
# gas mileage (between city and highway mpg) for each car
cars_1997 <- mutate(cars_1997, average = (hwy + cty) / 2)

# Filter the whole vehicles data set for 2-Wheel Drive vehicles that get more
# than 20 miles/gallon in the city.
# Save this new data frame in a variable.
two_wheel_20_mpg <- filter(vehicles, drive == '2-Wheel Drive', cty > 20)

# Of the above vehicles, what is the vehicle ID of the vehicle with the worst
# hwy mpg?
# Hint: filter for the worst vehicle, then select its ID.
filtered <- filter(two_wheel_20_mpg, hwy == min(hwy))
worst_hwy <- select(filtered, id)

# Write a function that takes a `year_choice` and a `make_choice` as parameters,
# and returns the vehicle model that gets the most hwy miles/gallon of vehicles
# of that make in that year.
# You'll need to filter more (and do some selecting)!
make_year <- function(make_choice, year_choice) {
  filtered <- filter(vehicles, make == make_choice, year == year_choice)
  filtered <- filter(filtered, hwy == max(hwy))
  selected <- select(filtered, model)
  selected
}

# What was the most efficient Honda model of 1995?
make_year('Honda', 1995)
/exercise-2/exercise.R
permissive
tifftruong/ch10-dplyr
R
false
false
2,141
r
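The make_year() function in the exercise above chains several dplyr verbs through intermediate variables; the same logic reads naturally with the %>% pipe. This version is shown for comparison only and is not part of the exercise's expected answer.

# Pipe-based equivalent of make_year(), for comparison.
make_year_piped <- function(make_choice, year_choice) {
  vehicles %>%
    filter(make == make_choice, year == year_choice) %>%
    filter(hwy == max(hwy)) %>%
    select(model)
}
make_year_piped("Honda", 1995)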
library(MASS)      # box-cox
library(DAAG)      # cv
library(car)       # step
library(tidyverse)
library(ggplot2)

###########################################
## LINEAR MODELING: PREDICTING BIKE COUNT
###########################################

# Data Preprocessing ----------------------------------------------------
day <- read.csv("by_day.csv")
hour <- read.csv("by_hour.csv")

# Treat categorical variables as factors
day$Is.Weekend <- as.factor(day$Is.Weekend)
day$Month <- as.factor(day$Month)
day$Weekday <- as.factor(day$Weekday)
day$Rain <- as.factor(day$Rain)
day$Fog <- as.factor(day$Fog)
day$Snow <- as.factor(day$Snow)
day$Thunderstorm <- as.factor(day$Thunderstorm)

# Subset variables we're interested in
bikedata <- day[, c(2, 8:33)]

# Investigate NAs
colSums(is.na(bikedata))
# There are no NAs.

# Linear Model (full) ----------------------------------------------------
lm.full <- lm(Bike.Count ~ ., data = bikedata)
summary(lm.full)
# Residual standard error: 1206 on 59 degrees of freedom
# Multiple R-squared: 0.8735, Adjusted R-squared: 0.8092
# F-statistic: 13.59 on 30 and 59 DF, p-value: < 2.2e-16
# Model is significant.
# The model identified Day, Wind_Avg, Rain_Inches, Snow, Weekday(Sunday) and Is.Holiday
# as significant variables at the 5% significance level.
# Is.Weekend and Vis_High were removed from the model because of singularities.

# We do not know if the beta estimates above are being affected by multicollinearity.
# We need to investigate multicollinearity in the data and consider transforming
# and removing variables.

# Investigate Multicollinearity --------------------------------------------------------------------------
# Look at the correlation matrix between numeric variables
is.num <- sapply(bikedata, is.numeric)
num.df <- bikedata[, is.num]
cor(num.df)
# There are severe near-linear dependencies (corr > 0.9) between Temp_High and Temp_Avg,
# Temp_Low and Temp_Avg, Dew_High and Dew_Avg, Dew_Low and Dew_Avg, Hum_High and Hum_Avg,
# Pres_High and Pres_Avg, and Pres_Low and Pres_Avg.

# Look at Variance Inflation Factors
lm.full <- lm(Bike.Count ~ . - Is.Weekend - Vis_High, data = bikedata)
vif(lm.full)
# Again, many of the weather variables have VIF > 5, so again, we need to deal with them.

# Dealing With Multicollinearity --------------------------------------------------------------------------
# Collapse High/Low variables into Range variables.
# This way, we keep the information contained in these variables and hopefully avoid
# multicollinearity with the Avg variables.
bikedata$Temp_Range <- bikedata$Temp_High - bikedata$Temp_Low
bikedata$Dew_Range <- bikedata$Dew_High - bikedata$Dew_Low
bikedata$Hum_Range <- bikedata$Hum_High - bikedata$Hum_Low
bikedata$Pres_Range <- bikedata$Pres_High - bikedata$Pres_Low
bikedata$Vis_Range <- bikedata$Vis_High - bikedata$Vis_Low

# Drop the High and Low variables. We will also drop Is.Weekend because it is perfectly
# linearly dependent with Weekday==Saturday and Weekday==Sunday.
bikedata <- bikedata %>% select(-Is.Weekend, -Temp_High, -Temp_Low, -Dew_High, -Dew_Low,
                                -Hum_High, -Hum_Low, -Pres_High, -Pres_Low, -Vis_High, -Vis_Low)

# Look at the correlation matrix between numeric variables
is.num <- sapply(bikedata, is.numeric)
num.df <- bikedata[, is.num]
cor(num.df)
# No more correlations above 0.9.

lm.full2 <- lm(Bike.Count ~ ., data = bikedata)
summary(lm.full2)
# Residual standard error: 1174 on 63 degrees of freedom
# Multiple R-squared: 0.872, Adjusted R-squared: 0.8192
# F-statistic: 16.51 on 26 and 63 DF, p-value: < 2.2e-16
# Model is significant.
# Our Adj-R^2 improved from 0.8092 to 0.8192.
# Variables identified by the model as significant at the 5% significance level:
# Month(Jan), Day, Temp_Avg, Wind_Avg, Rain_Inches, Snow, Weekday(Sunday), Is.Holiday.

vif(lm.full2)
# VIF values are much lower now. However, Temp_Avg, Dew_Avg, Hum_Avg, Vis_Avg have VIF > 10.
# Vis_Range has VIF > 5 and < 10.
# According to the correlation matrix, Hum_Avg and Temp_Avg are both correlated with Dew_Avg
# by 0.85317738 and 0.80249561 respectively.

# What happens if we drop Dew_Avg and Vis_Range?
lm.full3 <- lm(Bike.Count ~ . - Dew_Avg - Vis_Range, data = bikedata)
summary(lm.full3)
# Snow is no longer significant.
# Residual standard error: 1179 on 65 degrees of freedom
# Multiple R-squared: 0.8669, Adjusted R-squared: 0.8178
# F-statistic: 17.65 on 24 and 65 DF, p-value: < 2.2e-16
# Model is significant.

vif(lm.full3)
# Vis_Avg and Hum_Avg have GVIF ~ 5 or 6, but overall, looks much better.

# Drop Dew_Avg and Vis_Range from the data frame.
bikedata <- bikedata %>% select(-Dew_Avg, -Vis_Range)
lm.full4 <- lm(Bike.Count ~ ., data = bikedata)

# Stepwise/Forward/Backward Selection -----------------------------------------------------------------
## Begin by defining the models with no variables (null) and all variables (full)
lm.null <- lm(Bike.Count ~ 1, data = bikedata)

## Stepwise selection
lm.step <- step(lm.null, scope = list(lower = lm.null, upper = lm.full4), direction = "both")
# Step: AIC=1281.19
# Bike.Count ~ Temp_Avg + Hum_Avg + Rain_Inches + Weekday + Is.Holiday +
#     Wind_Avg + Month + Day + Snow + Rain
summary(lm.step)
# Residual standard error: 1134 on 73 degrees of freedom
# Multiple R-squared: 0.8617, Adjusted R-squared: 0.8314
# F-statistic: 28.43 on 16 and 73 DF, p-value: < 2.2e-16
# Model is significant.

# Forward selection:
lm.forward <- step(lm.null, scope = list(lower = lm.null, upper = lm.full4), direction = "forward")
summary(lm.forward)
# Step: AIC=1281.19
# Bike.Count ~ Temp_Avg + Hum_Avg + Rain_Inches + Weekday + Is.Holiday +
#     Wind_Avg + Month + Day + Snow + Rain
# Forward selection picked the exact same model.

# Backward elimination:
lm.back <- step(lm.full4, scope = list(lower = lm.null, upper = lm.full4), direction = "backward")
# Step: AIC=1281.19
# Bike.Count ~ Month + Day + Temp_Avg + Hum_Avg + Wind_Avg + Rain_Inches +
#     Rain + Snow + Weekday + Is.Holiday
# Backward elimination also picked the exact same model.

# Residual Analysis/Influential Points ----------------------------------------------
## R-student residuals
ti <- rstudent(lm.step)

## Normal probability plot
qqnorm(ti)
qqline(ti)
# Looks normal except for two points on the right tail.

## Residual vs. fitted values plot
plot(fitted(lm.step), ti)
# Seems normal with the exception of 2 points.

summary(influence.measures(lm.step))
# Pt 84 is an influential point, with a COVRATIO of 0.07, a DFFIT of 1.94 and a DFBETA of 1.03 for Temp_Avg.
# This suggests that point 84 has unusually high influence over the beta estimate of Temp_Avg,
# and that the fitted values are being affected by the presence of point 84.
# Pt 50 has a DFFIT of 1.56 and a COVRATIO of 0.1, suggesting that this point also exerts an unusual
# amount of influence on the fitted values.

bikedata[c(50, 84), ]
# These points correspond to Feb 19, 2017 and Mar 25, 2017.
#    Bike.Count Month Day Temp_Avg Hum_Avg Pres_Avg Vis_Avg Wind_Avg Rain_Inches Rain Fog Snow Thunderstorm  Weekday Is.Holiday
# 50      12350   Feb  19       60      60    29.91      10        9        0.01    1   0    0            0   Sunday          0
# 84      16191 March  25       66      58    30.17      10        9        0.00    0   0    0            0 Saturday          0

summary(bikedata$Bike.Count)
#  Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
#  1583    5382    7237    7183    8608   16190
# In terms of bike count, both these days are in the topmost quartile, and the bike count for Pt 84
# corresponds to the maximum bike count.

# Does removing these two points change the model's beta coefficients?
lm.full5 <- lm(Bike.Count ~ Temp_Avg + Hum_Avg + Rain_Inches + Weekday + Is.Holiday +
                 Wind_Avg + Month + Day + Snow + Rain, data = bikedata[-c(50, 84), ])
summary(lm.full5)
# Residual standard error: 946.7 on 71 degrees of freedom
# Multiple R-squared: 0.8881, Adjusted R-squared: 0.8629
# F-statistic: 35.22 on 16 and 71 DF, p-value: < 2.2e-16
# An improvement in Adjusted R^2 compared to lm.step (up from 0.8314).

lm.full5$coefficients
#      (Intercept)         Temp_Avg          Hum_Avg      Rain_Inches    WeekdayMonday  WeekdaySaturday    WeekdaySunday
#      1540.024182       174.092476       -13.834248     -4194.415747       146.885718      -801.287525     -2075.045727
#  WeekdayThursday   WeekdayTuesday WeekdayWednesday       Is.Holiday         Wind_Avg         MonthJan       MonthMarch
#        12.829004        -7.907761       -92.695427     -1789.780420       -92.307566      -850.845000       324.486204
#              Day            Snow1            Rain1
#        30.187008       491.145747      -547.949283

lm.step$coefficients
#      (Intercept)         Temp_Avg          Hum_Avg      Rain_Inches    WeekdayMonday  WeekdaySaturday    WeekdaySunday
#        742.37329        199.83904        -20.72061      -4095.33739        193.89938       -387.05692      -1709.29852
#  WeekdayThursday   WeekdayTuesday WeekdayWednesday       Is.Holiday         Wind_Avg         MonthJan       MonthMarch
#        -86.16931        -40.64184       -198.33614      -1733.17718        -93.83794       -813.51719        321.05561
#              Day            Snow1            Rain1
#         31.76259        679.38475       -570.10086

# Yes, there are large differences in these coefficient estimates.
# However, we cannot justify removing these points from our model. An internet search reveals that
# nothing special happened on Feb 19, 2017 or Mar 25, 2017 in Washington D.C. We have no evidence that
# these two observations are bad data points. It's possible that they seem influential in this model
# because we do not have enough data points (n = 90).

# Plotting residuals against explanatory variables.
plot(bikedata$Temp_Avg, ti)
# Looks ok except for the two points in the upper right hand corner.
plot(bikedata$Hum_Avg, ti)
# Looks fine except for two points.
plot(bikedata$Rain_Inches, ti)
# Highly abnormal residuals!
plot(bikedata$Wind_Avg, ti)
# Some irregularities, but mostly ok.

# Looking At Rain_Inches -----------------------------------------------
plot(bikedata$Rain_Inches, bikedata$Bike.Count)
# There are many days where Rain_Inches == 0, leading to the strange residual plot found above.
# There seems to be no good way to transform this variable. Furthermore, it is
# a significant variable, so we will leave it in as is.

# Cross-Validate -----------------------------------------------
x.cv <- cv.lm(data = bikedata, form.lm = lm.step, m = 2, plotit = TRUE)
# Sum of squares = 1.21e+08    Mean square = 2698332    n = 45
#
# Overall (Sum over all 45 folds)
#      ms
# 2352489
# Plot shows our final model is pretty good.

# PRESS statistic
pr <- resid(lm.step) / (1 - lm.influence(lm.step)$hat)
sum(pr^2)
# PRESS = 1.44e+08

#######################################################
## LINEAR MODELING: PREDICTING DAILY TOTAL DISTANCE
#######################################################
plot(day$Bike.Count, day$Total.Dist)
# We thought we would build a separate model predicting total distance travelled by all bikeshare
# users on a given day, but the plot above shows that Bike.Count and Total.Dist are very highly
# correlated. A model predicting Total.Dist would use the exact same predictors as the model above.
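
# Editor's sketch (not part of the original analysis): the PRESS calculation in the
# cross-validation step above can be wrapped in small reusable helpers, together with an
# approximate "prediction R^2" (1 - PRESS/SST). The helper names are hypothetical.
press_stat <- function(fit) {
  pr <- resid(fit) / (1 - lm.influence(fit)$hat)   # leave-one-out (PRESS) residuals
  sum(pr^2)
}
pred_r2 <- function(fit) {
  y <- model.response(model.frame(fit))
  1 - press_stat(fit) / sum((y - mean(y))^2)       # rough out-of-sample analogue of R^2
}
# e.g. press_stat(lm.step); pred_r2(lm.step)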
#######################################################
## LINEAR MODELING: PREDICTING HOURLY BIKE COUNT
#######################################################

## Data preprocessing
by_hour <- hour %>%
  group_by(Date, Start.Hour) %>%
  summarise(Bike.Count = n())
by_hour$Start.Hour <- as.factor(by_hour$Start.Hour)
predicted <- data.frame(Date = day$Date, Predicted.BC = fitted(lm.step) / 24)
by_hour <- merge(x = by_hour, y = predicted, by = "Date", all.x = TRUE)
by_hour$Predicted.Residual <- by_hour$Bike.Count - by_hour$Predicted.BC

## Plot
ggplot(by_hour, aes_string(x = "Start.Hour", y = "Predicted.Residual")) +
  geom_point(position = position_jitter(w = 0.0, h = 0.4)) +
  theme_light(base_size = 20) +
  xlab("Hour of the Day") +
  ylab("Predicted Residual") +
  theme(plot.title = element_text(size = 18))

## Linear model
hour.lm <- lm(Predicted.Residual ~ Start.Hour, data = by_hour)
summary(hour.lm)
# Residual standard error: 177 on 2119 degrees of freedom
# Multiple R-squared: 0.641, Adjusted R-squared: 0.637
# F-statistic: 164 on 23 and 2119 DF, p-value: <2e-16
# Model is significant.

# Residual analysis:
hour.ti <- rstudent(hour.lm)

## Normal probability plot
qqnorm(hour.ti)
qqline(hour.ti)
# The points in the middle are fine but the tails are extremely heavy and strange.

## Residual vs. fitted values plot
plot(fitted(hour.lm), hour.ti)
# Highly abnormal.

plot(as.numeric(by_hour$Start.Hour), by_hour$Predicted.Residual)

## Polynomial fits in hour of day
by_hour$Start.Hour <- as.numeric(by_hour$Start.Hour)
for (i in c(exp(1), 1:10)) {
  hour.lm.n <- lm(Predicted.Residual ~ poly(Start.Hour, i), data = by_hour)
  print(paste("i: ", i))
  print(paste("r.squared: ", summary(hour.lm.n)$r.squared))
}
# [1] "i: 1"
# [1] "r.squared: 0.107612701237565"
# [1] "i: 2"
# [1] "r.squared: 0.358878782045627"
# [1] "i: 2.71828182845905"
# [1] "r.squared: 0.358878782045627"
# [1] "i: 3"
# [1] "r.squared: 0.427634603544068"
# [1] "i: 4"
# [1] "r.squared: 0.427796302964034"
# [1] "i: 5"
# [1] "r.squared: 0.431068009612039"
# [1] "i: 6"
# [1] "r.squared: 0.482216200704182"
# [1] "i: 7"
# [1] "r.squared: 0.517578873090201"
# [1] "i: 8"
# [1] "r.squared: 0.552582585118866"
# [1] "i: 9"
# [1] "r.squared: 0.566206272061443"
# [1] "i: 10"
# [1] "r.squared: 0.567498518111798"
/day_analysis.R
no_license
Jo-Pan/stat-final-bikeshare
R
false
false
13,465
r
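Editor's note on the polynomial-degree loop in day_analysis.R above (an illustrative sketch that assumes `by_hour` as constructed there): stats::poly() effectively coerces a non-integer degree to an integer, which is presumably why degree exp(1) reproduced the degree-2 fit. Restricting the comparison to integer degrees and using adjusted R^2 penalises the extra terms:

# Hypothetical, cleaner version of the degree comparison
adj_r2 <- sapply(1:10, function(d) {
  summary(lm(Predicted.Residual ~ poly(Start.Hour, d), data = by_hour))$adj.r.squared
})
round(adj_r2, 3)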
#' Convert a stars or stars-proxy object into an EE Image object
#'
#' @param x stars or stars-proxy object to be converted into an ee$Image.
#' @param assetId Character. Destination asset ID for the uploaded file.
#' @param command_line_tool_path Character. Path to the Earth Engine command line
#' tool (CLT). If NULL, rgee assumes that CLT is set in the system PATH.
#' (ignore if \code{via} is not defined as "gcs_to_asset").
#' @param overwrite Logical. If TRUE, the assetId will be overwritten.
#' @param bucket Character. Name of the GCS bucket.
#' @param predefinedAcl Specify user access to object. Passed to
#' \code{googleCloudStorageR::gcs_upload}.
#' @param monitoring Logical. If TRUE the export task will be monitored.
#' @param quiet Logical. Suppress info message.
#' @param ... parameter(s) passed on to \code{\link{ee_utils_create_manifest_image}}
#'
#' @return An ee$Image object
#' @family image upload functions
#' @examples
#' \dontrun{
#' library(rgee)
#' library(stars)
#' ee_Initialize(gcs = TRUE)
#'
#' # Get the filename of an image
#' tif <- system.file("tif/L7_ETMs.tif", package = "stars")
#' x <- read_stars(tif)
#' assetId <- sprintf("%s/%s", ee_get_assethome(), 'stars_l7')
#'
#' # Method 1
#' # 1. Move from local to gcs
#' gs_uri <- local_to_gcs(x = tif, bucket = 'rgee_dev')
#'
#' # 2. Create a manifest
#' manifest <- ee_utils_create_manifest_image(gs_uri, assetId)
#'
#' # 3. Pass from gcs to asset
#' gcs_to_ee_image(
#'   manifest = manifest,
#'   overwrite = TRUE
#' )
#'
#' # OPTIONAL: Monitoring progress
#' ee_monitoring(max_attempts = Inf)
#'
#' # OPTIONAL: Display results
#' ee_stars_01 <- ee$Image(assetId)
#' Map$centerObject(ee_stars_01)
#' Map$addLayer(ee_stars_01, list(min = 0, max = 255))
#'
#' # Method 2
#' ee_stars_02 <- stars_as_ee(
#'   x = x,
#'   overwrite = TRUE,
#'   assetId = assetId,
#'   bucket = "rgee_dev"
#' )
#' Map$centerObject(ee_stars_02)
#' Map$addLayer(ee_stars_02, list(min = 0, max = 255))
#' }
#' @export
stars_as_ee <- function(x,
                        assetId,
                        bucket = NULL,
                        predefinedAcl = "bucketLevel",
                        command_line_tool_path = NULL,
                        overwrite = FALSE,
                        monitoring = TRUE,
                        quiet = FALSE,
                        ...) {
  # Folder to save intermediate upload files
  ee_temp <- tempdir()

  if (is.null(command_line_tool_path)) {
    command_line_tool_path <- ""
  }

  if (!quiet) {
    message("1. Converting stars (raster) object to GeoTIFF ... saving in /tmp")
  }
  stars_proxy <- ee_as_proxystars(x, temp_dir = ee_temp)

  if (!quiet) {
    message("2. From local to GCS")
  }
  gcs_filename <- local_to_gcs(
    x = stars_proxy[[1]],
    bucket = bucket,
    predefinedAcl = predefinedAcl,
    quiet = quiet
  )

  if (!quiet) {
    message("3. Creating the manifest")
  }
  manifest <- ee_utils_create_manifest_image(
    gs_uri = gcs_filename,
    assetId = assetId,
    ...
  )

  if (!quiet) {
    message("4. From GCS to Earth Engine")
  }
  # Verify that the EE asset path is valid
  assetId <- ee_verify_filename(
    path_asset = assetId,
    strict = FALSE
  )
  gcs_to_ee_image(
    manifest,
    overwrite = overwrite,
    command_line_tool_path = command_line_tool_path
  )

  if (isTRUE(monitoring)) {
    ee_monitoring(max_attempts = Inf)
  }
  ee$Image(assetId)
}

#' Convert a Raster* object into an EE Image object
#'
#' @param x RasterLayer, RasterStack or RasterBrick object to be converted into
#' an ee$Image.
#' @param assetId Character. Destination asset ID for the uploaded file.
#' @param command_line_tool_path Character. Path to the Earth Engine command line
#' tool (CLT). If NULL, rgee assumes that CLT is set in the system PATH.
#' (ignore if \code{via} is not defined as "gcs_to_asset").
#' @param overwrite Logical. If TRUE, the assetId will be overwritten.
#' @param bucket Character. Name of the GCS bucket.
#' @param predefinedAcl Specify user access to object. Passed to
#' \code{googleCloudStorageR::gcs_upload}.
#' @param monitoring Logical. If TRUE the export task will be monitored.
#' @param quiet Logical. Suppress info message.
#' @param ... parameter(s) passed on to \code{\link{ee_utils_create_manifest_image}}
#'
#' @return An ee$Image object
#' @family image upload functions
#'
#' @examples
#' \dontrun{
#' library(raster)
#' library(stars)
#' library(rgee)
#'
#' ee_Initialize(gcs = TRUE)
#'
#' # Get the filename of an image
#' tif <- system.file("tif/L7_ETMs.tif", package = "stars")
#' x <- stack(tif)
#' assetId <- sprintf("%s/%s", ee_get_assethome(), 'raster_l7')
#'
#' # Method 1
#' # 1. Move from local to gcs
#' gs_uri <- local_to_gcs(x = tif, bucket = 'rgee_dev')
#'
#' # 2. Create a manifest
#' manifest <- ee_utils_create_manifest_image(gs_uri, assetId)
#'
#' # 3. Pass from gcs to asset
#' gcs_to_ee_image(
#'   manifest = manifest,
#'   overwrite = TRUE
#' )
#'
#' # OPTIONAL: Monitoring progress
#' ee_monitoring(max_attempts = Inf)
#'
#' # OPTIONAL: Display results
#' ee_stars_01 <- ee$Image(assetId)
#' Map$centerObject(ee_stars_01)
#' Map$addLayer(ee_stars_01, list(min = 0, max = 255))
#'
#' # Method 2
#' ee_stars_02 <- raster_as_ee(
#'   x = x,
#'   overwrite = TRUE,
#'   assetId = assetId,
#'   bucket = "rgee_dev"
#' )
#' Map$centerObject(ee_stars_02)
#' Map$addLayer(ee_stars_02, list(min = 0, max = 255))
#' }
#' @export
raster_as_ee <- stars_as_ee
/R/raster_as_ee.R
permissive
r-spatial/rgee
R
false
false
5,492
r
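Editor's sketch (a hypothetical helper, not part of rgee): stars_as_ee() forwards `bucket` straight to local_to_gcs(), so a missing bucket name only fails once the upload is attempted. A thin wrapper can fail earlier with a clearer message:

# Hypothetical convenience wrapper; stars_as_ee() itself is unchanged.
stars_as_ee_checked <- function(x, assetId, bucket = NULL, ...) {
  if (is.null(bucket) || !nzchar(bucket)) {
    stop("A GCS bucket name is required, e.g. bucket = \"my-bucket\".", call. = FALSE)
  }
  stars_as_ee(x = x, assetId = assetId, bucket = bucket, ...)
}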
#'
#'@title Compare multiple size comps.
#'
#'@description Function to compare multiple size comps.
#'
#'@param n_xmsz - array (or list of arrays) dimensioned xmsz
#'@param title - title for plot
#'@param ylab - label for the y axis
#'@param showPlot - flag to show plot immediately
#'
#'@return ggplot2 object
#'
#'@import ggplot2
#'@import reshape2
#'
#'@export
#'
compareSizeCompsGG<-function(n_xmsz=NULL,
                             title='',
                             ylab='Abundance (millions)',
                             showPlot=TRUE){
    oneModel<-FALSE;
    #size comps come in as array(s)
    if (is.array(n_xmsz)){
        mdfr<-melt(n_xmsz,value.name='val');
        mdfr$model<-'';
        oneModel<-TRUE;
    } else if (is.list(n_xmsz)) {
        #n_xmsz is a list of array objects with models as names
        mdls<-names(n_xmsz);
        mdfr<-NULL;
        for (mdl in mdls){
            mdfrp<-melt(n_xmsz[[mdl]],value.name='val');
            mdfrp$model<-mdl;
            mdfr<-rbind(mdfr,mdfrp);
        }
    } else {
        cat('Invalid specification for n_xmsz in compareSizeCompsGG\n');
        cat('n_xmsz must be either an array or a list of arrays\n');
        cat("Aborting...\n");
        stop();
    }

    if (oneModel){
        #plotting one model
        pl <- ggplot(aes(x=z,y=val,fill=s),data=mdfr);
        pl <- pl + facet_grid(m~x);
    } else {
        #plotting multiple models
        pl <- ggplot(aes(x=z,y=val,fill=model),data=mdfr);
        pl <- pl + facet_grid(m+s~x);
    }
    pl <- pl + geom_bar(alpha=1,stat='identity',position='dodge');
    pl <- pl + labs(x='size (mm)',y=ylab);
    pl <- pl + guides(fill=guide_legend(''));
    if (title!='') pl <- pl + ggtitle(title);
    if (showPlot) print(pl);
    return(invisible(pl))
}
/R/compareSizeCompsGG.R
permissive
wStockhausen/rsimTCSAM
R
false
false
1,777
r
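A minimal usage sketch for compareSizeCompsGG() (editor's addition; the dimension labels sex/maturity/shell condition/size are assumed from the xmsz naming convention, and all values are made up). Named dimnames are needed so that reshape2::melt() yields the x, m, s, and z columns the plotting code expects:

library(reshape2)
library(ggplot2)

# toy abundance array dimensioned x (sex) by m (maturity) by s (shell condition) by z (size bin)
n_xmsz <- array(runif(2*2*2*5, 0, 10),
                dim=c(2,2,2,5),
                dimnames=list(x=c('male','female'),
                              m=c('immature','mature'),
                              s=c('new shell','old shell'),
                              z=seq(27,127,25)));
p1 <- compareSizeCompsGG(n_xmsz, title='one model', showPlot=FALSE);

# comparing two models: pass a named list of arrays
p2 <- compareSizeCompsGG(list(`model A`=n_xmsz, `model B`=1.2*n_xmsz), showPlot=FALSE);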