content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
---|---|---|---|---|---|---|---|---|---|
large_string (lengths 0–6.46M) | large_string (lengths 3–331) | large_string (2 classes) | large_string (lengths 5–125) | large_string (1 class) | bool (2 classes) | bool (2 classes) | int64 (4–6.46M) | large_string (75 classes) | string (lengths 0–6.46M) |
## Read in the test and train files
train1 <- read.table("X_train.txt", stringsAsFactors = FALSE)
train2 <- read.table("y_train.txt", stringsAsFactors = FALSE)
subtrain <- read.table("subject_train.txt", stringsAsFactors = FALSE)
test1 <- read.table("X_test.txt", stringsAsFactors = FALSE)
test2 <- read.table("y_test.txt", stringsAsFactors = FALSE)
subtest <- read.table("subject_test.txt", stringsAsFactors = FALSE)
## combine the files into one data frame
train <- cbind(subtrain, train2, train1)
test <- cbind(subtest, test2, test1)
activity <- rbind(train, test)
##Assign column names from features.txt
colname <- read.table("features.txt", stringsAsFactors = FALSE)
colnames(activity) <- c("Subject", "Activity", colname[,2])
##remove duplicated columns
activity<- activity[,!duplicated(colnames(activity))]
##rename numbers in activity column with the activity names
activity$Activity[activity$Activity==1] <- "WALKING"
activity$Activity[activity$Activity==2] <- "WALKING UPSTAIRS"
activity$Activity[activity$Activity==3] <- "WALKING DOWNSTAIRS"
activity$Activity[activity$Activity==4] <- "SITTING"
activity$Activity[activity$Activity==5] <- "STANDING"
activity$Activity[activity$Activity==6] <- "LAYING"
##extract only the columns with mean and standard deviation measurements
newdata <- activity[ ,c("Subject", "Activity", colnames(activity)[grep("mean\\(\\)|std\\(\\)", names(activity))])]
##calculate the second tidy data set with the average of each variable for each activity and subject
library(dplyr)
mydata <- tbl_df(newdata)
final <- group_by(mydata, Subject, Activity) %>% summarise_each(funs(mean))
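## Note: tbl_df(), summarise_each() and funs() are superseded in current dplyr;
## an equivalent call (a sketch, assuming dplyr >= 1.0) would be:
## final <- newdata %>%
##   group_by(Subject, Activity) %>%
##   summarise(across(everything(), mean), .groups = "drop")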
##write the txt file with the data set
write.table(final, file = "tidydata.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
dianav83/GetCleaningDataAssignment
|
R
| false | false | 1,725 |
r
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result)
|
/dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609867566-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 1,135 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spark_td.R
\name{spark_write_td}
\alias{spark_write_td}
\title{Write a Spark DataFrame to Treasure Data}
\usage{
spark_write_td(x, name, mode = NULL, options = list(),
partition_by = NULL, ...)
}
\arguments{
\item{x}{A Spark DataFrame or dplyr operation}
\item{name}{The name of the table to write to.}
\item{mode}{A \code{character} element. Specifies the behavior when data or
table already exists. Supported values include: 'error', 'append', 'overwrite' and
'ignore'. Notice that 'overwrite' will also change the column structure.}
\item{options}{A list of strings with additional options.}
\item{partition_by}{A \code{character} vector. Partitions the output by the given columns on the file system.}
\item{...}{Optional arguments; currently unused.}
}
\description{
Write a Spark DataFrame to Treasure Data
}
\examples{
\dontrun{
config <- spark_config()
config$spark.td.apikey <- Sys.getenv("TD_API_KEY")
config$spark.serializer <- "org.apache.spark.serializer.KryoSerializer"
config$spark.sql.execution.arrow.enabled <- "true"
sc <- spark_connect(master = "local", config = config)
spark_mtcars <- dplyr::copy_to(sc, mtcars, "spark_mtcars", overwrite = TRUE)
spark_write_td(
spark_mtcars,
name = "mydb.mtcars",
mode = "overwrite"
)
}
}
\seealso{
Other Spark serialization routines: \code{\link{spark_execute_td_presto}},
\code{\link{spark_read_td_presto}},
\code{\link{spark_read_td_query}},
\code{\link{spark_read_td}}
}
\concept{Spark serialization routines}
|
/man/spark_write_td.Rd
|
permissive
|
mstei4176/sparklytd
|
R
| false | true | 1,561 |
rd
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066492e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615835157-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 2,048 |
r
|
#' Article search from ScienceDirect
#'
#' Search articles from the ScienceDirect database by specifying a list of key terms
#'
#' @param keywords a vector containing the key terms to search
#' @param size the number of articles from which the information is extracted
#' @param addInfo a logical value indicating whether the info of abstract, keyword, journalTitle, journalVol and authorName should be retrieved for each article
#' @param infoList a data frame of titles and URLs to skip the first step of getting the main information of each article
#'
#' @return a data frame containing the information requested in the function call
#'
#' @examples
#'
#'
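#' ## Illustrative call (a sketch: hypothetical keywords; requires internet
#' ## access and depends on the current ScienceDirect page layout):
#' \dontrun{
#' arts <- ScienceDirectArticles(keywords = c("data mining", "bibliometrics"),
#'                               size = 50, addInfo = TRUE)
#' head(arts)
#' }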
#' @export
#'
#' @import rvest
#' @import xml2
ScienceDirectArticles <-
function(keywords, size, addInfo = FALSE, infoList) {
if (missing(keywords)){
if (missing(infoList)) {
stop("Neither keywords nor a list of articles names and links is provided, at least one must be specified")
} else {
cat("Getting info from the provided list...\n\n")
flush.console()
artTitle = infoList$Title
artUrl = infoList$URL
}
} else {
if (size > 999) {
stop("Only a maximum of 999 articles can be obtained from the ScienceDirect webpage")
}
keywords <- paste0("{", keywords, "}", collapse = "OR")
url <- "http://www.sciencedirect.com/"
originalSession <- rvest::html_session(url)
cat("Performing keyword search...\n")
flush.console()
searchForm <- rvest::html_form(xml2::read_html(url))[[3]]
searchValues <- rvest::set_values(searchForm, qs_all = keywords)
originalQuery <- rvest::submit_form(session = originalSession, form = searchValues, submit = "sdSearch")$url
currentSession <- xml2::read_html(originalQuery)
resultsFound <- rvest::html_text(rvest::html_node(currentSession, ".queryText"))
numberOfArt <- as.numeric(gsub(x = resultsFound, pattern = "[A-z]|\\s+|[:punct:]|[.]|[,]", replacement = ""))
if (numberOfArt < size) {
size <- numberOfArt
cat(" Only a total of", numberOfArt, "articles were found\n\n")
flush.console()
} else {
cat(" A total of", numberOfArt, "articles were found\n\n")
flush.console()
}
cat("Looking for the main information of the articles:\n")
flush.console()
artNodes <- rvest::html_nodes(currentSession, ".artTitle")
artTitle <- rvest::html_text(artNodes)
artUrl <- GetHREF(artNodes)
if (size <= 25) {
cat(" A total of", size, "articles were retrieved\n\n")
flush.console()
} else {
cat(" ", length(artUrl), "articles retrieved\r")
flush.console()
repeat {
artListForm <- rvest::html_form(currentSession)[[4]]
nextList <- rvest::submit_form(currentSession, artListForm, submit = "bottomNext")$url
currentSession <- rvest::jump_to(currentSession, nextList)
artNodes <- rvest::html_nodes(currentSession, ".artTitle")
artTitle <- c(artTitle, rvest::html_text(artNodes))
artUrl <- c(artUrl, GetHREF(artNodes))
cat(" ", length(artUrl), "articles retrieved\r")
flush.console()
if (length(artUrl) >= size) {
cat(" A total of", size, "articles were retrieved\n\n")
flush.console()
break
}
}
}
}
artData <- data.frame("Title" = artTitle, "URL" = artUrl, stringsAsFactors = FALSE)[1:size, ]
tryCatch({
if (addInfo) {
artInfo <- NULL
repeat{
if (is.null(nrow(artInfo))) {
currentSession <- rvest::html_session(artData$URL[1])
artPage <- xml2::read_html(artData$URL[1])
} else {
artPage <- xml2::read_html(artData$URL[nrow(artInfo)+1])
}
abstract <- gsub(x = TextFromHtml(artPage, "class", c("abstract")), pattern = "Abstract|Highlights|Graphical abstract", replacement = "")
keyword <- gsub(x = TextFromHtml(artPage, "class", c("keyword", "svKeywords")), pattern = "Keywords", replacement = "")
journalTitle <- gsub(x = TextFromHtml(artPage, "class", c("title", "journal-title")), pattern = "[\r\n]|\\s+", replacement = " ")
journalVol <- TextFromHtml(artPage, "class", c("volIssue", "journal-volume"))
authorName <- gsub(x = TextFromHtml(artPage, "class", c("author-name-link", "authorName"), sep = ", "), pattern = "[\r\n]|\\s+", replacement = " ")
#doi <- TextFromHtml(artPage, "id", c("ddDoi"))
#doiNodes <- html_node(artPage, xpath = '//*[(@id = "S_C_ddDoi")] | //*[(@id = "ddDoi")]')
#doi <- GetHREF(doiNodes)
artInfo <- rbind(artInfo, c(First(abstract), First(keyword), First(authorName), First(journalTitle), First(journalVol)))
cat("The entire information of", nrow(artInfo), "articles has been obtained\r")
flush.console()
if (nrow(artInfo) == size) {
cat("The entire information of", size, "articles has been obtained\n")
flush.console()
break
}
}
artData <- data.frame(cbind(artData$Title, artInfo, artData$URL), stringsAsFactors = FALSE)
names(artData) <- c("Title", "Abstract", "Keywords", "Authors", "Journal", "Volume", "URL")
} else {
cat("The main information of", size, "articles has been obtained\n")
flush.console()
}
}, error = function(e) print("Couldn't finish retrieving the entire information"))
rownames(artData) <- paste0("Art", rep(1:nrow(artData)))
return(artData)
}
|
/R/ScienceDirectArticles.R
|
no_license
|
andresfpc/KDViz
|
R
| false | false | 5,190 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tessellate.R
\name{tessellate}
\alias{tessellate}
\title{tessellate}
\usage{
tessellate(
x,
x_min = NA,
x_max = NA,
y_min = NA,
y_max = NA,
z_min = NA,
z_max = NA,
output_definition = "\%i*\%P*\%t",
options = "-v",
voro_path = "voro++"
)
}
\arguments{
\item{x}{data.table/data.frame with the input points described by four variables (named columns):
\itemize{
\item id: id number that is passed to the output polygon (integer)
\item x: x-axis coordinate (numeric)
\item y: y-axis coordinate (numeric)
\item z: z-axis coordinate (numeric)
}}
\item{x_min}{minimum x-axis coordinate of the tessellation box. Default: min(x)}
\item{x_max}{maximum x-axis coordinate of the tessellation box. Default: max(x)}
\item{y_min}{minimum y-axis coordinate of the tessellation box. Default: min(y)}
\item{y_max}{maximum y-axis coordinate of the tessellation box. Default: max(y)}
\item{z_min}{minimum z-axis coordinate of the tessellation box. Default: min(z)}
\item{z_max}{maximum z-axis coordinate of the tessellation box. Default: max(z)}
\item{output_definition}{string that describes how the output file of voro++ should be structured.
This is passed to the -c option of the command line interface. All possible customization options
are documented \href{http://math.lbl.gov/voro++/doc/custom.html}{here}. Default: "\%i*\%P*\%t"}
\item{options}{string with additional options passed to voro++. All options are documented
\href{http://math.lbl.gov/voro++/doc/cmd.html}{here}. Default: "-v"}
\item{voro_path}{system path to the voro++ executable. Default: "voro++"}
}
\value{
raw, linewise output of voro++ in a character vector
}
\description{
Command line utility wrapper for the \href{http://math.lbl.gov/voro++}{voro++} software library.
voro++ must be installed on your system to use this function.
}
\examples{
random_unique_points <- unique(data.table::data.table(
id = NA,
x = runif(10),
y = runif(10),
z = runif(10)
))
random_unique_points$id <- 1:nrow(random_unique_points)
voro_output <- tessellate(random_unique_points)
polygon_points <- read_polygon_edges(voro_output)
cut_surfaces <- cut_polygons(polygon_points, c(0.2, 0.4, 0.6))
cut_surfaces_sf <- cut_polygons_to_sf(cut_surfaces, crs = 25832)
}
|
/man/tessellate.Rd
|
permissive
|
chaoshengt/bleiglas
|
R
| false | true | 2,327 |
rd
|
\name{mbbefd-package}
\alias{mbbefd-package}
\alias{mbbefd}
\docType{package}
\title{
\packageTitle{mbbefd}
}
\description{
The idea of this package emerged in 2013 from G Spedicato, who
at that time worked in the area of quantitative risk assessment.
In 2015, M Gesmann and C Dutang joined the project.
This project is hosted at \href{https://github.com/spedygiorgio/mbbefd}{github}.
This package contains the core functions of the two parametrizations
of the MBBEFD distribution
(distribution function, density, quantile functions, random generation,
aka d, p, q, r)
as well as MBBEFD exposure curve (ec) and raw moments (m).
This package also provides other distributions used for destruction rate
modelling, that is the beta, the shifted truncated Pareto
and the generalized beta distributions.
Due to the presence of total loss, a one-inflated version of the
previous distributions is also provided.
The vignette shows code snippets to fit the distribution to empirical data: \href{../doc/introduction_to_mbbefd.pdf}{Exposure rating, destruction rate models and the mbbefd package}.
}
\details{
\tabular{ll}{
Package: \tab mbbefd\cr
Type: \tab Package\cr
Version: \tab 0.8.9\cr
License: \tab GPL-2\cr
}
}
\author{
\packageAuthor{mbbefd}
Maintainer: \packageMaintainer{mbbefd}
}
\references{
Bernegger, Stefan. The Swiss Re Exposure Curves And The MBBEFD Distribution Class. ASTIN Bulletin (1997) 27:1, p99.
}
\keyword{ package }
\seealso{
See \code{\link{mbbefd-distr}} for the MBBEFD distribution;\cr
see \code{\link{swissRe}}, \code{\link{exposureCurve}} for exposure curves;\cr
see \code{\link{gbeta}}, \code{\link{stpareto}} for finite-support distributions;\cr
see \code{\link{oidistribution}}, \code{\link{oibeta}}, \code{\link{oigbeta}}, \code{\link{oiunif}}, \code{\link{oistpareto}} for one-inflated distributions.
}
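% Added illustration (a sketch): assumes the conventional d/p/q/r + ec naming
% described above (e.g. dmbbefd(), rmbbefd(), ecmbbefd()) with the (a, b)
% parametrization; see \link{mbbefd-distr} for the exact signatures.
\examples{
\dontrun{
x <- seq(0, 1, length.out = 11)
dmbbefd(x, a = 0.2, b = 0.04)    # density of the MBBEFD(a, b) distribution
ecmbbefd(x, a = 0.2, b = 0.04)   # exposure curve at the same points
rmbbefd(5, a = 0.2, b = 0.04)    # random generation
}
}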
|
/man/mbbefd-package.Rd
|
no_license
|
dangdiep/mbbefd
|
R
| false | false | 1,843 |
rd
|
bbfb675609f005b6024e219c113363aa query36_query11_1344n.qdimacs 867 1901
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query36_query11_1344n/query36_query11_1344n.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 71 |
r
|
#' @title Cross-plotting input variable vs. target variable
#' @description The cross_plot shows how the input variable is correlated with the target variable, showing the likelihood rates for each of the input's bins/buckets.
#' @param data data frame source
#' @param input input variable name (if empty, it runs for all numeric variables), it can take a single character value or a character vector.
#' @param target variable name to predict
#' @param str_input THIS PARAMETER WILL BE DEPRECATED. Please use 'input' instead. Only the name changes, not the functionality. String input variable (if empty, it runs for all numeric variables), it can take a single character value or a character vector.
#' @param str_target THIS PARAMETER WILL BE DEPRECATED. Please use 'target' instead. Only the name changes, not the functionality.
#' @param path_out path directory, if it has a value the plot is saved
#' @param auto_binning indicates the automatic binning of input variable based on equal frequency (function 'equal_freq'), default value=TRUE
#' @param plot_type indicates if the output is the 'percentual' plot, the 'quantity' or 'both' (default).
#' @examples
#' \dontrun{
#' ## Example 1:
#' cross_plot(data=heart_disease, input="chest_pain", target="has_heart_disease")
#'
#' ## Example 2: Disabling auto_binning:
#' cross_plot(data=heart_disease, input="oldpeak",
#' target="has_heart_disease", auto_binning=FALSE)
#'
#' ## Example 3: Saving the plot into a folder:
#' cross_plot(data=heart_disease, input="oldpeak",
#' target="has_heart_disease", path_out = "my_folder")
#'
#' ## Example 4: Running with multiple input variables at the same time:
#' cross_plot(data=heart_disease, input=c("age", "oldpeak", "max_heart_rate"),
#' target="has_heart_disease")
#'}
#' @return cross plot
#' @export
cross_plot <- function(data, input, target, str_input, str_target, path_out, auto_binning, plot_type='both')
{
if(!missing(str_input))
{
input=str_input
.Deprecated(msg="Parameter 'str_input' will be deprecated, please use 'input' insted (only name changed, not its functionality)")
}
if(!missing(str_target))
{
target=str_target
.Deprecated(msg = "Parameter 'str_target' will be deprecated, please use 'target' insted (only name changed, not its functionality)")
}
data=as.data.frame(data)
## Handling missing parameters
if(missing(auto_binning)) auto_binning=NA
if(missing(path_out)) path_out=NA
## If input is missing, run for all variables
if(missing(input))
{
## Excluding target variable
input=colnames(data)
input=input[input!=target]
}
## Iterator
for(i in 1:length(input))
{
cross_plot_logic(data = data, input=input[i], target=target, path_out = path_out, auto_binning, plot_type)
}
}
cross_plot_logic <- function(data, input, target, path_out, auto_binning, plot_type)
{
# data=heart_disease; input="max_heart_rate"; target="has_heart_disease"; auto_binning=T
check_target_existence(data, target=target)
data=remove_na_target(data, target=target)
check_target_2_values(data, target=target)
if(!(plot_type %in% c('both','percentual', 'quantity')))
stop("Value for 'plot_type' is not valid: available values: 'both', 'percentual' or 'quantity'")
## Initial assignments
varTarget=data[[as.character(target)]]
varInput=data[[as.character(input)]]
q_unique_input_values=length(unique(varInput))
## Auto binning #############################
if(is.numeric(varInput))
{
if(!is.na(auto_binning) & auto_binning )
{
print(sprintf("Plotting transformed variable '%s' with 'equal_freq', (too many values). Disable with 'auto_binning=FALSE'", input))
varInput=suppressWarnings(equal_freq(varInput, 10))
}
if(is.na(auto_binning) & q_unique_input_values>20)
{
print(sprintf("Plotting transformed variable '%s' with 'equal_freq', (too many values). Disable with 'auto_binning=FALSE'", input))
varInput=suppressWarnings(equal_freq(varInput, 10))
}
} else {
if(q_unique_input_values>50)
stop(sprintf('Skipping "%s" variable: more than 50 unique values.', input))
}
#############################################
## Infer the less representative class (commonly the one to predict)
df_target=data.frame(target=varTarget)
dcount=group_by(df_target, target) %>% summarise(freq=n()) %>% arrange(freq)
## Converting factors to character
dcount=data.frame(lapply(dcount, as.character), stringsAsFactors=FALSE)
posClass=dcount[1,1]
negClass=dcount[2,1]
dataCast = dcast(data,varInput~varTarget,fun.aggregate=length, value.var = target)
## Melt data for ggplot
dataMelt=melt(dataCast, measure.vars = c(posClass,negClass))
## Converting target into factor
dataMelt$variable=factor(dataMelt$variable, c(posClass,negClass))
## Getting percentage numbers
m1=group_by(dataMelt, varInput) %>% mutate(ratio = value/sum(value)) %>% select(varInput, ratio) %>% arrange(varInput)
## Order by var input
m2 = dataMelt[order(dataMelt$varInput ),]
dataGrafPrep=data.frame(m2,ratio=m1$ratio)
## Generating Id indicator for odd/even calculus
rownames(dataGrafPrep) = 1:nrow(dataGrafPrep)
# Computing if the row is odd/even
dataGrafPrep$fum=as.numeric(as.numeric(rownames(dataGrafPrep)) %% 2 == 0)
## Computing middle position in each sub bar
dataGrafPrep=group_by(dataGrafPrep, varInput) %>% mutate(position = 0.5*ratio+fum*(sum(value)-value)/sum(value))
lGraf=list()
## set factor to print correct bars
dataGrafPrep$variable=factor(dataGrafPrep$variable, levels = c(negClass,posClass), ordered = FALSE)
## Percentual
lGraf$percentual = ggplot(dataGrafPrep, aes(x=factor(varInput), y=value, fill=variable))+
geom_bar(position="fill",stat="identity") +
geom_text(aes(label = sprintf("%0.1f", 100*ratio), y = position)) +
guides(fill=FALSE) +
labs(x = input, y = paste(target, " (%)", sep=" ")) +
theme_bw() +
theme(axis.text.x=element_text(angle = 45, hjust = 1),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
plot.background = element_blank(),
axis.title.x=element_text(margin=margin(15,0,0,0)),
axis.title.y=element_text(margin=margin(0,15,0,0))
) +
scale_y_continuous(labels=percent) +
scale_fill_manual(values=c("#00BFC4","#F8766D"))
## Quantity plot
lGraf$quantity = ggplot(dataGrafPrep, aes(x=factor(varInput), y=value, ymax=max(value)*1.05, fill=variable)) +
geom_bar(position=position_dodge(),stat="identity") +
geom_text(aes(label=value), position=position_dodge(width=0.9), vjust=-0.25, size=4) +
labs(x = input, y = paste(target, " (count)", sep=" ")) +
ylim(0, max(dataGrafPrep$value)+max(dataGrafPrep$value)*0.05) +
theme_bw() +
theme(plot.background = element_blank(),
panel.border = element_blank(),
axis.text.x=element_text(angle = 45, hjust = 1),
legend.title=element_blank(),
axis.title.x=element_text(margin=margin(15,0,0,0)),
axis.title.y=element_text(margin=margin(0,15,0,0))) +
guides(col = guide_legend(ncol = 1, byrow = TRUE)) +
scale_fill_manual(values=c("#00BFC4","#F8766D"))
if(plot_type=='both')
{
## Printing both plots
final_plot=grid.arrange(lGraf$percentual, lGraf$quantity, ncol=2)
}
if(plot_type=='percentual')
{
final_plot=lGraf$percentual
plot(final_plot)
}
if(plot_type=='quantity')
{
final_plot=lGraf$quantity
plot(final_plot)
}
## Save plot
if(!is.na(path_out))
{
dir.create(path_out, showWarnings = F)
if(dir.exists(path_out))
{
jpeg(sprintf("%s/%s.jpeg", path_out, input), width= 12.25, height= 6.25, units="in",res=200, quality = 90)
plot(final_plot)
dev.off()
} else {
warning(sprintf("The directory '%s' doesn't exists.", path_out))
}
}
}
#' Equal frequency binning
#' @description Equal frequency tries to put the same quantity of cases per bin when possible. It's a wrapper of function cut2 from Hmisc package.
#' @param var input variable
#' @param n_bins number of bins to split 'var' into by equal frequency; if it is not possible to calculate the desired number of bins, the closest achievable number is used
#' @examples
#' ## Example 1
#' summary(heart_disease$age)
#' age_2=equal_freq(var=heart_disease$age, n_bins = 10)
#' summary(age_2)
#'
#' ## Example 2
#' age_3=equal_freq(var=heart_disease$age, n_bins = 5)
#' summary(age_3)
#' @return The binned variable.
#' @export
equal_freq <- function(var, n_bins)
{
n_bins_orig = n_bins
res = cut2(var, g = n_bins)
uq=unique(res)
n_bins_final = length(uq)
if (n_bins_final != n_bins & sum(is.na(uq))==0)
warning(sprintf("It's not possible to calculate with n_bins=%s, setting n_bins in: %s.",
n_bins, n_bins_final))
return(res)
}
|
/R/cross_plot.R
|
permissive
|
DataVizi/funModeling
|
R
| false | false | 8,885 |
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/AllClasses.R
\docType{class}
\name{SCESet}
\alias{SCESet}
\alias{SCESet-class}
\title{The "Single Cell Expression Set" (SCESet) class}
\description{
S4 class and the main class used by scater to hold single cell expression
data. SCESet extends the basic Bioconductor ExpressionSet class.
}
\details{
This class is initialized from a matrix of expression values.
Methods that operate on SCESet objects constitute the basic scater workflow.
Thanks to the Monocle package (github.com/cole-trapnell-lab/monocle-release/)
for their CellDataSet class, which provided the inspiration and template for
SCESet.
}
\section{Slots}{
\describe{
\item{\code{logged}:}{Scalar of class \code{"logical"}, indicating whether
the expression data in the `exprs` slot have been log2-transformed.}
\item{\code{logExprsOffset}:}{Scalar of class \code{"numeric"}, providing an offset
applied to expression data in the `exprs` slot when undergoing log2-transformation
to avoid trying to take logs of zero.}
\item{\code{lowerDetectionLimit}:}{Scalar of class \code{"numeric"},
giving the lower limit for an expression value to be classified as
"expressed".}
\item{\code{cellPairwiseDistances}:}{Matrix of class \code{"numeric"},
containing pairwise distances between cells.}
\item{\code{featurePairwiseDistances}:}{Matrix of class \code{"numeric"},
containing pairwise distances between features.}
\item{\code{reducedDimension}:}{Matrix of class \code{"numeric"}, containing
reduced-dimension coordinates for cells (generated, for example, by PCA).}
\item{\code{bootstraps}:}{Array of class \code{"numeric"} that can contain
bootstrap estimates of the expression or count values.}
}
}
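% Added illustration (a sketch): the constructor name and argument below are
% assumptions based on this era of scater (newSCESet() building an SCESet from
% a count matrix); check the package for the exact signature.
\examples{
\dontrun{
counts <- matrix(rpois(200, lambda = 5), ncol = 10)
example_sceset <- newSCESet(countData = counts)
}
}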
|
/man/SCESet.Rd
|
no_license
|
SSICreative83/scater
|
R
| false | false | 1,816 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ladder_power.R
\name{ladder_power}
\alias{ladder_power}
\title{Find the nearest ladder-of-powers representation of a power transformation}
\usage{
ladder_power(p)
}
\arguments{
\item{p}{A numeric power, for use as a transformation of a response, y, of the form \eqn{y^p},
where \code{p=0} is interpreted to mean \eqn{log(y)}}
}
\value{
a named list of two elements: \code{power}, the ladder-of-power value, and
\code{name}, the name for the transformation
}
\description{
The input power value is rounded to the nearest integer or fractional powers, \eqn{\pm 1/3, 1/2}.
The function is presently designed just for display purposes.
}
\details{
In use, the transformation via the ladder of powers usually attaches a minus sign to
the transformation when the \code{power < 0}, so that the order of the response values
are preserved under the transformation. Thus, a result of \code{power = -0.5} is interpreted
to mean \eqn{-1 / \sqrt{y}}.
}
\examples{
ladder_power(0.6)
ladder_power(-0.6)
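## Added sketch: use the rounded power from the returned list to transform y;
## per the Details section, a negative result keeps the leading minus sign
p <- ladder_power(-0.6)$power   # -0.5, i.e. the transformation -1/sqrt(y)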
}
\references{
Tukey, J. W. (1977). \emph{Exploratory Data Analysis}, Reading MA: Addison-Wesley.
}
|
/man/ladder_power.Rd
|
no_license
|
cran/twoway
|
R
| false | true | 1,225 |
rd
|
# plot 3. Code to plot four types of PM25 sources (point, nonpoint, onroad,
# nonroad) from 1999–2008 for Baltimore City.
# Coursera Exploratory Data Analysis
# Project 2, due week 3
# set working directory
setwd("~/Documents/R/ExData2")
# load plyr (for ddply) and ggplot2
library(plyr)
library(ggplot2)
# read in data from .rds file
my_data <- readRDS("summarySCC_PM25.rds")
# set global parameter: ps = 12 smaller font for plot labels
# mar = plot margins, scipen = favor non-scientific notation
par(ps=12)
par(mar=c(5.1,4.1,2.1,2.1))
options(scipen = 7)
# aggregate my_data: sum the Emissions and keep associated year column
polluteBalt <- ddply(my_data[my_data$fips=="24510",],
c("year","type"),
summarise,
Emissions = sum(Emissions))
# change polluteBalt$type to factor in preparation for comparing emissions by type
polluteBalt$type <- as.factor(polluteBalt$type)
# open graphics file
png(file="plot3.png", width=480, height=480)
# Initial ggplot
qplot(year, Emissions, data=polluteBalt, color = type, geom="line") +
labs(x = "Year", y = expression(PM[2.5] ~ " Emissions (tons)")) +
labs(title = expression(PM[2.5] ~ " Emissions by Type, Baltimore")) +
labs(colour = "Type")
# close and save graphics file.
dev.off()
|
/plot3.R
|
no_license
|
jotaboca/Exploratory-Data-Analysis-2
|
R
| false | false | 1,334 |
r
|
# INFO 201
# Final Group Project
# Date: December 9, 2020
library("dplyr")
library("tidyr")
# Load the `ggplot2`, `plotly`, and `shiny` libraries for plotting and the app
library("ggplot2")
library("plotly")
library("shiny")
# =====================================================================
# Read the U.S.Census "Historical Income Tables: Race" data
# =====================================================================
income_data <- read.csv("https://raw.githubusercontent.com/brandonwong3/final-deliverable/master/Census_Summary.csv", stringsAsFactors = FALSE, header=TRUE)
# Function to remove ',' in CSV data for # in the thousands
replaceCommas<-function(x){
x<-as.numeric(gsub(",", "", x))
}
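# Quick illustration (not part of the original app): replaceCommas() strips the
# thousands separators before converting to numeric, e.g.
#   replaceCommas("12,345")             # 12345
#   replaceCommas(c("1,000", "2,500"))  # 1000 2500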
# =====================================================================
# Wrangle Data from US Census
# =====================================================================
# Clean Data, remove commas from columns with strings of numbers
# Note: in apply() use MARGIN=2 for columns
data_apply <- apply(income_data[ ,c(2:7)], 2, replaceCommas)
data_new <- income_data # Replicate original data
data_new[ , colnames(data_new) %in% colnames(data_apply)] <- data_apply # Replace specific columns
# Data for all four Race categories start at 2002; filter from 2002
data_new <- data_new %>%
filter(Year >= 2002)
# =====================================================================
# FUNCTIONS
# =====================================================================
# ========================
# Reactive Chart
# Function: Household income by Fifth (Line Chart)
# ggplot geom_ function
# Parameter chooses which fifth of the household income data to graph
plot_chart <- function(by_column) {
by_column <- ensym(by_column) # needed to read column name
df <- ggplot(data = data_new) +
geom_line(mapping = aes(x = Year, y = !!by_column, color = Race)) +
labs( # Add title and axis labels
title = paste("Household Income for Each Fifth"),
x = "Year", # x-axis label
y = "Income ($)", # y-axis label
color = "" # legend label for the "color" property
)
df
ggplotly(df) %>%
# Legend overlapped the plotly menus, used manual placement for legend
add_annotations( text="Race", xref="paper", yref="paper",
x=1.02, xanchor="left",
y=0.8, yanchor="bottom", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
layout( legend=list(y=0.8, yanchor="top" ) )
}
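# Usage note (illustration; assumes data_new has a column named "Lowest"):
# plot_chart() defuses its argument with ensym(), so it can be called with a bare
# column name, e.g. plot_chart(Lowest). In the server below it is called as
# plot_chart(!!input$rb_chosen); the !! injection is resolved while ensym()
# captures the argument, turning the selected string (e.g. "Lowest") into the
# column symbol. An equivalent programmatic call would be:
#   plot_chart(!!rlang::sym("Lowest"))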
# ========================
# Reactive Chart
# Function: Percentage Comparison of Non-White to White Income (Line Chart)
# ggplot geom_ function
# Parameter chooses which fifth of the household income data to graph
plot_income_percent <- function(by_column) {
by_column <- ensym(by_column) # fixes column name passed as argument
  income_long <- data_new %>%                 # keep only the columns needed for this fifth
select(Year, by_column, Race) # select fifth vector
income_dev <- spread(
income_long, # data frame to spread from
key = Race,
value = by_column
)
income_dev <- mutate(income_dev, # calculate % of non-white to white
Asians = Asian / White,
Hispanics = Hispanic / White,
Whites = 1.0,
Blacks = Black / White)
income_dev <- income_dev %>% # remove orig data, keep only %
select(Year, Asians, Hispanics, Whites, Blacks)
  income_long <- gather(                      # reshape back from wide to long for plotting
income_dev, # data frame to gather from
key = Race,
value = Income,
-Year # columns to gather data from, as in dplyr's `select
)
df <- ggplot(data = income_long) +
geom_line(mapping = aes(x = Year, y = Income, color = Race)) +
labs(
title = "Income by Race compared to Whites", # plot title
x = "Year", # x-axis label
y = "Income (%)", # y-axis label
color = "" # legend label deleted
) +
scale_y_continuous(breaks=seq(0.5, 1.5, by = 0.1))
df
ggplotly(df) %>%
# Legend overlapped the plotly menus, used manual placement for legend
add_annotations( text="Race", xref="paper", yref="paper",
x=1.02, xanchor="left",
y=0.8, yanchor="bottom", # Same y as legend below
legendtitle=TRUE, showarrow=FALSE ) %>%
layout( legend=list(y=0.8, yanchor="top" ) )
}
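# Toy illustration of the reshape used above (hypothetical numbers, not Census data):
# spread to one column per Race, divide by the White column, then gather back.
#   toy  <- data.frame(Year = 2002, Race = c("White", "Black"), Lowest = c(20000, 12000))
#   wide <- spread(toy, key = Race, value = Lowest)   # columns: Year, Black, White
#   wide$Blacks <- wide$Black / wide$White            # 0.6
#   gather(wide[, c("Year", "Blacks")], key = Race, value = Income, -Year)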
# =====================================================================
# Use data from the NCES (National Center for Educational Statistics)
#
# =====================================================================
nces_grad <- read.csv("https://raw.githubusercontent.com/brandonwong3/final-deliverable-ba-group-5/master/NCES_grad_rate.csv",
stringsAsFactors = FALSE, header=TRUE)
nces_enroll <- read.csv("https://raw.githubusercontent.com/brandonwong3/final-deliverable-ba-group-5/master/NCES_enrollment_rate.csv",
stringsAsFactors = FALSE, header=TRUE)
# =======================
# Wrangle NCES data
# =======================
# Clean Year data by removing " starting cohort ....." in String
# Rename value column 'His-panic' to 'Hispanic'
nces_grad <- mutate(nces_grad, Year_clean = substr(nces_grad$Year, 1, 4))
nces_grad <- nces_grad %>%
select(Year_clean, White, Black, His..panic, Asian) %>%
rename(Year=Year_clean, Hispanic=His..panic)
nces_enroll <- mutate(nces_enroll, Year_clean = substr(nces_enroll$Year, 1, 4))
nces_enroll <- nces_enroll %>%
select(Year_clean, White, Black, Hispanic, Asian) %>%
rename(Year=Year_clean)
# ==========================
# Format for plotting as long
# ============================
nces_grad_long <- gather(                     # reshape from wide to long for plotting
nces_grad, # data frame to gather from
key = Race,
value = Grad_Rate,
-Year # columns to gather data from, as in dplyr's `select
)
nces_enroll_long <- gather(                   # reshape from wide to long for plotting
nces_enroll, # data frame to gather from
key = Race,
value = Enroll_Rate,
-Year # columns to gather data from, as in dplyr's `select
)
# Join data sets
combined_nces <- left_join(nces_enroll_long, nces_grad_long,
by=c('Year'='Year', 'Race'='Race'))
combined_nces <- combined_nces %>%
rename("Graduation"="Grad_Rate", "Enrollment"="Enroll_Rate") %>%
filter(Year >= 2000)
# ===================================
# Plot NCES
# ===================================
plot_nces <- function(by_column) {
by_column <- ensym(by_column)
df <- ggplot(data = combined_nces) +
geom_line(mapping = aes(x=Year, y=!!by_column, group=Race, color=Race)) +
scale_x_discrete(breaks=seq(2000, 2020, 2)) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
# Add labels
labs(
title = paste("College", by_column,"Rates"), # plot title
x = "Year", # x-axis label
y = "Percent (%)", # y-axis label
color = "Race" # legend label for the "color" property
)
df
}
# ===================================
# UI code
# ===================================
# Define the first page content; uses `tabPanel()` and `sidebarLayout()`
# layout functions together (as an example)
page_one <- tabPanel(
"Intro tab", # label for the tab in the navbar
titlePanel("Exploring the U.S. Income Gap by Race"), # show with a displayed title
"It should be no surprise that the yearly income of households in the",
"United States varies by race. Despite the many decades that have passed",
"since the Civil Rights movement, the income gap between races remains.",
"This project uses data from the ",
a("U.S. Census Bureau", href="https://www.census.gov/data/tables/time-series/demo/income-poverty/historical-income-inequality.html"),
" and the ",
a("National Center for Education Statistics", href="https://nces.ed.gov/programs/digest/current_tables.asp"),
" to explore this gap.",
"",
"",
"",
br(), br(),
p("Income is just one measure of the economical financial racial gap ",
"in the United Stats. Ultimately, the historical roots of structural ",
"income inequality has manifested in a racial wealth gap that is ",
"difficult to overcome. The White middle class prospered and built ",
"wealth under the Jim Crow laws, the G.I. Bill, taxcuts, segregation, ",
"and discrimination against Blacks owning wealth. Future generations ",
"benefited from this accumulated wealth. According to ",
a("Racial Wealth Gap in the United States", href="https://www.thebalance.com/racial-wealth-gap-in-united-states-4169678"),
" the racial wealth gap is widening."),
br(),
img(src="https://cdn.vox-cdn.com/thumbor/pW69mUpDW5mxRD7Oo2n0cL-cfi8=/0x0:2400x1600/2820x1586/filters:focal(1280x496:1664x880)/cdn.vox-cdn.com/uploads/chorus_image/image/66634984/031920_healthfood_rg_21.0.jpg", width=600),
p("")
)
# Define content for the second page
page_two <- tabPanel(
"Income tab", # label for the tab in the navbar
# ...more content would go here...
# This content uses a sidebar layout
sidebarLayout( sidebarPanel(
radioButtons(inputId = "rb_chosen", # key assigned
label = "Fifth (quintile) Selection",
choices = list("Lowest" = "Lowest",
"Second" = "Second",
"Third" = "Third",
"Fourth" = "Fourth",
"Top 5 Percent" = "Top5")
), # close radio button
), # close sidebarLayout
mainPanel(
h3("Historical Income Inequality"),
p("Source: U.S. Census Bureau (CPS ASEC)"),
plotlyOutput(outputId = "income_plot"), # panel output
br(),
br("The income gap for Blacks has not improved despite what people ",
"may think. The income gap for Hipanics has narrowed somewhat. ",
"What is most interesting is that the gaps have remained the same ",
"at every income level. Even within the top five percent of incomes ",
"for each race, Blacks and Hispanics experience the same amount ",
"of reduced income as the lower levels of income. "),
br("In order to close the gap, Blacks and Hispanics must ",
"move more of their population in to the middle class. ",
"This means more preparing them for employment in middle class ",
"jobs and ensuring they receive compensation equal to Whites for ",
"the same work.")
) )
)
# Define content for the third page
page_three <- tabPanel(
"Comparison tab", # label for the tab in the navbar
sidebarLayout( sidebarPanel(
radioButtons(inputId = "rb_chosen_c", # key assigned
label = "Fifth (quintile) Selection",
choices = list("Lowest" = "Lowest",
"Second" = "Second",
"Third" = "Third",
"Fourth" = "Fourth",
"Top 5 Percent" = "Top5")
), # close radio button
), # close sidebarLayout
mainPanel(
h3("Historical Income Inequality"),
p("Source: U.S. Census Bureau (CPS ASEC)"),
plotlyOutput(outputId = "compare_plot"), # panel output
br(),
br("This visual illustrates a comparison of income between Non-Whites ",
"and Whites. This clearly shows that Blacks and Hipanics have not ",
"made significant strides in closing the income gap with Whites. "),
p(),
p("Black income has remained flat (no growth) at all income levels."),
p("Hispanics have made slight improvements in income over time."),
p("Asians have trended higher and even widened the gap with Whites.")
) )
)
# Define content for the fourth page
page_four <- tabPanel(
"Higher Education tab", # label for the tab in the navbar
sidebarLayout( sidebarPanel(
radioButtons(inputId = "rb_chosen_nces", # key assigned
label = "College Education",
choices = list("Enrollment Rate" = "Enroll",
"Graduation Rate" = "Grads")
), # close radio button
), # close sidebarLayout
mainPanel(
h3("College Statistics"),
p("Source: National Center for Education Statistics"),
plotlyOutput(outputId = "nces_plot"), # panel output
br(),
br("The college enrollment rates, in general, matched the order of ",
"incomes by race, where Asians were at the top, followed by Whites, ",
"Hispanics then Blacks. The most encouraging news was that for all ",
"races, the percentage of students that enrolled in college after ",
"high school was above 50%."),
p(),
p("The graduation ratea of all races has trended up. Blacks have not increased ",
"their rates as much as the other races. When comparing the income ",
"data with college graduation data, the graduation data shows a much ",
"wider gap between Whites versus Blacks and Hispanics. An improvement ",
"in college graduation rates of Blacks could be a stimulus to potentially ",
"close the income disparity between races.")
) )
)
# =====================================================================
# Conclusion page
# =====================================================================
# Define content for the fifth page
page_five <- tabPanel(
"Conclusion tab", # label for the tab in the navbar
titlePanel("Final Thoughts on the Income Gap and Race"), # show with a displayed title
p("After our group conducted our research, it becomes clear that racial ",
"income gaps have only been sustained throughout recent history. Although ",
"Asians have been able to break out of the systemic income inequality present ",
"in the United States, one major takeaway from our analysis is that race still plays a ",
"major role in the issue of the income gap in the United States; underprivileged communities, ",
"specifically black and hispanic communities, are still experiencing systemic oppression ",
"that has failed to lessen over the past decade. ",
"",
"",
"",
br(), br(),
img(src="https://borgenproject.org/wp-content/uploads/32855286416_066fc5371d_k-930x621.jpg")),
p("Another takeaway from the data was how non-white racial groups, specifically ",
"Asians, have been successful in overtaking whites in income percentage, ",
"while other non-white racial groups (specifically Hispanics and blacks) ",
"have had little to no success in matching whites in their income percentage. ",
"This gap is mostly due to systemic racism that is present in all forms of life: ",
"mass incarceration, over policing, and other forms of systemic racism lead to less ",
"opportunities for minorities, overall leading to less average income. ",
br(), br(),
img(src="https://www.pambazuka.org/sites/default/files/styles/flexslider_full/public/field/image/HT-natalie-keyssar-blm-protest-01-as-170106_3x2_1600.jpg?itok=L3QqU18j")),
p("The final takeaway that we concluded from the data ",
"was that there is still much work to be done regarding ",
"the issue of the income gap and race. Although this project ",
"did allow us to gain a much better understanding of how ",
"race and economic issues are related, as a collective group ",
"we still possess only a small amount of the knowledge ",
"necessary to fully comprehend this issue. Therefore, as a ",
"future goal, we have all agreed to continue to pursue ",
"this topic in an attempt to solve the unjust inequalities ",
"present in the United States. "),
br(), br(),
img(src="https://www.mindingthecampus.org/wp-content/uploads/2019/03/hands-2082x1171.jpg"),
)
# Pass each page to a multi-page layout (`navbarPage`)
my_ui <- navbarPage(
"My Application", # application title
page_one, # include the first page content
page_two, # include the second page content
page_three, # include the third page content
page_four, # include the fourth page content
page_five # include the fifth page content
)
# =====================================================================
# Server code
# =====================================================================
my_server <- function(input, output) {
# Create and return line chart of Page two
output$income_plot <- renderPlotly({
plot_chart(!!input$rb_chosen)
})
# Create and return line chart of Page Three
output$compare_plot <- renderPlotly({
plot_income_percent(!!input$rb_chosen_c)
})
# Create and return line chart of Page Four
output$nces_plot <- renderPlotly({
if (!!input$rb_chosen_nces == "Enroll") {
plot_nces(Enrollment)
} else {
plot_nces(Graduation)
}
})
}
# =====================================================================
# Start running the application
# =====================================================================
shinyApp(ui = my_ui, server = my_server)
|
/app.R
|
no_license
|
brandonwong3/final-deliverable-ba-group-5
|
R
| false | false | 17,073 |
r
|
# makeCacheMatrix creates a list of functions to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
# cacheSolve function returns the inverse of the matrix.
# 1. checks if the inverse has been computed.
# a. if Yes, it gets the precomputed result and returns.
# b. else, it computes the inverse using solve() function
# and sets the value in cache via setinverse
# function.
# cacheSolve assumes that the matrix is always invertible, therefore no
# additional checks for this purpose will be made.
cacheSolve <- function(x, ...) {
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data.")
return(inverse)
}
data <- x$get()
inverse <- solve(data)
x$setinverse(inverse)
inverse
}
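## Example usage (added for illustration; not part of the original assignment):
## the second cacheSolve() call returns the cached inverse and prints the message.
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(m)   # computes the inverse (a diagonal matrix of 0.5) and caches it
# cacheSolve(m)   # prints "getting cached data." and returns the cached inverse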
|
/cachematrix.R
|
no_license
|
kostaras/R-exercises
|
R
| false | false | 1,226 |
r
|
back_superiority_probability <- function(fit) {
params <- extract(fit)
prob <- mean(params$mu[, 2] > params$mu[, 1])
sprintf("%.02f%%", prob * 100)
}
back_superiority_distance <- function(fit) {
params <- extract(fit)
delta <- mean(params$mu[, 2] - params$mu[, 1])
round(delta, 2)
}
back_superiority_percent <- function(fit) {
params <- extract(fit)
delta <- mean(params$mu[, 2] - params$mu[, 1])
delta.percent <- exp(delta) - 1
sprintf("%.02f%%", delta.percent * 100)
}
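# Illustration (assumption: `fit` is an rstan fit whose extract()$mu is an
# iterations x 2 matrix of posterior draws, with column 2 the "back" design).
# The same computations on simulated draws, without needing a Stan fit:
#   mu <- cbind(rnorm(4000, 3.0, 0.1), rnorm(4000, 3.1, 0.1))
#   mean(mu[, 2] > mu[, 1])             # analogue of back_superiority_probability
#   round(mean(mu[, 2] - mu[, 1]), 2)   # analogue of back_superiority_distance
#   exp(mean(mu[, 2] - mu[, 1])) - 1    # ~0.10 if mu is on a log scale (percent version)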
|
/utils/metrics.R
|
permissive
|
jmshoun/paper-airplane
|
R
| false | false | 493 |
r
|
#' read contig annotation file
read_contig_annotation <- function(contig_annotation_file) {
if ( ! file.exists(contig_annotation_file)) {
stop("contig annotation file is not found.")
}
contig_annotation <- read.delim(contig_annotation_file)
if ( ! "family" %in% colnames(contig_annotation)) {
stop("Contig annotation should have family column.")
}
contig_annotation$contig_name <- as.character(contig_annotation$contig_name)
contig_annotation
}
#' read contig annotation file and add column sample names
read_contig_annotation_sample <- function(contig_annotation_file, sample_name) {
annotation <- read_contig_annotation(contig_annotation_file)
annotation$sample_name <- sample_name
annotation
}
#' for a given dataframe with info about how many reads are mapped to contig
#' add viral family annotation
#' @param idxstats produced by read_idxstats(filename)
#'
#' @return dataframe with 3 columns: contig name(contig_name),
#' number of reads mapped to contig(read_count), viral family(family).
add_family_annotation <- function(idxstats, contig_annotation) {
contig_family <- contig_annotation[ , c("contig_name", "family")]
contig_to_read_count <- idxstats[ , c("contig_name", "mapped")]
names(contig_to_read_count) <- c("contig_name", "read_count")
contig_read_family <- merge(contig_to_read_count, contig_family, all.x=TRUE)
contig_read_family
}
#' for each family counts how many reads were mapped to the family.
#'
#' @return dataframe with 2 columns: family name, number of reads mapped to the family.
#' @seealso add_family_annotation
get_family_to_read_count <- function(contig_read_family) {
if (sum(is.na(contig_read_family$family)) == length(contig_read_family$family)) { # all NAs and nothing else
no_family_annotation <- sum(contig_read_family$read_count[is.na(contig_read_family$family)])
return(data.frame(family="UNKNOWN", read_count=no_family_annotation))
}
with_family_annotation <- aggregate(read_count ~ family, contig_read_family, sum)
with_family_annotation$family <- as.character(with_family_annotation$family)
no_family_annotation <- sum(contig_read_family$read_count[is.na(contig_read_family$family)])
family_read <- rbind(with_family_annotation,
data.frame(family="UNKNOWN", read_count=no_family_annotation))
family_read$family <- as.factor(family_read$family)
family_read
}
#' for each family counts how many contigs mapped to the family.
#'
#' @param contig_annotation df
#' @return dataframe with 2 columns: family name, number of contigs mapped to the family.
get_family_to_contig_count <- function(contig_annotation) {
  # aux read_count column added to use the get_family_to_read_count function
fam_count <- data.frame(read_count=c(1.0), family=contig_annotation$family)
get_family_to_read_count(fam_count)
}
#' aux function that adds sample name to get_family_to_contig_count df
family_contig_count_for_sample <- function(contig_annotation, sample_name) {
family_count <- get_family_to_contig_count(contig_annotation)
family_count$sample <- sample_name
family_count
}
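# Usage sketch (toy annotation, not real data): counting contigs per family,
# with un-annotated contigs collected under "UNKNOWN".
#   toy_annotation <- data.frame(
#     contig_name = c("c1", "c2", "c3", "c4"),
#     family      = c("Siphoviridae", "Siphoviridae", "Myoviridae", NA),
#     stringsAsFactors = FALSE)
#   get_family_to_contig_count(toy_annotation)
#   #         family read_count
#   #     Myoviridae          1
#   #   Siphoviridae          2
#   #        UNKNOWN          1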
|
/family_to_read.R
|
no_license
|
anatolydryga/genomeDepth
|
R
| false | false | 3,158 |
r
|
#' @title Build a pattern
#' @description Builds data based on a pattern. This function uses another internal function \code{\link{genPattern}}.
#' @param n A natural number. This specifies the number of data points to build.
#' @param parts A list. Each element is a vector of the possible members for one part of the pattern.
#' @param probs A list. Each element is a vector of probabilities (each between 0 and 1) for the members of the corresponding element of \code{parts}.
#' @return A vector.
#' @details This function helps in generating data based on a pattern. To explain in simple terms, this function aims to perform the exact opposite of a regular expression (regex) function. In other words, this function generates data given a generic pattern.
#'The steps in the process of building data from a pattern is as follows.
#' \enumerate{
#' \item Identify the parts that make up the data. Ideally, these parts have a pattern and a probabilistic distribution of their own. For example, a phone number has three parts namely, country code, area code and a number.
#' \item Assign probabilities to each of the above parts. If a part contains only one member, then the corresponding probability must be 1. However, if there are multiple members in the part, then each member must have a probability provided in the respective order.
#' }
#' @examples
#' parts <- list(c("+91","+44","+64"), c(491,324,211), c(7821:8324))
#' probs <- list(c(0.25,0.25,0.50), c(0.30,0.60,0.10), c())
#' phoneNumbers <- buildPattern(n=20,parts = parts, probs = probs)
#' head(phoneNumbers)
#' parts <- list(c("+91","+44","+64"), c("("), c(491,324,211), c(")"), c(7821:8324))
#' probs <- list(c(0.25,0.25,0.50), c(1), c(0.30,0.60,0.10), c(1), c())
#' phoneNumbers <- buildPattern(n=20,parts = parts, probs = probs)
#' head(phoneNumbers)
#' @seealso \code{\link{genPattern}}.
#' @export
buildPattern <- function(n,parts,probs)
{
patternVector <- character(n)
orderedList <- vector(mode = "list", length = length(parts))
for(i in seq_along(orderedList))
{
orderedList[[i]]$values <- parts[[i]]
orderedList[[i]]$probs <- probs[[i]]
}
for (j in seq_len(n))
{
patternVector[j] <- genPattern(orderedList)
}
return(patternVector)
}
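# genPattern() is internal to the package and not shown here. Purely as an
# illustration (an assumption, not the package's actual implementation), a
# minimal version could sample one member from each part and paste the pieces:
#   genPattern_sketch <- function(orderedList) {
#     pieces <- vapply(orderedList, function(part) {
#       p <- if (length(part$probs) == 0) NULL else part$probs  # empty probs -> uniform
#       as.character(sample(part$values, 1, prob = p))
#     }, character(1))
#     paste0(pieces, collapse = "")
#   }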
|
/R/buildPattern.R
|
no_license
|
cran/conjurer
|
R
| false | false | 2,164 |
r
|
## This script loads parcels and their associated attributes from
## https://github.com/HCNData/landgrabu-data, keeping only those
## that fall within California.
## Load Packages
library(sf)
library(dplyr)
library(readr)
library(conflicted)
## Optional
library(DBI)
library(RSQLite)
# Set conflict preferences
conflict_prefer("filter", "dplyr", quiet = TRUE)
conflict_prefer("count", "dplyr", quiet = TRUE)
conflict_prefer("select", "dplyr", quiet = TRUE)
# Define Directories
dir_hcn_shp <- "D:/GitHub/uc_landgrant/landgrabu-data/Morrill_Act_of_1862_Indigenous_Land_Parcels_Database/Shapefiles"
dir_hcn_csv <- "D:/GitHub/uc_landgrant/landgrabu-data/Morrill_Act_of_1862_Indigenous_Land_Parcels_Database/CSVs"
dir_hcn_out <- "D:/GitHub/uc_landgrant/landgrabu-data-filt"
file.exists(dir_hcn_shp); file.exists(dir_hcn_csv); file.exists(dir_hcn_out)
# Read in the parcels Shapefile:
hcnparc_all_sf <- st_read(file.path(dir_hcn_shp, "Parcel_Polygons.shp"))
nrow(hcnparc_all_sf)
## See if the parcels.csv file has any embedded nuls
parcels_raw <- read_file_raw(file.path(dir_hcn_csv, "Parcels.csv"))
sum(parcels_raw == 0)
## Import the CSV (takes 3-4 minutes, most of which is the conversions)
hcnparc_all_tbl <- readr::read_csv(file.path(dir_hcn_csv, "Parcels.csv"),
col_names = TRUE,
na = c("", "na", "NA", "#N/A"),
col_types = cols(
MTRSA_LG = col_character(),
Loc_State = col_factor(),
Loc_County = col_character(),
Acres = col_double(),
LG_State = col_factor(),
LG_Reason = col_factor(),
University = col_factor(),
Uni_Ben_History = col_factor(),
Royce_ID = col_character(),
Tribal_Nation = col_factor(),
US_Acquired_Mode = col_factor(),
Cession_States = col_factor(),
Royce_Link = col_factor(),
Yr_US_Acquire = col_factor(),
Date_US_Acquire = col_character(),
US_Paid_for_Parcel = col_character(),
Endow_Raised_Parcel = col_character(),
Uni_Raise_US_Pay_Multiple = col_character(),
Yr_ST_Accept = col_factor(),
Yr_Uni_Assign = col_factor(),
Yr_Patent = col_integer(),
Date_Patent = col_integer(),
Patentees = col_factor(),
Patent_Source_Reason = col_factor(),
Source_ID = col_factor(),
Source = col_factor(),
Source_Loc = col_factor(),
Source_Type = col_factor(),
Source_Form = col_factor(),
Source_Acqu = col_factor(),
Source_Acqu_Detail = col_factor(),
Located_GIS = col_factor(),
Parcel_Link = col_character(),
MTRSA = col_character(),
MTRS = col_character(),
A_or_L = col_factor(),
Aliquot = col_factor(),
Types = col_factor(),
GISAcres = col_character(),
GIS_Acre_Div_List_Acre = col_double(),
Polygon = col_factor(),
Accuracy = col_factor(),
LG_Royce = col_factor()),
locale = readr::locale(encoding = "latin1")
)
## View results
dim(hcnparc_all_tbl)
## 79461 43
## Save a copy to disk (in a native R format)
save(hcnparc_all_tbl, file = file.path(dir_hcn_out, "hcnparc_all_tbl.RData"))
# load(file.path(dir_hcn_out, "hcnparc_all_tbl.RData"))
## Save this table as a SQLite database (for future use)
parc_db <- DBI::dbConnect(RSQLite::SQLite(), file.path(dir_hcn_out, "parcels.sqlite"))
DBI::dbListTables(parc_db)
## Write the table
DBI::dbWriteTable(parc_db, "parcels", hcnparc_all_tbl) ## works
DBI::dbListTables(parc_db)
## Add some indices
dplyr::db_create_index(parc_db, "parcels", "Loc_State", name = "loc_state_idx", unique = FALSE)
dplyr::db_create_index(parc_db, "parcels", "LG_State", name = "lg_state_idx", unique = FALSE)
## Close connection
dbDisconnect(parc_db)
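## Illustrative read-back (assumes the database written above; not in the original script):
# parc_db <- DBI::dbConnect(RSQLite::SQLite(), file.path(dir_hcn_out, "parcels.sqlite"))
# ca_rows <- DBI::dbGetQuery(parc_db,
#   "SELECT MTRSA_LG, Loc_County, Acres FROM parcels WHERE Loc_State = 'CA'")
# nrow(ca_rows)   # should match the CA subset built below (~16,607 rows)
# DBI::dbDisconnect(parc_db)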
##########################################
## CREATE A SUBSET OF ROWS AND COLUMNS
cols_keep <- c("MTRSA_LG", "Loc_State", "Loc_County", "Acres",
"LG_State", "LG_Reason", "University", "Tribal_Nation",
"US_Acquired_Mode", "Royce_Link", "Yr_US_Acquire",
"Date_Patent", "Patentees", "Source_ID", "Parcel_Link")
hcnparc_loc_ca_tbl <- hcnparc_all_tbl %>%
select(all_of( cols_keep)) %>%
filter(Loc_State == "CA")
nrow(hcnparc_loc_ca_tbl)
## 16607
## Inner-join to attribute fields
hcnparc_loc_ca_sf <- hcnparc_all_sf %>%
inner_join(hcnparc_loc_ca_tbl, by = "MTRSA_LG")
dim(hcnparc_loc_ca_sf)
## 16609 19
##################################################
## Export those parcels located in California to GeoJSON
## (my preferred format for uploading to AGOL, doesn't truncate field names)
## Write to GeoJSON
st_write(obj = hcnparc_loc_ca_sf,
dsn = file.path(dir_hcn_out, "hcnparc_loc_ca.geojson"),
layer = "ca_loc_parcels")
##################################################
## Subset those parcels that were transferred to CA to support
## the University of California
hcnparc_lg_ca_tbl <- hcnparc_all_tbl %>%
select(all_of( cols_keep)) %>%
filter(LG_State == "CA")
nrow(hcnparc_lg_ca_tbl)
# 2395
# Join the Shapefile to the attribute table
hcnparc_lg_ca_sf <- hcnparc_all_sf %>%
inner_join(hcnparc_lg_ca_tbl, by = "MTRSA_LG")
hcnparc_lg_ca_sf
## Export to GeoJSON
st_write(hcnparc_lg_ca_sf,
dsn = file.path(dir_hcn_out, "hcnparc_lg_ca.geojson"),
layer = "ca_lg_parcels")
|
/hcn-parcels-prep.R
|
no_license
|
UCANR-IGIS/morrill-map
|
R
| false | false | 6,608 |
r
|
#' @title sort.int
#'
#' @description another swapped-out function (half of the time it returns a random rap line instead of sorting)
#'
#' @param params arguments passed on to \code{base::sort.int}
#'
#'
#'
#' @export
sort.int<- function(params){
rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
"Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
"Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
"Przy piwerku boski chillout Gruba toczy sie rozkmina",
"Wez ziomalku sie nie spinaj DJ Werset znow zabija")
rapek <- sample(rap, 1)
if(runif(1,0,1) < 0.5){
rapek
}else{base::sort.int(params)
}
}
|
/R/sort.int.R
|
no_license
|
granatb/RapeR
|
R
| false | false | 671 |
r
|
install.packages("QuantPsyc")
###set working directory
setwd("~/OneDrive - Missouri State University/TEACHING/745 Grad Statistics/notes/fall 15")
options(scipen = 999)
##import the datafile
library(haven)
regdata = read_spss("c7 regression.sav")
# The next two lines referenced objects (c6_liar, c7_regression) that are never
# created in this script, so they are kept only as inert comments:
# c6_liar = as.data.frame(c6_liar)
# regdata <- c7_regression
master = regdata[ , c(8:10)]
model1 = lm(CESD_total ~ PIL_total + AUDIT_TOTAL_NEW, data = master)
mahal = mahalanobis(master,
colMeans(master),
cov(master))
cutmahal = qchisq(1-.001, ncol(master))
cutmahal
badmahal = as.numeric(mahal > cutmahal)
table(badmahal)
## badmahal
##   0   1
## 266   1
k=3
leverage = hatvalues(model1)
cutleverage = (2*k+2) / nrow(master)
cutleverage
badleverage = as.numeric(leverage > cutleverage)
table(badleverage)
## badleverage
##   0   1
## 251  16
cooks = cooks.distance(model1)
cutcooks = 4 / (nrow(master) - k - 1)
cutcooks
badcooks = as.numeric(cooks > cutcooks)
table(badcooks)
totalout = badmahal + badleverage + badcooks
table(totalout)
## totalout
##   0   1   2   3
## 240  21   5   1
noout = subset(master, totalout < 2)
noout
model2 = lm(CESD_total ~ PIL_total + AUDIT_TOTAL_NEW, data = noout)
summary(model2, correlation = TRUE)
## Call:
## lm(formula = CESD_total ~ PIL_total + AUDIT_TOTAL_NEW, data = noout)
##
## Residuals:
##     Min      1Q  Median      3Q     Max
## -13.535  -5.172  -1.545   3.168  29.820
##
## Coefficients:
##                 Estimate Std. Error t value Pr(>|t|)
## (Intercept)     54.31164    4.16332  13.045   <2e-16 ***
## PIL_total       -0.37281    0.03603 -10.346   <2e-16 ***
## AUDIT_TOTAL_NEW  0.00120    0.08315   0.014    0.988
## ---
## Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 7.422 on 258 degrees of freedom
## Multiple R-squared:  0.298,  Adjusted R-squared:  0.2926
## F-statistic: 54.77 on 2 and 258 DF,  p-value: < 2.2e-16
##
## Correlation of Coefficients:
##                 (Intercept) PIL_total
## PIL_total       -0.98
## AUDIT_TOTAL_NEW -0.28        0.15
standardized = rstudent(model2)
fitted = scale(model2$fitted.values)
qqnorm(standardized)
abline(0,1)
hist(standardized)
plot(fitted, standardized)
abline(0,0)
abline(v = 0)
summary(model2)
## Call:
## lm(formula = CESD_total ~ PIL_total + AUDIT_TOTAL_NEW, data = noout)
##
## Residuals:
##     Min      1Q  Median      3Q     Max
## -13.535  -5.172  -1.545   3.168  29.820
##
## Coefficients:
##                 Estimate Std. Error t value Pr(>|t|)
## (Intercept)     54.31164    4.16332  13.045   <2e-16 ***
## PIL_total       -0.37281    0.03603 -10.346   <2e-16 ***
## AUDIT_TOTAL_NEW  0.00120    0.08315   0.014    0.988
## ---
## Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 7.422 on 258 degrees of freedom
## Multiple R-squared:  0.298,  Adjusted R-squared:  0.2926
## F-statistic: 54.77 on 2 and 258 DF,  p-value: < 2.2e-16
library(QuantPsyc)
lm.beta(model2)
##       PIL_total AUDIT_TOTAL_NEW
##   -0.5458229582    0.0007613226
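##wrap the three screens above (Mahalanobis, leverage, Cook's) into one helper
##(a sketch added for clarity, not part of the original analysis; the function
##name and the k = 3 default mirroring the value used above are illustrative)
screen_outliers = function(data, model, k = 3) {
  mahal = mahalanobis(data, colMeans(data), cov(data))
  lev = hatvalues(model)
  cooks = cooks.distance(model)
  (mahal > qchisq(1 - .001, ncol(data))) +
    (lev > (2 * k + 2) / nrow(data)) +
    (cooks > 4 / (nrow(data) - k - 1))
}
##e.g. subset(master, screen_outliers(master, model1) < 2) should reproduce noout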
|
/Regression Cooks Distance Mahalanobis.R
|
no_license
|
Rothgargeert/R-and-Python-Data-Science-Examples
|
R
| false | false | 3,027 |
r
|
##-------------------------------------------------------------------------------
## Copyright (c) 2012 University of Illinois, NCSA.
## All rights reserved. This program and the accompanying materials
## are made available under the terms of the
## University of Illinois/NCSA Open Source License
## which accompanies this distribution, and is available at
## http://opensource.ncsa.illinois.edu/license.html
##-------------------------------------------------------------------------------
library(XML)
library(lubridate)
library(PEcAn.DB)
library(PEcAn.utils)
##--------------------------------------------------------------------------------------------------#
## INTERNAL FUNCTIONS
##--------------------------------------------------------------------------------------------------#
# check to see if inputs are specified
# this should be part of the model code
check.inputs <- function(settings) {
if (is.null(settings$model$type)) return(settings)
# don't know how to check inputs
if (is.null(settings$database$bety)) {
logger.info("No databasse connection, can't check inputs.")
return (settings)
}
# get list of inputs associated with model type
dbcon <- db.open(settings$database$bety)
inputs <- db.query(paste0("SELECT tag, format_id, required FROM modeltypes, modeltypes_formats WHERE modeltypes_formats.modeltype_id = modeltypes.id and modeltypes.name='", settings$model$type, "' AND modeltypes_formats.input;"), con=dbcon)
# check list of inputs
allinputs <- names(settings$run$inputs)
if (nrow(inputs) > 0) {
for(i in 1:nrow(inputs)) {
tag <- inputs$tag[i]
tagid <- paste0(tag, ".id")
hostname <- settings$run$host$name
allinputs <- allinputs[allinputs != tag]
# check if <tag.id> exists
if (!is.null(settings$run$inputs[[tagid]])) {
id <- settings$run$inputs[[tagid]]
file <- dbfile.file("Input", id, dbcon, hostname)
if (is.na(file)) {
logger.error("No file found for", tag, " and id", id, "on host", hostname)
} else {
if (is.null(settings$run$inputs[[tag]])) {
settings$run$inputs[[tag]] <- file
} else if (file != settings$run$inputs[[tag]]) {
logger.warn("Input file and id do not match for ", tag)
}
}
}
# check if file exists
if (is.null(settings$run$inputs[[tag]])) {
if (inputs$required[i]) {
logger.severe("Missing required input :", tag)
} else {
logger.info("Missing optional input :", tag)
}
} else {
# can we find the file so we can set the tag.id
if (is.null(settings$run$inputs[[tagid]])) {
id <- dbfile.id('Input', settings$run$inputs[[tag]], dbcon, hostname)
if (!is.na(id)) {
settings$run$inputs[[tagid]] <- id
}
}
}
# check to see if format is right type
if (!is.null(settings$run$inputs[[tagid]])) {
formats <- db.query(paste0("SELECT format_id FROM inputs WHERE id=", settings$run$inputs[[tagid]]), con=dbcon)
if (nrow(formats) > 1) {
if (formats[1, 'format_id'] != inputs$format_id[i]) {
logger.error("Format of input", tag, "does not match specified input.")
}
} else if (nrow(formats) == 1) {
if (formats[1, 'format_id'] != inputs$format_id[i]) {
logger.error("Format of input", tag, "does not match specified input.")
}
} else {
logger.error("Could not check format of", tag, ".")
}
}
}
}
if (length(allinputs) > 0) {
logger.info("Unused inputs found :", paste(allinputs, collapse=" "))
}
db.close(dbcon)
return(settings)
}
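# Illustration only (a sketch, not called anywhere): check.inputs() looks for
# entries under settings$run$inputs keyed by the modeltype tag, plus an
# optional "<tag>.id" pointing at an Input record. The path and id below are
# made-up values.
if (FALSE) {
  settings$run$inputs <- list(met = "/path/to/met.nc", met.id = 1000000001)
  settings <- check.inputs(settings)
}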
# check database section
check.database <- function(database) {
if (is.null(database)) return(NULL);
## check database settings
if (is.null(database$driver)) {
database$driver <- "PostgreSQL"
logger.warn("Please specify a database driver; using default 'PostgreSQL'")
}
# Attempt to load the driver
if (!require(paste0("R", database$driver), character.only=TRUE)) {
logger.warn("Could not load the database driver", paste0("R", database$driver))
}
# MySQL specific checks
if (database$driver == "MySQL") {
if (!is.null(database$passwd)) {
logger.info("passwd in database section should be password for MySQL")
database$password <- database$passwd
database$passwd <- NULL
}
if (!is.null(database$name)) {
logger.info("name in database section should be dbname for MySQL")
database$dbname <- database$name
database$name <- NULL
}
}
# PostgreSQL specific checks
if (database$driver == "PostgreSQL") {
if (!is.null(database$passwd)) {
logger.info("passwd in database section should be password for PostgreSQL")
database$password <- database$passwd
database$passwd <- NULL
}
if (!is.null(database$name)) {
logger.info("name in database section should be dbname for PostgreSQL")
database$dbname <- database$name
database$name <- NULL
}
}
## The following hack handles *.illinois.* to *.uiuc.* aliases of ebi-forecast
if(!is.null(database$host)){
forcastnames <- c("ebi-forecast.igb.uiuc.edu",
"ebi-forecast.igb.illinois.edu")
if((database$host %in% forcastnames) &
(Sys.info()['nodename'] %in% forcastnames)){
database$host <- "localhost"
}
} else if(is.null(database$host)){
database$host <- "localhost"
}
## convert strings around from old format to new format
if(is.null(database[["user"]])){
if (!is.null(database$userid)) {
logger.info("'userid' in database section should be 'user'")
database$user <- database$userid
} else if (!is.null(database$username)) {
logger.info("'username' in database section should be 'user'")
database$user <- database$username
} else {
logger.info("no database user specified, using 'bety'")
database$user <- "bety"
}
}
database$userid <- database$username <- NULL
# fill in defaults for the database
if(is.null(database$password)) {
database$password <- "bety"
}
if(is.null(database$dbname)) {
database$dbname <- "bety"
}
if (!db.exists(params=database, FALSE)) {
logger.severe("Invalid Database Settings : ", unlist(database))
}
# connected
logger.info("Successfully connected to database : ", unlist(database))
# return fixed up database
return(database)
}
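# Illustration only (a sketch): a minimal BETY connection list; check.database()
# fills in the rest (PostgreSQL driver, user/password/dbname all default to
# "bety", host defaults to "localhost"). The host below is an assumption.
if (FALSE) {
  settings$database$bety <- check.database(list(host = "localhost"))
}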
# check to make sure BETY is up to date
check.bety.version <- function(dbcon) {
versions <- db.query("SELECT version FROM schema_migrations;", con=dbcon)[['version']]
  # there should always be a version 1
if (! ("1" %in% versions)) {
logger.severe("No version 1, how did this database get created?")
}
# check for specific version
if (! ("20140617163304" %in% versions)) {
logger.severe("Missing migration 20140617163304, this associates files with models.")
}
if (! ("20140708232320" %in% versions)) {
logger.severe("Missing migration 20140708232320, this introduces geometry column in sites")
}
if (! ("20140729045640" %in% versions)) {
logger.severe("Missing migration 20140729045640, this introduces modeltypes table")
}
# check if database is newer
if (tail(versions, n=1) > "20140729045640") {
logger.warn("Last migration", tail(versions, n=1), "is more recent than expected 20140729045640.",
"This could result in PEcAn not working as expected.")
}
}
##' Sanity checks. Checks the settings file to make sure expected fields exist. It will try to use
##' default values for any missing values, or stop the execution if no defaults are possible.
##'
##' Expected fields in settings file are:
##' - pfts with at least one pft defined
##' @title Check Settings
##' @param settings settings file
##' @return will return the updated settings values with defaults set.
##' @author Rob Kooper
check.settings <- function(settings) {
if (!is.null(settings$nocheck)) {
logger.info("Not doing sanity checks of pecan.xml")
return(settings)
}
scipen = getOption("scipen")
options(scipen=12)
  # check database sections if they exist
dbcon <- "NONE"
if (!is.null(settings$database)) {
# check all databases
for (name in names(settings$database)) {
settings$database[[name]] <- check.database(settings$database[[name]])
}
# check bety database
if (!is.null(settings$database$bety)) {
# should runs be written to database
if (is.null(settings$database$bety$write)) {
logger.info("Writing all runs/configurations to database.")
settings$database$bety$write <- TRUE
} else {
settings$database$bety$write <- as.logical(settings$database$bety$write)
if (settings$database$bety$write) {
logger.debug("Writing all runs/configurations to database.")
} else {
logger.warn("Will not write runs/configurations to database.")
}
}
# check if we can connect to the database with write permissions
if (settings$database$bety$write && !db.exists(params=settings$database$bety, TRUE)) {
logger.severe("Invalid Database Settings : ", unlist(settings$database))
}
# TODO check userid and userpassword
# Connect to database
dbcon <- db.open(settings$database$bety)
# check database version
check.bety.version(dbcon)
} else {
logger.warn("No BETY database information specified; not using database.")
}
} else {
logger.warn("No BETY database information specified; not using database.")
}
# make sure there are pfts defined
if (is.null(settings$pfts) || (length(settings$pfts) == 0)) {
logger.warn("No PFTS specified.")
}
# check for a run settings
if (is.null(settings[['run']])) {
logger.warn("No Run Settings specified")
}
# check to make sure a host is given
if (is.null(settings$run$host$name)) {
logger.info("Setting localhost for execution host.")
settings$run$host$name <- "localhost"
}
# check start/end date are specified and correct
if (is.null(settings$run$start.date)) {
logger.warn("No start.date specified in run section.")
} else if (is.null(settings$run$end.date)) {
logger.warn("No end.date specified in run section.")
} else {
startdate <- parse_date_time(settings$run$start.date, "ymd_hms", truncated=3)
enddate <- parse_date_time(settings$run$end.date, "ymd_hms", truncated=3)
if (startdate >= enddate) {
logger.severe("Start date should come before the end date.")
}
}
  # check if there is either ensemble or sensitivity.analysis
if (is.null(settings$ensemble) && is.null(settings$sensitivity.analysis)) {
logger.warn("No ensemble or sensitivity analysis specified, no models will be executed!")
}
# check ensemble
if (!is.null(settings$ensemble)) {
if (is.null(settings$ensemble$variable)) {
if (is.null(settings$sensitivity.analysis$variable)) {
logger.severe("No variable specified to compute ensemble for.")
}
logger.info("Setting ensemble variable to the same as sensitivity analysis variable [", settings$sensitivity.analysis$variable, "]")
settings$ensemble$variable <- settings$sensitivity.analysis$variable
}
if (is.null(settings$ensemble$size)) {
logger.info("Setting ensemble size to 1.")
settings$ensemble$size <- 1
}
if(is.null(settings$ensemble$start.year)) {
if(is.null(settings$sensitivity.analysis$start.year)) {
settings$ensemble$start.year <- year(settings$run$start.date)
logger.info("No start date passed to ensemble - using the run date (", settings$ensemble$start.date, ").")
} else {
settings$ensemble$start.year <- settings$sensitivity.analysis$start.year
logger.info("No start date passed to ensemble - using the sensitivity.analysis date (", settings$ensemble$start.date, ").")
}
}
if(is.null(settings$ensemble$end.year)) {
if(is.null(settings$sensitivity.analysis$end.year)) {
settings$ensemble$end.year <- year(settings$run$end.date)
logger.info("No end date passed to ensemble - using the run date (", settings$ensemble$end.date, ").")
} else {
settings$ensemble$end.year <- settings$sensitivity.analysis$end.year
logger.info("No end date passed to ensemble - using the sensitivity.analysis date (", settings$ensemble$end.date, ").")
}
}
# check start and end dates
if (year(startdate) > settings$ensemble$start.year) {
logger.severe("Start year of ensemble should come after the start.date of the run")
}
if (year(enddate) < settings$ensemble$end.year) {
logger.severe("End year of ensemble should come before the end.date of the run")
}
if (settings$ensemble$start.year > settings$ensemble$end.year) {
logger.severe("Start year of ensemble should come before the end year of the ensemble")
}
}
# check sensitivity analysis
if (!is.null(settings$sensitivity.analysis)) {
if (is.null(settings$sensitivity.analysis$variable)) {
if (is.null(settings$ensemble$variable)) {
logger.severe("No variable specified to compute sensitivity.analysis for.")
}
logger.info("Setting sensitivity.analysis variable to the same as ensemble variable [", settings$ensemble$variable, "]")
settings$sensitivity.analysis$variable <- settings$ensemble$variable
}
if(is.null(settings$sensitivity.analysis$start.year)) {
if(is.null(settings$ensemble$start.year)) {
settings$sensitivity.analysis$start.year <- year(settings$run$start.date)
logger.info("No start date passed to sensitivity.analysis - using the run date (", settings$sensitivity.analysis$start.date, ").")
} else {
settings$sensitivity.analysis$start.year <- settings$ensemble$start.year
logger.info("No start date passed to sensitivity.analysis - using the ensemble date (", settings$sensitivity.analysis$start.date, ").")
}
}
if(is.null(settings$sensitivity.analysis$end.year)) {
if(is.null(settings$ensemble$end.year)) {
settings$sensitivity.analysis$end.year <- year(settings$run$end.date)
logger.info("No end date passed to sensitivity.analysis - using the run date (", settings$sensitivity.analysis$end.date, ").")
} else {
settings$sensitivity.analysis$end.year <- settings$ensemble$end.year
logger.info("No end date passed to sensitivity.analysis - using the ensemble date (", settings$sensitivity.analysis$end.date, ").")
}
}
# check start and end dates
if (year(startdate) > settings$sensitivity.analysis$start.year) {
logger.severe("Start year of sensitivity.analysis should come after the start.date of the run")
}
if (year(enddate) < settings$sensitivity.analysis$end.year) {
logger.severe("End year of sensitivity.analysis should come before the end.date of the run")
}
if (settings$sensitivity.analysis$start.year > settings$sensitivity.analysis$end.year) {
logger.severe("Start year of sensitivity.analysis should come before the end year of the ensemble")
}
}
# check meta-analysis
if (is.null(settings$meta.analysis) || is.null(settings$meta.analysis$iter)) {
settings$meta.analysis$iter <- 3000
logger.info("Setting meta.analysis iterations to ", settings$meta.analysis$iter)
}
if (is.null(settings$meta.analysis$random.effects)) {
settings$meta.analysis$random.effects <- FALSE
logger.info("Setting meta.analysis random effects to ", settings$meta.analysis$random.effects)
}
if (is.null(settings$meta.analysis$update)) {
settings$meta.analysis$update <- 'AUTO'
logger.info("Setting meta.analysis update to only update if no previous meta analysis was found")
}
if ((settings$meta.analysis$update != 'AUTO') && is.na(as.logical(settings$meta.analysis$update))) {
logger.info("meta.analysis update can only be AUTO/TRUE/FALSE, defaulting to FALSE")
settings$meta.analysis$update <- FALSE
}
# check modelid with values
if(!is.null(settings$model)){
if(!is.character(dbcon)){
if(!is.null(settings$model$id)){
if(as.numeric(settings$model$id) >= 0){
model <- db.query(paste0("SELECT models.id AS id, models.revision AS revision, modeltypes.name AS type FROM models, modeltypes WHERE models.id=", settings$model$id, " AND models.modeltype_id=modeltypes.id;"), con=dbcon)
if(nrow(model) == 0) {
logger.error("There is no record of model_id = ", settings$model$id, "in database")
}
} else {
model <- list()
}
} else if (!is.null(settings$model$type)) {
model <- db.query(paste0("SELECT models.id AS id, models.revision AS revision, modeltypes.name AS type FROM models, modeltypes ",
"WHERE modeltypes.name = '", toupper(settings$model$type), "' ",
"AND models.modeltype_id=modeltypes.id ",
ifelse(is.null(settings$model$revision), "",
paste0("AND revision like '%", settings$model$revision, "%' ")),
"ORDER BY models.updated_at"), con=dbcon)
if(nrow(model) > 1){
logger.warn("multiple records for", settings$model$name, "returned; using the latest")
row <- which.max(model$updated_at)
if (length(row) == 0) row <- nrow(model)
model <- model[row, ]
} else if (nrow(model) == 0) {
logger.warn("Model type", settings$model$type, "not in database")
}
} else {
logger.warn("no model settings given")
model <- list()
}
} else {
model <- list()
}
# copy data from database into missing fields
if (!is.null(model$id)) {
if (is.null(settings$model$id) || (settings$model$id == "")) {
settings$model$id <- model$id
logger.info("Setting model id to ", settings$model$id)
} else if (settings$model$id != model$id) {
logger.warn("Model id specified in settings file does not match database.")
}
} else {
if (is.null(settings$model$id) || (settings$model$id == "")) {
settings$model$id <- -1
logger.info("Setting model id to ", settings$model$id)
}
}
if (!is.null(model$type)) {
if (is.null(settings$model$type) || (settings$model$type == "")) {
settings$model$type <- model$type
logger.info("Setting model type to ", settings$model$type)
} else if (settings$model$type != model$type) {
logger.warn("Model type specified in settings file does not match database.")
}
}
if (!is.null(model$revision)) {
if (is.null(settings$model$revision) || (settings$model$revision == "")) {
settings$model$revision <- model$revision
logger.info("Setting model revision to ", settings$model$revision)
} else if (settings$model$revision != model$revision) {
logger.warn("Model revision specified in settings file does not match database.")
}
}
# make sure we have model type
if ((is.null(settings$model$type) || settings$model$type == "")) {
logger.severe("Need a model type.")
}
# check on binary for given host
if (!is.null(settings$model$id) && (settings$model$id >= 0)) {
binary <- dbfile.file("Model", settings$model$id, dbcon, settings$run$host$name)
if (!is.na(binary)) {
if (is.null(settings$model$binary)) {
settings$model$binary <- binary
logger.info("Setting model binary to ", settings$model$binary)
} else if (binary != settings$model$binary) {
logger.warn("Specified binary [", settings$model$binary, "] does not match path in database [", binary, "]")
}
}
} else {
logger.warn("No model binary sepcified in database for model ", settings$model$type)
}
}
# end model check
# check siteid with values
if(!is.null(settings$run$site)){
if (is.null(settings$run$site$id)) {
settings$run$site$id <- -1
} else if (settings$run$site$id >= 0) {
if (!is.character(dbcon)) {
site <- db.query(paste("SELECT sitename, ST_X(geometry) AS lon, ST_Y(geometry) AS lat FROM sites WHERE id =", settings$run$site$id), con=dbcon)
} else {
site <- data.frame(id=settings$run$site$id)
if (!is.null(settings$run$site$name)) {
site$sitename=settings$run$site$name
}
if (!is.null(settings$run$site$lat)) {
site$lat=settings$run$site$lat
}
if (!is.null(settings$run$site$lon)) {
site$lon=settings$run$site$lon
}
}
if((!is.null(settings$run$site$met)) && settings$run$site$met == "NULL") settings$run$site$met <- NULL
if (is.null(settings$run$site$name)) {
if ((is.null(site$sitename) || site$sitename == "")) {
logger.info("No site name specified.")
settings$run$site$name <- "NA"
} else {
settings$run$site$name <- site$sitename
logger.info("Setting site name to ", settings$run$site$name)
}
} else if (site$sitename != settings$run$site$name) {
logger.warn("Specified site name [", settings$run$site$name, "] does not match sitename in database [", site$sitename, "]")
}
if (is.null(settings$run$site$lat)) {
if ((is.null(site$lat) || site$lat == "")) {
logger.severe("No lat specified for site.")
} else {
settings$run$site$lat <- as.numeric(site$lat)
logger.info("Setting site lat to ", settings$run$site$lat)
}
} else if (as.numeric(site$lat) != as.numeric(settings$run$site$lat)) {
logger.warn("Specified site lat [", settings$run$site$lat, "] does not match lat in database [", site$lat, "]")
}
if (is.null(settings$run$site$lon)) {
if ((is.null(site$lon) || site$lon == "")) {
logger.severe("No lon specified for site.")
} else {
settings$run$site$lon <- as.numeric(site$lon)
logger.info("Setting site lon to ", settings$run$site$lon)
}
} else if (as.numeric(site$lon) != as.numeric(settings$run$site$lon)) {
logger.warn("Specified site lon [", settings$run$site$lon, "] does not match lon in database [", site$lon, "]")
}
}
} else {
settings$run$site$id <- -1
}
# end site check code
  ## if run$host is localhost, set to "localhost"
if (any(settings$run$host %in% c(Sys.info()['nodename'], gsub("illinois", "uiuc", Sys.info()['nodename'])))){
settings$run$host$name <- "localhost"
}
# check if we need to use qsub
if ("qsub" %in% names(settings$run$host)) {
if (is.null(settings$run$host$qsub)) {
settings$run$host$qsub <- "qsub -N @NAME@ -o @STDOUT@ -e @STDERR@ -S /bin/bash"
logger.info("qsub not specified using default value :", settings$run$host$qsub)
}
if (is.null(settings$run$host$qsub.jobid)) {
settings$run$host$qsub.jobid <- "Your job ([0-9]+) .*"
logger.info("qsub.jobid not specified using default value :", settings$run$host$qsub.jobid)
}
if (is.null(settings$run$host$qstat)) {
settings$run$host$qstat <- "qstat -j @JOBID@ &> /dev/null || echo DONE"
logger.info("qstat not specified using default value :", settings$run$host$qstat)
}
}
# modellauncher to launch on multiple nodes/cores
if ("modellauncher" %in% names(settings$run$host)) {
if (is.null(settings$run$host$modellauncher$binary)) {
settings$run$host$modellauncher$binary <- "modellauncher"
logger.info("binary not specified using default value :", settings$run$host$modellauncher$binary)
}
if (is.null(settings$run$host$modellauncher$qsub.extra)) {
logger.severe("qsub.extra not specified, can not launch in parallel environment.")
}
if (is.null(settings$run$host$modellauncher$mpirun)) {
settings$run$host$modellauncher$mpirun <- "mpirun"
logger.info("mpirun not specified using default value :", settings$run$host$modellauncher$mpirun)
}
}
# Check folder where outputs are written before adding to dbfiles
if(is.null(settings$run$dbfiles)) {
settings$run$dbfiles <- normalizePath("~/.pecan/dbfiles", mustWork=FALSE)
} else {
settings$run$dbfiles <- normalizePath(settings$run$dbfiles, mustWork=FALSE)
}
dir.create(settings$run$dbfiles, showWarnings = FALSE, recursive = TRUE)
# check all inputs exist
settings <- check.inputs(settings)
# check for workflow defaults
fixoutdir <- FALSE
if(!is.character(dbcon) && settings$database$bety$write && ("model" %in% names(settings))) {
if (!'workflow' %in% names(settings)) {
now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
db.query(paste0("INSERT INTO workflows (site_id, model_id, hostname, start_date, end_date, started_at, created_at) values ('",
settings$run$site$id, "','", settings$model$id, "', '", settings$run$host$name, "', '",
settings$run$start.date, "', '", settings$run$end.date, "', '", now, "', '", now, "')"), con=dbcon)
settings$workflow$id <- db.query(paste0("SELECT id FROM workflows WHERE created_at='", now, "' ORDER BY id DESC LIMIT 1;"), con=dbcon)[['id']]
fixoutdir <- TRUE
}
} else {
settings$workflow$id <- format(Sys.time(), "%Y-%m-%d-%H-%M-%S")
}
# check/create the pecan folder
if (is.null(settings$outdir)) {
settings$outdir <- "PEcAn_@WORKFLOW@"
}
# replace @WORKFLOW@ with the id of the workflow
settings$outdir <- gsub("@WORKFLOW@", format(settings$workflow$id,scientific=FALSE), settings$outdir)
# create fully qualified pathname
if (substr(settings$outdir, 1, 1) != '/') {
settings$outdir <- file.path(getwd(), settings$outdir)
}
logger.info("output folder =", settings$outdir)
if (!file.exists(settings$outdir) && !dir.create(settings$outdir, recursive=TRUE)) {
logger.severe("Could not create folder", settings$outdir)
}
#update workflow
if (fixoutdir) {
db.query(paste0("UPDATE workflows SET folder='", normalizePath(settings$outdir), "' WHERE id=", settings$workflow$id), con=dbcon)
}
# check/create the local run folder
if (is.null(settings$rundir)) {
settings$rundir <- file.path(settings$outdir, "run")
}
if (!file.exists(settings$rundir) && !dir.create(settings$rundir, recursive=TRUE)) {
logger.severe("Could not create run folder", settings$rundir)
}
# check/create the local model out folder
if (is.null(settings$modeloutdir)) {
settings$modeloutdir <- file.path(settings$outdir, "out")
}
if (!file.exists(settings$modeloutdir) && !dir.create(settings$modeloutdir, recursive=TRUE)) {
logger.severe("Could not create model out folder", settings$modeloutdir)
}
# make sure remote folders are specified if need be
if (!is.null(settings$run$host$qsub) || (settings$run$host$name != "localhost")) {
homedir <- NA
if (is.null(settings$run$host$rundir)) {
if (is.na(homedir)) {
homedir <- system2("ssh", c(settings$run$host$name, "pwd"), stdout=TRUE)
}
settings$run$host$rundir <- paste0(homedir, "/pecan_remote/@WORKFLOW@/run")
}
settings$run$host$rundir <- gsub("@WORKFLOW@", settings$workflow$id, settings$run$host$rundir)
logger.info("Using ", settings$run$host$rundir, "to store runs on remote machine")
if (is.null(settings$run$host$outdir)) {
if (is.na(homedir)) {
homedir <- system2("ssh", c(settings$run$host$name, "pwd"), stdout=TRUE)
}
settings$run$host$outdir <- paste0(homedir, "/pecan_remote/@WORKFLOW@/out")
}
settings$run$host$outdir <- gsub("@WORKFLOW@", settings$workflow$id, settings$run$host$outdir)
logger.info("Using ", settings$run$host$outdir, "to store output on remote machine")
} else if (settings$run$host$name == "localhost") {
settings$run$host$rundir <- settings$rundir
settings$run$host$outdir <- settings$modeloutdir
}
# check/create the pft folders
if (!is.null(settings$pfts) && (length(settings$pfts) > 0)) {
for (i in 1:length(settings$pfts)) {
#check if name tag within pft
if (!"name" %in% names(settings$pfts[i]$pft)) {
logger.severe("No name specified for pft of index: ", i, ", please specify name")
}
if (settings$pfts[i]$pft$name == "") {
logger.severe("Name specified for pft of index: ", i, " can not be empty.")
}
#check to see if name of each pft in xml file is actually a name of a pft already in database
if (!is.character(dbcon)) {
x <- db.query(paste0("SELECT modeltypes.name AS type FROM pfts, modeltypes WHERE pfts.name = '", settings$pfts[i]$pft$name, "' AND modeltypes.id=pfts.modeltype_id;"), con=dbcon)
if (nrow(x) == 0) {
logger.severe("Did not find a pft with name ", settings$pfts[i]$pft$name)
}
if (nrow(x) > 1) {
logger.warn("Found multiple entries for pft with name ", settings$pfts[i]$pft$name)
}
if (!is.null(settings$model$type)) {
for (j in 1:nrow(x)) {
if (x[[j, 'type']] != settings$model$type) {
logger.severe(settings$pfts[i]$pft$name, "has different model type [", x[[j, 'type']], "] than selected model [", settings$model$type, "].")
}
}
}
}
if (is.null(settings$pfts[i]$pft$outdir)) {
settings$pfts[i]$pft$outdir <- file.path(settings$outdir, "pft", settings$pfts[i]$pft$name)
logger.info("Storing pft", settings$pfts[i]$pft$name, "in", settings$pfts[i]$pft$outdir)
} else {
logger.debug("Storing pft", settings$pfts[i]$pft$name, "in", settings$pfts[i]$pft$outdir)
}
out.dir <- settings$pfts[i]$pft$outdir
if (!file.exists(out.dir) && !dir.create(out.dir, recursive=TRUE)) {
if(identical(dir(out.dir), character(0))){
logger.warn(out.dir, "exists but is empty")
} else {
logger.severe("Could not create folder", out.dir)
}
}
}
}
if (!is.character(dbcon)) {
db.close(dbcon)
}
options(scipen=scipen)
# all done return cleaned up settings
invisible(settings)
}
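# Illustration only (a sketch): roughly the smallest settings list that
# check.settings() accepts without a database connection; everything else is
# defaulted or only warned about. Dates, variable and outdir are made-up values.
if (FALSE) {
  settings <- list(
    run = list(start.date = "2004-01-01", end.date = "2004-12-31",
               host = list(name = "localhost")),
    ensemble = list(variable = "NPP"),
    outdir = "/tmp/pecan_test")
  settings <- check.settings(settings)
}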
##' Updates a pecan.xml file to match new layout. This will take care of the
##' conversion to the latest pecan.xml file.
##'
##' @title Update Settings
##' @param settings settings file
##' @return will return the updated settings values
##' @author Rob Kooper
update.settings <- function(settings) {
# update database section, now have different database definitions
# under database section, e.g. fia and bety
if (!is.null(settings$database)) {
# simple check to make sure the database tag is updated
if (!is.null(settings$database$dbname)) {
if (!is.null(settings$database$bety)) {
logger.severe("Please remove dbname etc from database configuration.")
}
logger.info("Database tag has changed, please use <database><bety> to store",
"information about accessing the BETY database. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#database-access.")
bety <- list()
for(name in names(settings$database)) {
bety[[name]] <- settings$database[[name]]
}
settings$database <- list(bety=bety)
}
# warn user about change and update settings
if (!is.null(settings$bety$write)) {
logger.warn("<bety><write> is now part of the database settings. For more",
"information about the database settings see",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#database-access.")
if (is.null(settings$database$bety$write)) {
settings$database$bety$write <- settings$bety$write
settings$bety$write <- NULL
if (length(settings$bety) == 0) settings$bety <- NULL
}
}
}
# model$model_type is now simply model$type and model$name is no longer used
if (!is.null(settings$model$model_type)) {
if (!is.null(settings$model$type)) {
if (settings$model$model_type != settings$model$type) {
logger.severe("Please remove model_type from model configuration.")
} else {
logger.info("Please remove model_type from model configuration.")
}
}
logger.info("Model tag has changed, please use <model><type> to specify",
"type of model. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#model_setup.")
settings$model$type <- settings$model$model_type
settings$model$model_type <- NULL
}
if (!is.null(settings$model$name)) {
if (!is.null(settings$model$type)) {
if (settings$model$name != settings$model$type) {
logger.severe("Please remove name from model configuration.")
} else {
logger.info("Please remove name from model configuration.")
}
}
logger.info("Model tag has changed, please use <model><type> to specify",
"type of model. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#model_setup.")
settings$model$type <- settings$model$name
settings$model$name <- NULL
}
# run$site$met is now run$inputs$met
if (!is.null(settings$run$site$met)) {
if (!is.null(settings$run$inputs$met)) {
if (settings$run$site$met != settings$run$inputs$met) {
logger.severe("Please remove met from model configuration.")
} else {
logger.info("Please remove met from model configuration.")
}
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><met> to specify",
"met file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$met <- settings$run$site$met
settings$run$site$met <- NULL
}
# some specific ED changes
if (!is.null(settings$model$veg)) {
if (!is.null(settings$run$inputs$veg)) {
if (settings$model$veg != settings$run$inputs$veg) {
logger.severe("Please remove veg from model configuration.")
} else {
logger.info("Please remove veg from model configuration.")
}
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><veg> to specify",
"veg file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$veg <- settings$model$veg
settings$model$veg <- NULL
}
if (!is.null(settings$model$soil)) {
if (!is.null(settings$run$inputs$soil)) {
if (settings$model$soil != settings$run$inputs$soil) {
logger.severe("Please remove soil from model configuration.")
} else {
logger.info("Please remove soil from model configuration.")
}
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><soil> to specify",
"soil file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$soil <- settings$model$soil
settings$model$soil <- NULL
}
if (!is.null(settings$model$psscss)) {
if (!is.null(settings$run$inputs$pss)) {
logger.info("Please remove psscss from model configuration.")
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><pss/css/site> to specify",
"pss/css/site file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$pss <- file.path(settings$model$psscss, "foo.pss")
settings$run$inputs$css <- file.path(settings$model$psscss, "foo.css")
settings$run$inputs$site <- file.path(settings$model$psscss, "foo.site")
settings$model$psscss <- NULL
}
if (!is.null(settings$model$inputs)) {
if (!is.null(settings$run$inputs$inputs)) {
logger.info("Please remove inputs from model configuration.")
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><lu/thsums> to specify",
"lu/thsums file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$lu <- file.path(settings$model$inputs, "glu")
settings$run$inputs$thsums <- settings$model$inputs
    settings$model$inputs <- NULL
}
invisible(settings)
}
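# Illustration only (a sketch): an old-style settings list and what
# update.settings() turns it into. The values are made up.
if (FALSE) {
  old <- list(model = list(model_type = "ED2"),
              run = list(site = list(met = "/path/to/met")))
  new <- update.settings(old)
  # new$model$type is "ED2"; new$run$inputs$met is "/path/to/met"
}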
##--------------------------------------------------------------------------------------------------#
## EXTERNAL FUNCTIONS
##--------------------------------------------------------------------------------------------------#
##' Loads PEcAn settings file
##'
##' This will try and find the PEcAn settings file in the following order:
##' \enumerate{
##' \item {--settings <file>}{passed as command line argument using --settings}
##' \item {PECAN_SETTINGS}{environment variable PECAN_SETTINGS pointing to a specific file}
##' \item {inputfile}{passed as argument to function}
##' \item {./pecan.xml}{pecan.xml in the current folder}
##' }
##' Once the function finds a valid file, it will not look further.
##' Thus, if \code{PECAN_SETTINGS} points to a valid file, \code{inputfile} will be ignored.
##' Even if an \code{inputfile} argument is passed, it will be ignored if a file is passed through
##' a higher priority method.
##' @param inputfile the PEcAn settings file to be used.
##' @param outputfile the name of file to which the settings will be
##' written inside the outputdir. If set to null nothing is saved.
##' @return list of all settings as loaded from the XML file(s)
##' @export
##' @import XML
##' @author Shawn Serbin
##' @author Rob Kooper
##' @examples
##' \dontrun{
##' ## bash shell:
##' R --vanilla -- --settings path/to/mypecan.xml < workflow.R
##'
##' ## R:
##'
##' settings <- read.settings()
##' settings <- read.settings(file="willowcreek.xml")
##' test.settings.file <- system.file("tests/test.xml", package = "PEcAn.all")
##' settings <- read.settings(test.settings.file)
##' }
read.settings <- function(inputfile = "pecan.xml", outputfile = "pecan.xml"){
if(inputfile == ""){
logger.warn("settings files specified as empty string; \n\t\tthis may be caused by an incorrect argument to system.file.")
}
loc <- which(commandArgs() == "--settings")
if (length(loc) != 0) {
# 1 filename is passed as argument to R
for(idx in loc) {
if (!is.null(commandArgs()[idx+1]) && file.exists(commandArgs()[idx+1])) {
logger.info("Loading --settings=", commandArgs()[idx+1])
xml <- xmlParse(commandArgs()[idx+1])
break
}
}
} else if (file.exists(Sys.getenv("PECAN_SETTINGS"))) {
# 2 load from PECAN_SETTINGS
logger.info("Loading PECAN_SETTINGS=", Sys.getenv("PECAN_SETTINGS"))
xml <- xmlParse(Sys.getenv("PECAN_SETTINGS"))
} else if(!is.null(inputfile) && file.exists(inputfile)) {
# 3 filename passed into function
logger.info("Loading inpufile=", inputfile)
xml <- xmlParse(inputfile)
} else if (file.exists("pecan.xml")) {
# 4 load ./pecan.xml
logger.info("Loading ./pecan.xml")
xml <- xmlParse("pecan.xml")
} else {
# file not found
logger.severe("Could not find a pecan.xml file")
}
## convert the xml to a list for ease and return
settings <- xmlToList(xml)
settings <- update.settings(settings)
settings <- check.settings(settings)
## save the checked/fixed pecan.xml
if (!is.null(outputfile)) {
pecanfile <- file.path(settings$outdir, outputfile)
if (file.exists(pecanfile)) {
logger.warn(paste("File already exists [", pecanfile, "] file will be overwritten"))
}
saveXML(listToXml(settings, "pecan"), file=pecanfile)
}
## setup Rlib from settings
if(!is.null(settings$Rlib)){
.libPaths(settings$Rlib)
}
## Return settings file as a list
invisible(settings)
}
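# Illustration only (a sketch): pointing read.settings() at a file through the
# PECAN_SETTINGS environment variable, which takes precedence over the
# inputfile argument. The path is a placeholder.
if (FALSE) {
  Sys.setenv(PECAN_SETTINGS = "/path/to/pecan.xml")
  settings <- read.settings()
}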
##=================================================================================================#
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
/settings/R/read.settings.R
|
permissive
|
ajmann4/pecan
|
R
| false | false | 40,956 |
r
|
##-------------------------------------------------------------------------------
## Copyright (c) 2012 University of Illinois, NCSA.
## All rights reserved. This program and the accompanying materials
## are made available under the terms of the
## University of Illinois/NCSA Open Source License
## which accompanies this distribution, and is available at
## http://opensource.ncsa.illinois.edu/license.html
##-------------------------------------------------------------------------------
library(XML)
library(lubridate)
library(PEcAn.DB)
library(PEcAn.utils)
##--------------------------------------------------------------------------------------------------#
## INTERNAL FUNCTIONS
##--------------------------------------------------------------------------------------------------#
# check to see if inputs are specified
# this should be part of the model code
check.inputs <- function(settings) {
if (is.null(settings$model$type)) return(settings)
# don't know how to check inputs
if (is.null(settings$database$bety)) {
logger.info("No databasse connection, can't check inputs.")
return (settings)
}
# get list of inputs associated with model type
dbcon <- db.open(settings$database$bety)
inputs <- db.query(paste0("SELECT tag, format_id, required FROM modeltypes, modeltypes_formats WHERE modeltypes_formats.modeltype_id = modeltypes.id and modeltypes.name='", settings$model$type, "' AND modeltypes_formats.input;"), con=dbcon)
# check list of inputs
allinputs <- names(settings$run$inputs)
if (nrow(inputs) > 0) {
for(i in 1:nrow(inputs)) {
tag <- inputs$tag[i]
tagid <- paste0(tag, ".id")
hostname <- settings$run$host$name
allinputs <- allinputs[allinputs != tag]
# check if <tag.id> exists
if (!is.null(settings$run$inputs[[tagid]])) {
id <- settings$run$inputs[[tagid]]
file <- dbfile.file("Input", id, dbcon, hostname)
if (is.na(file)) {
logger.error("No file found for", tag, " and id", id, "on host", hostname)
} else {
if (is.null(settings$run$inputs[[tag]])) {
settings$run$inputs[[tag]] <- file
} else if (file != settings$run$inputs[[tag]]) {
logger.warn("Input file and id do not match for ", tag)
}
}
}
# check if file exists
if (is.null(settings$run$inputs[[tag]])) {
if (inputs$required[i]) {
logger.severe("Missing required input :", tag)
} else {
logger.info("Missing optional input :", tag)
}
} else {
# can we find the file so we can set the tag.id
if (is.null(settings$run$inputs[[tagid]])) {
id <- dbfile.id('Input', settings$run$inputs[[tag]], dbcon, hostname)
if (!is.na(id)) {
settings$run$inputs[[tagid]] <- id
}
}
}
# check to see if format is right type
if (!is.null(settings$run$inputs[[tagid]])) {
formats <- db.query(paste0("SELECT format_id FROM inputs WHERE id=", settings$run$inputs[[tagid]]), con=dbcon)
if (nrow(formats) > 1) {
if (formats[1, 'format_id'] != inputs$format_id[i]) {
logger.error("Format of input", tag, "does not match specified input.")
}
} else if (nrow(formats) == 1) {
if (formats[1, 'format_id'] != inputs$format_id[i]) {
logger.error("Format of input", tag, "does not match specified input.")
}
} else {
logger.error("Could not check format of", tag, ".")
}
}
}
}
if (length(allinputs) > 0) {
logger.info("Unused inputs found :", paste(allinputs, collapse=" "))
}
db.close(dbcon)
return(settings)
}
# check database section
check.database <- function(database) {
if (is.null(database)) return(NULL);
## check database settings
if (is.null(database$driver)) {
database$driver <- "PostgreSQL"
logger.warn("Please specify a database driver; using default 'PostgreSQL'")
}
# Attempt to load the driver
if (!require(paste0("R", database$driver), character.only=TRUE)) {
logger.warn("Could not load the database driver", paste0("R", database$driver))
}
# MySQL specific checks
if (database$driver == "MySQL") {
if (!is.null(database$passwd)) {
logger.info("passwd in database section should be password for MySQL")
database$password <- database$passwd
database$passwd <- NULL
}
if (!is.null(database$name)) {
logger.info("name in database section should be dbname for MySQL")
database$dbname <- database$name
database$name <- NULL
}
}
# PostgreSQL specific checks
if (database$driver == "PostgreSQL") {
if (!is.null(database$passwd)) {
logger.info("passwd in database section should be password for PostgreSQL")
database$password <- database$passwd
database$passwd <- NULL
}
if (!is.null(database$name)) {
logger.info("name in database section should be dbname for PostgreSQL")
database$dbname <- database$name
database$name <- NULL
}
}
## The following hack handles *.illinois.* to *.uiuc.* aliases of ebi-forecast
if(!is.null(database$host)){
forcastnames <- c("ebi-forecast.igb.uiuc.edu",
"ebi-forecast.igb.illinois.edu")
if((database$host %in% forcastnames) &
(Sys.info()['nodename'] %in% forcastnames)){
database$host <- "localhost"
}
} else if(is.null(database$host)){
database$host <- "localhost"
}
## convert strings around from old format to new format
if(is.null(database[["user"]])){
if (!is.null(database$userid)) {
logger.info("'userid' in database section should be 'user'")
database$user <- database$userid
} else if (!is.null(database$username)) {
logger.info("'username' in database section should be 'user'")
database$user <- database$username
} else {
logger.info("no database user specified, using 'bety'")
database$user <- "bety"
}
}
database$userid <- database$username <- NULL
# fill in defaults for the database
if(is.null(database$password)) {
database$password <- "bety"
}
if(is.null(database$dbname)) {
database$dbname <- "bety"
}
if (!db.exists(params=database, FALSE)) {
logger.severe("Invalid Database Settings : ", unlist(database))
}
# connected
logger.info("Successfully connected to database : ", unlist(database))
# return fixed up database
return(database)
}
# check to make sure BETY is up to date
check.bety.version <- function(dbcon) {
versions <- db.query("SELECT version FROM schema_migrations;", con=dbcon)[['version']]
# there should always be a versin 1
if (! ("1" %in% versions)) {
logger.severe("No version 1, how did this database get created?")
}
# check for specific version
if (! ("20140617163304" %in% versions)) {
logger.severe("Missing migration 20140617163304, this associates files with models.")
}
if (! ("20140708232320" %in% versions)) {
logger.severe("Missing migration 20140708232320, this introduces geometry column in sites")
}
if (! ("20140729045640" %in% versions)) {
logger.severe("Missing migration 20140729045640, this introduces modeltypes table")
}
# check if database is newer
if (tail(versions, n=1) > "20140729045640") {
logger.warn("Last migration", tail(versions, n=1), "is more recent than expected 20140729045640.",
"This could result in PEcAn not working as expected.")
}
}
##' Sanity checks. Checks the settings file to make sure expected fields exist. It will try to use
##' default values for any missing values, or stop the exection if no defaults are possible.
##'
##' Expected fields in settings file are:
##' - pfts with at least one pft defined
##' @title Check Settings
##' @param settings settings file
##' @return will return the updated settings values with defaults set.
##' @author Rob Kooper
check.settings <- function(settings) {
if (!is.null(settings$nocheck)) {
logger.info("Not doing sanity checks of pecan.xml")
return(settings)
}
scipen = getOption("scipen")
options(scipen=12)
# check database secions if exist
dbcon <- "NONE"
if (!is.null(settings$database)) {
# check all databases
for (name in names(settings$database)) {
settings$database[[name]] <- check.database(settings$database[[name]])
}
# check bety database
if (!is.null(settings$database$bety)) {
# should runs be written to database
if (is.null(settings$database$bety$write)) {
logger.info("Writing all runs/configurations to database.")
settings$database$bety$write <- TRUE
} else {
settings$database$bety$write <- as.logical(settings$database$bety$write)
if (settings$database$bety$write) {
logger.debug("Writing all runs/configurations to database.")
} else {
logger.warn("Will not write runs/configurations to database.")
}
}
# check if we can connect to the database with write permissions
if (settings$database$bety$write && !db.exists(params=settings$database$bety, TRUE)) {
logger.severe("Invalid Database Settings : ", unlist(settings$database))
}
# TODO check userid and userpassword
# Connect to database
dbcon <- db.open(settings$database$bety)
# check database version
check.bety.version(dbcon)
} else {
logger.warn("No BETY database information specified; not using database.")
}
} else {
logger.warn("No BETY database information specified; not using database.")
}
# make sure there are pfts defined
if (is.null(settings$pfts) || (length(settings$pfts) == 0)) {
logger.warn("No PFTS specified.")
}
# check for a run settings
if (is.null(settings[['run']])) {
logger.warn("No Run Settings specified")
}
# check to make sure a host is given
if (is.null(settings$run$host$name)) {
logger.info("Setting localhost for execution host.")
settings$run$host$name <- "localhost"
}
# check start/end date are specified and correct
if (is.null(settings$run$start.date)) {
logger.warn("No start.date specified in run section.")
} else if (is.null(settings$run$end.date)) {
logger.warn("No end.date specified in run section.")
} else {
startdate <- parse_date_time(settings$run$start.date, "ymd_hms", truncated=3)
enddate <- parse_date_time(settings$run$end.date, "ymd_hms", truncated=3)
if (startdate >= enddate) {
logger.severe("Start date should come before the end date.")
}
}
# check if there is either ensemble or sensitivy.analysis
if (is.null(settings$ensemble) && is.null(settings$sensitivity.analysis)) {
logger.warn("No ensemble or sensitivity analysis specified, no models will be executed!")
}
# check ensemble
if (!is.null(settings$ensemble)) {
if (is.null(settings$ensemble$variable)) {
if (is.null(settings$sensitivity.analysis$variable)) {
logger.severe("No variable specified to compute ensemble for.")
}
logger.info("Setting ensemble variable to the same as sensitivity analysis variable [", settings$sensitivity.analysis$variable, "]")
settings$ensemble$variable <- settings$sensitivity.analysis$variable
}
if (is.null(settings$ensemble$size)) {
logger.info("Setting ensemble size to 1.")
settings$ensemble$size <- 1
}
if(is.null(settings$ensemble$start.year)) {
if(is.null(settings$sensitivity.analysis$start.year)) {
settings$ensemble$start.year <- year(settings$run$start.date)
logger.info("No start date passed to ensemble - using the run date (", settings$ensemble$start.date, ").")
} else {
settings$ensemble$start.year <- settings$sensitivity.analysis$start.year
logger.info("No start date passed to ensemble - using the sensitivity.analysis date (", settings$ensemble$start.date, ").")
}
}
if(is.null(settings$ensemble$end.year)) {
if(is.null(settings$sensitivity.analysis$end.year)) {
settings$ensemble$end.year <- year(settings$run$end.date)
logger.info("No end date passed to ensemble - using the run date (", settings$ensemble$end.date, ").")
} else {
settings$ensemble$end.year <- settings$sensitivity.analysis$end.year
logger.info("No end date passed to ensemble - using the sensitivity.analysis date (", settings$ensemble$end.date, ").")
}
}
# check start and end dates
if (year(startdate) > settings$ensemble$start.year) {
logger.severe("Start year of ensemble should come after the start.date of the run")
}
if (year(enddate) < settings$ensemble$end.year) {
logger.severe("End year of ensemble should come before the end.date of the run")
}
if (settings$ensemble$start.year > settings$ensemble$end.year) {
logger.severe("Start year of ensemble should come before the end year of the ensemble")
}
}
# check sensitivity analysis
if (!is.null(settings$sensitivity.analysis)) {
if (is.null(settings$sensitivity.analysis$variable)) {
if (is.null(settings$ensemble$variable)) {
logger.severe("No variable specified to compute sensitivity.analysis for.")
}
logger.info("Setting sensitivity.analysis variable to the same as ensemble variable [", settings$ensemble$variable, "]")
settings$sensitivity.analysis$variable <- settings$ensemble$variable
}
if(is.null(settings$sensitivity.analysis$start.year)) {
if(is.null(settings$ensemble$start.year)) {
settings$sensitivity.analysis$start.year <- year(settings$run$start.date)
logger.info("No start date passed to sensitivity.analysis - using the run date (", settings$sensitivity.analysis$start.date, ").")
} else {
settings$sensitivity.analysis$start.year <- settings$ensemble$start.year
logger.info("No start date passed to sensitivity.analysis - using the ensemble date (", settings$sensitivity.analysis$start.date, ").")
}
}
if(is.null(settings$sensitivity.analysis$end.year)) {
if(is.null(settings$ensemble$end.year)) {
settings$sensitivity.analysis$end.year <- year(settings$run$end.date)
logger.info("No end date passed to sensitivity.analysis - using the run date (", settings$sensitivity.analysis$end.date, ").")
} else {
settings$sensitivity.analysis$end.year <- settings$ensemble$end.year
logger.info("No end date passed to sensitivity.analysis - using the ensemble date (", settings$sensitivity.analysis$end.date, ").")
}
}
# check start and end dates
if (year(startdate) > settings$sensitivity.analysis$start.year) {
logger.severe("Start year of sensitivity.analysis should come after the start.date of the run")
}
if (year(enddate) < settings$sensitivity.analysis$end.year) {
logger.severe("End year of sensitivity.analysis should come before the end.date of the run")
}
if (settings$sensitivity.analysis$start.year > settings$sensitivity.analysis$end.year) {
logger.severe("Start year of sensitivity.analysis should come before the end year of the ensemble")
}
}
# check meta-analysis
if (is.null(settings$meta.analysis) || is.null(settings$meta.analysis$iter)) {
settings$meta.analysis$iter <- 3000
logger.info("Setting meta.analysis iterations to ", settings$meta.analysis$iter)
}
if (is.null(settings$meta.analysis$random.effects)) {
settings$meta.analysis$random.effects <- FALSE
logger.info("Setting meta.analysis random effects to ", settings$meta.analysis$random.effects)
}
if (is.null(settings$meta.analysis$update)) {
settings$meta.analysis$update <- 'AUTO'
logger.info("Setting meta.analysis update to only update if no previous meta analysis was found")
}
if ((settings$meta.analysis$update != 'AUTO') && is.na(as.logical(settings$meta.analysis$update))) {
logger.info("meta.analysis update can only be AUTO/TRUE/FALSE, defaulting to FALSE")
settings$meta.analysis$update <- FALSE
}
# check modelid with values
if(!is.null(settings$model)){
if(!is.character(dbcon)){
if(!is.null(settings$model$id)){
if(as.numeric(settings$model$id) >= 0){
model <- db.query(paste0("SELECT models.id AS id, models.revision AS revision, modeltypes.name AS type FROM models, modeltypes WHERE models.id=", settings$model$id, " AND models.modeltype_id=modeltypes.id;"), con=dbcon)
if(nrow(model) == 0) {
logger.error("There is no record of model_id = ", settings$model$id, "in database")
}
} else {
model <- list()
}
} else if (!is.null(settings$model$type)) {
model <- db.query(paste0("SELECT models.id AS id, models.revision AS revision, modeltypes.name AS type FROM models, modeltypes ",
"WHERE modeltypes.name = '", toupper(settings$model$type), "' ",
"AND models.modeltype_id=modeltypes.id ",
ifelse(is.null(settings$model$revision), "",
paste0("AND revision like '%", settings$model$revision, "%' ")),
"ORDER BY models.updated_at"), con=dbcon)
if(nrow(model) > 1){
logger.warn("multiple records for", settings$model$name, "returned; using the latest")
row <- which.max(model$updated_at)
if (length(row) == 0) row <- nrow(model)
model <- model[row, ]
} else if (nrow(model) == 0) {
logger.warn("Model type", settings$model$type, "not in database")
}
} else {
logger.warn("no model settings given")
model <- list()
}
} else {
model <- list()
}
# copy data from database into missing fields
if (!is.null(model$id)) {
if (is.null(settings$model$id) || (settings$model$id == "")) {
settings$model$id <- model$id
logger.info("Setting model id to ", settings$model$id)
} else if (settings$model$id != model$id) {
logger.warn("Model id specified in settings file does not match database.")
}
} else {
if (is.null(settings$model$id) || (settings$model$id == "")) {
settings$model$id <- -1
logger.info("Setting model id to ", settings$model$id)
}
}
if (!is.null(model$type)) {
if (is.null(settings$model$type) || (settings$model$type == "")) {
settings$model$type <- model$type
logger.info("Setting model type to ", settings$model$type)
} else if (settings$model$type != model$type) {
logger.warn("Model type specified in settings file does not match database.")
}
}
if (!is.null(model$revision)) {
if (is.null(settings$model$revision) || (settings$model$revision == "")) {
settings$model$revision <- model$revision
logger.info("Setting model revision to ", settings$model$revision)
} else if (settings$model$revision != model$revision) {
logger.warn("Model revision specified in settings file does not match database.")
}
}
# make sure we have model type
if ((is.null(settings$model$type) || settings$model$type == "")) {
logger.severe("Need a model type.")
}
# check on binary for given host
if (!is.null(settings$model$id) && (settings$model$id >= 0)) {
binary <- dbfile.file("Model", settings$model$id, dbcon, settings$run$host$name)
if (!is.na(binary)) {
if (is.null(settings$model$binary)) {
settings$model$binary <- binary
logger.info("Setting model binary to ", settings$model$binary)
} else if (binary != settings$model$binary) {
logger.warn("Specified binary [", settings$model$binary, "] does not match path in database [", binary, "]")
}
}
} else {
logger.warn("No model binary sepcified in database for model ", settings$model$type)
}
}
# end model check
# check siteid with values
if(!is.null(settings$run$site)){
if (is.null(settings$run$site$id)) {
settings$run$site$id <- -1
} else if (settings$run$site$id >= 0) {
if (!is.character(dbcon)) {
site <- db.query(paste("SELECT sitename, ST_X(geometry) AS lon, ST_Y(geometry) AS lat FROM sites WHERE id =", settings$run$site$id), con=dbcon)
} else {
site <- data.frame(id=settings$run$site$id)
if (!is.null(settings$run$site$name)) {
site$sitename=settings$run$site$name
}
if (!is.null(settings$run$site$lat)) {
site$lat=settings$run$site$lat
}
if (!is.null(settings$run$site$lon)) {
site$lon=settings$run$site$lon
}
}
if((!is.null(settings$run$site$met)) && settings$run$site$met == "NULL") settings$run$site$met <- NULL
if (is.null(settings$run$site$name)) {
if ((is.null(site$sitename) || site$sitename == "")) {
logger.info("No site name specified.")
settings$run$site$name <- "NA"
} else {
settings$run$site$name <- site$sitename
logger.info("Setting site name to ", settings$run$site$name)
}
} else if (site$sitename != settings$run$site$name) {
logger.warn("Specified site name [", settings$run$site$name, "] does not match sitename in database [", site$sitename, "]")
}
if (is.null(settings$run$site$lat)) {
if ((is.null(site$lat) || site$lat == "")) {
logger.severe("No lat specified for site.")
} else {
settings$run$site$lat <- as.numeric(site$lat)
logger.info("Setting site lat to ", settings$run$site$lat)
}
} else if (as.numeric(site$lat) != as.numeric(settings$run$site$lat)) {
logger.warn("Specified site lat [", settings$run$site$lat, "] does not match lat in database [", site$lat, "]")
}
if (is.null(settings$run$site$lon)) {
if ((is.null(site$lon) || site$lon == "")) {
logger.severe("No lon specified for site.")
} else {
settings$run$site$lon <- as.numeric(site$lon)
logger.info("Setting site lon to ", settings$run$site$lon)
}
} else if (as.numeric(site$lon) != as.numeric(settings$run$site$lon)) {
logger.warn("Specified site lon [", settings$run$site$lon, "] does not match lon in database [", site$lon, "]")
}
}
} else {
settings$run$site$id <- -1
}
# end site check code
  ## if run$host is localhost, set the host name to "localhost"
if (any(settings$run$host %in% c(Sys.info()['nodename'], gsub("illinois", "uiuc", Sys.info()['nodename'])))){
settings$run$host$name <- "localhost"
}
# check if we need to use qsub
if ("qsub" %in% names(settings$run$host)) {
if (is.null(settings$run$host$qsub)) {
settings$run$host$qsub <- "qsub -N @NAME@ -o @STDOUT@ -e @STDERR@ -S /bin/bash"
logger.info("qsub not specified using default value :", settings$run$host$qsub)
}
if (is.null(settings$run$host$qsub.jobid)) {
settings$run$host$qsub.jobid <- "Your job ([0-9]+) .*"
logger.info("qsub.jobid not specified using default value :", settings$run$host$qsub.jobid)
}
if (is.null(settings$run$host$qstat)) {
settings$run$host$qstat <- "qstat -j @JOBID@ &> /dev/null || echo DONE"
logger.info("qstat not specified using default value :", settings$run$host$qstat)
}
}
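  # The @NAME@, @STDOUT@, @STDERR@ and @JOBID@ tokens in the defaults above are
  # placeholders; they are presumably substituted with the per-run job name, log
  # paths and submitted job id when the individual runs are launched on the host.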
# modellauncher to launch on multiple nodes/cores
if ("modellauncher" %in% names(settings$run$host)) {
if (is.null(settings$run$host$modellauncher$binary)) {
settings$run$host$modellauncher$binary <- "modellauncher"
logger.info("binary not specified using default value :", settings$run$host$modellauncher$binary)
}
if (is.null(settings$run$host$modellauncher$qsub.extra)) {
logger.severe("qsub.extra not specified, can not launch in parallel environment.")
}
if (is.null(settings$run$host$modellauncher$mpirun)) {
settings$run$host$modellauncher$mpirun <- "mpirun"
logger.info("mpirun not specified using default value :", settings$run$host$modellauncher$mpirun)
}
}
# Check folder where outputs are written before adding to dbfiles
if(is.null(settings$run$dbfiles)) {
settings$run$dbfiles <- normalizePath("~/.pecan/dbfiles", mustWork=FALSE)
} else {
settings$run$dbfiles <- normalizePath(settings$run$dbfiles, mustWork=FALSE)
}
dir.create(settings$run$dbfiles, showWarnings = FALSE, recursive = TRUE)
# check all inputs exist
settings <- check.inputs(settings)
# check for workflow defaults
fixoutdir <- FALSE
if(!is.character(dbcon) && settings$database$bety$write && ("model" %in% names(settings))) {
if (!'workflow' %in% names(settings)) {
now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
db.query(paste0("INSERT INTO workflows (site_id, model_id, hostname, start_date, end_date, started_at, created_at) values ('",
settings$run$site$id, "','", settings$model$id, "', '", settings$run$host$name, "', '",
settings$run$start.date, "', '", settings$run$end.date, "', '", now, "', '", now, "')"), con=dbcon)
settings$workflow$id <- db.query(paste0("SELECT id FROM workflows WHERE created_at='", now, "' ORDER BY id DESC LIMIT 1;"), con=dbcon)[['id']]
fixoutdir <- TRUE
}
} else {
settings$workflow$id <- format(Sys.time(), "%Y-%m-%d-%H-%M-%S")
}
# check/create the pecan folder
if (is.null(settings$outdir)) {
settings$outdir <- "PEcAn_@WORKFLOW@"
}
# replace @WORKFLOW@ with the id of the workflow
settings$outdir <- gsub("@WORKFLOW@", format(settings$workflow$id,scientific=FALSE), settings$outdir)
# create fully qualified pathname
if (substr(settings$outdir, 1, 1) != '/') {
settings$outdir <- file.path(getwd(), settings$outdir)
}
logger.info("output folder =", settings$outdir)
if (!file.exists(settings$outdir) && !dir.create(settings$outdir, recursive=TRUE)) {
logger.severe("Could not create folder", settings$outdir)
}
#update workflow
if (fixoutdir) {
db.query(paste0("UPDATE workflows SET folder='", normalizePath(settings$outdir), "' WHERE id=", settings$workflow$id), con=dbcon)
}
# check/create the local run folder
if (is.null(settings$rundir)) {
settings$rundir <- file.path(settings$outdir, "run")
}
if (!file.exists(settings$rundir) && !dir.create(settings$rundir, recursive=TRUE)) {
logger.severe("Could not create run folder", settings$rundir)
}
# check/create the local model out folder
if (is.null(settings$modeloutdir)) {
settings$modeloutdir <- file.path(settings$outdir, "out")
}
if (!file.exists(settings$modeloutdir) && !dir.create(settings$modeloutdir, recursive=TRUE)) {
logger.severe("Could not create model out folder", settings$modeloutdir)
}
# make sure remote folders are specified if need be
if (!is.null(settings$run$host$qsub) || (settings$run$host$name != "localhost")) {
homedir <- NA
if (is.null(settings$run$host$rundir)) {
if (is.na(homedir)) {
homedir <- system2("ssh", c(settings$run$host$name, "pwd"), stdout=TRUE)
}
settings$run$host$rundir <- paste0(homedir, "/pecan_remote/@WORKFLOW@/run")
}
settings$run$host$rundir <- gsub("@WORKFLOW@", settings$workflow$id, settings$run$host$rundir)
logger.info("Using ", settings$run$host$rundir, "to store runs on remote machine")
if (is.null(settings$run$host$outdir)) {
if (is.na(homedir)) {
homedir <- system2("ssh", c(settings$run$host$name, "pwd"), stdout=TRUE)
}
settings$run$host$outdir <- paste0(homedir, "/pecan_remote/@WORKFLOW@/out")
}
settings$run$host$outdir <- gsub("@WORKFLOW@", settings$workflow$id, settings$run$host$outdir)
logger.info("Using ", settings$run$host$outdir, "to store output on remote machine")
} else if (settings$run$host$name == "localhost") {
settings$run$host$rundir <- settings$rundir
settings$run$host$outdir <- settings$modeloutdir
}
# check/create the pft folders
if (!is.null(settings$pfts) && (length(settings$pfts) > 0)) {
for (i in 1:length(settings$pfts)) {
#check if name tag within pft
if (!"name" %in% names(settings$pfts[i]$pft)) {
logger.severe("No name specified for pft of index: ", i, ", please specify name")
}
if (settings$pfts[i]$pft$name == "") {
logger.severe("Name specified for pft of index: ", i, " can not be empty.")
}
#check to see if name of each pft in xml file is actually a name of a pft already in database
if (!is.character(dbcon)) {
x <- db.query(paste0("SELECT modeltypes.name AS type FROM pfts, modeltypes WHERE pfts.name = '", settings$pfts[i]$pft$name, "' AND modeltypes.id=pfts.modeltype_id;"), con=dbcon)
if (nrow(x) == 0) {
logger.severe("Did not find a pft with name ", settings$pfts[i]$pft$name)
}
if (nrow(x) > 1) {
logger.warn("Found multiple entries for pft with name ", settings$pfts[i]$pft$name)
}
if (!is.null(settings$model$type)) {
for (j in 1:nrow(x)) {
if (x[[j, 'type']] != settings$model$type) {
logger.severe(settings$pfts[i]$pft$name, "has different model type [", x[[j, 'type']], "] than selected model [", settings$model$type, "].")
}
}
}
}
if (is.null(settings$pfts[i]$pft$outdir)) {
settings$pfts[i]$pft$outdir <- file.path(settings$outdir, "pft", settings$pfts[i]$pft$name)
logger.info("Storing pft", settings$pfts[i]$pft$name, "in", settings$pfts[i]$pft$outdir)
} else {
logger.debug("Storing pft", settings$pfts[i]$pft$name, "in", settings$pfts[i]$pft$outdir)
}
out.dir <- settings$pfts[i]$pft$outdir
if (!file.exists(out.dir) && !dir.create(out.dir, recursive=TRUE)) {
if(identical(dir(out.dir), character(0))){
logger.warn(out.dir, "exists but is empty")
} else {
logger.severe("Could not create folder", out.dir)
}
}
}
}
if (!is.character(dbcon)) {
db.close(dbcon)
}
options(scipen=scipen)
# all done return cleaned up settings
invisible(settings)
}
##' Updates a pecan.xml file to match new layout. This will take care of the
##' conversion to the latest pecan.xml file.
##'
##' @title Update Settings
##' @param settings settings file
##' @return will return the updated settings values
##' @author Rob Kooper
update.settings <- function(settings) {
# update database section, now have different database definitions
# under database section, e.g. fia and bety
if (!is.null(settings$database)) {
# simple check to make sure the database tag is updated
if (!is.null(settings$database$dbname)) {
if (!is.null(settings$database$bety)) {
logger.severe("Please remove dbname etc from database configuration.")
}
logger.info("Database tag has changed, please use <database><bety> to store",
"information about accessing the BETY database. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#database-access.")
bety <- list()
for(name in names(settings$database)) {
bety[[name]] <- settings$database[[name]]
}
settings$database <- list(bety=bety)
}
# warn user about change and update settings
if (!is.null(settings$bety$write)) {
logger.warn("<bety><write> is now part of the database settings. For more",
"information about the database settings see",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#database-access.")
if (is.null(settings$database$bety$write)) {
settings$database$bety$write <- settings$bety$write
settings$bety$write <- NULL
if (length(settings$bety) == 0) settings$bety <- NULL
}
}
}
# model$model_type is now simply model$type and model$name is no longer used
if (!is.null(settings$model$model_type)) {
if (!is.null(settings$model$type)) {
if (settings$model$model_type != settings$model$type) {
logger.severe("Please remove model_type from model configuration.")
} else {
logger.info("Please remove model_type from model configuration.")
}
}
logger.info("Model tag has changed, please use <model><type> to specify",
"type of model. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#model_setup.")
settings$model$type <- settings$model$model_type
settings$model$model_type <- NULL
}
if (!is.null(settings$model$name)) {
if (!is.null(settings$model$type)) {
if (settings$model$name != settings$model$type) {
logger.severe("Please remove name from model configuration.")
} else {
logger.info("Please remove name from model configuration.")
}
}
logger.info("Model tag has changed, please use <model><type> to specify",
"type of model. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#model_setup.")
settings$model$type <- settings$model$name
settings$model$name <- NULL
}
# run$site$met is now run$inputs$met
if (!is.null(settings$run$site$met)) {
if (!is.null(settings$run$inputs$met)) {
if (settings$run$site$met != settings$run$inputs$met) {
logger.severe("Please remove met from model configuration.")
} else {
logger.info("Please remove met from model configuration.")
}
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><met> to specify",
"met file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$met <- settings$run$site$met
settings$run$site$met <- NULL
}
# some specific ED changes
if (!is.null(settings$model$veg)) {
if (!is.null(settings$run$inputs$veg)) {
if (settings$model$veg != settings$run$inputs$veg) {
logger.severe("Please remove veg from model configuration.")
} else {
logger.info("Please remove veg from model configuration.")
}
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><veg> to specify",
"veg file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$veg <- settings$model$veg
settings$model$veg <- NULL
}
if (!is.null(settings$model$soil)) {
if (!is.null(settings$run$inputs$soil)) {
if (settings$model$soil != settings$run$inputs$soil) {
logger.severe("Please remove soil from model configuration.")
} else {
logger.info("Please remove soil from model configuration.")
}
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><soil> to specify",
"soil file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$soil <- settings$model$soil
settings$model$soil <- NULL
}
if (!is.null(settings$model$psscss)) {
if (!is.null(settings$run$inputs$pss)) {
logger.info("Please remove psscss from model configuration.")
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><pss/css/site> to specify",
"pss/css/site file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$pss <- file.path(settings$model$psscss, "foo.pss")
settings$run$inputs$css <- file.path(settings$model$psscss, "foo.css")
settings$run$inputs$site <- file.path(settings$model$psscss, "foo.site")
settings$model$psscss <- NULL
}
if (!is.null(settings$model$inputs)) {
if (!is.null(settings$run$inputs$inputs)) {
logger.info("Please remove inputs from model configuration.")
}
if (is.null(settings$run$inputs)) {
settings$run$inputs <- list()
}
logger.info("Model tag has changed, please use <inputs><lu/thsums> to specify",
"lu/thsums file for a run. See also",
"https://github.com/PecanProject/pecan/wiki/PEcAn-Configuration#run_setup.")
settings$run$inputs$lu <- file.path(settings$model$inputs, "glu")
settings$run$inputs$thsums <- settings$model$inputs
settings$model$soil <- NULL
}
invisible(settings)
}
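## Illustrative sketch (hypothetical values, not part of the original file): how an
## old-style settings list is rewritten by update.settings(); the conversions follow
## the code above.
# old <- list(database = list(dbname = "bety", host = "localhost"),
#             model    = list(model_type = "ED2"),
#             run      = list(site = list(met = "/data/met.nc")))
# new <- update.settings(old)
# # new$database$bety$dbname == "bety"
# # new$model$type == "ED2"
# # new$run$inputs$met == "/data/met.nc"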
##--------------------------------------------------------------------------------------------------#
## EXTERNAL FUNCTIONS
##--------------------------------------------------------------------------------------------------#
##' Loads PEcAn settings file
##'
##' This will try and find the PEcAn settings file in the following order:
##' \enumerate{
##' \item {--settings <file>}{passed as command line argument using --settings}
##' \item {PECAN_SETTINGS}{environment variable PECAN_SETTINGS pointing to a specific file}
##' \item {inputfile}{passed as argument to function}
##' \item {./pecan.xml}{pecan.xml in the current folder}
##' }
##' Once the function finds a valid file, it will not look further.
##' Thus, if the \code{PECAN_SETTINGS} environment variable is set, \code{inputfile} will be ignored.
##' Even if an \code{inputfile} argument is passed, it will be ignored if a file is passed through
##' a higher priority method.
##' @param inputfile the PEcAn settings file to be used.
##' @param outputfile the name of file to which the settings will be
##' written inside the outputdir. If set to null nothing is saved.
##' @return list of all settings as loaded from the XML file(s)
##' @export
##' @import XML
##' @author Shawn Serbin
##' @author Rob Kooper
##' @examples
##' \dontrun{
##' ## bash shell:
##' R --vanilla -- --settings path/to/mypecan.xml < workflow.R
##'
##' ## R:
##'
##' settings <- read.settings()
##' settings <- read.settings(file="willowcreek.xml")
##' test.settings.file <- system.file("tests/test.xml", package = "PEcAn.all")
##' settings <- read.settings(test.settings.file)
##' }
read.settings <- function(inputfile = "pecan.xml", outputfile = "pecan.xml"){
if(inputfile == ""){
logger.warn("settings files specified as empty string; \n\t\tthis may be caused by an incorrect argument to system.file.")
}
loc <- which(commandArgs() == "--settings")
if (length(loc) != 0) {
# 1 filename is passed as argument to R
for(idx in loc) {
if (!is.null(commandArgs()[idx+1]) && file.exists(commandArgs()[idx+1])) {
logger.info("Loading --settings=", commandArgs()[idx+1])
xml <- xmlParse(commandArgs()[idx+1])
break
}
}
} else if (file.exists(Sys.getenv("PECAN_SETTINGS"))) {
# 2 load from PECAN_SETTINGS
logger.info("Loading PECAN_SETTINGS=", Sys.getenv("PECAN_SETTINGS"))
xml <- xmlParse(Sys.getenv("PECAN_SETTINGS"))
} else if(!is.null(inputfile) && file.exists(inputfile)) {
# 3 filename passed into function
logger.info("Loading inpufile=", inputfile)
xml <- xmlParse(inputfile)
} else if (file.exists("pecan.xml")) {
# 4 load ./pecan.xml
logger.info("Loading ./pecan.xml")
xml <- xmlParse("pecan.xml")
} else {
# file not found
logger.severe("Could not find a pecan.xml file")
}
## convert the xml to a list for ease and return
settings <- xmlToList(xml)
settings <- update.settings(settings)
settings <- check.settings(settings)
## save the checked/fixed pecan.xml
if (!is.null(outputfile)) {
pecanfile <- file.path(settings$outdir, outputfile)
if (file.exists(pecanfile)) {
logger.warn(paste("File already exists [", pecanfile, "] file will be overwritten"))
}
saveXML(listToXml(settings, "pecan"), file=pecanfile)
}
## setup Rlib from settings
if(!is.null(settings$Rlib)){
.libPaths(settings$Rlib)
}
## Return settings file as a list
invisible(settings)
}
##=================================================================================================#
####################################################################################################
### EOF. End of R script file.
####################################################################################################
|
library(base)
setwd("C:/Works/Vietnam/Model4/OutputData/data/sim")
Allf <- matrix(scan("Allout.txt"),1200,1254)
dd= (1:1200)*0
for( i in 0:56)
{
dd=cbind(dd,Allf[,(i*22)+7]) # [,(i*22)+DD]) DD is the subbasin number ranging from 1:22
}
dd=dd[,-1]
summ=matrix(0,100,57)
for(i in 1:100)
summ[i,]=apply(dd[((i-1)*12+1):(i*12),],2,sum)
plot(summ[,1],type="l",col="red",xlim=c(0,100),ylim=c(min(summ),max(summ)))
for(i in 1:56)
lines(summ[,i],type="l")
lines(summ[,1],type="l",col="RED")
win.graph()
boxplot(t(summ))
|
/CLIRUN/Senarios/CRMPUPCODE.r
|
no_license
|
sfletcher23/Fletcher_2019_Learning_Climate
|
R
| false | false | 540 |
r
|
library(base)
setwd("C:/Works/Vietnam/Model4/OutputData/data/sim")
Allf <- matrix(scan("Allout.txt"),1200,1254)
dd= (1:1200)*0
for( i in 0:56)
{
dd=cbind(dd,Allf[,(i*22)+7]) # [,(i*22)+DD]) DD is the subbasin number ranging from 1:22
}
dd=dd[,-1]
summ=matrix(0,100,57)
for(i in 1:100)
summ[i,]=apply(dd[((i-1)*12+1):(i*12),],2,sum)
plot(summ[,1],type="l",col="red",xlim=c(0,100),ylim=c(min(summ),max(summ)))
for(i in 1:56)
lines(summ[,i],type="l")
lines(summ[,1],type="l",col="RED")
win.graph()
boxplot(t(summ))
|
%% File Name: mids2mlwin.Rd
%% File Version: 0.09
\name{mids2mlwin}
\alias{mids2mlwin}
\title{Export \code{mids} object to MLwiN}
\usage{
mids2mlwin(imp, file.prefix, path=getwd(), sep=" ", dec=".", silent=FALSE,
X=NULL)
}
\arguments{
\item{imp}{The \code{imp} argument is an object of class
\code{mids}, typically produced by the \code{mice()}
function.}
\item{file.prefix}{A character string describing the
prefix of the output data files.}
\item{path}{A character string containing the path of the
output file. By default, files are written to the
current \code{R} working directory.}
\item{sep}{The separator between the data fields.}
\item{dec}{The decimal separator for numerical data.}
\item{silent}{A logical flag stating whether the names of
the files should be printed.}
\item{X}{Optional data frame of variables to be included in
imputed datasets.}
}
\value{
The return value is \code{NULL}.
}
\description{
Converts a \code{mids} object into a format recognized by the multilevel
software MLwiN.
}
%\details{
%xxx
%}
\author{
Thorsten Henke
}
\examples{
\dontrun{
# imputation nhanes data
data(nhanes)
imp <- mice::mice(nhanes)
# write files to MLwiN
mids2mlwin(imp, file.prefix="nhanes" )
}
}
%\seealso{
% \code{\link[=mids-class]{mids}}, \code{\link{mids2spss}}
%}
%\keyword{manip}
|
/man/mids2mlwin.Rd
|
no_license
|
cran/miceadds
|
R
| false | false | 1,407 |
rd
|
%% File Name: mids2mlwin.Rd
%% File Version: 0.09
\name{mids2mlwin}
\alias{mids2mlwin}
\title{Export \code{mids} object to MLwiN}
\usage{
mids2mlwin(imp, file.prefix, path=getwd(), sep=" ", dec=".", silent=FALSE,
X=NULL)
}
\arguments{
\item{imp}{The \code{imp} argument is an object of class
\code{mids}, typically produced by the \code{mice()}
function.}
\item{file.prefix}{A character string describing the
prefix of the output data files.}
\item{path}{A character string containing the path of the
output file. By default, files are written to the
current \code{R} working directory.}
\item{sep}{The separator between the data fields.}
\item{dec}{The decimal separator for numerical data.}
\item{silent}{A logical flag stating whether the names of
the files should be printed.}
\item{X}{Optional data frame of variables to be included in
imputed datasets.}
}
\value{
The return value is \code{NULL}.
}
\description{
Converts a \code{mids} object into a format recognized by the multilevel
software MLwiN.
}
%\details{
%xxx
%}
\author{
Thorsten Henke
}
\examples{
\dontrun{
# imputation nhanes data
data(nhanes)
imp <- mice::mice(nhanes)
# write files to MLwiN
mids2mlwin(imp, file.prefix="nhanes" )
}
}
%\seealso{
% \code{\link[=mids-class]{mids}}, \code{\link{mids2spss}}
%}
%\keyword{manip}
|
setwd('/Users/kaileyhoo/Documents/MSAN/Module 4/MSAN622/DataVisualization/homework/')
df <- data.frame(
state.name,
state.abb,
state.region,
state.division,
state.x77,
row.names = NULL
)
colnames(df) <- c("name", "abb", "region", "division",
"population", "income", "illiteracy",
"life_expectancy", "murders", "hs_grad",
"days_frost", "land_area")
write.csv(df, file = "state.csv", row.names = FALSE)
library(jsonlite)
json <- toJSON(df, dataframe = "rows", factor = "string", pretty = TRUE)
cat(json, file = "state.x77.json")
|
/homework/hw2/hw2.R
|
no_license
|
kmhoo/DataVisualization
|
R
| false | false | 597 |
r
|
setwd('/Users/kaileyhoo/Documents/MSAN/Module 4/MSAN622/DataVisualization/homework/')
df <- data.frame(
state.name,
state.abb,
state.region,
state.division,
state.x77,
row.names = NULL
)
colnames(df) <- c("name", "abb", "region", "division",
"population", "income", "illiteracy",
"life_expectancy", "murders", "hs_grad",
"days_frost", "land_area")
write.csv(df, file = "state.csv", row.names = FALSE)
library(jsonlite)
json <- toJSON(df, dataframe = "rows", factor = "string", pretty = TRUE)
cat(json, file = "state.x77.json")
|
## This class provides logging functionality (Stata's log and cmdlog commands).
## R's sink() doesn't quite do what we want - a) it doesn't let us distinguish
## between command and output result streams, but we want to handle these
## separately, and b) it can only have one sink at the top of the stack at any
## given time.
Logger <-
R6::R6Class("Logger",
public=list(
initialize = function()
{
private$logs <- list()
private$cmdlogs <- list()
private$use_log <- TRUE
private$use_cmdlog <- TRUE
},
has_sink = function(filename, type = NULL)
{
raiseifnot(is.null(type) || type %in% c("log", "cmdlog"),
msg="Invalid log type")
pth <- normalizePath(filename)
if(is.null(type))
{
return(pth %in% names(private$logs) ||
pth %in% names(private$cmdlogs))
} else if(type == "log")
{
return(pth %in% names(private$logs))
} else
{
return(pth %in% names(private$cmdlogs))
}
},
sink_type = function(filename)
{
raiseifnot(self$has_sink(filename),
msg="No such logging sink")
pth <- normalizePath(filename)
if(pth %in% names(private$logs))
{
return("log")
} else
{
return("cmdlog")
}
},
register_sink = function(filename, type = "log", header = TRUE)
{
raiseif(type %not_in% c("log", "cmdlog"),
msg="Invalid logging type")
raiseif(self$has_sink(filename),
msg="Log already exists")
pth <- normalizePath(filename)
ret <-
tryCatch(
{
con <- file(pth, open="wb")
},
error=function(e) e
)
raiseif(inherits(ret, "error"),
msg="Could not open logging sink " %p% pth)
if(type == "log")
{
private$logs[[pth]] = con
} else
{
private$cmdlogs[[pth]] = con
}
if(header)
{
msg <- paste0(rep('-', 80), collapse="") %p% '\n'
msg <- msg %p% ifelse(type == 'log', 'log: ', 'cmdlog: ')
msg <- msg %p% pth %p% '\n'
msg <- msg %p% 'log type: text\n' #SMCL isn't supported
msg <- msg %p% 'opened on: ' %p% date() %p% '\n'
msg <- msg %p% '\n'
cat(msg, file=con, sep="")
}
return(invisible(NULL))
},
deregister_sink = function(filename)
{
type <- self$sink_type(filename)
pth <- normalizePath(filename)
if(type == "log")
{
close(private$logs[[pth]])
private$logs[[pth]] <- NULL
} else
{
close(private$cmdlogs[[pth]])
private$cmdlogs[[pth]] <- NULL
}
return(invisible(NULL))
},
deregister_all_sinks = function(type = NULL)
{
raiseifnot(is.null(type) || type %in% c("log", "cmdlog"),
msg="Invalid log type")
if(is.null(type))
{
for(con in names(private$logs))
{
self$deregister_sink(con)
}
for(con in names(private$cmdlogs))
{
self$deregister_sink(con)
}
} else if(type == "log")
{
for(con in names(private$logs))
{
self$deregister_sink(con)
}
} else
{
for(con in names(private$cmdlogs))
{
self$deregister_sink(con)
}
}
return(invisible(NULL))
},
log_command = function(msg, echo = FALSE)
{
if(echo)
cat(msg, sep="")
if(self$log_enabled)
{
for(con in private$logs)
{
cat(msg, file=con, sep="", append=TRUE)
}
}
if(self$cmdlog_enabled)
{
for(con in private$cmdlogs)
{
cat(msg, file=con, sep="", append=TRUE)
}
}
return(invisible(NULL))
},
log_result = function(msg, print_results = FALSE)
{
if(print_results)
{
cat(msg, sep="")
cat("\n")
}
if(self$log_enabled)
{
for(con in private$logs)
{
cat(msg, file=con, sep="", append=TRUE)
cat("\n")
}
}
return(invisible(NULL))
}
),
active = list(
log_sinks = function() names(private$logs),
           cmdlog_sinks = function() names(private$cmdlogs),
log_enabled = function(value)
{
if(missing(value))
{
return(private$use_log)
} else
{
private$use_log <- as.logical(value)
}
},
cmdlog_enabled = function(value)
{
if(missing(value))
{
return(private$use_cmdlog)
} else
{
private$use_cmdlog <- as.logical(value)
}
}
),
private = list(
#The log and command log sink lists. These aren't stacks or queues;
#really they're hash tables: we just need to be able to a) get a
#connection given its name, and b) loop over all currently
#registered connections.
logs = NULL,
cmdlogs = NULL,
use_log = NULL,
use_cmdlog = NULL
)
)
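## Hypothetical usage sketch (assumes the rest of the package, e.g. the %p% and
## raiseif helpers, is loaded; file names are made up):
# lg <- Logger$new()
# lg$register_sink("analysis.log", type = "log")                  # commands + results
# lg$register_sink("analysis.do", type = "cmdlog", header = FALSE) # commands only
# lg$log_command("summarize x\n", echo = TRUE)  # written to both kinds of sink
# lg$log_result("mean of x: 4.2")               # written to log sinks only
# lg$deregister_all_sinks()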
|
/R/logger.R
|
permissive
|
mcomsa/ado
|
R
| false | false | 6,261 |
r
|
## This class provides logging functionality (Stata's log and cmdlog commands).
## R's sink() doesn't quite do what we want - a) it doesn't let us distinguish
## between command and output result streams, but we want to handle these
## separately, and b) it can only have one sink at the top of the stack at any
## given time.
Logger <-
R6::R6Class("Logger",
public=list(
initialize = function()
{
private$logs <- list()
private$cmdlogs <- list()
private$use_log <- TRUE
private$use_cmdlog <- TRUE
},
has_sink = function(filename, type = NULL)
{
raiseifnot(is.null(type) || type %in% c("log", "cmdlog"),
msg="Invalid log type")
pth <- normalizePath(filename)
if(is.null(type))
{
return(pth %in% names(private$logs) ||
pth %in% names(private$cmdlogs))
} else if(type == "log")
{
return(pth %in% names(private$logs))
} else
{
return(pth %in% names(private$cmdlogs))
}
},
sink_type = function(filename)
{
raiseifnot(self$has_sink(filename),
msg="No such logging sink")
pth <- normalizePath(filename)
if(pth %in% names(private$logs))
{
return("log")
} else
{
return("cmdlog")
}
},
register_sink = function(filename, type = "log", header = TRUE)
{
raiseif(type %not_in% c("log", "cmdlog"),
msg="Invalid logging type")
raiseif(self$has_sink(filename),
msg="Log already exists")
pth <- normalizePath(filename)
ret <-
tryCatch(
{
con <- file(pth, open="wb")
},
error=function(e) e
)
raiseif(inherits(ret, "error"),
msg="Could not open logging sink " %p% pth)
if(type == "log")
{
private$logs[[pth]] = con
} else
{
private$cmdlogs[[pth]] = con
}
if(header)
{
msg <- paste0(rep('-', 80), collapse="") %p% '\n'
msg <- msg %p% ifelse(type == 'log', 'log: ', 'cmdlog: ')
msg <- msg %p% pth %p% '\n'
msg <- msg %p% 'log type: text\n' #SMCL isn't supported
msg <- msg %p% 'opened on: ' %p% date() %p% '\n'
msg <- msg %p% '\n'
cat(msg, file=con, sep="")
}
return(invisible(NULL))
},
deregister_sink = function(filename)
{
type <- self$sink_type(filename)
pth <- normalizePath(filename)
if(type == "log")
{
close(private$logs[[pth]])
private$logs[[pth]] <- NULL
} else
{
close(private$cmdlogs[[pth]])
private$cmdlogs[[pth]] <- NULL
}
return(invisible(NULL))
},
deregister_all_sinks = function(type = NULL)
{
raiseifnot(is.null(type) || type %in% c("log", "cmdlog"),
msg="Invalid log type")
if(is.null(type))
{
for(con in names(private$logs))
{
self$deregister_sink(con)
}
for(con in names(private$cmdlogs))
{
self$deregister_sink(con)
}
} else if(type == "log")
{
for(con in names(private$logs))
{
self$deregister_sink(con)
}
} else
{
for(con in names(private$cmdlogs))
{
self$deregister_sink(con)
}
}
return(invisible(NULL))
},
log_command = function(msg, echo = FALSE)
{
if(echo)
cat(msg, sep="")
if(self$log_enabled)
{
for(con in private$logs)
{
cat(msg, file=con, sep="", append=TRUE)
}
}
if(self$cmdlog_enabled)
{
for(con in private$cmdlogs)
{
cat(msg, file=con, sep="", append=TRUE)
}
}
return(invisible(NULL))
},
log_result = function(msg, print_results = FALSE)
{
if(print_results)
{
cat(msg, sep="")
cat("\n")
}
if(self$log_enabled)
{
for(con in private$logs)
{
cat(msg, file=con, sep="", append=TRUE)
cat("\n")
}
}
return(invisible(NULL))
}
),
active = list(
log_sinks = function() names(private$logs),
           cmdlog_sinks = function() names(private$cmdlogs),
log_enabled = function(value)
{
if(missing(value))
{
return(private$use_log)
} else
{
private$use_log <- as.logical(value)
}
},
cmdlog_enabled = function(value)
{
if(missing(value))
{
return(private$use_cmdlog)
} else
{
private$use_cmdlog <- as.logical(value)
}
}
),
private = list(
#The log and command log sink lists. These aren't stacks or queues;
#really they're hash tables: we just need to be able to a) get a
#connection given its name, and b) loop over all currently
#registered connections.
logs = NULL,
cmdlogs = NULL,
use_log = NULL,
use_cmdlog = NULL
)
)
|
# working out under-reporting estimate and CIs
underReportingEstimates <- function(data, delay_fun){
dplyr::group_by(data, country) %>%
dplyr::do(scale_cfr(., delay_fun)) %>%
dplyr::filter(cum_known_t > 0 & cum_known_t >= total_deaths) %>%
dplyr::mutate(nCFR_UQ = binom.test(total_deaths, total_cases)$conf.int[2],
nCFR_LQ = binom.test(total_deaths, total_cases)$conf.int[1],
cCFR_UQ = binom.test(total_deaths, cum_known_t)$conf.int[2],
cCFR_LQ = binom.test(total_deaths, cum_known_t)$conf.int[1],
underreporting_estimate = cCFRBaseline / (100*cCFR),
lower = cCFREstimateRange[1] / (100 * cCFR_UQ),
upper = cCFREstimateRange[2] / (100 * cCFR_LQ),
quantile25 = binom.test(total_deaths, cum_known_t, conf.level = 0.5)$conf.int[1],
quantile75 = binom.test(total_deaths, cum_known_t, conf.level = 0.5)$conf.int[2]) %>%
dplyr::filter(total_deaths > 10)}
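# Hypothetical usage sketch: scale_cfr() and the globals cCFRBaseline and
# cCFREstimateRange are assumed to be defined elsewhere in the project; `data` must
# at least have a country column, and scale_cfr() is expected to supply or preserve
# the total_cases, total_deaths and cum_known_t columns used above. `my_delay_fun`
# stands in for the onset-to-death delay distribution function.
# estimates <- underReportingEstimates(case_data, delay_fun = my_delay_fun)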
|
/R/table_of_estimates.R
|
no_license
|
thimotei/CFR_calculation
|
R
| false | false | 1,013 |
r
|
# working out under-reporting estimate and CIs
underReportingEstimates <- function(data, delay_fun){
dplyr::group_by(data, country) %>%
dplyr::do(scale_cfr(., delay_fun)) %>%
dplyr::filter(cum_known_t > 0 & cum_known_t >= total_deaths) %>%
dplyr::mutate(nCFR_UQ = binom.test(total_deaths, total_cases)$conf.int[2],
nCFR_LQ = binom.test(total_deaths, total_cases)$conf.int[1],
cCFR_UQ = binom.test(total_deaths, cum_known_t)$conf.int[2],
cCFR_LQ = binom.test(total_deaths, cum_known_t)$conf.int[1],
underreporting_estimate = cCFRBaseline / (100*cCFR),
lower = cCFREstimateRange[1] / (100 * cCFR_UQ),
upper = cCFREstimateRange[2] / (100 * cCFR_LQ),
quantile25 = binom.test(total_deaths, cum_known_t, conf.level = 0.5)$conf.int[1],
quantile75 = binom.test(total_deaths, cum_known_t, conf.level = 0.5)$conf.int[2]) %>%
dplyr::filter(total_deaths > 10)}
|
source("analysis/setup.R")
mod_name <- "hinge_threshold_age_effect"
###############################################################################
## Prep data for the mcmc ----
# format data for stan
fishdat_cut <- prep_stan_data(fishdat_cut)
# setting up for Stan
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
###############################################################################
## Set up the initial values list ----
# read in the model we'll take initial values from
init_model <- readRDS(file = "models/hinge_sex_diff.rds")
# calculate posterior mean of univariate parameters
pars <-
c("alpha", "d_alpha_male", "d_eta_male",
"d_beta_1_male", "d_beta_2_male",
"beta_1", "beta_2", "eta", "sigma_fish", "sigma_year", "sigma")
init_vals <- lapply(rstan::extract(init_model, pars), mean)
# calculate posterior mean of vector-valued parameters
init_vals$u_fish <- apply(rstan::extract(init_model, "u_fish")[[1]], 2, mean)
init_vals$u_year <- apply(rstan::extract(init_model, "u_year")[[1]], 2, mean)
# add any additional parameters in new model
init_vals$d_eta_age <- 0.0
# replicate the list of initial conditions to match number of chains
init_vals <- replicate(4, init_vals, simplify = FALSE)
# remove the massive object
rm(init_model); gc()
###############################################################################
## Run the mcmc: hinge threshold age effect model ----
# compile model
f_loc <- sub("XX", replacement = mod_name, x = "stan/XX.stan")
rt <- stanc(file = f_loc)
sm <- stan_model(stanc_ret = rt, verbose = FALSE)
#
system.time(
stan_fit <- sampling(
sm,
data = fishdat_cut,
seed = 1,
iter = 2000,
chains = 4,
init = init_vals,
control = list(max_treedepth = 10, adapt_delta = 0.8)
)
)
###############################################################################
## Save the model ----
stan_fit@stanmodel@dso <- new("cxxdso") # remove reference to stan model binary
f_loc <- sub("XX", replacement = mod_name, x = "models/XX.rds")
saveRDS(stan_fit, file = f_loc)
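###############################################################################
## Optional convergence checks (illustrative; parameter names taken from the init list above) ----
# print(stan_fit, pars = c("alpha", "eta", "d_eta_age", "sigma"), probs = c(0.05, 0.95))
# rstan::traceplot(stan_fit, pars = "d_eta_age")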
|
/analysis/fit_hinge_threshold_age_effect.R
|
no_license
|
clarkejames944/fish-data-analysis
|
R
| false | false | 2,085 |
r
|
source("analysis/setup.R")
mod_name <- "hinge_threshold_age_effect"
###############################################################################
## Prep data for the mcmc ----
# format data for stan
fishdat_cut <- prep_stan_data(fishdat_cut)
# setting up for Stan
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
###############################################################################
## Set up the initial values list ----
# read in the model we'll take initial values from
init_model <- readRDS(file = "models/hinge_sex_diff.rds")
# calculate posterior mean of univariate parameters
pars <-
c("alpha", "d_alpha_male", "d_eta_male",
"d_beta_1_male", "d_beta_2_male",
"beta_1", "beta_2", "eta", "sigma_fish", "sigma_year", "sigma")
init_vals <- lapply(rstan::extract(init_model, pars), mean)
# calculate posterior mean of vector-valued parameters
init_vals$u_fish <- apply(rstan::extract(init_model, "u_fish")[[1]], 2, mean)
init_vals$u_year <- apply(rstan::extract(init_model, "u_year")[[1]], 2, mean)
# add any additional parameters in new model
init_vals$d_eta_age <- 0.0
# replicate the list of initial conditions to match number of chains
init_vals <- replicate(4, init_vals, simplify = FALSE)
# remove the massive object
rm(init_model); gc()
###############################################################################
## Run the mcmc: hinge threshold age effect model ----
# compile model
f_loc <- sub("XX", replacement = mod_name, x = "stan/XX.stan")
rt <- stanc(file = f_loc)
sm <- stan_model(stanc_ret = rt, verbose = FALSE)
#
system.time(
stan_fit <- sampling(
sm,
data = fishdat_cut,
seed = 1,
iter = 2000,
chains = 4,
init = init_vals,
control = list(max_treedepth = 10, adapt_delta = 0.8)
)
)
###############################################################################
## Save the model ----
stan_fit@stanmodel@dso <- new("cxxdso") # remove reference to stan model binary
f_loc <- sub("XX", replacement = mod_name, x = "models/XX.rds")
saveRDS(stan_fit, file = f_loc)
|
# Uploading a scanned pdf file and transforming it into png files
library(tesseract)
library(pdftools)
library(pdftables)
# All pages extracted into a single txt file
file_for_ocr <- pdf_convert(file.choose(new=FALSE), dpi=600)
file_ocr <- ocr(file_for_ocr)
cat(file_ocr, file="D:/Predictive Analytics/R/Optical Character Recognition/file_extracted.txt", sep="\n")
# Page-by-page extraction into a txt file
page_file_ocr <- ocr(file.choose(new=FALSE))
cat(page_file_ocr, file="D:/Predictive Analytics/R/Optical Character Recognition/pagebypage_file_extracted.txt", sep="\n")
|
/OCR_project.R
|
no_license
|
subhadip2038/R
|
R
| false | false | 575 |
r
|
# Uploading a scanned pdf file and transforming it into png files
library(tesseract)
library(pdftools)
library(pdftables)
# All pages extracted into a single txt file
file_for_ocr <- pdf_convert(file.choose(new=FALSE), dpi=600)
file_ocr <- ocr(file_for_ocr)
cat(file_ocr, file="D:/Predictive Analytics/R/Optical Character Recognition/file_extracted.txt", sep="\n")
# Page-by-page extraction into a txt file
page_file_ocr <- ocr(file.choose(new=FALSE))
cat(page_file_ocr, file="D:/Predictive Analytics/R/Optical Character Recognition/pagebypage_file_extracted.txt", sep="\n")
|
rankhospital <- function(state, outcome, num = "best"){
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")[,c(2,7,11,17,23)]
if(! ( state %in% levels(factor(data$State)) ) ) {
stop("invalid state")
}
if(! (outcome == "heart attack" || outcome == "heart failure" || outcome == "pneumonia") ) {
stop("invalid outcome")
}
if(class(num) == "character"){
if (! (num == "best" || num == "worst")){
stop("invalid number")
}
}
data = data[data$State==state,]
data = data[,c(1,3,4,5)]
if(outcome == "heart attack") {
data = data[,c(1,2)]
} else if(outcome == "heart failure") {
data = data[,c(1,3)]
} else if(outcome == "pneumonia") {
data = data[,c(1,4)]
}
names(data)[2] = "Deaths"
data[, 2] = suppressWarnings( as.numeric(data[, 2]) )
data = data[!is.na(data$Deaths),]
if(class(num) == "numeric" && num > nrow(data)){
return (NA)
}
data = data[order(data$Deaths, data$Hospital.Name),]
if(class(num) == "character") {
if(num == "best") {
return (data$Hospital.Name[1])
}
else if(num == "worst") {
return (data$Hospital.Name[nrow(data)])
}
}
else {
return (data$Hospital.Name[num])
}
}
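## Hypothetical usage (requires "outcome-of-care-measures.csv" in the working directory):
# rankhospital("TX", "heart failure", 4)        # 4th-ranked hospital in Texas
# rankhospital("MD", "heart attack", "worst")   # hospital with the highest 30-day mortality
# rankhospital("MN", "heart attack", 5000)      # rank larger than the number of hospitals -> NA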
|
/Hospital mortality/rankhospital.R
|
permissive
|
kausik1993/R_Programming_Coursera
|
R
| false | false | 1,289 |
r
|
rankhospital <- function(state, outcome, num = "best"){
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")[,c(2,7,11,17,23)]
if(! ( state %in% levels(factor(data$State)) ) ) {
stop("invalid state")
}
if(! (outcome == "heart attack" || outcome == "heart failure" || outcome == "pneumonia") ) {
stop("invalid outcome")
}
if(class(num) == "character"){
if (! (num == "best" || num == "worst")){
stop("invalid number")
}
}
data = data[data$State==state,]
data = data[,c(1,3,4,5)]
if(outcome == "heart attack") {
data = data[,c(1,2)]
} else if(outcome == "heart failure") {
data = data[,c(1,3)]
} else if(outcome == "pneumonia") {
data = data[,c(1,4)]
}
names(data)[2] = "Deaths"
data[, 2] = suppressWarnings( as.numeric(data[, 2]) )
data = data[!is.na(data$Deaths),]
if(class(num) == "numeric" && num > nrow(data)){
return (NA)
}
data = data[order(data$Deaths, data$Hospital.Name),]
if(class(num) == "character") {
if(num == "best") {
return (data$Hospital.Name[1])
}
else if(num == "worst") {
return (data$Hospital.Name[nrow(data)])
}
}
else {
return (data$Hospital.Name[num])
}
}
|
#
# tail-signif.R, 30 Dec 15
#
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
plot_layout(1, 2)
par(mar=c(5, 1, 4, 1)+0.1)
pal_col=rainbow(3)
norm_mean=4
norm_sd=1
xpoints=seq(0, norm_mean+norm_sd*4, by=0.01)
ypoints=dnorm(xpoints, mean=norm_mean, sd=norm_sd)
qn=qnorm(c(0.975, 0.95), 0, norm_sd)
max_y=max(ypoints)
# Two-tailed significance test
plot(xpoints, ypoints, type="l", col=pal_col[3], fg="grey", col.axis="grey", yaxt="n",
bty="n", yaxt="n",
xlab="Two-tailed", ylab="")
q=c(norm_mean-qn[1], norm_mean+qn[1])
lines(c(q[1], q[1]), c(0, max_y), col=pal_col[1])
lines(c(q[2], q[2]), c(0, max_y), col=pal_col[1])
upper_y=subset(ypoints, xpoints <= norm_mean-qn[1])
upper_x=subset(xpoints, xpoints <= norm_mean-qn[1])
polygon(c(upper_x, norm_mean-qn[1], 0), c(upper_y, 0, 0), col=pal_col[2])
upper_y=subset(ypoints, xpoints >= norm_mean+qn[1])
upper_x=subset(xpoints, xpoints >= norm_mean+qn[1])
polygon(c(upper_x, norm_mean+qn[1]), c(upper_y, 0), col=pal_col[2])
text(norm_mean, max_y/1.5, "Fail to reject\nnull hypothesis", cex=1.4)
text(norm_mean-norm_sd*2.0, max_y/3, "Reject null\nhypothesis", pos=2, cex=1.4)
text(norm_mean+norm_sd*2.0, max_y/3, "Reject null\nhypothesis", pos=4, cex=1.4)
# One-tailed significance test
plot(xpoints, ypoints, type="l", col=pal_col[3], fg="grey", col.axis="grey", yaxt="n",
bty="n", yaxt="n",
xlab="One-tailed", ylab="")
q=norm_mean+qn[2]
lines(c(q[1], q[1]), c(0, max_y), col=pal_col[1])
upper_y=subset(ypoints, xpoints >= norm_mean+qn[2])
upper_x=subset(xpoints, xpoints >= norm_mean+qn[2])
polygon(c(upper_x, norm_mean+qn[2]), c(upper_y, 0), col=pal_col[2])
text(norm_mean-1.3, max_y/1.5, "Fail to reject null hypothesis", cex=1.4)
text(norm_mean+norm_sd*1.7, max_y/3, "Reject null\nhypothesis", pos=4, cex=1.4)
|
/statistics/tail-signif.R
|
no_license
|
vinodrajendran001/ESEUR-code-data
|
R
| false | false | 1,838 |
r
|
#
# tail-signif.R, 30 Dec 15
#
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
plot_layout(1, 2)
par(mar=c(5, 1, 4, 1)+0.1)
pal_col=rainbow(3)
norm_mean=4
norm_sd=1
xpoints=seq(0, norm_mean+norm_sd*4, by=0.01)
ypoints=dnorm(xpoints, mean=norm_mean, sd=norm_sd)
qn=qnorm(c(0.975, 0.95), 0, norm_sd)
max_y=max(ypoints)
# Two-tailed significance test
plot(xpoints, ypoints, type="l", col=pal_col[3], fg="grey", col.axis="grey", yaxt="n",
bty="n", yaxt="n",
xlab="Two-tailed", ylab="")
q=c(norm_mean-qn[1], norm_mean+qn[1])
lines(c(q[1], q[1]), c(0, max_y), col=pal_col[1])
lines(c(q[2], q[2]), c(0, max_y), col=pal_col[1])
upper_y=subset(ypoints, xpoints <= norm_mean-qn[1])
upper_x=subset(xpoints, xpoints <= norm_mean-qn[1])
polygon(c(upper_x, norm_mean-qn[1], 0), c(upper_y, 0, 0), col=pal_col[2])
upper_y=subset(ypoints, xpoints >= norm_mean+qn[1])
upper_x=subset(xpoints, xpoints >= norm_mean+qn[1])
polygon(c(upper_x, norm_mean+qn[1]), c(upper_y, 0), col=pal_col[2])
text(norm_mean, max_y/1.5, "Fail to reject\nnull hypothesis", cex=1.4)
text(norm_mean-norm_sd*2.0, max_y/3, "Reject null\nhypothesis", pos=2, cex=1.4)
text(norm_mean+norm_sd*2.0, max_y/3, "Reject null\nhypothesis", pos=4, cex=1.4)
# One-tailed significance test
plot(xpoints, ypoints, type="l", col=pal_col[3], fg="grey", col.axis="grey", yaxt="n",
bty="n", yaxt="n",
xlab="One-tailed", ylab="")
q=norm_mean+qn[2]
lines(c(q[1], q[1]), c(0, max_y), col=pal_col[1])
upper_y=subset(ypoints, xpoints >= norm_mean+qn[2])
upper_x=subset(xpoints, xpoints >= norm_mean+qn[2])
polygon(c(upper_x, norm_mean+qn[2]), c(upper_y, 0), col=pal_col[2])
text(norm_mean-1.3, max_y/1.5, "Fail to reject null hypothesis", cex=1.4)
text(norm_mean+norm_sd*1.7, max_y/3, "Reject null\nhypothesis", pos=4, cex=1.4)
|
ASKAT <-
function(Ped, kin1, Missing){
  # Missing is a logical parameter; if TRUE, it means that there are missing values in the Pedigree data set: the ASKAT function removes subjects with missing data
  # kin1 is the kinship matrix. If the kinship matrix is calculated with GenABEL, convert it first using: kin1 = diagReplace(kin1, upper=TRUE)
  # Ped is the Pedigree data file: it has subject IDs as first column (IDs should be different for all subjects), phenotype as second column and the region-based SNPs that will be analyzed together
##### STEP 1: Check for Missing Data and construction of Pedigree without Missing Data #####
##### Also creation of the file "kin.FaST.data.miss.txt" which will be needed by FaST-LMM #####
if (Missing == "TRUE"){
data.Without.NA = pheno.geno.kin.Without.NA(Ped, kin1)
Ped = data.Without.NA$pheno.Geno.No.NA
}
if (Missing == "FALSE"){
kin.FaST = 2 * kin1
n.col = Ped[,1]
kin.FaST = cbind(n.col,kin.FaST)
kin.FaST = rbind(c("var",n.col),kin.FaST)
file.name.data = paste("kin.FaST.data.miss.txt",sep="")
write(t(kin.FaST[ ,1:(dim(kin.FaST)[1])]), file = file.name.data, ncolumns = dim(kin.FaST)[1], sep= "\t")
}
#####
p = dim(Ped)[1]
Y.trait = Ped[,2]
if (dim(Ped)[2] > 2){
X = as.matrix(Ped[,3:dim(Ped)[2]])
##### STEP 2: Under the NULL we call FaST-LMM to estimate the VCs ####
res.VC.FaST = VC.FaST.LMM(Ped)
Estim.Sigma.RG = res.VC.FaST$Polygenic
Estim.Sigma.e = res.VC.FaST$Env
pvalue.FaST.LMM = res.VC.FaST$pvalue.FaST.LMM
read.SVD.bin = file(paste("OURSKAT_FaST/S.bin",sep=""), "rb")
S = readBin(read.SVD.bin, "numeric", n=p, endian="little")
close(read.SVD.bin)
S = diag(sort(S, decreasing = T))
read.SVD.bin = file(paste("OURSKAT_FaST/U.bin",sep=""), "rb")
U = readBin(read.SVD.bin, "numeric", n=p*p, endian="little")
close(read.SVD.bin)
U = matrix(U,p,p, byrow=F)
U = U[ ,ncol(U):1]
system("rm -r OURSKAT_FaST")
##### STEP 3: Calculation of weights matrix W and the matrix K =GWG #####
freq.MAF = apply(X, 2, mean)/2
if( length(freq.MAF) == 1){
w = (dbeta(freq.MAF, 1, 25))^2
K = w * X %*% t(X)
} else
{
w = vector(length = length(freq.MAF))
for (i in 1:length(freq.MAF)){
w[i] = (dbeta(freq.MAF[i], 1, 25))^2
}
w = diag(w)
K = X %*% w %*% t(X)
}
##### STEP 4: ASKAT score test statistic calculations #####
Gamma = Estim.Sigma.RG / Estim.Sigma.e
D.0 = (Gamma * S) + diag(1, dim(X)[1], dim(X)[1])
inv.sqrt.D.0 = diag(1/sqrt(diag(D.0)))
K.tilde = inv.sqrt.D.0 %*% t(U)
un.n = c(rep(1,dim(U)[1]))
X.tilde = K.tilde %*% un.n
Y.tilde = K.tilde %*% Y.trait
K.tilde = K.tilde %*% K %*% t(K.tilde)
P.0.tilde = diag(1, dim(U)[1], dim(U)[2]) - ( X.tilde %*% solve( t(X.tilde) %*% X.tilde ) %*% t(X.tilde) )
res = P.0.tilde %*% Y.tilde
s2 = Estim.Sigma.e
Q = t(res) %*% K.tilde
Q = Q %*% res/(2 * s2)
W1 = P.0.tilde %*% K.tilde
W1 = W1 %*% P.0.tilde/2
out = Get_PValue.Modif(W1, Q) ### This function was taken from SKAT package
pvalue.davies = out$p.value
lambda = out$lambda
} else { pvalue.davies = runif(1,0,1)
Q = 0
Estim.Sigma.RG = 0
Estim.Sigma.e = 0
lambda = 0
print("There is no genotypes that match with the phenotype")
}
resultats = list(pvalue.ASKAT = pvalue.davies, Q.ASKAT = Q, Polygenic.VC = Estim.Sigma.RG, Env.VC = Estim.Sigma.e, lambda = lambda)
resultats
}
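## Hypothetical usage sketch, assuming FaST-LMM and the helper functions
## (VC.FaST.LMM, pheno.geno.kin.Without.NA, Get_PValue.Modif) are available:
## Ped: column 1 = subject IDs, column 2 = phenotype, columns 3+ = region-based SNPs
## kin1: kinship matrix for the same subjects
# res <- ASKAT(Ped, kin1, Missing = "FALSE")
# res$pvalue.ASKAT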
|
/karim4k_v2/ASKAT.R
|
permissive
|
pcingola/Askat
|
R
| false | false | 4,057 |
r
|
ASKAT <-
function(Ped, kin1, Missing){
  # Missing is a logical parameter; if TRUE, it means that there are missing values in the Pedigree data set: the ASKAT function removes subjects with missing data
  # kin1 is the kinship matrix. If the kinship matrix is calculated with GenABEL, convert it first using: kin1 = diagReplace(kin1, upper=TRUE)
  # Ped is the Pedigree data file: it has subject IDs as first column (IDs should be different for all subjects), phenotype as second column and the region-based SNPs that will be analyzed together
##### STEP 1: Check for Missing Data and construction of Pedigree without Missing Data #####
##### Also creation of the file "kin.FaST.data.miss.txt" which will be needed by FaST-LMM #####
if (Missing == "TRUE"){
data.Without.NA = pheno.geno.kin.Without.NA(Ped, kin1)
Ped = data.Without.NA$pheno.Geno.No.NA
}
if (Missing == "FALSE"){
kin.FaST = 2 * kin1
n.col = Ped[,1]
kin.FaST = cbind(n.col,kin.FaST)
kin.FaST = rbind(c("var",n.col),kin.FaST)
file.name.data = paste("kin.FaST.data.miss.txt",sep="")
write(t(kin.FaST[ ,1:(dim(kin.FaST)[1])]), file = file.name.data, ncolumns = dim(kin.FaST)[1], sep= "\t")
}
#####
p = dim(Ped)[1]
Y.trait = Ped[,2]
if (dim(Ped)[2] > 2){
X = as.matrix(Ped[,3:dim(Ped)[2]])
##### STEP 2: Under the NULL we call FaST-LMM to estimate the VCs ####
res.VC.FaST = VC.FaST.LMM(Ped)
Estim.Sigma.RG = res.VC.FaST$Polygenic
Estim.Sigma.e = res.VC.FaST$Env
pvalue.FaST.LMM = res.VC.FaST$pvalue.FaST.LMM
read.SVD.bin = file(paste("OURSKAT_FaST/S.bin",sep=""), "rb")
S = readBin(read.SVD.bin, "numeric", n=p, endian="little")
close(read.SVD.bin)
S = diag(sort(S, decreasing = T))
read.SVD.bin = file(paste("OURSKAT_FaST/U.bin",sep=""), "rb")
U = readBin(read.SVD.bin, "numeric", n=p*p, endian="little")
close(read.SVD.bin)
U = matrix(U,p,p, byrow=F)
U = U[ ,ncol(U):1]
system("rm -r OURSKAT_FaST")
##### STEP 3: Calculation of weights matrix W and the matrix K =GWG #####
freq.MAF = apply(X, 2, mean)/2
if( length(freq.MAF) == 1){
w = (dbeta(freq.MAF, 1, 25))^2
K = w * X %*% t(X)
} else
{
w = vector(length = length(freq.MAF))
for (i in 1:length(freq.MAF)){
w[i] = (dbeta(freq.MAF[i], 1, 25))^2
}
w = diag(w)
K = X %*% w %*% t(X)
}
##### STEP 4: ASKAT score test statistic calculations #####
Gamma = Estim.Sigma.RG / Estim.Sigma.e
D.0 = (Gamma * S) + diag(1, dim(X)[1], dim(X)[1])
inv.sqrt.D.0 = diag(1/sqrt(diag(D.0)))
K.tilde = inv.sqrt.D.0 %*% t(U)
un.n = c(rep(1,dim(U)[1]))
X.tilde = K.tilde %*% un.n
Y.tilde = K.tilde %*% Y.trait
K.tilde = K.tilde %*% K %*% t(K.tilde)
P.0.tilde = diag(1, dim(U)[1], dim(U)[2]) - ( X.tilde %*% solve( t(X.tilde) %*% X.tilde ) %*% t(X.tilde) )
res = P.0.tilde %*% Y.tilde
s2 = Estim.Sigma.e
Q = t(res) %*% K.tilde
Q = Q %*% res/(2 * s2)
W1 = P.0.tilde %*% K.tilde
W1 = W1 %*% P.0.tilde/2
out = Get_PValue.Modif(W1, Q) ### This function was taken from SKAT package
pvalue.davies = out$p.value
lambda = out$lambda
} else { pvalue.davies = runif(1,0,1)
Q = 0
Estim.Sigma.RG = 0
Estim.Sigma.e = 0
lambda = 0
print("There is no genotypes that match with the phenotype")
}
resultats = list(pvalue.ASKAT = pvalue.davies, Q.ASKAT = Q, Polygenic.VC = Estim.Sigma.RG, Env.VC = Estim.Sigma.e, lambda = lambda)
resultats
}
|
\name{test}
\alias{test}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
the standard generic function for all test methods
}
\description{
The default generic function for test methods for all stepp models classes and the steppes class.
For detail, please refer to the documentation in the test method in the S4 class: stmodelCI, stmodelKM and stmodelGLM.
}
\author{
Wai-ki Yip
}
\seealso{
\code{\linkS4class{stwin}}, \code{\linkS4class{stsubpop}}, \code{\linkS4class{stmodelKM}},
\code{\linkS4class{stmodelCI}}, \code{\linkS4class{stmodelGLM}},
\code{\linkS4class{steppes}}, \code{\linkS4class{stmodel}},
\code{\link{stepp.win}}, \code{\link{stepp.subpop}}, \code{\link{stepp.KM}},
\code{\link{stepp.CI}}, \code{\link{stepp.GLM}},
\code{\link{generate}}, \code{\link{estimate}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/test.Rd
|
no_license
|
bonettim/stepp-3.2.0
|
R
| false | false | 1,032 |
rd
|
\name{test}
\alias{test}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
the standard generic function for all test methods
}
\description{
The default generic function for test methods for all stepp models classes and the steppes class.
For detail, please refer to the documentation in the test method in the S4 class: stmodelCI, stmodelKM and stmodelGLM.
}
\author{
Wai-ki Yip
}
\seealso{
\code{\linkS4class{stwin}}, \code{\linkS4class{stsubpop}}, \code{\linkS4class{stmodelKM}},
\code{\linkS4class{stmodelCI}}, \code{\linkS4class{stmodelGLM}},
\code{\linkS4class{steppes}}, \code{\linkS4class{stmodel}},
\code{\link{stepp.win}}, \code{\link{stepp.subpop}}, \code{\link{stepp.KM}},
\code{\link{stepp.CI}}, \code{\link{stepp.GLM}},
\code{\link{generate}}, \code{\link{estimate}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
\name{nMm2par}
\alias{nMm2par}
\title{Multivariate Normal Mixture Model to parameter for MLE}
\description{
From a \code{"\link{norMmix}"}(-like) object, return the numeric
parameter vector in our MLE parametrization.
}
\usage{
nMm2par(obj,
model = c("EII", "VII", "EEI", "VEI", "EVI",
"VVI", "EEE", "VEE", "EVV", "VVV"),
meanFUN = mean)
}
\arguments{
\item{obj}{a \code{\link{list}} containing \describe{
\item{\code{sig}:}{covariance matrix array,}
\item{\code{mu}:}{mean vector matrix,}
\item{\code{w}:}{= weights,}
\item{\code{k}:}{= number of components,}
\item{\code{p}:}{= dimension}
}
}
\item{model}{a \code{\link{character}} string specifying the (Sigma)
model, one of those listed above.}
\item{meanFUN}{a \code{\link{function}} to compute a mean (of variances typically).}
}
\details{
This transformation forms a vector from the parameters of a normal
mixture. These consist of weights, means and covariance matrices.
Covariance matrices are given as D and L from the LDLt decomposition
}
\seealso{
the \emph{inverse} function of \code{nMm2par()} is \code{\link{par2nMm}()}.
}
\examples{
A <- MW24
nMm2par(A, model = A$model)
# [1] -0.3465736 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
# [7] -2.3025851
}
|
/man/nMm2par.Rd
|
no_license
|
TrN000/norMmix
|
R
| false | false | 1,330 |
rd
|
\name{nMm2par}
\alias{nMm2par}
\title{Multivariate Normal Mixture Model to parameter for MLE}
\description{
From a \code{"\link{norMmix}"}(-like) object, return the numeric
parameter vector in our MLE parametrization.
}
\usage{
nMm2par(obj,
model = c("EII", "VII", "EEI", "VEI", "EVI",
"VVI", "EEE", "VEE", "EVV", "VVV"),
meanFUN = mean)
}
\arguments{
\item{obj}{a \code{\link{list}} containing \describe{
\item{\code{sig}:}{covariance matrix array,}
\item{\code{mu}:}{mean vector matrix,}
\item{\code{w}:}{= weights,}
\item{\code{k}:}{= number of components,}
\item{\code{p}:}{= dimension}
}
}
\item{model}{a \code{\link{character}} string specifying the (Sigma)
model, one of those listed above.}
\item{meanFUN}{a \code{\link{function}} to compute a mean (of variances typically).}
}
\details{
This transformation forms a vector from the parameters of a normal
mixture. These consist of weights, means and covariance matrices.
Covariance matrices are given as D and L from the LDLt decomposition
}
\seealso{
the \emph{inverse} function of \code{nMm2par()} is \code{\link{par2nMm}()}.
}
\examples{
A <- MW24
nMm2par(A, model = A$model)
# [1] -0.3465736 0.0000000 0.0000000 0.0000000 0.0000000 0.0000000
# [7] -2.3025851
}
|
#remove NAs and merge all coloc_all results
#morris eBMD GWAS data
#coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_results_morris/", pattern = "coloc_results*")
coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_morris_results_REV2/", pattern = "coloc_results*")
coloc_7_all_results = data.frame()
for (i in coloc_results_files){
#result = read.delim(paste("~/Documents/projects//DO_project/results/flat/coloc/coloc_results_morris/",i,sep = ""),as.is = TRUE,stringsAsFactors = FALSE,header = FALSE,sep=" ")
result = read.delim(paste("~/Documents/projects//DO_project/results/flat/coloc/coloc_morris_results_REV2/",i,sep = ""),as.is = TRUE,stringsAsFactors = FALSE,header = FALSE,sep=" ")
result = result[-1,]
result = result[which(is.na(result$V2)==FALSE),]
coloc_7_all_results=rbind(coloc_7_all_results,result)
}
coloc_7_all_results = coloc_7_all_results[,-c(1)]
#gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/morris_lead_BAN_overlaps.txt")
gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/morris_lead_BAN_overlaps_REV2.txt")
gene_list = gene_list[,c(6,7)]
gene_list = unique(gene_list)
coloc_7_all_results$gene = apply(coloc_7_all_results,1,function(x) gene_list[which(gene_list[,1] == x[3]),2])
colnames(coloc_7_all_results) = c("pheno","tissue","ensembl","nSNPs","H0","H1","H2","H3","H4","gene")
coloc_7_all_results$nSNPs = as.numeric(coloc_7_all_results$nSNPs)
coloc_7_all_results$gene = as.character(coloc_7_all_results$gene)
write.table(coloc_7_all_results,"~/Documents/projects/DO_project/results/flat/coloc/coloc_morris_v7_all_results_REV2.txt",sep = "\t",quote = FALSE)
coloc_7_all_results_over75 = coloc_7_all_results[which(coloc_7_all_results$H4 >=0.75),]
morris_ovr75 = coloc_7_all_results_over75
write.table(coloc_7_all_results_over75,"~/Documents/projects/DO_project/results/flat/coloc/coloc_morris_v7_all_results_over75_REV2.txt",sep = "\t",quote = FALSE)
####repeat analysis for estrada GWAS
#coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_results_estrada/", pattern = "coloc_results*")
coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_estrada_results_REV2/", pattern = "coloc_results*")
coloc_7_all_results = data.frame()
for (i in coloc_results_files){
result = read.delim(paste("~/Documents/projects//DO_project/results/flat/coloc/coloc_estrada_results_REV2/",i,sep = ""),as.is = TRUE,stringsAsFactors = FALSE,header = FALSE,sep=" ")
result = result[-1,]
result = result[which(is.na(result$V2)==FALSE),]
coloc_7_all_results=rbind(coloc_7_all_results,result)
}
coloc_7_all_results = coloc_7_all_results[,-c(1)]
#gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/estrada_lead_BAN_overlaps.txt")
gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/estrada_lead_BAN_overlaps_REV2.txt")
#gene_list = gene_list[,c(6,7)]
gene_list = gene_list[,c(5,6)]
gene_list = unique(gene_list)
coloc_7_all_results$gene = apply(coloc_7_all_results,1,function(x) gene_list[which(gene_list[,1] == x[3]),2])
colnames(coloc_7_all_results) = c("pheno","tissue","ensembl","nSNPs","H0","H1","H2","H3","H4","gene")
coloc_7_all_results$nSNPs = as.numeric(coloc_7_all_results$nSNPs)
coloc_7_all_results$gene = as.character(coloc_7_all_results$gene)
write.table(coloc_7_all_results,"~/Documents/projects/DO_project/results/flat/coloc/coloc_estrada_v7_all_results_REV2.txt",sep = "\t",quote = FALSE)
coloc_7_all_results_over75 = coloc_7_all_results[which(coloc_7_all_results$H4 >=0.75),]
estrada_ovr75 = coloc_7_all_results_over75
fn = coloc_7_all_results_over75[which(coloc_7_all_results_over75$pheno=="FNBMD"),]
ls = coloc_7_all_results_over75[which(coloc_7_all_results_over75$pheno=="LSBMD"),]
write.table(fn,"~/Documents/projects/DO_project/results/flat/coloc/coloc_v7_FNBMD_over75_REV2.txt",sep = "\t",quote = FALSE)
write.table(ls,"~/Documents/projects/DO_project/results/flat/coloc/coloc_v7_LSBMD_over75_REV2.txt",sep = "\t",quote = FALSE)
coloc_ovr_75 = rbind(estrada_ovr75, morris_ovr75)
unique(coloc_ovr_75$gene)
|
/src/get_coloc_results.R
|
no_license
|
basel-maher/DO_project
|
R
| false | false | 4,373 |
r
|
#remove NAs and merge all coloc_all results
#morris eBMD GWAS data
#coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_results_morris/", pattern = "coloc_results*")
coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_morris_results_REV2/", pattern = "coloc_results*")
coloc_7_all_results = data.frame()
for (i in coloc_results_files){
#result = read.delim(paste("~/Documents/projects//DO_project/results/flat/coloc/coloc_results_morris/",i,sep = ""),as.is = TRUE,stringsAsFactors = FALSE,header = FALSE,sep=" ")
result = read.delim(paste("~/Documents/projects//DO_project/results/flat/coloc/coloc_morris_results_REV2/",i,sep = ""),as.is = TRUE,stringsAsFactors = FALSE,header = FALSE,sep=" ")
result = result[-1,]
result = result[which(is.na(result$V2)==FALSE),]
coloc_7_all_results=rbind(coloc_7_all_results,result)
}
coloc_7_all_results = coloc_7_all_results[,-c(1)]
#gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/morris_lead_BAN_overlaps.txt")
gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/morris_lead_BAN_overlaps_REV2.txt")
gene_list = gene_list[,c(6,7)]
gene_list = unique(gene_list)
coloc_7_all_results$gene = apply(coloc_7_all_results,1,function(x) gene_list[which(gene_list[,1] == x[3]),2])
colnames(coloc_7_all_results) = c("pheno","tissue","ensembl","nSNPs","H0","H1","H2","H3","H4","gene")
coloc_7_all_results$nSNPs = as.numeric(coloc_7_all_results$nSNPs)
coloc_7_all_results$gene = as.character(coloc_7_all_results$gene)
write.table(coloc_7_all_results,"~/Documents/projects/DO_project/results/flat/coloc/coloc_morris_v7_all_results_REV2.txt",sep = "\t",quote = FALSE)
coloc_7_all_results_over75 = coloc_7_all_results[which(coloc_7_all_results$H4 >=0.75),]
morris_ovr75 = coloc_7_all_results_over75
write.table(coloc_7_all_results_over75,"~/Documents/projects/DO_project/results/flat/coloc/coloc_morris_v7_all_results_over75_REV2.txt",sep = "\t",quote = FALSE)
####repeat analysis for estrada GWAS
#coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_results_estrada/", pattern = "coloc_results*")
coloc_results_files =list.files(path= "~/Documents/projects//DO_project/results/flat/coloc/coloc_estrada_results_REV2/", pattern = "coloc_results*")
coloc_7_all_results = data.frame()
for (i in coloc_results_files){
result = read.delim(paste("~/Documents/projects//DO_project/results/flat/coloc/coloc_estrada_results_REV2/",i,sep = ""),as.is = TRUE,stringsAsFactors = FALSE,header = FALSE,sep=" ")
result = result[-1,]
result = result[which(is.na(result$V2)==FALSE),]
coloc_7_all_results=rbind(coloc_7_all_results,result)
}
coloc_7_all_results = coloc_7_all_results[,-c(1)]
#gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/estrada_lead_BAN_overlaps.txt")
gene_list = read.table("~/Documents/projects//DO_project/results/flat/coloc/estrada_lead_BAN_overlaps_REV2.txt")
#gene_list = gene_list[,c(6,7)]
gene_list = gene_list[,c(5,6)]
gene_list = unique(gene_list)
coloc_7_all_results$gene = apply(coloc_7_all_results,1,function(x) gene_list[which(gene_list[,1] == x[3]),2])
colnames(coloc_7_all_results) = c("pheno","tissue","ensembl","nSNPs","H0","H1","H2","H3","H4","gene")
coloc_7_all_results$nSNPs = as.numeric(coloc_7_all_results$nSNPs)
coloc_7_all_results$gene = as.character(coloc_7_all_results$gene)
write.table(coloc_7_all_results,"~/Documents/projects/DO_project/results/flat/coloc/coloc_estrada_v7_all_results_REV2.txt",sep = "\t",quote = FALSE)
coloc_7_all_results_over75 = coloc_7_all_results[which(coloc_7_all_results$H4 >=0.75),]
estrada_ovr75 = coloc_7_all_results_over75
fn = coloc_7_all_results_over75[which(coloc_7_all_results_over75$pheno=="FNBMD"),]
ls = coloc_7_all_results_over75[which(coloc_7_all_results_over75$pheno=="LSBMD"),]
write.table(fn,"~/Documents/projects/DO_project/results/flat/coloc/coloc_v7_FNBMD_over75_REV2.txt",sep = "\t",quote = FALSE)
write.table(ls,"~/Documents/projects/DO_project/results/flat/coloc/coloc_v7_LSBMD_over75_REV2.txt",sep = "\t",quote = FALSE)
coloc_ovr_75 = rbind(estrada_ovr75, morris_ovr75)
unique(coloc_ovr_75$gene)
|
######## input check functions ########
# function to print interval structure as hint for the user if the
# 'interval_probabilities' input variable is not valid
interval_structure_error <- function(number_of_states) {
message("'interval_probabilities' must have the following structure:")
interval_structure <- matrix(c(
paste("lower_boundary", 1:number_of_states, sep = ""),
paste("upper_boundary", 1:number_of_states, sep = "")
), number_of_states, 2)
row.names(interval_structure) <- 1:number_of_states
print(head(interval_structure, max(5, min(number_of_states, 6 - (number_of_states - 6)))))
if (number_of_states > 6) {
message("...")
rest <- interval_structure[-(1:(nrow(interval_structure) - 2)), ]
row.names(rest) <- (number_of_states - 1):number_of_states
print(rest)
}
}
# check of probabilistic information
check_probability_consistency <- function(number_of_states, a1, a2, b1, b2,
interval_probabilities, ordinal_relationships) {
# check requirements for 'a1', 'a2', 'b1', 'b2'
# for further information see rcdd documentation (function 'makeH')
if (!is.null(a1)) {
checkmate::assert_matrix(a1, ncols = number_of_states, any.missing = FALSE)
checkmate::assert_numeric(a1)
if (is.null(b1)) {
# a1 only works together with b1 (see rcdd documentation)
stop("please specify 'b1'")
}
}
if (!is.null(a2)) {
checkmate::assert_matrix(a2, ncols = number_of_states, any.missing = FALSE)
checkmate::assert_numeric(a2)
if (is.null(b2)) {
# a2 only works together with b2 (see rcdd documentation)
stop("please specify 'b2'")
}
}
if (!is.null(b1)) {
if (is.null(a1)) {
stop("please specify 'a1'")
}
checkmate::assert(checkmate::check_numeric(b1, any.missing = FALSE),
checkmate::check_atomic_vector(b1, len = nrow(a1)),
combine = "and"
)
}
if (!is.null(b2)) {
if (is.null(a2)) {
stop("please specify 'a2'")
}
checkmate::assert(checkmate::check_numeric(b2, any.missing = FALSE),
      checkmate::check_atomic_vector(b2, len = nrow(a2)),
combine = "and"
)
}
# check requirements for 'ordinal_relationships' if it is defined
if (!is.null(ordinal_relationships)) {
checkmate::assert_list(ordinal_relationships, any.missing = FALSE)
for (i in seq_len(length(ordinal_relationships))) {
checkmate::assert_atomic_vector(ordinal_relationships[[i]],
any.missing = FALSE, min.len = 2
)
checkmate::assert_integerish(ordinal_relationships[[i]],
lower = 1, upper = number_of_states
)
}
}
# check requirements on interval_probabilities
if (!(is.matrix(interval_probabilities) && is.numeric(interval_probabilities))) {
message("Error: 'interval_probabilities' is not a numeric matrix")
interval_structure_error(number_of_states)
stop("please redefine 'interval_probabilities'")
}
else if (any(dim(interval_probabilities) != c(number_of_states, 2))) {
message(paste(
"Error: 'interval_probabilities' has the wrong dimensions - dim(interval_probabilities) must equal",
number_of_states, "2"
))
interval_structure_error(number_of_states)
stop("please redefine 'interval_probabilities'")
}
else if (sum(interval_probabilities[, 1]) > 1) {
message("Error: sum of lower boundaries exceeds 1")
interval_structure_error(number_of_states)
message("it must hold that sum of lower boundaries is smaller than or equal to 1")
stop("please redefine 'interval_probabilities'")
}
else if (sum(interval_probabilities[, 2]) < 1) {
    message("Error: upper boundaries don't cover a probability space")
interval_structure_error(number_of_states)
message("it must hold that sum of upper boundaries is greater than or equal to 1")
stop("please redefine 'interval_probabilities'")
}
warning_reminder <- FALSE
for (i in seq_len(nrow(interval_probabilities))) {
if (interval_probabilities[i, 1] > interval_probabilities[i, 2]) {
message(paste("Error: boundaries of state", i, "have been specified incorrectly"))
interval_structure_error(number_of_states)
message(paste(
"State: ", i,
": it must hold that the lower boundary is smaller than or equal to the upper boundary"
))
stop("please redefine 'interval_probabilities'")
}
else if (interval_probabilities[i, 1] < 0 || interval_probabilities[i, 1] > 1 ||
interval_probabilities[i, 2] < 0 || interval_probabilities[i, 2] > 1) {
message(paste("Error: boundaries of state", i, "have been specified incorrectly"))
interval_structure_error(number_of_states)
message(paste("it must hold that all values are element of the interval [0, 1]"))
stop("please redefine 'interval_probabilities'")
}
if (interval_probabilities[i, 2] == 0) {
warning_reminder <- TRUE
}
}
if (warning_reminder) {
    warning("There is at least one state with upper probability of zero - prior might be degenerate")
}
ordinal_matrix <- diag(1, nrow = number_of_states, ncol = number_of_states)
# transform 'ordinal_relationships' to a matrix so that [x, y] = 1 if
# '...,x,y,...' is contained in at least one vector of 'ordinal_relationships'
if (!is.null(ordinal_relationships)) {
for (a in seq_len(length(ordinal_relationships))) {
ordinal_vector <- ordinal_relationships[[a]]
for (k in seq_len(length(ordinal_vector) - 1)) {
ordinal_matrix[ordinal_vector[k], ordinal_vector[(k + 1):length(ordinal_vector)]] <- 1
}
}
}
# apply transitivity of state dominance, so that [x,y] = 1 if and only if p(x) >= p(y)
# this may seem inefficient but it's necessary so that transitivity will be applied completely
for (i in seq_len(number_of_states)) {
for (j in seq_len(number_of_states)) {
if (ordinal_matrix[i, j] > 0) {
ordinal_matrix[i, ] <- ordinal_matrix[i, ] + ordinal_matrix[j, ]
}
else if (ordinal_matrix[j, i] > 0) {
ordinal_matrix[j, ] <- ordinal_matrix[j, ] + ordinal_matrix[i, ]
}
}
}
# transform it to a binary relation again
ordinal_matrix <- sign(ordinal_matrix)
# define lower boundaries by applying transitivity for each dominated state
for (i in seq_len(number_of_states)) {
    # for computational reasons let's say that a state will always dominate itself
ordinal_matrix[i, i] <- 1
if (sum(ordinal_matrix[i, ]) == number_of_states) {
# if the state dominates all of the other states, the lower boundary is either given by:
# 1 divided through the number of all states ...
tmp <- 1 / number_of_states
# ... or by the maximal lower boundary of all states ...
max <- max(interval_probabilities[, 1])
# ... depending on what is larger
if (tmp > max) {
interval_probabilities[i, 1] <- tmp
}
else {
interval_probabilities[i, 1] <- max
}
}
else {
# otherwise the lower boundary will be changed to the highest lower boundary of any state dominated by the corresponding state
tmp <- max(interval_probabilities[as.logical(ordinal_matrix[i, ]), 1])
if (tmp > interval_probabilities[i, 1]) {
interval_probabilities[i, 1] <- tmp
}
}
}
states_order <- (1:number_of_states)[order(rowSums(ordinal_matrix), decreasing = TRUE)]
# define upper boundaries (pre-sorting is necessary because dominators' boundaries will have an impact)
# note that the upper boundaries will be too wide, since the computation of the actual boundaries is quite difficult
for (i in states_order) {
# compute all of the states which dominate the corresponding state
dominators <- as.logical(ordinal_matrix[, i])
if (length(dominators) != 0) {
# the upper boundary is defined by the minimal upper boundary of the dominating states
tmp <- min(interval_probabilities[dominators, 2])
}
else {
tmp <- 1
}
# OR 1 minus the sum of the lower boundaries of all other states
tmp2 <- 1 - sum(interval_probabilities[-i, 1])
# the minimum will define the upper boundary
tmp <- min(tmp, tmp2)
if (tmp < interval_probabilities[i, 2]) {
interval_probabilities[i, 2] <- tmp
}
}
# if the lower boundaries sum up to 1 there is only one solution
if (sum(interval_probabilities[, 1]) == 1) {
return(rbind(interval_probabilities[, 1]))
}
# find logically induced lower boundaries
for (i in seq_len(number_of_states)) {
max_without_i <- sum(interval_probabilities[-i, 2])
if (max_without_i < 1) {
if (interval_probabilities[i, 1] < 1 - max_without_i) {
interval_probabilities[i, 1] <- 1 - max_without_i
}
}
}
  # check further requirements on 'interval_probabilities':
  # the lower boundaries must sum to at most 1 and the upper boundaries must sum to at least 1
if ((round(sum(interval_probabilities[, 1]), 10) > 1) ||
(round(sum(interval_probabilities[, 2]), 10) < 1)) {
stop("'ordinal_relationships' and 'interval_probabilities' contain contradictory requirements")
}
for (i in seq_len(number_of_states)) {
    # no state may have a lower boundary higher than its upper boundary
if (round(interval_probabilities[i, 1], 10) >
round(interval_probabilities[i, 2], 10)) {
stop("'ordinal_relationships' and 'interval_probabilities' contain contradictory requirements")
}
dominated <- (1:number_of_states)[(ordinal_matrix - diag(number_of_states, number_of_states))[i, ] == 1]
# dominated states are not supposed to have a higher boundary value than their dominator
for (j in dominated) {
if (interval_probabilities[i, 1] < interval_probabilities[j, 1] ||
interval_probabilities[i, 2] < interval_probabilities[j, 2]) {
stop("'ordinal_relationships' and 'interval_probabilities' contain contradictory requirements")
}
}
}
if (warning_reminder == FALSE) {
# if there is a state with upper boundary of 0, give a warning to the user
for (i in seq_len(nrow(interval_probabilities))) {
if (interval_probabilities[i, 2] == 0) {
        warning("There is at least one state with upper probability of zero - prior might be degenerate")
break
}
}
}
return(interval_probabilities)
}
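# Illustrative example (added for clarity; the numbers are made up and not
# taken from the package): with three states, the constraint p(1) >= p(2) and
# p(2) known to lie in [0.4, 0.6], the consistency check tightens the
# remaining bounds:
#   check_probability_consistency(3, NULL, NULL, NULL, NULL,
#                                 matrix(c(0, 1, 0.4, 0.6, 0, 1),
#                                        nrow = 3, ncol = 2, byrow = TRUE),
#                                 list(c(1, 2)))
# returns [0.4, 0.6] for state 1, [0.4, 0.6] for state 2 and [0, 0.2] for
# state 3.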
# R6-class for Decision Systems
DecisionSystem <- R6::R6Class("DecisionSystem",
private = list(
.table = NA,
.mode = NA,
.interval_probabilities = NA,
.ordinal_relationships = NA,
.a1 = NA,
.b1 = NA,
.a2 = NA,
.b2 = NA,
.only_admissible = NA
),
public = list(
initialize = function(table, mode = c("utility", "loss"),
interval_probabilities = matrix(c(0, 1), nrow = ncol(table), ncol = 2, byrow = TRUE),
ordinal_relationships = NULL,
a1 = NULL, b1 = NULL, a2 = NULL, b2 = NULL,
check_admissibility = TRUE) {
# check 'table' to be a non-trivial utility/loss
# table with no missing values
checkmate::assert_matrix(table, min.rows = 2, min.cols = 2)
checkmate::assert_numeric(table, any.missing = FALSE)
# check 'mode' to be one of the valid options
mode <- match.arg(mode)
# check if 'check_admissibility' is a logical value
checkmate::assert_flag(check_admissibility)
# check consistency of information on probabilities
interval_probabilities <- check_probability_consistency(
ncol(table),
        a1, a2, b1, b2,
interval_probabilities,
ordinal_relationships
)
      # save values in the object
private$.table <- table
private$.mode <- mode
private$.interval_probabilities <- interval_probabilities
private$.ordinal_relationships <- ordinal_relationships
private$.a1 <- a1
private$.b1 <- b1
private$.a2 <- a2
private$.b2 <- b2
private$.only_admissible <- check_admissibility
# if 'check_admissibility' is set to TRUE, apply algorithm to exclude
# dominated acts
if (check_admissibility) {
self$exclude_dominated()
}
},
# method to show/print the class
show = function() {
list(table = private$.table,
mode = private$.mode,
interval_probabilites = private$.interval_probabilities,
ordinal_relationships = private$.ordinal_relationships,
a1 = private$.a1,
b1 = private$.b1,
a2 = private$.a2,
b2 = private$.b2,
           only_admissible = private$.only_admissible)
},
# method to exclude strictly dominated acts
exclude_dominated = function(strong_domination = FALSE,
exclude_duplicates = FALSE,
column_presort = FALSE) {
      # 'strong_domination', 'column_presort' and 'exclude_duplicates'
      # must be single logical values
checkmate::assert_flag(strong_domination)
if (strong_domination == TRUE) {
checkmate::assert_flag(exclude_duplicates, null.ok = TRUE)
}
else {
checkmate::assert_flag(exclude_duplicates)
}
checkmate::assert_flag(column_presort)
# if there is only one possible act, that act is of course admissible
if (nrow(private$.table) == 1) {
return(invisible(NULL))
}
# copy 'table' attribute into local storage
table <- private$.table
number_of_states <- ncol(table)
number_of_acts <- nrow(table)
      # Sort acts so that acts more likely to be dominated end up in the lower rows
      # Acts cannot be dominated by acts with lower row sums
row_sums <- data.frame(
indx = 1:number_of_acts,
rowSum = rowSums(table)
)
row_sums <- row_sums[order(row_sums$rowSum,
decreasing = switch(private$.mode,
"utility" = TRUE,
"loss" = FALSE
)
), ]
row_sums <- row_sums[["indx"]]
# Sort states to be selected earlier when higher rows have low values
# (optional; does not always lead to faster processing)
if (column_presort) {
weighted_table <- table
# normalize the utility for each state (column)
for (j in seq_len(number_of_states)) {
weighted_table[, j] <- (weighted_table[, j] - min(weighted_table[, j]))
weighted_table[, j] <- weighted_table[, j] / (max(weighted_table[, j]))
}
        # calculate score values used for ordering the columns in an advantageous way for the detection of admissibility
weights <- seq(number_of_acts - 1, 0)
weighted_table <- weights * weighted_table / sum(weights)
weighted_table_colSums <- colSums(weighted_table)
table <- table[, order(weighted_table_colSums,
decreasing = switch(private$.mode,
"utility" = FALSE,
"loss" = TRUE
)
)]
}
# algorithm for admissible acts according to strong dominance
if (strong_domination == TRUE) {
if (private$.mode == "utility") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
              # reminder records whether there is a state in which act j performs at least as well as act i
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] >= table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
if (reminder == FALSE) {
# if reminder is FALSE at the end of the loop, act j is strongly dominated by act i
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
else if (private$.mode == "loss") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] <= table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
if (reminder == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
}
else {
if (exclude_duplicates == FALSE) {
# algorithm for admissible acts according to strict dominance
if (private$.mode == "utility") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
# in addition to saving if act j has been better than act i in at least one state, one also has to save if the opposite happened
reminder_equal <- TRUE
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] > table[row_sums[i], k]) {
reminder <- TRUE
break
}
else if (table[row_sums[j], k] < table[row_sums[i], k]) {
reminder_equal <- FALSE
}
}
              # if act j has never been strictly better than act i and act i has been strictly better than act j in at least one state, act j is strictly dominated by act i
if (reminder == FALSE) {
if (reminder_equal == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
}
j <- j + 1
}
i <- i + 1
}
}
else if (private$.mode == "loss") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
reminder_equal <- TRUE
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] < table[row_sums[i], k]) {
reminder <- TRUE
break
}
else if (table[row_sums[j], k] > table[row_sums[i], k]) {
reminder_equal <- FALSE
}
}
if (reminder == FALSE) {
if (reminder_equal == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
}
j <- j + 1
}
i <- i + 1
}
}
}
else if (exclude_duplicates == TRUE) {
# algorithm for admissible acts according to weak dominance (only one duplicate will remain)
if (private$.mode == "utility") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
# remember if act j has been better than act i in at least one state
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] > table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
# if act j has not been better than act i in any state, act j will be excluded (could be equivalent to act i)
if (reminder == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
else if (private$.mode == "loss") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] < table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
if (reminder == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
}
}
private$.table <- table[row_sums, ]
return(invisible(NULL))
}
),
active = list(
table = function() {
private$.table
},
mode = function() {
private$.mode
},
a1 = function() {
private$.a1
},
b1 = function() {
private$.b1
},
a2 = function() {
private$.a2
},
b2 = function() {
private$.b2
},
interval_probabilities = function() {
private$.interval_probabilities
},
ordinal_relationships = function() {
private$.ordinal_relationships
},
only_admissible = function() {
private$.only_admissible
}
)
)
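# Minimal usage sketch (illustrative only, appended for clarity; the utility
# table and probability bounds are made-up values, not part of the package):
#   utilities <- matrix(c(10, 0,
#                          6, 4,
#                          5, 5), nrow = 3, byrow = TRUE)
#   bounds <- matrix(c(0.2, 0.7,
#                      0.3, 0.8), nrow = 2, byrow = TRUE)
#   ds <- DecisionSystem$new(utilities, mode = "utility",
#                            interval_probabilities = bounds)
#   ds$table   # only admissible acts remain (check_admissibility = TRUE)
#   ds$show()  # list of all stored components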
|
/class_defintions.R
|
no_license
|
MarcJohler/decisionmakeR
|
R
| false | false | 21,221 |
r
|
######## input check functions ########
# function to print interval structure as hint for the user if the
# 'interval_probabilities' input variable is not valid
interval_structure_error <- function(number_of_states) {
message("'interval_probabilities' must have the following structure:")
interval_structure <- matrix(c(
paste("lower_boundary", 1:number_of_states, sep = ""),
paste("upper_boundary", 1:number_of_states, sep = "")
), number_of_states, 2)
row.names(interval_structure) <- 1:number_of_states
print(head(interval_structure, max(5, min(number_of_states, 6 - (number_of_states - 6)))))
if (number_of_states > 6) {
message("...")
rest <- interval_structure[-(1:(nrow(interval_structure) - 2)), ]
row.names(rest) <- (number_of_states - 1):number_of_states
print(rest)
}
}
# check of probabilistic information
check_probability_consistency <- function(number_of_states, a1, a2, b1, b2,
interval_probabilities, ordinal_relationships) {
# check requirements for 'a1', 'a2', 'b1', 'b2'
# for further information see rcdd documentation (function 'makeH')
if (!is.null(a1)) {
checkmate::assert_matrix(a1, ncols = number_of_states, any.missing = FALSE)
checkmate::assert_numeric(a1)
if (is.null(b1)) {
# a1 only works together with b1 (see rcdd documentation)
stop("please specify 'b1'")
}
}
if (!is.null(a2)) {
checkmate::assert_matrix(a2, ncols = number_of_states, any.missing = FALSE)
checkmate::assert_numeric(a2)
if (is.null(b2)) {
# a2 only works together with b2 (see rcdd documentation)
stop("please specify 'b2'")
}
}
if (!is.null(b1)) {
if (is.null(a1)) {
stop("please specify 'a1'")
}
checkmate::assert(checkmate::check_numeric(b1, any.missing = FALSE),
checkmate::check_atomic_vector(b1, len = nrow(a1)),
combine = "and"
)
}
if (!is.null(b2)) {
if (is.null(a2)) {
stop("please specify 'a2'")
}
checkmate::assert(checkmate::check_numeric(b2, any.missing = FALSE),
      checkmate::check_atomic_vector(b2, len = nrow(a2)),
combine = "and"
)
}
# check requirements for 'ordinal_relationships' if it is defined
if (!is.null(ordinal_relationships)) {
checkmate::assert_list(ordinal_relationships, any.missing = FALSE)
for (i in seq_len(length(ordinal_relationships))) {
checkmate::assert_atomic_vector(ordinal_relationships[[i]],
any.missing = FALSE, min.len = 2
)
checkmate::assert_integerish(ordinal_relationships[[i]],
lower = 1, upper = number_of_states
)
}
}
# check requirements on interval_probabilities
if (!(is.matrix(interval_probabilities) && is.numeric(interval_probabilities))) {
message("Error: 'interval_probabilities' is not a numeric matrix")
interval_structure_error(number_of_states)
stop("please redefine 'interval_probabilities'")
}
else if (any(dim(interval_probabilities) != c(number_of_states, 2))) {
message(paste(
"Error: 'interval_probabilities' has the wrong dimensions - dim(interval_probabilities) must equal",
number_of_states, "2"
))
interval_structure_error(number_of_states)
stop("please redefine 'interval_probabilities'")
}
else if (sum(interval_probabilities[, 1]) > 1) {
message("Error: sum of lower boundaries exceeds 1")
interval_structure_error(number_of_states)
message("it must hold that sum of lower boundaries is smaller than or equal to 1")
stop("please redefine 'interval_probabilities'")
}
else if (sum(interval_probabilities[, 2]) < 1) {
    message("Error: upper boundaries don't cover a probability space")
interval_structure_error(number_of_states)
message("it must hold that sum of upper boundaries is greater than or equal to 1")
stop("please redefine 'interval_probabilities'")
}
warning_reminder <- FALSE
for (i in seq_len(nrow(interval_probabilities))) {
if (interval_probabilities[i, 1] > interval_probabilities[i, 2]) {
message(paste("Error: boundaries of state", i, "have been specified incorrectly"))
interval_structure_error(number_of_states)
message(paste(
"State: ", i,
": it must hold that the lower boundary is smaller than or equal to the upper boundary"
))
stop("please redefine 'interval_probabilities'")
}
else if (interval_probabilities[i, 1] < 0 || interval_probabilities[i, 1] > 1 ||
interval_probabilities[i, 2] < 0 || interval_probabilities[i, 2] > 1) {
message(paste("Error: boundaries of state", i, "have been specified incorrectly"))
interval_structure_error(number_of_states)
message(paste("it must hold that all values are element of the interval [0, 1]"))
stop("please redefine 'interval_probabilities'")
}
if (interval_probabilities[i, 2] == 0) {
warning_reminder <- TRUE
}
}
if (warning_reminder) {
    warning("There is at least one state with upper probability of zero - prior might be degenerate")
}
ordinal_matrix <- diag(1, nrow = number_of_states, ncol = number_of_states)
# transform 'ordinal_relationships' to a matrix so that [x, y] = 1 if
# '...,x,y,...' is contained in at least one vector of 'ordinal_relationships'
if (!is.null(ordinal_relationships)) {
for (a in seq_len(length(ordinal_relationships))) {
ordinal_vector <- ordinal_relationships[[a]]
for (k in seq_len(length(ordinal_vector) - 1)) {
ordinal_matrix[ordinal_vector[k], ordinal_vector[(k + 1):length(ordinal_vector)]] <- 1
}
}
}
# apply transitivity of state dominance, so that [x,y] = 1 if and only if p(x) >= p(y)
# this may seem inefficient but it's necessary so that transitivity will be applied completely
for (i in seq_len(number_of_states)) {
for (j in seq_len(number_of_states)) {
if (ordinal_matrix[i, j] > 0) {
ordinal_matrix[i, ] <- ordinal_matrix[i, ] + ordinal_matrix[j, ]
}
else if (ordinal_matrix[j, i] > 0) {
ordinal_matrix[j, ] <- ordinal_matrix[j, ] + ordinal_matrix[i, ]
}
}
}
# transform it to a binary relation again
ordinal_matrix <- sign(ordinal_matrix)
# define lower boundaries by applying transitivity for each dominated state
for (i in seq_len(number_of_states)) {
    # for computational reasons let's say that a state will always dominate itself
ordinal_matrix[i, i] <- 1
if (sum(ordinal_matrix[i, ]) == number_of_states) {
# if the state dominates all of the other states, the lower boundary is either given by:
# 1 divided through the number of all states ...
tmp <- 1 / number_of_states
# ... or by the maximal lower boundary of all states ...
max <- max(interval_probabilities[, 1])
# ... depending on what is larger
if (tmp > max) {
interval_probabilities[i, 1] <- tmp
}
else {
interval_probabilities[i, 1] <- max
}
}
else {
# otherwise the lower boundary will be changed to the highest lower boundary of any state dominated by the corresponding state
tmp <- max(interval_probabilities[as.logical(ordinal_matrix[i, ]), 1])
if (tmp > interval_probabilities[i, 1]) {
interval_probabilities[i, 1] <- tmp
}
}
}
states_order <- (1:number_of_states)[order(rowSums(ordinal_matrix), decreasing = TRUE)]
# define upper boundaries (pre-sorting is necessary because dominators' boundaries will have an impact)
# note that the upper boundaries will be too wide, since the computation of the actual boundaries is quite difficult
for (i in states_order) {
# compute all of the states which dominate the corresponding state
dominators <- as.logical(ordinal_matrix[, i])
if (length(dominators) != 0) {
# the upper boundary is defined by the minimal upper boundary of the dominating states
tmp <- min(interval_probabilities[dominators, 2])
}
else {
tmp <- 1
}
# OR 1 minus the sum of the lower boundaries of all other states
tmp2 <- 1 - sum(interval_probabilities[-i, 1])
# the minimum will define the upper boundary
tmp <- min(tmp, tmp2)
if (tmp < interval_probabilities[i, 2]) {
interval_probabilities[i, 2] <- tmp
}
}
# if the lower boundaries sum up to 1 there is only one solution
if (sum(interval_probabilities[, 1]) == 1) {
return(rbind(interval_probabilities[, 1]))
}
# find logically induced lower boundaries
for (i in seq_len(number_of_states)) {
max_without_i <- sum(interval_probabilities[-i, 2])
if (max_without_i < 1) {
if (interval_probabilities[i, 1] < 1 - max_without_i) {
interval_probabilities[i, 1] <- 1 - max_without_i
}
}
}
  # check further requirements on 'interval_probabilities':
  # the lower boundaries must sum to at most 1 and the upper boundaries must sum to at least 1
if ((round(sum(interval_probabilities[, 1]), 10) > 1) ||
(round(sum(interval_probabilities[, 2]), 10) < 1)) {
stop("'ordinal_relationships' and 'interval_probabilities' contain contradictory requirements")
}
for (i in seq_len(number_of_states)) {
    # no state may have a lower boundary higher than its upper boundary
if (round(interval_probabilities[i, 1], 10) >
round(interval_probabilities[i, 2], 10)) {
stop("'ordinal_relationships' and 'interval_probabilities' contain contradictory requirements")
}
dominated <- (1:number_of_states)[(ordinal_matrix - diag(number_of_states, number_of_states))[i, ] == 1]
# dominated states are not supposed to have a higher boundary value than their dominator
for (j in dominated) {
if (interval_probabilities[i, 1] < interval_probabilities[j, 1] ||
interval_probabilities[i, 2] < interval_probabilities[j, 2]) {
stop("'ordinal_relationships' and 'interval_probabilities' contain contradictory requirements")
}
}
}
if (warning_reminder == FALSE) {
# if there is a state with upper boundary of 0, give a warning to the user
for (i in seq_len(nrow(interval_probabilities))) {
if (interval_probabilities[i, 2] == 0) {
        warning("There is at least one state with upper probability of zero - prior might be degenerate")
break
}
}
}
return(interval_probabilities)
}
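# Illustrative example (added for clarity; the numbers are made up and not
# taken from the package): with three states, the constraint p(1) >= p(2) and
# p(2) known to lie in [0.4, 0.6], the consistency check tightens the
# remaining bounds:
#   check_probability_consistency(3, NULL, NULL, NULL, NULL,
#                                 matrix(c(0, 1, 0.4, 0.6, 0, 1),
#                                        nrow = 3, ncol = 2, byrow = TRUE),
#                                 list(c(1, 2)))
# returns [0.4, 0.6] for state 1, [0.4, 0.6] for state 2 and [0, 0.2] for
# state 3.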
# R6-class for Decision Systems
DecisionSystem <- R6::R6Class("DecisionSystem",
private = list(
.table = NA,
.mode = NA,
.interval_probabilities = NA,
.ordinal_relationships = NA,
.a1 = NA,
.b1 = NA,
.a2 = NA,
.b2 = NA,
.only_admissible = NA
),
public = list(
initialize = function(table, mode = c("utility", "loss"),
interval_probabilities = matrix(c(0, 1), nrow = ncol(table), ncol = 2, byrow = TRUE),
ordinal_relationships = NULL,
a1 = NULL, b1 = NULL, a2 = NULL, b2 = NULL,
check_admissibility = TRUE) {
# check 'table' to be a non-trivial utility/loss
# table with no missing values
checkmate::assert_matrix(table, min.rows = 2, min.cols = 2)
checkmate::assert_numeric(table, any.missing = FALSE)
# check 'mode' to be one of the valid options
mode <- match.arg(mode)
# check if 'check_admissibility' is a logical value
checkmate::assert_flag(check_admissibility)
# check consistency of information on probabilities
interval_probabilities <- check_probability_consistency(
ncol(table),
        a1, a2, b1, b2,
interval_probabilities,
ordinal_relationships
)
      # save values in the object
private$.table <- table
private$.mode <- mode
private$.interval_probabilities <- interval_probabilities
private$.ordinal_relationships <- ordinal_relationships
private$.a1 <- a1
private$.b1 <- b1
private$.a2 <- a2
private$.b2 <- b2
private$.only_admissible <- check_admissibility
# if 'check_admissibility' is set to TRUE, apply algorithm to exclude
# dominated acts
if (check_admissibility) {
self$exclude_dominated()
}
},
# method to show/print the class
show = function() {
list(table = private$.table,
mode = private$.mode,
interval_probabilites = private$.interval_probabilities,
ordinal_relationships = private$.ordinal_relationships,
a1 = private$.a1,
b1 = private$.b1,
a2 = private$.a2,
b2 = private$.b2,
           only_admissible = private$.only_admissible)
},
# method to exclude strictly dominated acts
exclude_dominated = function(strong_domination = FALSE,
exclude_duplicates = FALSE,
column_presort = FALSE) {
      # 'strong_domination', 'column_presort' and 'exclude_duplicates'
      # must be single logical values
checkmate::assert_flag(strong_domination)
if (strong_domination == TRUE) {
checkmate::assert_flag(exclude_duplicates, null.ok = TRUE)
}
else {
checkmate::assert_flag(exclude_duplicates)
}
checkmate::assert_flag(column_presort)
# if there is only one possible act, that act is of course admissible
if (nrow(private$.table) == 1) {
return(invisible(NULL))
}
# copy 'table' attribute into local storage
table <- private$.table
number_of_states <- ncol(table)
number_of_acts <- nrow(table)
      # Sort acts so that acts more likely to be dominated end up in the lower rows
      # Acts cannot be dominated by acts with lower row sums
row_sums <- data.frame(
indx = 1:number_of_acts,
rowSum = rowSums(table)
)
row_sums <- row_sums[order(row_sums$rowSum,
decreasing = switch(private$.mode,
"utility" = TRUE,
"loss" = FALSE
)
), ]
row_sums <- row_sums[["indx"]]
# Sort states to be selected earlier when higher rows have low values
# (optional; does not always lead to faster processing)
if (column_presort) {
weighted_table <- table
# normalize the utility for each state (column)
for (j in seq_len(number_of_states)) {
weighted_table[, j] <- (weighted_table[, j] - min(weighted_table[, j]))
weighted_table[, j] <- weighted_table[, j] / (max(weighted_table[, j]))
}
        # calculate score values used for ordering the columns in an advantageous way for the detection of admissibility
weights <- seq(number_of_acts - 1, 0)
weighted_table <- weights * weighted_table / sum(weights)
weighted_table_colSums <- colSums(weighted_table)
table <- table[, order(weighted_table_colSums,
decreasing = switch(private$.mode,
"utility" = FALSE,
"loss" = TRUE
)
)]
}
# algorithm for admissible acts according to strong dominance
if (strong_domination == TRUE) {
if (private$.mode == "utility") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
              # reminder records whether there is a state in which act j performs at least as well as act i
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] >= table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
if (reminder == FALSE) {
# if reminder is FALSE at the end of the loop, act j is strongly dominated by act i
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
else if (private$.mode == "loss") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] <= table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
if (reminder == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
}
else {
if (exclude_duplicates == FALSE) {
# algorithm for admissible acts according to strict dominance
if (private$.mode == "utility") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
# in addition to saving if act j has been better than act i in at least one state, one also has to save if the opposite happened
reminder_equal <- TRUE
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] > table[row_sums[i], k]) {
reminder <- TRUE
break
}
else if (table[row_sums[j], k] < table[row_sums[i], k]) {
reminder_equal <- FALSE
}
}
              # if act j has never been strictly better than act i and act i has been strictly better than act j in at least one state, act j is strictly dominated by act i
if (reminder == FALSE) {
if (reminder_equal == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
}
j <- j + 1
}
i <- i + 1
}
}
else if (private$.mode == "loss") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
reminder_equal <- TRUE
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] < table[row_sums[i], k]) {
reminder <- TRUE
break
}
else if (table[row_sums[j], k] > table[row_sums[i], k]) {
reminder_equal <- FALSE
}
}
if (reminder == FALSE) {
if (reminder_equal == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
}
j <- j + 1
}
i <- i + 1
}
}
}
else if (exclude_duplicates == TRUE) {
# algorithm for admissible acts according to weak dominance (only one duplicate will remain)
if (private$.mode == "utility") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
# remember if act j has been better than act i in at least one state
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] > table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
# if act j has not been better than act i in any state, act j will be excluded (could be equivalent to act i)
if (reminder == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
else if (private$.mode == "loss") {
i <- 1
while (i < length(row_sums)) {
j <- i + 1
while (j <= length(row_sums)) {
reminder <- FALSE
for (k in seq_len(number_of_states)) {
if (table[row_sums[j], k] < table[row_sums[i], k]) {
reminder <- TRUE
break
}
}
if (reminder == FALSE) {
row_sums <- row_sums[-j]
j <- j - 1
}
j <- j + 1
}
i <- i + 1
}
}
}
}
private$.table <- table[row_sums, ]
return(invisible(NULL))
}
),
active = list(
table = function() {
private$.table
},
mode = function() {
private$.mode
},
a1 = function() {
private$.a1
},
b1 = function() {
private$.b1
},
a2 = function() {
private$.a2
},
b2 = function() {
private$.b2
},
interval_probabilities = function() {
private$.interval_probabilities
},
ordinal_relationships = function() {
private$.ordinal_relationships
},
only_admissible = function() {
private$.only_admissible
}
)
)
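# Minimal usage sketch (illustrative only, appended for clarity; the utility
# table and probability bounds are made-up values, not part of the package):
#   utilities <- matrix(c(10, 0,
#                          6, 4,
#                          5, 5), nrow = 3, byrow = TRUE)
#   bounds <- matrix(c(0.2, 0.7,
#                      0.3, 0.8), nrow = 2, byrow = TRUE)
#   ds <- DecisionSystem$new(utilities, mode = "utility",
#                            interval_probabilities = bounds)
#   ds$table   # only admissible acts remain (check_admissibility = TRUE)
#   ds$show()  # list of all stored components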
|
library(shiny)
shinyServer(function(input, output) {
output$TD_world_obnovljivi <- DT::renderDataTable({
TD_world_obnovljivi %>% mutate(drzave=slovar[drzave]) %>%
rename("Država"=drzave, "Delež energije iz hidroelektrarn (%)"= `%_obnovljive_energije_iz_hidroelektrarn`,
"Celotna obnovljiva energija (GWh)"=`obnovljiva_energija_(GWh)`, "Delež energije iz vetrnih elektrarn (%)" = `%_obnovljive_energije_iz_vetrnih elektrarn`,
"Delež energije iz biomase in odpadkov (%)"= `%_obnovljive_energije_iz_biomase_in_odpadkov`,
"Delež obnovljive energije iz sončne energije (%)"=`%_obnovljive_energije_iz_sončne energije`,
"Delež obnovljive energije iz geotermalne energije (%)"=`%_obnovljive_energije_iz_geotermalne_energije`,
"Prevladujoči vir" = `prevladujoči vir`)
})
})
|
/shiny/server.R
|
permissive
|
majbc1999/APPR-2019-20
|
R
| false | false | 859 |
r
|
library(shiny)
shinyServer(function(input, output) {
output$TD_world_obnovljivi <- DT::renderDataTable({
TD_world_obnovljivi %>% mutate(drzave=slovar[drzave]) %>%
rename("Država"=drzave, "Delež energije iz hidroelektrarn (%)"= `%_obnovljive_energije_iz_hidroelektrarn`,
"Celotna obnovljiva energija (GWh)"=`obnovljiva_energija_(GWh)`, "Delež energije iz vetrnih elektrarn (%)" = `%_obnovljive_energije_iz_vetrnih elektrarn`,
"Delež energije iz biomase in odpadkov (%)"= `%_obnovljive_energije_iz_biomase_in_odpadkov`,
"Delež obnovljive energije iz sončne energije (%)"=`%_obnovljive_energije_iz_sončne energije`,
"Delež obnovljive energije iz geotermalne energije (%)"=`%_obnovljive_energije_iz_geotermalne_energije`,
"Prevladujoči vir" = `prevladujoči vir`)
})
})
|
library(tidyverse)
library(magrittr)
library(janitor)
library(jsonlite)
# Accessing data through the Nobel Prize API. Last run on 29.07.2021.
url <- "http://api.nobelprize.org/v1/laureate.json"
x <- fromJSON(url)
# This data comes as a list, so we need to convert it to a usable structure
# first.
x <- as_tibble(data.frame(x$laureates)) %>%
clean_names() %>%
select(-c(id, born_country_code, died_country_code)) %>%
rename(first_name = firstname,
last_name = surname)
# Extract variables from "prizes" list. I first created some new variables with
# empty values which will then be overwritten by the values in the list using
# the loop below.
new_cols <- tibble("year" = rep(NA_character_, 955),
"field" = rep(NA_character_, 955),
"share" = rep(NA_character_, 955),
"motivation" = rep(NA_character_, 955),
"aff_inst" = rep(NA_character_, 955),
"aff_city" = rep(NA_character_, 955),
"aff_country" = rep(NA_character_, 955))
x <- cbind(x, new_cols)
y <- tibble("first_name" = rep(NA_character_, 300),
"last_name" = rep(NA_character_, 300),
"born" = rep(NA_character_, 300),
"died" = rep(NA_character_, 300),
"born_country" = rep(NA_character_, 300),
"born_city" = rep(NA_character_, 300),
"died_country" = rep(NA_character_, 300),
"died_city" = rep(NA_character_, 300),
"gender" = rep(NA_character_, 300),
"prizes" = rep(NA_character_, 300),
"year" = rep(NA_character_, 300),
"field" = rep(NA_character_, 300),
"share" = rep(NA_character_, 300),
"motivation" = rep(NA_character_, 300),
"aff_inst" = rep(NA_character_, 300),
"aff_city" = rep(NA_character_, 300),
"aff_country" = rep(NA_character_, 300))
x <- rbind(x,y)
# City was missing for University of Delaware, which messes up the loop.
x[[10]][[826]][[5]][[1]][["city"]] <- "Newark, DE"
v <- 0
u <- 0
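# The loop below flattens the nested "prizes" data: laureates with a single
# prize have the new columns filled in place (rows 1 to 955), while laureates
# with several prizes get one row per prize appended in the padding rows
# created above (from row 956 onwards), with their biographical fields copied
# over.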
for (i in 1:955) {
if (nrow(as_tibble(x[[10]][[i]])) == 1) {
x$year[i] <- pull(x[[10]][[i]]["year"])
x$field[i] <- pull(x[[10]][[i]]["category"])
x$share[i] <- pull(x[[10]][[i]]["share"])
x$motivation[i] <- pull(x[[10]][[i]]["motivation"])
if (length(x[[10]][[i]][["affiliations"]][[1]][[1]]) == 0) {
x$aff_inst[i] <- "None"
x$aff_city[i] <- "None"
x$aff_country[i] <- "None"
} else {
if (nrow(as_tibble(x[[10]][[i]][["affiliations"]][[1]])) == 1) {
x$aff_inst[i] <- pull(x[[10]][[i]][["affiliations"]][[1]]["name"])
x$aff_city[i] <- pull(x[[10]][[i]][["affiliations"]][[1]]["city"])
x$aff_country[i] <- pull(x[[10]][[i]][["affiliations"]][[1]]["country"])
} else {
x$aff_inst[i] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[1]])[1, "name"], as_tibble(x[[10]][[i]][["affiliations"]][[1]])[2, "name"], sep = ", ")
x$aff_city[i] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[1]])[1, "city"], as_tibble(x[[10]][[i]][["affiliations"]][[1]])[2, "city"], sep = ", ")
x$aff_country[i] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[1]])[1, "country"], as_tibble(x[[10]][[i]][["affiliations"]][[1]])[2, "country"], sep = ", ")
}
}
} else {
for (v in 1:nrow(as_tibble(x[[10]][[i]]))) {
u <- u + 1
x$year[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "year"])
x$field[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "category"])
x$share[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "share"])
x$motivation[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "motivation"])
if (length(x[[10]][[i]][["affiliations"]][[v]][[1]]) == 0) {
x$aff_inst[955 + u] <- "None"
x$aff_city[955 + u] <- "None"
x$aff_country[955 + u] <- "None"
x$first_name[955 + u] <- x$first_name[i]
x$last_name[955 + u] <- x$last_name[i]
x$born[955 + u] <- x$born[i]
x$died[955 + u] <- x$died[i]
x$born_country[955 + u] <- x$born_country[i]
x$born_city[955 + u] <- x$born_city[i]
x$died_country[955 + u] <- x$died_country[i]
x$died_city[955 + u] <- x$died_city[i]
x$gender[955 + u] <- x$gender[i]
} else {
if (nrow(as_tibble(x[[10]][[i]][["affiliations"]][[v]])) == 1) {
x$aff_inst[955 + u] <- pull(x[[10]][[i]][["affiliations"]][[v]]["name"])
x$aff_city[955 + u] <- pull(x[[10]][[i]][["affiliations"]][[v]]["city"])
x$aff_country[955 + u] <- pull(x[[10]][[i]][["affiliations"]][[v]]["country"])
x$first_name[955 + u] <- x$first_name[i]
x$last_name[955 + u] <- x$last_name[i]
x$born[955 + u] <- x$born[i]
x$died[955 + u] <- x$died[i]
x$born_country[955 + u] <- x$born_country[i]
x$born_city[955 + u] <- x$born_city[i]
x$died_country[955 + u] <- x$died_country[i]
x$died_city[955 + u] <- x$died_city[i]
x$gender[955 + u] <- x$gender[i]
} else {
x$aff_inst[955 + u] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[v]])[1, "name"], as_tibble(x[[10]][[i]][["affiliations"]][[v]])[2, "name"], sep = ", ")
x$aff_city[955 + u] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[v]])[1, "city"], as_tibble(x[[10]][[i]][["affiliations"]][[v]])[2, "city"], sep = ", ")
x$aff_country[955 + u] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[v]])[1, "country"], as_tibble(x[[10]][[i]][["affiliations"]][[v]])[2, "country"], sep = ", ")
x$first_name[955 + u] <- x$first_name[i]
x$last_name[955 + u] <- x$last_name[i]
x$born[955 + u] <- x$born[i]
x$died[955 + u] <- x$died[i]
x$born_country[955 + u] <- x$born_country[i]
x$born_city[955 + u] <- x$born_city[i]
x$died_country[955 + u] <- x$died_country[i]
x$died_city[955 + u] <- x$died_city[i]
x$gender[955 + u] <- x$gender[i]
}
}
}
}
}
# Convert to tibble and remove leftover NA-rows.
x %<>%
tibble() %>%
filter(is.na(field) == FALSE)
# Changing variable types and cleaning up.
x %<>%
mutate(gender = str_to_title(gender),
field = str_to_title(field),
gender = as.factor(gender),
field = as.factor(field),
born = as.Date(born),
died = as.Date(died),
year = as.integer(year),
share = as.integer(share),
motivation = substr(motivation, 2, nchar(motivation)-1),
born_country = str_replace(born_country, "\\(", ""),
born_country = str_replace(born_country, "\\)", ""),
born_country = sub(".*now ", "", born_country),
born_country = str_replace(born_country, "Faroe Islands Denmark", "Denmark"),
born_country = str_replace(born_country, "the Netherlands", "Netherlands"),
died_country = str_replace(died_country, "\\(", ""),
died_country = str_replace(died_country, "\\)", ""),
died_country = sub(".*now ", "", died_country),
died_country = str_replace(died_country, "Faroe Islands Denmark", "Denmark"),
died_country = str_replace(died_country, "the Netherlands", "Netherlands"),
born_city = str_replace(born_city, "\\(", ""),
born_city = str_replace(born_city, "\\)", ""),
died_city = sub(".*now ", "", died_city),
died_city = str_replace(died_city, "\\(", ""),
died_city = str_replace(died_city, "\\)", ""),
died_city = sub(".*now ", "", died_city))
# Arrange.
x %<>%
arrange(field, year)
# Sort variables and remove useless stuff.
x %<>%
select(-prizes, -aff_country) %>%
select(1:2, year, field, share, gender, everything())
# Save.
nobel <- x
usethis::use_data(nobel, overwrite = T)
|
/data-raw/make_nobel.R
|
permissive
|
tcweiss/primer.data
|
R
| false | false | 7,892 |
r
|
library(tidyverse)
library(magrittr)
library(janitor)
library(jsonlite)
# Accessing data through the Nobel Prize API. Last run on 29.07.2021.
url <- "http://api.nobelprize.org/v1/laureate.json"
x <- fromJSON(url)
# This data comes as a list, so we need to convert it to a usable structure
# first.
x <- as_tibble(data.frame(x$laureates)) %>%
clean_names() %>%
select(-c(id, born_country_code, died_country_code)) %>%
rename(first_name = firstname,
last_name = surname)
# Extract variables from "prizes" list. I first created some new variables with
# empty values which will then be overwritten by the values in the list using
# the loop below.
new_cols <- tibble("year" = rep(NA_character_, 955),
"field" = rep(NA_character_, 955),
"share" = rep(NA_character_, 955),
"motivation" = rep(NA_character_, 955),
"aff_inst" = rep(NA_character_, 955),
"aff_city" = rep(NA_character_, 955),
"aff_country" = rep(NA_character_, 955))
x <- cbind(x, new_cols)
y <- tibble("first_name" = rep(NA_character_, 300),
"last_name" = rep(NA_character_, 300),
"born" = rep(NA_character_, 300),
"died" = rep(NA_character_, 300),
"born_country" = rep(NA_character_, 300),
"born_city" = rep(NA_character_, 300),
"died_country" = rep(NA_character_, 300),
"died_city" = rep(NA_character_, 300),
"gender" = rep(NA_character_, 300),
"prizes" = rep(NA_character_, 300),
"year" = rep(NA_character_, 300),
"field" = rep(NA_character_, 300),
"share" = rep(NA_character_, 300),
"motivation" = rep(NA_character_, 300),
"aff_inst" = rep(NA_character_, 300),
"aff_city" = rep(NA_character_, 300),
"aff_country" = rep(NA_character_, 300))
x <- rbind(x,y)
# City was missing for University of Delaware, which messes up the loop.
x[[10]][[826]][[5]][[1]][["city"]] <- "Newark, DE"
v <- 0
u <- 0
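# The loop below flattens the nested "prizes" data: laureates with a single
# prize have the new columns filled in place (rows 1 to 955), while laureates
# with several prizes get one row per prize appended in the padding rows
# created above (from row 956 onwards), with their biographical fields copied
# over.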
for (i in 1:955) {
if (nrow(as_tibble(x[[10]][[i]])) == 1) {
x$year[i] <- pull(x[[10]][[i]]["year"])
x$field[i] <- pull(x[[10]][[i]]["category"])
x$share[i] <- pull(x[[10]][[i]]["share"])
x$motivation[i] <- pull(x[[10]][[i]]["motivation"])
if (length(x[[10]][[i]][["affiliations"]][[1]][[1]]) == 0) {
x$aff_inst[i] <- "None"
x$aff_city[i] <- "None"
x$aff_country[i] <- "None"
} else {
if (nrow(as_tibble(x[[10]][[i]][["affiliations"]][[1]])) == 1) {
x$aff_inst[i] <- pull(x[[10]][[i]][["affiliations"]][[1]]["name"])
x$aff_city[i] <- pull(x[[10]][[i]][["affiliations"]][[1]]["city"])
x$aff_country[i] <- pull(x[[10]][[i]][["affiliations"]][[1]]["country"])
} else {
x$aff_inst[i] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[1]])[1, "name"], as_tibble(x[[10]][[i]][["affiliations"]][[1]])[2, "name"], sep = ", ")
x$aff_city[i] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[1]])[1, "city"], as_tibble(x[[10]][[i]][["affiliations"]][[1]])[2, "city"], sep = ", ")
x$aff_country[i] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[1]])[1, "country"], as_tibble(x[[10]][[i]][["affiliations"]][[1]])[2, "country"], sep = ", ")
}
}
} else {
for (v in 1:nrow(as_tibble(x[[10]][[i]]))) {
u <- u + 1
x$year[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "year"])
x$field[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "category"])
x$share[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "share"])
x$motivation[955 + u] <- pull(as_tibble(x[[10]][[i]])[v, "motivation"])
if (length(x[[10]][[i]][["affiliations"]][[v]][[1]]) == 0) {
x$aff_inst[955 + u] <- "None"
x$aff_city[955 + u] <- "None"
x$aff_country[955 + u] <- "None"
x$first_name[955 + u] <- x$first_name[i]
x$last_name[955 + u] <- x$last_name[i]
x$born[955 + u] <- x$born[i]
x$died[955 + u] <- x$died[i]
x$born_country[955 + u] <- x$born_country[i]
x$born_city[955 + u] <- x$born_city[i]
x$died_country[955 + u] <- x$died_country[i]
x$died_city[955 + u] <- x$died_city[i]
x$gender[955 + u] <- x$gender[i]
} else {
if (nrow(as_tibble(x[[10]][[i]][["affiliations"]][[v]])) == 1) {
x$aff_inst[955 + u] <- pull(x[[10]][[i]][["affiliations"]][[v]]["name"])
x$aff_city[955 + u] <- pull(x[[10]][[i]][["affiliations"]][[v]]["city"])
x$aff_country[955 + u] <- pull(x[[10]][[i]][["affiliations"]][[v]]["country"])
x$first_name[955 + u] <- x$first_name[i]
x$last_name[955 + u] <- x$last_name[i]
x$born[955 + u] <- x$born[i]
x$died[955 + u] <- x$died[i]
x$born_country[955 + u] <- x$born_country[i]
x$born_city[955 + u] <- x$born_city[i]
x$died_country[955 + u] <- x$died_country[i]
x$died_city[955 + u] <- x$died_city[i]
x$gender[955 + u] <- x$gender[i]
} else {
x$aff_inst[955 + u] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[v]])[1, "name"], as_tibble(x[[10]][[i]][["affiliations"]][[v]])[2, "name"], sep = ", ")
x$aff_city[955 + u] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[v]])[1, "city"], as_tibble(x[[10]][[i]][["affiliations"]][[v]])[2, "city"], sep = ", ")
x$aff_country[955 + u] <- paste(as_tibble(x[[10]][[i]][["affiliations"]][[v]])[1, "country"], as_tibble(x[[10]][[i]][["affiliations"]][[v]])[2, "country"], sep = ", ")
x$first_name[955 + u] <- x$first_name[i]
x$last_name[955 + u] <- x$last_name[i]
x$born[955 + u] <- x$born[i]
x$died[955 + u] <- x$died[i]
x$born_country[955 + u] <- x$born_country[i]
x$born_city[955 + u] <- x$born_city[i]
x$died_country[955 + u] <- x$died_country[i]
x$died_city[955 + u] <- x$died_city[i]
x$gender[955 + u] <- x$gender[i]
}
}
}
}
}
# Convert to tibble and remove leftover NA-rows.
x %<>%
tibble() %>%
filter(is.na(field) == FALSE)
# Changing variable types and cleaning up.
x %<>%
mutate(gender = str_to_title(gender),
field = str_to_title(field),
gender = as.factor(gender),
field = as.factor(field),
born = as.Date(born),
died = as.Date(died),
year = as.integer(year),
share = as.integer(share),
motivation = substr(motivation, 2, nchar(motivation)-1),
born_country = str_replace(born_country, "\\(", ""),
born_country = str_replace(born_country, "\\)", ""),
born_country = sub(".*now ", "", born_country),
born_country = str_replace(born_country, "Faroe Islands Denmark", "Denmark"),
born_country = str_replace(born_country, "the Netherlands", "Netherlands"),
died_country = str_replace(died_country, "\\(", ""),
died_country = str_replace(died_country, "\\)", ""),
died_country = sub(".*now ", "", died_country),
died_country = str_replace(died_country, "Faroe Islands Denmark", "Denmark"),
died_country = str_replace(died_country, "the Netherlands", "Netherlands"),
born_city = str_replace(born_city, "\\(", ""),
born_city = str_replace(born_city, "\\)", ""),
died_city = sub(".*now ", "", died_city),
died_city = str_replace(died_city, "\\(", ""),
died_city = str_replace(died_city, "\\)", ""),
died_city = sub(".*now ", "", died_city))
# Arrange.
x %<>%
arrange(field, year)
# Sort variables and remove useless stuff.
x %<>%
select(-prizes, -aff_country) %>%
select(1:2, year, field, share, gender, everything())
# Save.
nobel <- x
usethis::use_data(nobel, overwrite = T)
|
## makeCacheMatrix() and cacheSolve() together avoid recomputing matrix
## inverses: the first wraps a matrix with a cached inverse, the second
## returns the inverse, reusing the cache when it is already populated.
## makeCacheMatrix: build the cache-aware wrapper around a matrix.
makeCacheMatrix <- function(x = matrix()) {
## creates a special "matrix" object that can cache
## its inverse.
i <- NULL
set <- function(y){
x <<- y
i <<- NULL
}
get <- function(){
x
}
setinverse <- function(inverse) i <<- inverse
getinverse <- function() {
i
}
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it with solve() only if it is not cached yet.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)){
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
m <- matrix(1:4,2,2)
m
cacheSolve(makeCacheMatrix(matrix(1:4,2,2)))
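## Minimal usage sketch (hypothetical object name `cm`): reusing the same
## cache-aware object lets the second call return the cached inverse.
cm <- makeCacheMatrix(matrix(1:4, 2, 2))
cacheSolve(cm)  # computes the inverse with solve() and stores it
cacheSolve(cm)  # prints "getting cached data" and returns the cached inverse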
|
/cachematrix.R
|
no_license
|
thomasgierach/ProgrammingAssignment2
|
R
| false | false | 904 |
r
|
## makeCacheMatrix() and cacheSolve() together avoid recomputing matrix
## inverses: the first wraps a matrix with a cached inverse, the second
## returns the inverse, reusing the cache when it is already populated.
## makeCacheMatrix: build the cache-aware wrapper around a matrix.
makeCacheMatrix <- function(x = matrix()) {
## creates a special "matrix" object that can cache
## its inverse.
i <- NULL
set <- function(y){
x <<- y
i <<- NULL
}
get <- function(){
x
}
setinverse <- function(inverse) i <<- inverse
getinverse <- function() {
i
}
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it with solve() only if it is not cached yet.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
if(!is.null(i)){
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
m <- matrix(1:4,2,2)
m
cacheSolve(makeCacheMatrix(matrix(1:4,2,2)))
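## Minimal usage sketch (hypothetical object name `cm`): reusing the same
## cache-aware object lets the second call return the cached inverse.
cm <- makeCacheMatrix(matrix(1:4, 2, 2))
cacheSolve(cm)  # computes the inverse with solve() and stores it
cacheSolve(cm)  # prints "getting cached data" and returns the cached inverse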
|
#!/usr/local/bin/Rscript
# Load libraries
library(oposSOM)
library(biomaRt)
library(stringi)
library(getopt)
args<-commandArgs(TRUE)
options<-matrix(c('file', 'f', 1, "character",
'datasetname', 'n', 1, "character",
'databasebiomart', 'b', 1, "character",
                  'databasehost', 's', 1, "character",  # getopt short options should be a single character ('ho' is not)
'databasedataset', 'd', 1, "character",
'databaseidtype', 'i', 1, "character",
'samples', 'l', 1, "character",
'log10', 'g', 0, "logical",
'help', 'h', 0, "logical"),
ncol=4,byrow=TRUE)
ret.opts<-getopt(options,args)
if ( !is.null(ret.opts$help) ) {
cat(getopt(options, usage=TRUE));
q(status=1);
}
file <- ret.opts$file
datasetname <- ret.opts$datasetname
databasebiomart <- ret.opts$databasebiomart
databasehost <- ret.opts$databasehost
databasedataset <- ret.opts$databasedataset
databaseidtype <- ret.opts$databaseidtype
file2 <- ret.opts$samples
data <- read.csv(file = file, sep = "\t")
env <- opossom.new()
env$preferences <- list(dataset.name = datasetname, dim.1stLvlSom = "auto",
dim.2ndLvlSom = 20, training.extension = 1, rotate.SOM.portraits = 0,
flip.SOM.portraits = FALSE, activated.modules = list(reporting = TRUE,
primary.analysis = TRUE, sample.similarity.analysis = TRUE,
geneset.analysis = TRUE, geneset.analysis.exact = TRUE,
group.analysis = TRUE, difference.analysis = TRUE),
database.biomart = databasebiomart, database.host = databasehost,
database.dataset = databasedataset, database.id.type = databaseidtype, standard.spot.modules = "dmap",
spot.coresize.modules = 3, spot.threshold.modules = 0.95,
spot.coresize.groupmap = 5, spot.threshold.groupmap = 0.75,
adjust.autogroup.number = 0, feature.centralization = TRUE,
sample.quantile.normalization = TRUE, pairwise.comparison.list = NULL)
# If log transformation
if ( !is.null(ret.opts$log10) ) {
data_log <- log10(data[,-1] + 1);
data_log <- cbind(data[,1],data_log);
names(data_log)[1] <- names(data[1]);
print("log transformation");
env$indata <- data_log
} else {
# If not log transformation
print("Not log transformation");
env$indata <- data
}
# Sample file (tab-delimited txt file with columns: label, color, replicates)
oposom <- read.csv(file2, sep = "\t")
oposom$label <- as.character(oposom$label)
oposom$color <- as.character(oposom$color)
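# Hypothetical example of the expected samples file layout (tab-delimited),
# based on the columns used below (label, color, replicates):
#   label     color     replicates
#   control   blue      3
#   treated   red       3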
vector <- c()
for (i in 1:nrow(oposom)){
vector <- c(vector, rep(oposom$label[i], oposom$replicates[i]))
}
env$group.labels <- vector
vector2 <- c()
for (i in 1:nrow(oposom)){
vector2 <- c(vector2, rep(oposom$color[i], oposom$replicates[i]))
}
env$group.colors <- vector2
# Run opossom
opossom.run(env)
|
/oposSOM/2.0.1/opossom_wrapper.R
|
no_license
|
cyverse/docker-builds
|
R
| false | false | 3,212 |
r
|
#!/usr/local/bin/Rscript
# Load libraries
library(oposSOM)
library(biomaRt)
library(stringi)
library(getopt)
args<-commandArgs(TRUE)
options<-matrix(c('file', 'f', 1, "character",
'datasetname', 'n', 1, "character",
'databasebiomart', 'b', 1, "character",
                  'databasehost', 's', 1, "character",  # getopt short options should be a single character ('ho' is not)
'databasedataset', 'd', 1, "character",
'databaseidtype', 'i', 1, "character",
'samples', 'l', 1, "character",
'log10', 'g', 0, "logical",
'help', 'h', 0, "logical"),
ncol=4,byrow=TRUE)
ret.opts<-getopt(options,args)
if ( !is.null(ret.opts$help) ) {
cat(getopt(options, usage=TRUE));
q(status=1);
}
file <- ret.opts$file
datasetname <- ret.opts$datasetname
databasebiomart <- ret.opts$databasebiomart
databasehost <- ret.opts$databasehost
databasedataset <- ret.opts$databasedataset
databaseidtype <- ret.opts$databaseidtype
file2 <- ret.opts$samples
data <- read.csv(file = file, sep = "\t")
env <- opossom.new()
env$preferences <- list(dataset.name = datasetname, dim.1stLvlSom = "auto",
dim.2ndLvlSom = 20, training.extension = 1, rotate.SOM.portraits = 0,
flip.SOM.portraits = FALSE, activated.modules = list(reporting = TRUE,
primary.analysis = TRUE, sample.similarity.analysis = TRUE,
geneset.analysis = TRUE, geneset.analysis.exact = TRUE,
group.analysis = TRUE, difference.analysis = TRUE),
database.biomart = databasebiomart, database.host = databasehost,
database.dataset = databasedataset, database.id.type = databaseidtype, standard.spot.modules = "dmap",
spot.coresize.modules = 3, spot.threshold.modules = 0.95,
spot.coresize.groupmap = 5, spot.threshold.groupmap = 0.75,
adjust.autogroup.number = 0, feature.centralization = TRUE,
sample.quantile.normalization = TRUE, pairwise.comparison.list = NULL)
# If log transformation
if ( !is.null(ret.opts$log10) ) {
data_log <- log10(data[,-1] + 1);
data_log <- cbind(data[,1],data_log);
names(data_log)[1] <- names(data[1]);
print("log transformation");
env$indata <- data_log
} else {
# If not log transformation
print("Not log transformation");
env$indata <- data
}
# Sample file (tab-delimited txt file with columns: label, color, replicates)
oposom <- read.csv(file2, sep = "\t")
oposom$label <- as.character(oposom$label)
oposom$color <- as.character(oposom$color)
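# Hypothetical example of the expected samples file layout (tab-delimited),
# based on the columns used below (label, color, replicates):
#   label     color     replicates
#   control   blue      3
#   treated   red       3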
vector <- c()
for (i in 1:nrow(oposom)){
vector <- c(vector, rep(oposom$label[i], oposom$replicates[i]))
}
env$group.labels <- vector
vector2 <- c()
for (i in 1:nrow(oposom)){
vector2 <- c(vector2, rep(oposom$color[i], oposom$replicates[i]))
}
env$group.colors <- vector2
# Run opossom
opossom.run(env)
|
# setwd('C:/Users/germa/Box Sync/My_Documents') #dell
# setwd("/home/germanm2")
rm(list=ls())
setwd('C:/Users/germanm2/Box Sync/My_Documents')#CPSC
codes_folder <-'C:/Users/germanm2/Documents'#CPSC
setwd('~')#Server
codes_folder <-'~' #Server
source('./Codes_useful/R.libraries.R')
# library(scales)
source('./Codes_useful/gm_functions.R')
source(paste0(codes_folder, '/n_policy_git/Codes/parameters.R'))
"~/n_policy_git/Codes/parameters.R"
# source('./Codes_useful/gm_functions.R')
if(FALSE){
grid10_tiles_sf7 <- readRDS("./n_policy_box/Data/Grid/grid10_tiles_sf7.rds")
grid10_soils_dt5 <- readRDS("./n_policy_box/Data/Grid/grid10_soils_dt5.rds") %>% data.table()
grid10_fields_sf2 <- readRDS('./n_policy_box/Data/Grid/grid10_fields_sf2.rds')
perfomances_dt <- readRDS("./n_policy_box/Data/files_rds/perfomances_dt.rds")
perfomances_dt[,.N, .(id_10, id_field)] %>% .[,.N, id_10] %>% .[,N] %>% table() #number of fields by cell
perfomances_dt[,.N, .(id_10, id_field, mukey, policy, NMS)] %>% .[,N] %>% table() #number of z by mukey. SHould be all equal
perfomances_dt[,.N, .(policy, NMS)]%>% .[,N] %>% table() #number of treatments (policy sublevels x NMS). SHould be all equal
table(perfomances_dt$NMS) #obs by NMS. SHould be all equal
perfomances_dt[NMS == 'dynamic', NMS := 'dynamic1']
summary(perfomances_dt[,.(area_ha = sum(area_ha)), by = .(id_10, id_field, policy, NMS, z)]$area_ha)
#-------------------------------------------------------------------------
#Make profits relative to the zero rate
if(FALSE){
yc_yearly_dt3 <- readRDS("./n_policy_box/Data/files_rds/yc_yearly_dt3.rds")
zero_dt <- yc_yearly_dt3[N_fert == 10, .(id_10, mukey, z, N_fert_zero = N_fert, L_zero = L, Y_corn_zero = Y_corn)]
zero_dt[,.N, .(id_10, mukey, z)]$N %>% table()
perfomances_dt[,policy_name := as.character(lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1]))]
perfomances_dt[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
perfomances_dt <- merge(perfomances_dt, zero_dt, by = c('id_10', 'mukey', 'z'))
perfomances_dt[policy_name == 'ratio', P_zero := Y_corn_zero * Pc - N_fert_zero * policy_val * Pc]
perfomances_dt[policy_name == 'fee', P_zero := Y_corn_zero * Pc - N_fert_zero * Pn - L_zero * policy_val]
perfomances_dt[policy_name == 'nred', P_zero := Y_corn_zero * Pc - N_fert_zero * Pn]
perfomances_dt[,P := P - P_zero]
}
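  # In the block above (when enabled), profits are re-expressed relative to a
  # near-zero N benchmark: P_rel = P - P_zero, where P_zero is the profit at the
  # 10 kg N rate priced under each policy (ratio: N charged at policy_val * Pc;
  # fee: N at Pn plus the fee on leaching L_zero; nred: N at Pn with no charge on L).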
#-------------------------------------------------------------------------
# AGGREGATE THE DATA TO FIELD X Z LEVEL CONSIDERING THE AREA
names(perfomances_dt)
do_not_aggregate = c("policy",'region','id_10', 'NMS', 'z', 'id_field')
do_aggregate = c("Y_corn", 'Y_soy', 'L1', 'L2', "L", "N_fert","P", "G")
if(FALSE){
perfomances_dt2 <- aggregate_by_area(data_dt = perfomances_dt, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #field x z level (mukey is out)
}else{
split_list <- split(perfomances_dt,perfomances_dt$z)
split_list_output <- list()
for(split_list_n in split_list){
split_list_output[[unique(split_list_n$z)]] <- aggregate_by_area(data_dt = split_list_n, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #field x z level (mukey is out)
}
perfomances_dt2 <- rbindlist(split_list_output)
}
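  # aggregate_by_area() comes from gm_functions.R (not shown here). A minimal
  # sketch of what it is assumed to do: area-weighted means of 'variables'
  # within each 'by_c' group, carrying the summed weight along. Hypothetical
  # re-implementation for reference only (data.table assumed loaded).
  aggregate_by_area_sketch <- function(data_dt, variables, weight, by_c){
    dt <- copy(data_dt)
    setnames(dt, weight, 'w_tmp')
    agg <- dt[, lapply(.SD, function(v) sum(v * w_tmp) / sum(w_tmp)),
              by = by_c, .SDcols = variables]
    w_sum <- dt[, .(w_tmp = sum(w_tmp)), by = by_c]
    out <- merge(agg, w_sum, by = by_c)
    setnames(out, 'w_tmp', weight)
    out[]
  }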
str(perfomances_dt2)
perfomances_dt2 <- perfomances_dt2[order(id_10, z,id_field, NMS)]
saveRDS(perfomances_dt2, "./n_policy_box/Data/files_rds/perfomances_dt2.rds") #for 5d_pdf.R
perfomances_dt2 <- readRDS("./n_policy_box/Data/files_rds/perfomances_dt2.rds")
#-------------------------------------------------------------------------
# AGGREGATE THE DATA TO CELL X Z LEVEL CONSIDERING THE AREA
names(perfomances_dt)
do_not_aggregate = c('policy','id_10', 'region','NMS', 'z')
do_aggregate = c("Y_corn", 'Y_soy', 'L1', 'L2', "L", "N_fert","P", 'G')
if(FALSE){
    #First aggregate without z so that we can then get the leach_extreme
perfomances_dt3 <- aggregate_by_area(data_dt = perfomances_dt2, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #cell x z level (field is out)
}else{
split_list <- split(perfomances_dt2,perfomances_dt2$region)
split_list_output <- list()
for(split_list_n in split_list){
split_list_output[[unique(split_list_n$region)]] <- aggregate_by_area(data_dt = split_list_n, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #cell x z level (field is out)
}
perfomances_dt3 <- rbindlist(split_list_output)
}
saveRDS(perfomances_dt3, "./n_policy_box/Data/files_rds/perfomances_dt3.rds") #for 5e_validation.R
#---------------------------------------------------------------------------
# AGGREGATE AGAIN CONSIDERING THE CORN PRODUCTION OF THE CELL
grid10_tiles_dt <- data.table(grid10_tiles_sf7)[,.N, .(id_tile,id_10, corn_avg_ha,corn5_tile )][,-'N']
summary(grid10_tiles_dt$corn_avg_ha)
perfomances_dt3[,id_10 := as.integer(id_10)]
perfomances_dt3 <- merge(perfomances_dt3, grid10_tiles_dt, by = 'id_10')
perfomances_dt4 <- aggregate_by_area(data_dt = perfomances_dt3, variables = c("Y_corn", 'L1', 'L2', "L", "N_fert","P", "G"),
weight = 'corn_avg_ha', by_c = c('policy','NMS')) #state level, weighted by corn_ha
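# State-level indicators above are corn-area-weighted means over cells
# (presumably the relationship referenced later as eq_I_state):
#   I_state = sum_cells(I_cell * corn_ha_cell) / sum_cells(corn_ha_cell)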
# ---------
# Make leaching relative to baselevel
baselevel_L <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', L]
perfomances_dt4[,L_change := round((L / baselevel_L) - 1,3)*100 ]
#---------------------------------------------------------------------------
# Some cleaning
perfomances_dt4[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
perfomances_dt4[,policy_name := as.character(lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1]))]
colsToDelete <- c('L1', 'L2', 'corn_avg_ha')
set(perfomances_dt4,, colsToDelete, NULL)
# ---------
#Externalities
# perfomances_dt4[,E := L * 0.4 * Pe_med]
# perfomances_dt4[,E := L * Pe_total]
#---------
  #remove yield modifications of more than 5%
baselevel_Y_corn <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', Y_corn ]
perfomances_dt4[,Y_corn_change := Y_corn/baselevel_Y_corn]
  perfomances_dt4 <- perfomances_dt4[Y_corn_change >=0.95 & Y_corn_change <= 1.05, -'Y_corn_change'] #remove yield modifications of more than 5%
#---------
#remove ratios that are subsidized
perfomances_dt4 <- perfomances_dt4[!(policy_name == 'ratio' & policy_val < 5)]
#---------
#Calculate the subsidies
baselevel_P <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', P ]
# perfomances_dt4[,S := P - baselevel_P]
perfomances_dt4[,net_balance := P - baselevel_P + G]
perfomances_dt4[policy == 'ratio_5']
# perfomances_dt4[,ag_cost := P + G]
saveRDS(perfomances_dt4, "./n_policy_box/Data/files_rds/perfomances_dt4.rds")
}
perfomances_dt4 <- readRDS("./n_policy_box/Data/files_rds/perfomances_dt4.rds")
perfomances_dt4[policy %in% c('ratio_5', 'fee_0', 'nred_1') & NMS == 'static']
W_peak_dt <- perfomances_dt4[,.SD[W == max(W)], by = .(policy_name, NMS)] #peak in W
saveRDS(W_peak_dt, "./n_policy_box/Data/files_rds/W_peak_dt.rds")
#==========================================================================
# RATIO CHART 2
plot_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c('static', 'dynamic1') ]
# Total G collections in IL
if(FALSE){
IL_corn_area_ha = 5179976
plot_dt[policy == 'ratio_12.5' & NMS == 1, G] * IL_corn_area_ha / 1000000 #million in IL
}
# Elasticity of Demand Point-Slope Formula: https://pressbooks.bccampus.ca/uvicecon103/chapter/4-2-elasticity/
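# The point-slope elasticity used below is e = (dQ / Q0) / (dP / P0): the %
# change in N fertilizer use divided by the % change in the N price when the
# N:corn price ratio moves from 5 to 6 (corn price Pc held fixed).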
if(FALSE){
elasticity_dt <- plot_dt[NMS == 1 & policy_val %in% c(4,5,6)]
d_quantity <- (elasticity_dt[policy_val == 6, N_fert] - elasticity_dt[policy_val == 5, N_fert])/
(elasticity_dt[policy_val == 5, N_fert])
d_price <- (Pc*6 - Pc*5)/
(Pc*5)
d_quantity/d_price
}
# current_ratio_dt <- perfomances_dt4[policy == 'fee_0' & NMS %in% c('static','dynamic','3','4','5')]
# current_ratio_dt[,policy_name := 'ratio']
# current_ratio_dt[,policy_val := Pn/Pc]
# current_ratio_dt[,policy := paste('ratio', round(Pn/Pc,1), sep = '_')]
# plot_dt <- rbind(plot_dt, current_ratio_dt)
baselevel_L <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', L]
baselevel_Y_corn <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', Y_corn ]
# plot_dt[,L := round((L / baselevel_L) - 1,2)*100 ]
# plot_dt[,.SD[W == max(W)], by = NMS] #peak in W
ggplot(plot_dt) + geom_line(aes(x = policy_val, y = P, color = NMS))
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L_change', 'N_fert',
'P', 'G', 'net_balance'))
plot_dt_long[,variable_labels := factor(variable, levels = c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'),
labels = c(expression("Fertilizer (N kg " * ha^"-1" *yr^"-1"* ")"),
expression("L ("*'%'*" change)"),
expression("Corn Yield (kg N " * ha^"-1" *yr^"-1"* ")"),
expression("Farm profits ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Gov. collections ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Net balance ($ " * ha^"-1" * yr^"-1"* ")")))]
# plot_dt_long[variable == 'N_fert', plot_name := 'a) N Rate kg/ha']
# plot_dt_long[variable == 'L', plot_name := 'b) L (% change)']
# plot_dt_long[variable == 'Y_corn', plot_name := 'c) Yield kg/ha']
# plot_dt_long[variable == 'P', plot_name := 'd) Profits $/ha']
# plot_dt_long[variable == 'G', plot_name := 'e) G $/ha']
# plot_dt_long[variable == 'E', plot_name := 'f) E $/ha']
# plot_dt_long[variable == 'W', plot_name := 'g) W $/ha']
# plot_dt_long[order(variable)]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L_change', 'Y_corn')]
#use https://ggplot2.tidyverse.org/reference/labellers.html
hline_dt <- data.table(unique(plot_dt_long1[,.(variable, variable_labels)]))
hline_dt[variable == 'Y_corn', y_line := baselevel_Y_corn*0.95]
hline_dt[variable == 'Y_corn', y_label := '95% baselevel']
#----
# ADD letters outside plot (go down) https://stackoverflow.com/questions/12409960/ggplot2-annotate-outside-of-plot
ann_text <- plot_dt_long[,.(x = min(policy_val)-5,
value = max(value)), by = .(variable, variable_labels)]
#Sort in the right order
ann_text <- ann_text[match(c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance', 'E','W'), ann_text$variable),]
ann_text[,lab := c("a)", "b)", "c)", "d)", "e)", "f)", "g)", "h)")]
#----
(plot_1 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = 16, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long1$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
scale_x_continuous(breaks = seq(5,20,1), labels = seq(5,20,1)) +
xlab('N:Corn price ratio')+
# geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position = "none",
plot.margin = unit(c(2,1,1,1), "lines")))
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L_change', 'Y_corn')]
(plot_2 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long2, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
# geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
# geom_text(data = hline_dt, aes(x = 18, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long2$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
scale_x_continuous(breaks = seq(5,20,1), labels = seq(5,20,1)) +
xlab('N:Corn price ratio')+
# geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
plot.margin = unit(c(2,1,1,1), "lines"))
)
grid.arrange(plot_1, plot_2, nrow = 1)
ggsave(plot = grid.arrange(plot_1, plot_2, nrow = 1),
filename = "./n_policy_box/Data/figures/ratio_all_vars.pdf", width = 979/300*3, height = 1042/300*3,
units = 'in')
#==========================================================================
# LRED CHART
plot_dt <- perfomances_dt4[policy_name == 'nred' & NMS %in% c('static','dynamic1')][order(NMS, -policy_val)]
# plot_dt[,L_red := round(1-(L / baselevel_L),2)*100 ]
# plot_dt[,L := round((L / baselevel_L) - 1,2)*100 ]
plot_dt[,policy_val := (1-policy_val )*100]
ggplot(plot_dt) + geom_line(aes(x = policy_val, y = L_change, color = NMS))
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L_change', 'N_fert',
'P', 'G', 'net_balance')) %>% data.table()
plot_dt_long[,variable_labels := factor(variable, levels = c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'),
labels = c(expression("Fertilizer (N kg " * ha^"-1" *yr^"-1"* ")"),
expression("L ("*'%'*" change)"),
expression("Corn Yield (kg N " * ha^"-1" *yr^"-1"* ")"),
expression("Farm profits ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Gov. collections ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Net balance ($ " * ha^"-1" * yr^"-1"* ")")))]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L_change', 'Y_corn')]
hline_dt <- data.table(unique(plot_dt_long1[,.(variable, variable_labels)]))
hline_dt[variable == 'Y_corn', y_line := baselevel_Y_corn*0.95]
hline_dt[variable == 'Y_corn', y_label := '95% baselevel']
# hline_dt[variable == 'L_change', y_line := 0]
#----
# ADD letters outside plot (go down) https://stackoverflow.com/questions/12409960/ggplot2-annotate-outside-of-plot
ann_text <- plot_dt_long[,.(x = min(policy_val)-5,
value = max(value)), by = .(variable, variable_labels)]
#Sort in the right order
ann_text <- ann_text[match(c('N_fert', 'L_change', 'Y_corn', 'P', 'net_balance','W'), ann_text$variable),]
ann_text[,lab := c("a)", "b)", "c)", "d)", "e)", "f)")]
ann_text
#----
(plot_1 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = 10, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long1$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('L reduction target (%)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position = "none",
plot.margin = unit(c(2,1,1,1), "lines"))
)
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L_change', 'Y_corn', 'G')]
(plot_2 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long2, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
# geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
# geom_text(data = hline_dt, aes(x = 18, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long2$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('L reduction target (%)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
plot.margin = unit(c(2,1,1,1), "lines"))
)
grid.arrange(plot_1, plot_2, nrow = 1)
ggsave(plot = grid.arrange(plot_1, plot_2, nrow = 1),
filename = "./n_policy_box/Data/figures/Lred_all_vars.jpg", width = 979/300*3, height = 1042/300*3,
units = 'in')
#==========================================================================
# FEE CHART
plot_dt <- perfomances_dt4[policy_name == 'fee' & NMS %in% c('static','dynamic1') ]
plot_dt[,L := round((L / baselevel_L) - 1,2)*100 ]
ggplot(plot_dt) + geom_line(aes(x = policy_val, y = L, color = NMS))
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L_change', 'N_fert',
'P', 'G', 'net_balance'))
plot_dt_long[,variable_labels := factor(variable, levels = c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'),
labels = c(expression("Fertilizer (N kg " * ha^"-1" *yr^"-1"* ")"),
expression("L ("*'%'*" change)"),
expression("Corn Yield (kg N " * ha^"-1" *yr^"-1"* ")"),
expression("Farm profits ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Gov. collections ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Net balance ($ " * ha^"-1" * yr^"-1"* ")")))]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L_change', 'Y_corn')]
hline_dt <- data.table(unique(plot_dt_long1[,.(variable, variable_labels)]))
hline_dt[variable == 'Y_corn', y_line := baselevel_Y_corn*0.95]
hline_dt[variable == 'Y_corn', y_label := '95% baselevel']
# hline_dt[variable == 'L_change', y_line := 0]
#----
# ADD letters outside plot (go down) https://stackoverflow.com/questions/12409960/ggplot2-annotate-outside-of-plot
ann_text <- plot_dt_long[,.(x = min(policy_val)-4,
value = max(value)), by = .(variable, variable_labels)]
#Sort in the right order
ann_text <- ann_text[match(c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'E','W'), ann_text$variable),]
ann_text[,lab := c("a)", "b)", "c)", "d)", "e)", "f)", "g)")]
#----
(plot_1 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = 16, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long1$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(unique(plot_dt$policy_val)), max(unique(plot_dt$policy_val))), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('Fee on L ($/kg)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position = "none",
plot.margin = unit(c(2,1,1,1), "lines"))
)
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L_change', 'Y_corn')]
(plot_2 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long2, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
# geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
# geom_text(data = hline_dt, aes(x = 18, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long2$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(unique(plot_dt$policy_val)), max(unique(plot_dt$policy_val))), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('Fee on L ($/kg)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
plot.margin = unit(c(2,1,1,1), "lines"))
)
grid.arrange(plot_1, plot_2, nrow = 1)
ggsave(plot = grid.arrange(plot_1, plot_2, nrow = 1),
filename = "./n_policy_box/Data/figures/fee_all_vars.jpg", width = 979/300*3, height = 1042/300*3,
units = 'in')
#==========================================================================
# W MAXIMIZATION
w_dt <- perfomances_dt4[policy_name != 'subs',.SD[W == max(W)], by = .(policy_name, NMS)][order(-W)] #peak in W
w_dt[,policy_val := as.numeric(policy_val )]
w_dt[policy_name == 'nred', policy_val := (1-as.numeric(policy_val ))*100]
w_dt[policy_name == 'nred', policy_name := 'target']
baselevel_dt <- perfomances_dt4[policy == 'ratio_6' & NMS == 1]
baselevel_dt[,policy_name := 'base-level']
baselevel_dt[,policy_val := '-']
baselevel_dt
w_dt <- rbind(w_dt ,baselevel_dt) %>% .[order(-W)]
latex_table_dt <- w_dt[,c('policy_name', 'policy_val','NMS', 'N_fert', 'Y_corn', 'L_change', 'P', 'G', 'E', 'W')]
cols_1 <- c('N_fert', 'Y_corn', 'L_change', 'P', 'G', 'E', 'W')
latex_table_dt[,(cols_1) := round(.SD,1), .SDcols=cols_1]
latex_table_dt[,Y_corn := round(Y_corn, 0)]
setnames(latex_table_dt, c('policy_name', 'policy_val', 'N_fert', 'Y_corn', 'L_change', 'P', 'G', 'E', 'W'),
c('policy', 'level', 'N rate (kg/ha)', 'Y_corn (kg/ha)', 'L change (%)', 'Profits ($/ha)', 'G ($/ha)', 'E ($/ha)', 'W ($/ha)'))
latex_table_xt <- xtable(latex_table_dt, type = "latex", auto = TRUE, label = 'tab:w_maximization',
caption = 'Indicators for the level that maximized W for each policy and NMS combination, ordered by their W.
The base-level system is also shown as a benchmark')
#make L in %. Change N red name to L reduction target
library('xtable')
print(latex_table_xt, file = "./n_policy_box/Data/figures/w_maximization.tex", include.rownames=FALSE)
#---------------------------------------------------------------------------
# COMPARE A SAMPLE OF THE DIFFERENT MODELS THAT WOULD GET US TO THE 15%
# REDUCTION TARGET FROM CURRENT SITUATION (-15% by 2025, -45% by 2035)
unique(perfomances_dt4$policy)
policies_f <- c("fee_0", "ratio_9", "fee_8", 'nred_0.85', 'nred_0.7')
NMSs_f <- c('static', 'dynamic')
table_dt <- perfomances_dt4[policy %in% policies_f & NMS %in% NMSs_f]
table_dt[policy == 'fee_0' & NMS == 'static', order := 1]
table_dt[policy == 'fee_0' & NMS == 'dynamic', order := 2] #science
table_dt[policy == 'yr_0.9' & NMS == 'static', order := 3] #ecological model
table_dt[policy == 'ratio_9' & NMS == 'static', order := 4] #tax
table_dt[policy == 'fee_8' & NMS == 'static', order := 5] #fee
table_dt[policy == 'nred_0.85' & NMS == 'dynamic', order := 6] #ecological model + science
table_dt[policy == 'ratio_9' & NMS == 'dynamic', order := 7] #tax+science
table_dt[policy == 'fee_8' & NMS == 'dynamic', order := 8] #fee+science
table_dt[policy == 'nred_0.7' & NMS == 'dynamic', order := 9] #ecological model strong + science
table_dt <- table_dt[order(order)]
#---------------------------------------------------------------------------
# BEST OPTION 1: FARMERS FOCUSED. Given a 90% yield restriction, what is the largest N-leaching reduction we can achieve while hurting farmers the least,
# ignoring the externality cost (we are already sending less N, that's it my friends)
table_dt <- perfomances_dt4[ NMS %in% NMSs_f]
baselevel_n <- table_dt[policy == 'fee_0' & NMS == 1, L] # baseline N leaching under the current price ratio
baselevel_Y_corn <- table_dt[policy == 'fee_0' & NMS == 1, Y_corn ]
baselevel_nfert <- table_dt[policy == 'fee_0' & NMS == 1, N_fert ]
table_dt[,Y_corn_red := round((Y_corn / baselevel_Y_corn),2)]
table_dt <- table_dt[Y_corn_red > 0.9] #remove those that decrease yield too much
table_dt[,abatement := baselevel_n - L]
table_dt[,abat_prop := round((abatement)/ baselevel_n,2)]
target_abat_prop <- table_dt[order(-abat_prop)] %>% .[1:10, abat_prop] %>% mean()
table_dt <- table_dt[abat_prop > (target_abat_prop - 0.01)]
table_dt[,soc_benefits := P + G] #only farmers
table_dt <- table_dt[order(-soc_benefits)]
table_dt
#---------------------------------------------------------------------------
# BEST OPTION 2: considering a cost of the externality, what NMS would maximize the welfare of the society
NMSs_f <- c('static','dynamic','4')
table_dt <- perfomances_dt4[ NMS %in% NMSs_f]
baselevel_n <- table_dt[policy == 'fee_0' & NMS == 1, L]
table_dt[,abatement := baselevel_n - L]
table_dt[,abat_prop := round((abatement)/ baselevel_n,2)]
baselevel_Y_corn <- table_dt[policy == 'fee_0' & NMS == 1, Y_corn ]
table_dt[,Y_corn_red := round((Y_corn / baselevel_Y_corn),2)]
table_dt[,soc_benefits := P + G]
target_n <- baselevel_n * (1-0.45) #to accomplish the 45% reduction goal
table_dt[,externatility := ifelse((L - target_n) > 0, (L -target_n)*Pe_total,0)] #externalities are paid for each kg above the 45% target
table_dt[,soc_welfare := soc_benefits - externatility ]
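# In symbols: W = P + G - E, with E = max(0, L - 0.55 * L_baseline) * Pe_total,
# i.e. only leaching above the 45%-reduction target is charged at Pe_total ($/kg).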
table_dt <- table_dt[order(-soc_welfare)][1:40]
table_dt
baselevel_benefits <- table_dt[policy == 'fee_0' & NMS == 1, soc_benefits]
table_dt[,abat_cost := (soc_benefits - baselevel_benefits)/abatement]
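# Abatement cost: change in farm + government benefits relative to the
# no-policy baseline, per kg of N leaching abated ($/kg N).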
# DT[order(match(y, as.numeric(k)))]
10800000 * 0.404686 * 42 / 1e6 # millions to spend on RS and recovery
1 - (33/45)
#---------------------------------------------------------------------------
# Get RMSE
# install.packages('mlr')
library(mlr)
rmse_dt <- perfomances_dt[stringr::str_detect(string = perfomances_dt$policy, pattern = 'ratio'),
.(Y_corn = mean(Y_corn),
L = mean(L),
N_fert = mean(N_fert),
N_fert_min = min(N_fert),
N_fert_max = max(N_fert),
P = mean(P),
# cor = cor(N_fert_12, N_fert),
RMSE = mlr::measureRMSE(truth = N_fert_12, response = N_fert),
overpred = sum(overpred)/.N,
subpred = sum(subpred)/.N,
angulo = sum(angulo)/.N), by = .( NMS, policy)][order(-P)]
rmse_dt[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
rmse_dt[,policy_name := lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1])]
rmse_dt <- rmse_dt[ NMS %in% c('static','dynamic','3','4','5')]
plot_1 <- ggplot(rmse_dt)+
geom_line(aes(x = policy_val, y = RMSE, colour = NMS)) +
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/rmse.jpg", width = 5, height = 5,
units = 'in')
plot_1 <- ggplot(rmse_dt)+
geom_line(aes(x = policy_val, y = overpred, colour = NMS))+
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_2 <- ggplot(rmse_dt)+
geom_line(aes(x = policy_val, y = subpred, colour = NMS))+
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
grid.arrange(plot_1 , plot_2)
#---------------------------------------------------------------------------
# BOXPLOT OF EXPOST RATES
boxplot_dt <- perfomances_dt[stringr::str_detect(string = perfomances_dt$policy, pattern = 'ratio') & NMS ==12]
boxplot_dt[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
boxplot_dt[,policy_name := lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1])]
class(boxplot_dt$policy_val)
boxplot_dt[,policy_val := factor(policy_val, levels = 1:15)]
plot_1 <- ggplot(boxplot_dt)+
geom_boxplot(aes(x = policy_val, y = N_fert)) +
#scale_x_continuous(breaks = seq(1,15,1), labels = seq(1,15,1)) +
xlab('N:Corn price ratio')+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/boxplot_NMS12.jpg", width = 5, height = 5,
units = 'in')
plot_1 <- ggplot(boxplot_dt[policy_val %in% c(2,8,15)])+
geom_density(aes(x = N_fert, colour = policy_val)) +
#scale_x_continuous(breaks = seq(1,15,1), labels = seq(1,15,1)) +
xlab('N:Corn price ratio')+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/boxplot_NMS12.jpg", width = 5, height = 5,
units = 'in')
#---------------------------------------------------------------------------
#Value of information
# MAKE A MAP OF ECONOMIC VALUE OF INFORMATION SS
profits_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(1,2,4,5)]
ggplot(profits_dt)+
geom_line(aes(x = policy_val, y = P, colour = NMS))
value_long_dt <- data.table()
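# Each block below computes the value of one component (information, soil
# sampling, technology, or both) as a profit difference between two NMSs at
# each price ratio: P is negated for the reference NMS and the pair is summed,
# so P = P(with component) - P(without component).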
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(1,2)]
value_dt[NMS == 1, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_info']
value_long_dt <- rbind(value_long_dt, value_dt)
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(2,4)]
value_dt[NMS == 2, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_ss']
value_long_dt <- rbind(value_long_dt, value_dt)
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(2,3)]
value_dt[NMS == 2, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_tech']
value_long_dt <- rbind(value_long_dt, value_dt)
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(4,5)]
value_dt[NMS == 4, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_tech_ss']
value_long_dt <- rbind(value_long_dt, value_dt)
plot_1 <- ggplot(value_long_dt)+
geom_line(aes(x = policy_val, y = P, colour = variable), show.legend = F) +
# scale_y_continuous(breaks = seq(1,10,1), labels = seq(1,10,1)) +
facet_grid(variable~., scales = "free" ) +
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
ylab('Value ($/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/valueISST_by_ratio.jpg", width = 10, height = 10,
units = 'in')
#==========================================================================
# nred CHART #===============================================================
#==========================================================================
unique(perfomances_dt4$policy_name)
plot_dt <- perfomances_dt4[policy_name == 'nred' & NMS %in% c('1_ok','dynamic','3','4','5') ]
plot_dt[,soc_benefits := P + G]
target_n <- baselevel_n * (1-0.45) #to accomplish the 45% reduction goal
plot_dt[,externatility := ifelse((L - target_n) > 0, (L -target_n)*Pe_total,0)]
plot_dt[,soc_welfare := soc_benefits - externatility ]
plot_dt[order(-soc_welfare)][1:40]
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L', 'N_fert',
'P', 'externatility','soc_welfare'))
plot_dt_long[variable == 'N_fert', plot_name := 'a) N Rate']
plot_dt_long[variable == 'L', plot_name := 'b) N Leaching']
plot_dt_long[variable == 'Y_corn', plot_name := 'c) Yield']
plot_dt_long[variable == 'P', plot_name := 'd) Profits']
# plot_dt_long[variable == 'G', plot_name := 'e) Tax revenue']
plot_dt_long[variable == 'externatility', plot_name := 'f) externatility']
plot_dt_long[variable == 'soc_welfare', plot_name := 'g) soc_welfare']
plot_dt_long[order(variable)]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L', 'Y_corn')]
hline_dt <- data.table(plot_name = unique(plot_dt_long1$plot_name))
hline_dt[plot_name == 'c) Yield', y_line := baselevel_Y_corn*0.95]
hline_dt[plot_name == 'c) Yield', y_label := '95% baselevel']
plot_1 <- ggplot()+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = .95, y = y_line, label =y_label ))+
facet_grid(plot_name~., scales = "free") +
# scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N reduction level')+
geom_vline(xintercept = 1, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/nred_all_vars_part1.jpg", width = 10, height = 10,
units = 'in')
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L', 'Y_corn')]
plot_1 <- ggplot(plot_dt_long2)+
geom_line(aes(x = policy_val, y = value, colour = NMS)) +
facet_grid(plot_name~., scales = "free") +
# scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N reduction level')+
geom_vline(xintercept = 1, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/nred_all_vars_part2.jpg",
width = 10, height = 10,
units = 'in')
#==========================================================================
# FEE CHART
plot_dt <- perfomances_dt4[policy_name == 'fee' & NMS == 'static']
plot_dt1 <- melt(plot_dt, id.vars = 'policy_val', measure.vars = c('Y_corn', 'L', 'N_fert', 'P', 'G'))
plot_dt2 <- melt(plot_dt[policy_val == 6], id.vars = 'policy_val', measure.vars = c('Y_corn', 'L', 'N_fert', 'P', 'G'))
plot_dt3 <- merge(plot_dt1, plot_dt2[,.(variable, value_max = value)], by = c('variable'))
plot_dt3[, value_rel := value/value_max]
ggplot(plot_dt3) +
geom_line(aes(x = policy_val, y = value_rel, colour = variable))
#---------------------------------------------------------------------------
# cols <- c('cor', 'overpred', 'RMSE_MAE')
# rmse_dt[, (cols) := lapply(.SD, function(x) round(x, 2)), .SDcols = cols]
#
# cols <- c( 'RMSE', 'MAE', 'RMSE_MAE')
# rmse_dt[, (cols) := lapply(.SD, function(x) round(x,1)), .SDcols = cols]
# rmse_dt[,NMS := factor(NMS, levels= c('static', 'dynamic', '3','4', '5', '6', '7', '8', '9', '10', '11', '12'))]
# (p1 <- ggplot(rmse_dt, aes(x = NMS, y = RMSE))+
# geom_bar(stat="identity") )
# rmse_dt[,NMS := as.integer(NMS)]
# rmse_dt <- rmse_dt[order(NMS)]
# perfomances_dt4[,NMS := as.integer(NMS)]
latex_table_dt <- perfomances_dt4[,-'corn_avg_ha']
cols_1 <- c('Y_corn', 'L', 'leach_ext', 'N_fert', 'P')
latex_table_dt[,(cols_1) := round(.SD,1), .SDcols=cols_1]
latex_table_dt[,Y_corn := round(Y_corn, 0)]
setnames(latex_table_dt, c('Y_corn', 'L', 'leach_ext', 'N_fert', 'P'),
c('Yield', 'N leaching', 'N leach ext', 'N rate', 'Profits'))
library('xtable')
print(xtable(latex_table_dt, type = "latex", auto = TRUE, label = 'tab:state_output',
caption = 'Results for the State of Illinois.
Aggregated considering the area planted to corn for each cell, using eq. \ref{eq_I_state}'),
file = "./n_policy_box/Data/figures/state_output.tex", include.rownames=FALSE)
?print.xtable
?xtable
latex_table_dt[NMS==2, ]$Profits - latex_table_dt[NMS==1, ]$Profits
latex_table_dt[NMS==4, ]$Profits - latex_table_dt[NMS==1, ]$Profits #Value of infomation
latex_table_dt[NMS==5, ]$Profits - latex_table_dt[NMS==4, ]$Profits #Ex-ante Value of T
latex_table_dt[NMS==12, ]$Profits - latex_table_dt[NMS==11, ]$Profits #Ex-ost Value of T
latex_table_dt[NMS==2, ]$'N leaching' - latex_table_dt[NMS==1, ]$'N leaching'
latex_table_dt[NMS==4, ]$'N leaching' - latex_table_dt[NMS==1, ]$'N leaching' #EB of infomation
latex_table_dt[NMS==5, ]$'N leaching' - latex_table_dt[NMS==4, ]$'N leaching' #EB of T
-(latex_table_dt[NMS==4, 'N leaching'] - latex_table_dt[NMS==1, 'N leaching'])/latex_table_dt[NMS==1, 'N leaching'] #% Decrease in N leaching
-(latex_table_dt[NMS==4, 'N leach ext'] - latex_table_dt[NMS==1, 'N leach ext'])/latex_table_dt[NMS==1, 'N leach ext'] #% Decrease in N leaching extreme
-(latex_table_dt[NMS==4, 'N rate'] - latex_table_dt[NMS==1, 'N rate'])/latex_table_dt[NMS==1, 'N rate'] #% Decrease in N use
#=====================================================================================================================
# MRTN vs Minimum NMS
reg_NMS_stuff <- readRDS( "./n_policy_box/Data/files_rds/reg_NMS_stuff.rds")
NMS_minimum_regional <- reg_NMS_stuff$NMS_minimum_regional
rm(reg_NMS_stuff)
mrtn_dt <- data.table(region = c(3,3,2,2,1,1),
prev_crop = c(0,1,0,1,0,1),
MRTN_Rate_lbN_ac = c(161, 200, 175, 193,187, 192))
mrtn_dt <- mrtn_dt[prev_crop == 0]
mrtn_dt[,MRTN_rate := round(MRTN_Rate_lbN_ac * 1.12,0)] #1 pound per acre = 1.12 kilograms per hectare
NMS_minimum_regional2 <- merge(NMS_minimum_regional, mrtn_dt[,-c('prev_crop','MRTN_Rate_lbN_ac')], by = c('region'))
# NMS_minimum_regional2[,prev_crop := ifelse(prev_crop == 0, 'Soybean', 'Corn')]
NMS_minimum_regional2[,region := ifelse(region == 1, '1_South', ifelse(region == 2, '2_Central', '3_North'))]
setnames(NMS_minimum_regional2, 'eonr_pred', 'NMS1_rate')
NMS_minimum_regional2[order(-region)]
print.xtable(xtable(NMS_minimum_regional2, type = "latex", auto = TRUE,
label = 'tab:NMS1',
caption = 'NMS 1 predictions paired with MRTN recommendations for the same region'),
file = "./n_policy_box/Data/figures/NMS1.tex", include.rownames=FALSE)
#=====================================================================================================================
#-----------------------------------------VALUE OF INFORMATION--------------------------------------------------------
#=====================================================================================================================
# MAKE A MAP OF TOTAL LEACHING WITH NMS 1 AND REDUCTION WITH NMS 4
value_dt <- perfomances_dt4[NMS %in% c(1, 4), .(id_10, NMS, L, corn_avg_ha, leach_ext)]
value_dt[, L_cell := L * corn_avg_ha]
value_dt[, leach_ext_cell := leach_ext * corn_avg_ha]
value_dt <- dcast(value_dt, id_10 ~ NMS, value.var = c('L_cell', 'leach_ext_cell', 'L'))
value_dt[,L_4 := NULL]
# setnames(value_dt, c('static', '4'), c('L_m1', 'L_m2'))
#make one negative
value_dt[, eb_cell := L_cell_4-L_cell_1] #Environmental Benefit
value_sf <- merge(grid10_tiles_sf7, value_dt[,.(id_10, L_cell_1,eb_cell, leach_ext_cell_1, L_1)], by = 'id_10', all.x = T)
(p1 <- tm_shape(value_sf) + tm_polygons(c('corn_avg_ha'),
n =10,
title = c("Corn area (ha/cell)"),
style ="cont",
# border.col = 'black',
palette = "Greys")+
tm_layout(panel.labels = 'a)',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend =F,
legend.width = 1,
legend.position = c('left', 'bottom')))
tm_shape(value_sf) + tm_polygons(c('L_1')) #Show me: leaching by ha
breaks_n <- c(50000,100000,200000,300000,400000)
(p2 <- tm_shape(value_sf) + tm_polygons(c('L_cell_1'),
breaks = breaks_n,
title = c("N Leaching (kg/cell)"),
style ="cont",
colorNA = 'white',
palette = "Greys")+
tm_layout(panel.labels = 'b)',
legend.text.size = 0.7,
main.title.size = 1.2,
legend.position = c('left', 'bottom'))) #Leaching with MRTN (baseline_characterization_map)
#---------------------------------------------------------------------------------------------------
# MAKE A MAP OF RMSE (in which areas are the NMSs furthest off?) -----
rmse_map_dt <- perfomances_dt[NMS %in% c(1,4) ,.(RMSE = mlr::measureRMSE(truth = N_fert_12, response = N_fert),
MAE = mlr::measureMAE(truth = N_fert_12, response = N_fert),
subpred = sum(subpred)/.N,
overpred = sum(overpred)/.N), by = .(id_10, NMS)]
rmse_map_dt2 <- dcast(rmse_map_dt, id_10 ~ NMS, value.var = c('RMSE', 'MAE', 'subpred', 'overpred'))
rmse_map_sf <- merge(grid10_tiles_sf7, rmse_map_dt2, by = 'id_10', all.x = T)
tm_shape(rmse_map_sf) + tm_polygons(c('RMSE_1', 'RMSE_4', 'overpred_4', 'subpred_4'))
#---------------------------------------------------------------------------------------------------
# MAKE A MAP OF EONR for NMS 1, 4, 12 -----
rates_map_dt <- perfomances_dt3[NMS %in% c(1,4, 12)]
rates_map_dt2 <- dcast(rates_map_dt, id_10 ~ NMS, value.var = c('L','N_fert', 'P', 'Y_corn'))
rates_map_sf <- merge(grid10_tiles_sf7, rates_map_dt2, by = 'id_10', all.x = T)
rates_map_sf <- merge(grid10_tiles_sf7, rates_map_dt[,.(id_10, NMS, Y_corn, L, N_fert, P)], by = 'id_10', all = T)
empty_cells_sf <- rates_map_sf[is.na(rates_map_sf$NMS),]
rates_map_sf2 <- rates_map_sf[!is.na(rates_map_sf$NMS),]
for(NMS_n in c(1,4,12)){
rates_map_sf2 <- rbind(rates_map_sf2, empty_cells_sf %>% mutate(NMS = NMS_n))
}
rates_map_sf3 <- melt(rates_map_sf2, id.vars = c("id_10", "geometry", 'NMS'), measure.vars = c("Y_corn", "L", "N_fert", "P"))
rates_map_sf3 <- st_sf(rates_map_sf3)
nrow(grid10_tiles_sf7)*3
rates_map_sf3$NMS <- factor(rates_map_sf3$NMS, levels = c(1,4,12))
tm_shape(rates_map_sf3) + tm_polygons('value')+
tm_facets(c("NMS", "variable"), ncol = 3, free.scales= T, as.layers = T)
(p1 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'Y_corn',]) +
tm_polygons('value',
title = c("Y_corn (kg/ha)"),
palette = "Greys",
colorNA = 'white')+
tm_facets(c("NMS"), free.scales = F, as.layers = T) +
tm_layout(legend.outside = F))
(p2 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'L',]) +
tm_polygons('value',
n= 6,
title = c("N Leaching (kg/ha)"),
palette = "Greys",
colorNA = 'white')+
tm_facets(c("NMS"), free.scales = F, as.layers = T) +
tm_layout(legend.outside = F,
panel.label.height = 0
))
rates_map_sf4 <- rates_map_sf3
rates_map_sf4[ rates_map_sf4$variable == 'N_fert' & rates_map_sf4$NMS == 1 & rates_map_sf4$value == 180 &
!(is.na(rates_map_sf4$value)),]$value <- 0
rates_map_sf4[ rates_map_sf4$variable == 'N_fert' & rates_map_sf4$NMS == 1 & !(is.na(rates_map_sf4$value)),]$value %>% table()
(p3 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'N_fert',]) +
tm_polygons('value',
# n=10,
palette = "Greys",
title = c("N Fert (kg/ha)"),
colorNA = 'white') +
tm_facets(c("NMS"), free.scales = T) +
tm_layout(legend.outside = F,
panel.label.height = 0,
legend.position = c('left', 'bottom')
))
(p4 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'P',]) +
tm_polygons('value',
#n = 10,
title = c("P ($/ha)"),
palette = "Greys",
colorNA = 'white')+
tm_facets(c("NMS"), free.scales = F, as.layers = T) +
tm_layout(legend.outside = F,
panel.label.height = 0
))
tmap_save(tmap_arrange(p1,p2,p3,p4, ncol = 1), "./n_policy_box/Data/figures/appendix1_map.jpg",
width = 10, height = 15,
units = 'in')
#---------------------------------------------------------------------------------------------------
# MAKE A MAP OF ECONOMIC VALUE OF INFORMATION SS
value_dt <- perfomances_dt4[NMS %in% c(1,4)]
#make one negative
value_dt[NMS == 1, Y_corn := -Y_corn]
value_dt[NMS == 1, L := -L]
value_dt[NMS == 1, leach_ext := -leach_ext]
value_dt[NMS == 1, N_fert := -N_fert]
value_dt[NMS == 1, P := -P]
# Add values by group
value_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P)), by = .(id_10)]
# baseline_leaching_dt <- perfomances_dt3[NMS == 1, .(id_10, baseline_leach = L)]
# value_dt <- merge(value_dt, baseline_leaching_dt, by = 'id_10')
#
# ggplot(data = value_dt, aes(x = baseline_leach, y = P)) +
# geom_point() + geom_smooth()
value_sf <- merge(value_sf, value_dt[,.(id_10, L,P)], by = 'id_10', all.x = T)
hist(value_sf$eb_cell)
breaks_n <- c(-60000,-40000,-10000,0)
(p3 <- tm_shape(value_sf) + tm_polygons(c('eb_cell'),
# textNA="Not VR area",
title = expression(paste('EB'^'I-SS-ex ante', '(kg/cell)')),
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "-Greys",
colorNA = 'white', midpoint = -10000)+
tm_layout(panel.labels = expression(paste('c)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
                          legend.position = c('left', 'bottom'))) #Environmental benefit of NMS 4
value_sf <- dplyr::mutate(value_sf, P_r = round(P, 0))
breaks_n <- c(-20,0,10,20, 30,40)
# (p4 <- tm_shape(value_sf, bbox = st_bbox(value_sf)) + tm_polygons(c('P'),
# title = expression(paste('V'^'I-SS-ex ante', '($/ha)')),
# breaks = breaks_n,
# style ="cont", palette = "Greys", colorNA = 'white', midpoint = 0)+
# tm_layout(panel.labels = 'd)',
# legend.text.size = 0.7,
# main.title.size = 1.2,
# legend.position = c('left', 'bottom'))) #Economic Value
(p4 <- tm_shape(value_sf) + tm_polygons(c('P'),
# textNA="Not VR area",
title = expression(paste('V'^'I-SS-ex ante', '($/ha)')),
breaks = breaks_n,
border.col = 'black',
# style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 0)+
            tm_layout(panel.labels = expression(paste('d)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom'))) #Economic V of NMS 4
tmap_save(tmap_arrange(p1, p2, p3, p4, ncol = 2) , "./n_policy_box/Data/figures/information_characterization_map.jpg",
width = 10, height = 10,
units = 'in')
#=====================================================================================================================
#-----------------------------------------VALUE OF TECHNOLOGY--------------------------------------------------------
#=====================================================================================================================
#1) MAKE A MAP OF VALUE TECHNOLOGY (EX POST VALUE)
#Select the two NMSs of interest
value_dt <- perfomances_dt3[NMS %in% c(11,12)]
#make one negative
value_dt[NMS == 11, Y_corn := -Y_corn]
value_dt[NMS == 11, L := -L]
value_dt[NMS == 11, leach_ext := -leach_ext]
value_dt[NMS == 11, N_fert := -N_fert]
value_dt[NMS == 11, P := -P]
# Add values by group
value_post_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P)), by = .(id_10)]
value_post_dt[order(-P)]
value_sf <- merge(grid10_tiles_sf7, value_post_dt, by = 'id_10', all.x = T)
breaks_n <- c(min(value_post_dt$P), 5,10,15, 20, max(value_post_dt$P))
(p1 <- tm_shape(value_sf) + tm_polygons(c('P'),
# textNA="Not VR area",
#title = expression(paste('VR market', '($/cell)')),
title = expression(paste('V'^'T-ex post', '($/ha)')),
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 10)+
tm_layout(panel.labels = expression(paste('a)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom')))
#---------------------------------------------------------------------------
#2) MAKE A MAP OF VALUE TECHNOLOGY (EX ANTE VALUE)
value_dt <- perfomances_dt3[NMS %in% c(4,5)]
#make one negative
value_dt[NMS == 4, Y_corn := -Y_corn]
value_dt[NMS == 4, L := -L]
value_dt[NMS == 4, leach_ext := -leach_ext]
value_dt[NMS == 4, N_fert := -N_fert]
value_dt[NMS == 4, P := -P]
# Add values by group
value_ante_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P)), by = .(id_10)]
value_sf <- merge(grid10_tiles_sf7, value_ante_dt, by = 'id_10', all.x = T)
# value_sf$P[is.na(value_sf$P)] <- 0
# (p2 <- tm_shape(value_sf) + tm_polygons(c('P'), n =10)+
# tm_layout(legend.text.size = 0.7,
# main.title = paste('Ex-ante'),
# main.title.position = "center",
# main.title.size = 1.2))
breaks_n <- c(floor(min(value_sf$P, na.rm = T)),0, 5,ceiling(max(value_sf$P, na.rm = T)))
(p2 <- tm_shape(value_sf) + tm_polygons(c('P'),
# textNA="Not VR area",
#title = expression(paste('VR market', '($/cell)')),
title = expression(paste('V'^'T-ex ante', '($/ha)')),
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 5) +
tm_layout(panel.labels = expression(paste('b)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom')))
value_comp_dt <- merge(value_ante_dt[,.(id_10, P_ante = P)], value_post_dt[,.(id_10, P_post = P)], by = 'id_10')
ggplot(data = value_comp_dt, aes(x= P_post, y = P_ante)) +
geom_point() +
geom_smooth()
# tmap_mode("view")
#
# tm_basemap("OpenStreetMap.DE") +
# tm_shape(value_sf) + tm_polygons(c('P'), n =10)+
# tm_layout(legend.text.size = 0.7,
# main.title = paste('EX-ANTE'),
# main.title.position = "center",
# main.title.size = 1.2)
#
# ---------------------------------------------------------------------------
# 3) MAKE A MAP OF TECHNOLOGY MARKET CAP BY CELL
# MAKE A MAP OF VALUE TECHNOLOGY (EX ANTE VALUE)
value_dt <- perfomances_dt3[NMS %in% c(4,5)]
#make one negative
value_dt[NMS == 4, Y_corn := -Y_corn]
value_dt[NMS == 4, L := -L]
value_dt[NMS == 4, leach_ext := -leach_ext]
value_dt[NMS == 4, N_fert := -N_fert]
value_dt[NMS == 4, P := -P]
# Add values by group
value_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P),
corn_avg_ha = mean(corn_avg_ha)), by = .(id_10)]
value_dt2 <- value_dt[P > 2] #
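# Scale the per-ha ex ante value to a cell-level market value using the cell's average corn area;
# the P > 2 filter above presumably keeps only cells where that value exceeds an assumed per-ha VR service cost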
value_dt2[,mkt_value := P * corn_avg_ha]
value_dt2[,.(sum(mkt_value))]
value_sf <- merge(grid10_tiles_sf7, value_dt2[,.(id_10, mkt_value)], by = 'id_10', all.x = T) %>%
dplyr::mutate(NMS = 'ex_ante')
sum(value_sf$mkt_value, na.rm = TRUE)
# (p3 <- tm_shape(value_sf) +
# tm_polygons("mkt_value", textNA="Not VR area", title="VR Value (USD/Cell)", n = 10) +
# tm_layout(legend.text.size = 0.7,
# main.title = paste('VR Market Value'),
# main.title.position = "center",
# main.title.size = 1.2))
breaks_n <- c(0, 5000,10000,15000,20000, 30000,40000,50000)
(p3 <- tm_shape(value_sf) + tm_polygons(c('mkt_value'), textNA="Not VR area",
title = expression(paste('Mkt value ($/cell)')),
# title = "",
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 20000)+
              tm_layout(panel.labels = expression(paste('c)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom')))
tmap_arrange(p1, p2, p3, nrow = 1)
tmap_save(tmap_arrange(p1, p2, p3, nrow = 1), "./n_policy_box/Data/figures/techonology_characterization_map.jpg",
width = 10, height = 5, units = 'in')
st_write(value_sf, "./n_policy_box/Data/shapefiles/vr_cell_value_sf.shp", delete_dsn = TRUE)
#---------------------------------------------------------------------------
# 4) MAKE A MAP OF TECHNOLOGY MARKET CAP BY FIELD (for QGIS)
# AGGREGATE THE DATA TO CELL X Z LEVEL CONSIDERING THE AREA
names(perfomances_dt)
do_not_aggregate = c('id_10', 'id_field','region','NMS', 'tech')
do_aggregate = c("Y_corn", "L", "N_fert","P")
perfomances_field_dt <- aggregate_by_area(data_dt = perfomances_dt, variables = do_aggregate,
                         weight = 'area_ha', by_c = do_not_aggregate) #field x NMS level (mukey and z aggregated out, area-weighted)
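# aggregate_by_area() is presumably defined in the sourced gm_functions.R; its implementation is not shown in this file.
# Minimal sketch of the assumed behaviour (area-weighted means of 'variables' within each 'by_c' group, data.table syntax):
aggregate_by_area_sketch <- function(data_dt, variables, weight, by_c){
  data_dt[, lapply(.SD, function(x) sum(x * get(weight)) / sum(get(weight))),
          .SDcols = variables, by = by_c]
}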
# MAKE A MAP OF VALUE TECHNOLOGY (EX ANTE VALUE)
value_dt <- perfomances_field_dt[NMS %in% c(4,5)]
#make one negative
value_dt[NMS == 4, Y_corn := -Y_corn]
value_dt[NMS == 4, L := -L]
value_dt[NMS == 4, N_fert := -N_fert]
value_dt[NMS == 4, P := -P]
# Add values by group
value_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
N_fert = sum(N_fert),
n_policy = sum(P)), by = .(id_10, id_field)]
# value_dt <- value_dt[P > 3] #considering a cost of VR Cost of 3 usd
# value_dt[,mkt_value := P * corn_avg_ha]
# value_dt[,.(sum(mkt_value))]
value_sf <- merge(grid10_fields_sf2, value_dt[,.(id_10, id_field, n_policy)], by = c('id_10', 'id_field'), all.x = T) %>%
dplyr::mutate(NMS = 'ex_ante')
value_sf <- value_sf[!is.na(value_sf$n_policy),]
(p <- tm_shape(value_sf) +
tm_polygons("n_policy", textNA="Not VR area", title="VR Value (USD/Cell)", n = 10) +
tm_layout(legend.text.size = 0.7,
main.title = paste('VR Market Value'),
main.title.position = "center",
main.title.size = 1.2))
st_write(value_sf, "./n_policy_box/Data/shapefiles/vr_field_value_sf.shp", delete_dsn = TRUE)
#---------------------------------------------------------------------------
# MAKE A MAP OF THE BEST NMS
#Select the two NMSs of interest
best_NMS_dt <- perfomances_dt3[NMS %in% 1:10]
# best_NMS_dt[NMS == 5, P := P-3]
best_NMS_dt <- best_NMS_dt[,.SD[P==max(P)], by = id_10]
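# .SD[P == max(P)] keeps, for each cell, the NMS with the highest farm profit; the count below shows how often each NMS wins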
best_NMS_dt[,.N, by = .(NMS)][order(-N)]
value_sf <- merge(grid10_tiles_sf7, best_NMS_dt[,.(id_10, NMS)],
by = 'id_10', all = T)
value_sf <- dplyr::mutate(value_sf, NMS = ifelse(NMS <6, NA, NMS))
(p <- tm_shape(value_sf) + tm_polygons(c('NMS'), n =10)+
tm_text('NMS')+
tm_layout(legend.text.size = 0.7,
main.title = paste('Best NMS by cell'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/best_NMS_map.jpg")
#==============================================================================================================
#==============================================================================================================
#============================= YIELD CURVE EXAMPLE ==============================
#==============================================================================================================
#==============================================================================================================
yc_yearly_dt3 <- readRDS("./n_policy_box/Data/files_rds/yc_yearly_dt3.rds")
reg_NMS_stuff <- readRDS( "./n_policy_box/Data/files_rds/reg_NMS_stuff.rds")
training_z <- reg_NMS_stuff$training_z
rm(reg_NMS_stuff)
# tile_n = 10
cell_n = 765#755#763#765
mukey_n = 242997
testing_set_dt <- perfomances_dt[id_10 == cell_n]
testing_set_dt[,mean(Y_corn), by = mukey]
testing_set_plot <- testing_set_dt[mukey == mukey_n]
testing_set_plot[,NMS := factor(NMS, levels= c('static', 'dynamic', '3','4', '5', '6', '7', '8', '9', '10', '11', '12'))]
ic_field_plot <- yc_yearly_dt3[mukey == mukey_n & id_10 == cell_n ] %>% .[!z %in% training_z ]
# testing_set_plot[,z := gsub(pattern = 'A', replacement = 'z', x = z)]
# ic_field_plot[,z := gsub(pattern = 'A', replacement = 'z', x = z)]
# library(RColorBrewer)
# n <- 12
# qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
# col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
# colors_sample =sample(col_vector, n)
# pie(rep(1,n), colors_sample)
colors_sample=c( "#7570B3", "#FFED6F", "#666666", "#7FC97F", "#386CB0", "#B3B3B3", "#FFFFCC", "#A65628", "#F4CAE4", "#E41A1C", "#E6AB02", "black")
# Y plot with Y_corn at eonr
z_labels <- ic_field_plot[N_fert == max(ic_field_plot$N_fert), .(N_fert, Y_corn, z)][order(-Y_corn)]
z_labels[seq(1, nrow(z_labels), by = 2), N_fert := N_fert - 50]
ggplot() +
geom_point(data = testing_set_plot, aes(x = N_fert, y = Y_corn, colour = NMS, size = NMS)) +
geom_line(data = ic_field_plot, aes(x = N_fert, y = Y_corn, group=z), show.legend = FALSE) +
scale_size_manual(values=c(rep(2, 11), 4)) +
scale_color_manual(values=colors_sample)+
ylab('Yield (kg/ha)')+
xlab('N rate (kg/ha)')+
geom_text(data = z_labels, aes(x = N_fert, y = Y_corn, label = z))+
theme_bw()+
theme(panel.grid = element_blank())
# (plot_n1 <- ggplot() +
# geom_point(data = testing_set_plot[z == z_n & NMS == '12' & prev_crop == 1],
# aes(x = N_fert, y = Y_corn , size = NMS)) +
# geom_line(data = ic_field_plot[z == z_n & prev_crop == 1], aes(x = N_fert, y = Y_corn, linetype = "Yield")) +
# geom_point(data = testing_set_plot[z == z_n & NMS == '12' & prev_crop == 1],
# aes(x = N_fert, y = L*150, size = NMS)) +
# geom_line(data = ic_field_plot[z == z_n & prev_crop == 1], aes(x = N_fert, y = L*150, linetype = "N Leaching")) +
# # scale_size_manual(values=c(rep(2, 11), 4)) +
# ## scale_color_manual(values=colors_sample)+
# labs(y = 'Yield (kg/ha)',
# x = 'N rate (kg/ha)',
# colour = "Parameter")+
# scale_y_continuous(sec.axis = sec_axis(~./150, name = "N leaching (kg/ha)"))+
# scale_linetype_manual(values = c("dashed", "solid"))+
# scale_size_manual(values = 4,
# labels = expression(paste('EONR'^'ex post')))+
# #geom_text(data = z_labels, aes(x = N_fert, y = Y_corn, label = z))+
# theme_bw()+
# guides(linetype = guide_legend(order=2),
# size = guide_legend(order=1)) +
# theme(legend.title = element_blank(),
# legend.position = c(0.85, 0.15),
# panel.grid = element_blank())+
# annotate("text", x=300, y=11500, label= "a)", size = 10) )
ic_field_plot$z
ggplot(ic_field_plot, aes(x= N_fert, y = L, color = z)) +
geom_line()
z_n = 23
ic_field_plot2 <- melt(ic_field_plot[z == z_n ], id.vars = 'N_fert', measure.vars = c('Y_corn', 'L'))
ic_field_plot2[variable == 'L', value := value * 150]
testing_set_plot2 <- melt(testing_set_plot[z == z_n & NMS == '12'], id.vars = 'N_fert', measure.vars = c('Y_corn', 'L'))
testing_set_plot2[variable == 'L', value := value * 150]
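# Leaching is multiplied by 150 so it can share the yield axis; sec_axis(~./150) in the plot below rescales the right-hand axis back to kg/ha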
(plot_n1 <- ggplot() +
geom_line(data = ic_field_plot2, aes(x = N_fert, y = value, linetype = variable, colour = variable))+
scale_color_manual(values=c('black', 'black', 'black'),
labels = c(bquote (paste('EONR'^'ex post')), 'Yield', 'N leaching'))+
geom_point(data = testing_set_plot2, aes(x = N_fert, y = value, colour = 'EONR')) +
guides( linetype = FALSE,
colour = guide_legend(override.aes = list(shape = c(16, NA, NA),
linetype = c("blank", "solid", "dotted"))))+
labs(y = 'Yield (kg/ha)',
x = 'N rate (kg/ha)',
colour = "Variable") +
scale_y_continuous(sec.axis = sec_axis(~./150, name = "N leaching (kg/ha)"))+
theme_bw() +
theme(legend.title = element_blank(),
legend.position = c(0.85, 0.15),
panel.grid = element_blank())+
annotate("text", x=300, y=15000, label= "a)", size = 10) )
summary(testing_set_plot$Y_corn)
exclude_z = testing_set_plot[Y_corn == min(Y_corn)]$z[1]
(plot_n2 <- ggplot() +
geom_line(data = ic_field_plot[ !(z == exclude_z) ], aes(x = N_fert, y = Y_corn, group = z), show.legend = F) +
geom_point(data = testing_set_plot[NMS == '12' & !(z == exclude_z) ], aes(x = N_fert, y = Y_corn , shape = 'EONR'), size = 2) +
scale_shape_manual( values = 16,
labels = c(bquote (paste('EONR'^'ex post')))) +
# scale_color_manual(values=colors_sample)+
# scale_size_manual(values=c(rep(2, 11), 4)) +
## scale_color_manual(values=colors_sample)+
labs(y = 'Yield (kg/ha)',
x = 'N rate (kg/ha)')+
#scale_y_continuous(sec.axis = sec_axis(~./150, name = "N Leaching (kg/ha)"))+
#scale_linetype_manual(values = c("solid", "dashed"))+
#geom_text(data = z_labels, aes(x = N_fert, y = Y_corn, label = z))+
theme_bw()+
theme(legend.title = element_blank(),
legend.position = c(0.85, 0.15),
panel.grid = element_blank())+
guides(color = FALSE) + #remove legend for color
annotate("text", x=300, y=15000, label= "b)", size = 10))
summary(testing_set_plot[NMS == '12']$N_fert)
perfomances_champaign_dt <- perfomances_dt[id_10 %in% unique(dplyr::filter(grid10_tiles_sf7, county_name == 'Champaign')$id_10) & NMS == 12]
(plot_n3 <- ggplot() +
geom_density(data = perfomances_champaign_dt, aes( x= N_fert, y = ..density..), alpha = 0.4)+
labs(x = expression(paste('EONR'^'ex post', '(kg/ha)')))+
theme_bw()+
theme(panel.grid = element_blank(),
legend.position = c(0.85, .7))+
scale_fill_manual(name = "Previous crop", labels = c("Soybean", "Corn"), values = c('#696969', '#D3D3D3'))+
scale_linetype_manual(values=c("twodash", "dotted"))+
guides(linetype = FALSE)+
annotate("text", x=300, y=0.009, label= "c)", size = 10) )
(plot_n4 <- ggplot() +
geom_density(data = perfomances_champaign_dt, aes( x= L, y = ..density..), alpha = 0.4)+
labs(x = 'N leaching (kg/ha)')+
theme_bw()+
xlim(c(0, 150))+
theme(panel.grid = element_blank(),
legend.position = c(0.85, 0.7))+
scale_fill_manual(name = "Previous crop", labels = c("Soybean", "Corn"), values = c('#696969', '#D3D3D3'))+
scale_linetype_manual(values=c("twodash", "dotted"))+
guides(linetype = FALSE)+
annotate("text", x=140, y=0.025, label= "d)", size = 10) )
grid.arrange(plot_n1, plot_n2, nrow = 1)
grid.arrange(plot_n1, plot_n2, plot_n3, plot_n4, nrow = 2)
ggsave(grid.arrange(plot_n1, plot_n2, nrow = 1),
filename = "./n_policy_box/Data/figures/yield_curve_example.jpg", width = 10, height = 4, units = 'in')
grid.arrange(grid.arrange(plot_n1, plot_n2, nrow=1), plot_n3, nrow = 2)
#---------------------------------------------------------------------------
# MAKE A MAP OF THE BEST NMS
#Select the two NMSs of interest
best_NMS_dt <- perfomances_dt3[NMS %in% 1:10]
best_NMS_dt <- best_NMS_dt[,.SD[P==max(P)], by = id_10]
best_NMS_dt <- merge(best_NMS_dt, perfomances_dt3[NMS == 1, .(id_10, P_1 = P)], by = 'id_10')
best_NMS_dt[,P_improve := P-P_1]
best_NMS_dt[,.N, by = .(region, NMS)]
best_NMS_sf <- merge(grid10_tiles_sf7, best_NMS_dt, by = 'id_10', all.x = T)
(p <- tm_shape(best_NMS_sf) + tm_polygons(c('NMS','P_improve'), n =10)+
tm_text('NMS')+
tm_layout(legend.text.size = 0.7,
main.title = paste('VALUE OF TECHNOLOGY AND INFORMATION'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/value_t_i.jpg")
#---------------------------------------------------------------------------
# PLOT WEIRD CASE
yc_yearly_dt <- readRDS('./n_policy_box/Data/files_rds/yc_yearly_dt.rds')
length(unique(yc_yearly_dt$id_10))
yc_yearly_dt3 <- readRDS('./n_policy_box/Data/files_rds/yc_yearly_dt3.rds')
length(unique(yc_yearly_dt3$id_10))
length(unique(full_fields_dt2$id_10))
id_10_n <- n_policy_expost_dt[order(-L2)][1,]$id_10
yc_yearly_dt <- readRDS('./n_policy_box/Data/files_rds/yc_yearly_dt.rds')
one_field_dt <- data.table(grid10_soils_sf6[grid10_soils_sf6$id_10 == id_10_n,])
mukey_n <- one_field_dt[area_ha == max(area_ha)][1,] %>% .[,.(id_10, mukey)]
ic_field_dt <- filter_dt_in_dt(yc_yearly_dt , filter_dt = mukey_n, return_table = TRUE)
ic_field_dt[,prev_crop := ifelse(prev_crop == 'MSM', 0, 1)]
ic_field_dt[, P := Y_corn * Pc - N_fert * Pn]
performance_set_dt <- filter_dt_in_dt(perfomances_dt , filter_dt = mukey_n, return_table = TRUE)
performance_set_dt[,NMS := as.character(NMS)]
performance_set_dt[prev_crop == 0 & NMS != 11, .N, by = .(NMS, z)]
# P plot with P at eonr
(plot_n <- ggplot() +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS != 11], aes(x = N_fert, y = P, colour = NMS)) +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS == 11], aes(x = N_fert, y = P), size = 3, show.legend = FALSE) +
geom_line(data = ic_field_dt[prev_crop == 0], aes(x = N_fert, y = P, group=interaction(z)), show.legend = FALSE) +
ggtitle(paste('P plot with P at eonr', mukey_n$mukey)))
ggsave(plot_n, filename = "./n_policy_box/Data/figures/yield_curve_example.jpg")
# Lo3 plot with leaching at eonr
(plot_n <- ggplot() +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS != 11], aes(x = N_fert, y = L2, colour = NMS)) +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS == 11], aes(x = N_fert, y = L2), size = 3, show.legend = FALSE) +
geom_line(data = ic_field_dt[prev_crop == 0], aes(x = N_fert, y = L2, group=interaction(z)), show.legend = FALSE) +
ggtitle(paste('Lo3 plot with leaching at eonr', mukey_n$mukey)))
ggsave(plot_n, filename = "./n_policy_box/Data/figures/leaching_curve_example.jpg")
#---------------------------------------------------------------------------
# CALCULATE STATE TOTAL VARIABLES
perfomances_dt4 <- copy(perfomances_dt4)
do_aggregate = c("Y_corn", "L2", "leach_ext", "N_fert","P")
perfomances_dt4[,(do_aggregate) := (.SD * corn_avg_ha/1000), .SDcols=do_aggregate]
state_total_production_dt <- perfomances_dt4[, lapply(.SD, function(x) sum(x)), .SDcols= do_aggregate]
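# Per-ha indicators are scaled by each cell's corn area (divided by 1000, so the summed state totals are in thousands of units);
# the NASS figures below serve as a sanity check on the state totals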
2.2 * 10^9 * 25.4 /1000 #IL production in tons https://www.nass.usda.G/Statistics_by_State/Illinois/Publications/Current_News_Release/2018/20180112-IL_Annual_Crop_Production.pdf
10.95 * 10^6 *0.4046#IL harvested area in ha
201 * 25.4/0.4046 #IL Yield
#---------------------------------------------------------------------------
# SELECT THE WORST YEAR FOR LEACHING AND SEE THE BENEFIT THERE
all_perfomances_dt2 <- aggregate_by_area(data_dt = all_perfomances_dt, variables = do_aggregate,
weight = 'area_ha', by_c = c('id_10', 'z'))
extreme_year_dt <- all_perfomances_dt2[, .SD[ nL1 == max( nL1)], by = .(id_10)]
extreme_year_dt[, lapply(.SD, function(x) mean(x)), .SDcols= do_aggregate]
eonr_mukey_dt <- yc_yearly_dt[, .SD[ P == max( P)], by = .(id_10, mukey, z)]
1804.744/1814.46
all_perfomances_dt[id_10 == 5]
all_perfomances_dt[id_10 == 5, .(Y_corn_1 = sum(Y_corn_1), area_ha = sum(area_ha))]
# MAKE A DT
economics_field_dt <- merge(n_regional_noss_dt2, n_regional_ss_dt2) %>% merge(eonr_ur_dt2) %>% merge(eonr_vr_dt2)
economics_field_dt[,area_ha := sum(area_dt$area_ha)]
economics_field_dt[,val_ss_ha := (P_reg_ss - P_reg_no_ss)/area_ha]
economics_field_dt[,val_info_ha := (P_ur - P_reg_no_ss)/area_ha]
economics_field_dt[,val_tech_ha := (P_vr - P_ur)/area_ha]
economics_field_dt[,nval_ss_ha := (nleach_reg_ss - nleach_reg_no_ss)/area_ha]
economics_field_dt[,nval_info_ha := (nleach_ur - nleach_reg_no_ss)/area_ha]
economics_field_dt[,nval_tech_ha := (nleach_vr - nleach_ur)/area_ha]
cols <- names(economics_field_dt)[sapply(economics_field_dt,is.numeric)]
economics_field_dt[,(cols) := round(.SD,3), .SDcols=cols]
economics_field_dt <- cbind(fields_seq_tmp, economics_field_dt)
economics_field_dt[,mukey_count := nrow(area_dt)]
economics_ls[[j]] <- economics_field_dt
}
economics_dt <- rbindlist(economics_ls)
saveRDS(economics_dt, './n_policy_box/Data/files_rds/economics_dt.rds')
#---------------------------------------------------------------------------
# MAKE A MAP OF VALUE OF I AND T
val_map_dt <- economics_dt[,.(val_ss_ha = mean(val_ss_ha),
val_info_ha = mean(val_info_ha),
val_tech_ha = mean(val_tech_ha)), by = id_10]
grid10_value_sf <- left_join(grid10_tiles_sf, val_map_dt, by = 'id_10')
(p <- tm_shape(grid10_value_sf) + tm_polygons(c('val_ss_ha','val_info_ha', 'val_tech_ha'))+
tm_layout(legend.text.size = 0.7,
main.title = paste('VALUE OF TECHNOLOGY AND INFORMATION'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/value_t_i.jpg")
#---------------------------------------------------------------------------
# MAKE A MAP OF VALUE OF I AND T FOR LEACHING
val_map_dt <- economics_dt[,.(nval_ss_ha = mean(nval_ss_ha),
nval_info_ha = mean(nval_info_ha),
nval_tech_ha = mean(nval_tech_ha)), by = id_10]
grid10_value_sf <- left_join(grid10_tiles_sf, val_map_dt, by = 'id_10')
(p <- tm_shape(grid10_value_sf) + tm_polygons(c('nval_ss_ha','nval_info_ha', 'nval_tech_ha'))+
tm_layout(legend.text.size = 0.7,
main.title = paste('VALUE OF TECHNOLOGY AND INFORMATION'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/nvalue_t_i.jpg")
#---------------------------------------------------------------------------
# MAKE A MAP OF PROFITS
economics_dt
prof_map_dt <- economics_dt[,P_ur_ha := P_ur / area_ha]
prof_map_dt <- prof_map_dt[,.(P_ur_ha = mean(P_ur_ha)), by = id_10]
grid10_value_sf <- left_join(grid10_value_sf, prof_map_dt, by = 'id_10')
(p <- tm_shape(grid10_value_sf) + tm_polygons('P_ur_ha')+
tm_layout(legend.text.size = 0.7,
main.title = paste('AVERAGE UR PROFITS $/HA'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/profits.jpg")
#---------------------------------------------------------------------------
# MAKE BOXPLOT
dat.m <- melt(economics_dt,id.vars= c('id_10', 'id_field', 'z'), measure.vars=c('val_ss_ha','val_info_ha', 'val_tech_ha'))
(p <- ggplot(dat.m) +
geom_boxplot(aes(x=variable, y=value, color=variable)) +
# scale_color_discrete(name = "REGION") +
ggtitle('Value of information and technology')+
theme_bw() +
theme(axis.text=element_text(size=12),
axis.title=element_text(size=14),
#legend.position='bottom',
panel.grid = element_blank(),
strip.background = element_blank(),
legend.text.align = 0,
legend.position = "none",
legend.title = element_blank(),
strip.text = element_blank()))
ggsave(p, filename = "./n_policy_box/Data/figures/value_boxplot.jpg")
# --- /Codes/old_codes/5b_value_and_maps_policy_jul9.R | no_license | germanmandrini/n_policy | R | 79,433 bytes ---
# setwd('C:/Users/germa/Box Sync/My_Documents') #dell
# setwd("/home/germanm2")
rm(list=ls())
setwd('C:/Users/germanm2/Box Sync/My_Documents')#CPSC
codes_folder <-'C:/Users/germanm2/Documents'#CPSC
setwd('~')#Server
codes_folder <-'~' #Server
source('./Codes_useful/R.libraries.R')
# library(scales)
source('./Codes_useful/gm_functions.R')
source(paste0(codes_folder, '/n_policy_git/Codes/parameters.R'))
"~/n_policy_git/Codes/parameters.R"
# source('./Codes_useful/gm_functions.R')
if(FALSE){
grid10_tiles_sf7 <- readRDS("./n_policy_box/Data/Grid/grid10_tiles_sf7.rds")
grid10_soils_dt5 <- readRDS("./n_policy_box/Data/Grid/grid10_soils_dt5.rds") %>% data.table()
grid10_fields_sf2 <- readRDS('./n_policy_box/Data/Grid/grid10_fields_sf2.rds')
perfomances_dt <- readRDS("./n_policy_box/Data/files_rds/perfomances_dt.rds")
perfomances_dt[,.N, .(id_10, id_field)] %>% .[,.N, id_10] %>% .[,N] %>% table() #number of fields by cell
  perfomances_dt[,.N, .(id_10, id_field, mukey, policy, NMS)] %>% .[,N] %>% table() #number of z by mukey. Should be all equal
  perfomances_dt[,.N, .(policy, NMS)] %>% .[,N] %>% table() #number of treatments (policy sublevels x NMS). Should be all equal
table(perfomances_dt$NMS) #obs by NMS. SHould be all equal
perfomances_dt[NMS == 'dynamic', NMS := 'dynamic1']
summary(perfomances_dt[,.(area_ha = sum(area_ha)), by = .(id_10, id_field, policy, NMS, z)]$area_ha)
#-------------------------------------------------------------------------
#Make profits relative to the zero rate
if(FALSE){
yc_yearly_dt3 <- readRDS("./n_policy_box/Data/files_rds/yc_yearly_dt3.rds")
zero_dt <- yc_yearly_dt3[N_fert == 10, .(id_10, mukey, z, N_fert_zero = N_fert, L_zero = L, Y_corn_zero = Y_corn)]
zero_dt[,.N, .(id_10, mukey, z)]$N %>% table()
perfomances_dt[,policy_name := as.character(lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1]))]
perfomances_dt[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
perfomances_dt <- merge(perfomances_dt, zero_dt, by = c('id_10', 'mukey', 'z'))
perfomances_dt[policy_name == 'ratio', P_zero := Y_corn_zero * Pc - N_fert_zero * policy_val * Pc]
perfomances_dt[policy_name == 'fee', P_zero := Y_corn_zero * Pc - N_fert_zero * Pn - L_zero * policy_val]
perfomances_dt[policy_name == 'nred', P_zero := Y_corn_zero * Pc - N_fert_zero * Pn]
perfomances_dt[,P := P - P_zero]
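    # P is now the partial profit gain over the lowest simulated rate (N_fert = 10), computed with each policy's own prices/fees,
    # so profits are comparable across policy levels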
}
#-------------------------------------------------------------------------
# AGGREGATE THE DATA TO FIELD X Z LEVEL CONSIDERING THE AREA
names(perfomances_dt)
do_not_aggregate = c("policy",'region','id_10', 'NMS', 'z', 'id_field')
do_aggregate = c("Y_corn", 'Y_soy', 'L1', 'L2', "L", "N_fert","P", "G")
if(FALSE){
perfomances_dt2 <- aggregate_by_area(data_dt = perfomances_dt, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #field x z level (mukey is out)
}else{
split_list <- split(perfomances_dt,perfomances_dt$z)
split_list_output <- list()
for(split_list_n in split_list){
split_list_output[[unique(split_list_n$z)]] <- aggregate_by_area(data_dt = split_list_n, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #field x z level (mukey is out)
}
perfomances_dt2 <- rbindlist(split_list_output)
}
str(perfomances_dt2)
perfomances_dt2 <- perfomances_dt2[order(id_10, z,id_field, NMS)]
saveRDS(perfomances_dt2, "./n_policy_box/Data/files_rds/perfomances_dt2.rds") #for 5d_pdf.R
perfomances_dt2 <- readRDS("./n_policy_box/Data/files_rds/perfomances_dt2.rds")
#-------------------------------------------------------------------------
# AGGREGATE THE DATA TO CELL X Z LEVEL CONSIDERING THE AREA
names(perfomances_dt)
do_not_aggregate = c('policy','id_10', 'region','NMS', 'z')
do_aggregate = c("Y_corn", 'Y_soy', 'L1', 'L2', "L", "N_fert","P", 'G')
if(FALSE){
#First aggregate without z so then we can get the leach_extreme
perfomances_dt3 <- aggregate_by_area(data_dt = perfomances_dt2, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #cell x z level (field is out)
}else{
split_list <- split(perfomances_dt2,perfomances_dt2$region)
split_list_output <- list()
for(split_list_n in split_list){
split_list_output[[unique(split_list_n$region)]] <- aggregate_by_area(data_dt = split_list_n, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #cell x z level (field is out)
}
perfomances_dt3 <- rbindlist(split_list_output)
}
saveRDS(perfomances_dt3, "./n_policy_box/Data/files_rds/perfomances_dt3.rds") #for 5e_validation.R
#---------------------------------------------------------------------------
# AGGREGATE AGAIN CONSIDERING THE CORN PRODUCTION OF THE CELL
grid10_tiles_dt <- data.table(grid10_tiles_sf7)[,.N, .(id_tile,id_10, corn_avg_ha,corn5_tile )][,-'N']
summary(grid10_tiles_dt$corn_avg_ha)
perfomances_dt3[,id_10 := as.integer(id_10)]
perfomances_dt3 <- merge(perfomances_dt3, grid10_tiles_dt, by = 'id_10')
perfomances_dt4 <- aggregate_by_area(data_dt = perfomances_dt3, variables = c("Y_corn", 'L1', 'L2', "L", "N_fert","P", "G"),
weight = 'corn_avg_ha', by_c = c('policy','NMS')) #state level, weighted by corn_ha
# ---------
# Make leaching relative to baselevel
baselevel_L <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', L]
perfomances_dt4[,L_change := round((L / baselevel_L) - 1,3)*100 ]
#---------------------------------------------------------------------------
# Some cleaning
perfomances_dt4[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
perfomances_dt4[,policy_name := as.character(lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1]))]
colsToDelete <- c('L1', 'L2', 'corn_avg_ha')
set(perfomances_dt4,, colsToDelete, NULL)
# ---------
#Externalities
# perfomances_dt4[,E := L * 0.4 * Pe_med]
# perfomances_dt4[,E := L * Pe_total]
#---------
  #remove yield modifications of more than 5%
  baselevel_Y_corn <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', Y_corn ]
  perfomances_dt4[,Y_corn_change := Y_corn/baselevel_Y_corn]
  perfomances_dt4 <- perfomances_dt4[Y_corn_change >=0.95 & Y_corn_change <= 1.05, -'Y_corn_change'] #remove yield modifications of more than 5%
#---------
#remove ratios that are subsidized
perfomances_dt4 <- perfomances_dt4[!(policy_name == 'ratio' & policy_val < 5)]
#---------
#Calculate the subsidies
baselevel_P <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', P ]
# perfomances_dt4[,S := P - baselevel_P]
perfomances_dt4[,net_balance := P - baselevel_P + G]
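  # net_balance = change in farm profit relative to the base level plus government collections (farmers + government, externality excluded)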
perfomances_dt4[policy == 'ratio_5']
# perfomances_dt4[,ag_cost := P + G]
saveRDS(perfomances_dt4, "./n_policy_box/Data/files_rds/perfomances_dt4.rds")
}
perfomances_dt4 <- readRDS("./n_policy_box/Data/files_rds/perfomances_dt4.rds")
perfomances_dt4[policy %in% c('ratio_5', 'fee_0', 'nred_1') & NMS == 'static']
W_peak_dt <- perfomances_dt4[,.SD[W == max(W)], by = .(policy_name, NMS)] #peak in W
saveRDS(W_peak_dt, "./n_policy_box/Data/files_rds/W_peak_dt.rds")
#==========================================================================
# RATIO CHART 2
plot_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c('static', 'dynamic1') ]
# Total G collections in IL
if(FALSE){
IL_corn_area_ha = 5179976
plot_dt[policy == 'ratio_12.5' & NMS == 1, G] * IL_corn_area_ha / 1000000 #million in IL
}
# Elasticity of Demand Point-Slope Formula: https://pressbooks.bccampus.ca/uvicecon103/chapter/4-2-elasticity/
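# i.e. elasticity = (% change in N fertilizer demanded) / (% change in N price), evaluated here between price ratios 5 and 6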
if(FALSE){
elasticity_dt <- plot_dt[NMS == 1 & policy_val %in% c(4,5,6)]
d_quantity <- (elasticity_dt[policy_val == 6, N_fert] - elasticity_dt[policy_val == 5, N_fert])/
(elasticity_dt[policy_val == 5, N_fert])
d_price <- (Pc*6 - Pc*5)/
(Pc*5)
d_quantity/d_price
}
# current_ratio_dt <- perfomances_dt4[policy == 'fee_0' & NMS %in% c('static','dynamic','3','4','5')]
# current_ratio_dt[,policy_name := 'ratio']
# current_ratio_dt[,policy_val := Pn/Pc]
# current_ratio_dt[,policy := paste('ratio', round(Pn/Pc,1), sep = '_')]
# plot_dt <- rbind(plot_dt, current_ratio_dt)
baselevel_L <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', L]
baselevel_Y_corn <- perfomances_dt4[policy == 'ratio_5' & NMS == 'static', Y_corn ]
# plot_dt[,L := round((L / baselevel_L) - 1,2)*100 ]
# plot_dt[,.SD[W == max(W)], by = NMS] #peak in W
ggplot(plot_dt) + geom_line(aes(x = policy_val, y = P, color = NMS))
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L_change', 'N_fert',
'P', 'G', 'net_balance'))
plot_dt_long[,variable_labels := factor(variable, levels = c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'),
labels = c(expression("Fertilizer (N kg " * ha^"-1" *yr^"-1"* ")"),
expression("L ("*'%'*" change)"),
expression("Corn Yield (kg N " * ha^"-1" *yr^"-1"* ")"),
expression("Farm profits ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Gov. collections ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Net balance ($ " * ha^"-1" * yr^"-1"* ")")))]
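# The expression() labels above are rendered by labeller = label_parsed in facet_wrap below, so units show as superscripts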
# plot_dt_long[variable == 'N_fert', plot_name := 'a) N Rate kg/ha']
# plot_dt_long[variable == 'L', plot_name := 'b) L (% change)']
# plot_dt_long[variable == 'Y_corn', plot_name := 'c) Yield kg/ha']
# plot_dt_long[variable == 'P', plot_name := 'd) Profits $/ha']
# plot_dt_long[variable == 'G', plot_name := 'e) G $/ha']
# plot_dt_long[variable == 'E', plot_name := 'f) E $/ha']
# plot_dt_long[variable == 'W', plot_name := 'g) W $/ha']
# plot_dt_long[order(variable)]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L_change', 'Y_corn')]
#use https://ggplot2.tidyverse.org/reference/labellers.html
hline_dt <- data.table(unique(plot_dt_long1[,.(variable, variable_labels)]))
hline_dt[variable == 'Y_corn', y_line := baselevel_Y_corn*0.95]
hline_dt[variable == 'Y_corn', y_label := '95% baselevel']
#----
# ADD letters outside plot (go down) https://stackoverflow.com/questions/12409960/ggplot2-annotate-outside-of-plot
ann_text <- plot_dt_long[,.(x = min(policy_val)-5,
value = max(value)), by = .(variable, variable_labels)]
#Sort in the right order
ann_text <- ann_text[match(c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'), ann_text$variable),]
ann_text[,lab := c("a)", "b)", "c)", "d)", "e)", "f)")]
#----
(plot_1 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = 16, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long1$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
scale_x_continuous(breaks = seq(5,20,1), labels = seq(5,20,1)) +
xlab('N:Corn price ratio')+
# geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position = "none",
plot.margin = unit(c(2,1,1,1), "lines")))
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L_change', 'Y_corn')]
(plot_2 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long2, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
# geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
# geom_text(data = hline_dt, aes(x = 18, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long2$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
scale_x_continuous(breaks = seq(5,20,1), labels = seq(5,20,1)) +
xlab('N:Corn price ratio')+
# geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
plot.margin = unit(c(2,1,1,1), "lines"))
)
grid.arrange(plot_1, plot_2, nrow = 1)
ggsave(plot = grid.arrange(plot_1, plot_2, nrow = 1),
filename = "./n_policy_box/Data/figures/ratio_all_vars.pdf", width = 979/300*3, height = 1042/300*3,
units = 'in')
#==========================================================================
# LRED CHART
plot_dt <- perfomances_dt4[policy_name == 'nred' & NMS %in% c('static','dynamic1')][order(NMS, -policy_val)]
# plot_dt[,L_red := round(1-(L / baselevel_L),2)*100 ]
# plot_dt[,L := round((L / baselevel_L) - 1,2)*100 ]
plot_dt[,policy_val := (1-policy_val )*100]
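# nred levels are stored as the retained fraction (e.g. nred_0.85); (1 - value)*100 converts them to the % reduction target used on the x axis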
ggplot(plot_dt) + geom_line(aes(x = policy_val, y = L_change, color = NMS))
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L_change', 'N_fert',
'P', 'G', 'net_balance')) %>% data.table()
plot_dt_long[,variable_labels := factor(variable, levels = c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'),
labels = c(expression("Fertilizer (N kg " * ha^"-1" *yr^"-1"* ")"),
expression("L ("*'%'*" change)"),
expression("Corn Yield (kg N " * ha^"-1" *yr^"-1"* ")"),
expression("Farm profits ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Gov. collections ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Net balance ($ " * ha^"-1" * yr^"-1"* ")")))]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L_change', 'Y_corn')]
hline_dt <- data.table(unique(plot_dt_long1[,.(variable, variable_labels)]))
hline_dt[variable == 'Y_corn', y_line := baselevel_Y_corn*0.95]
hline_dt[variable == 'Y_corn', y_label := '95% baselevel']
# hline_dt[variable == 'L_change', y_line := 0]
#----
# ADD letters outside plot (go down) https://stackoverflow.com/questions/12409960/ggplot2-annotate-outside-of-plot
ann_text <- plot_dt_long[,.(x = min(policy_val)-5,
value = max(value)), by = .(variable, variable_labels)]
#Sort in the right order
ann_text <- ann_text[match(c('N_fert', 'L_change', 'Y_corn', 'P', 'net_balance'), ann_text$variable),]
ann_text[,lab := c("a)", "b)", "c)", "d)", "e)")]
ann_text
#----
(plot_1 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = 10, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long1$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('L reduction target (%)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position = "none",
plot.margin = unit(c(2,1,1,1), "lines"))
)
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L_change', 'Y_corn', 'G')]
(plot_2 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long2, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
# geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
# geom_text(data = hline_dt, aes(x = 18, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long2$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(plot_dt_long$policy_val), max(plot_dt_long$policy_val)), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('L reduction target (%)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
plot.margin = unit(c(2,1,1,1), "lines"))
)
grid.arrange(plot_1, plot_2, nrow = 1)
ggsave(plot = grid.arrange(plot_1, plot_2, nrow = 1),
filename = "./n_policy_box/Data/figures/Lred_all_vars.jpg", width = 979/300*3, height = 1042/300*3,
units = 'in')
#==========================================================================
# FEE CHART
plot_dt <- perfomances_dt4[policy_name == 'fee' & NMS %in% c('static','dynamic1') ]
plot_dt[,L := round((L / baselevel_L) - 1,2)*100 ]
ggplot(plot_dt) + geom_line(aes(x = policy_val, y = L, color = NMS))
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L_change', 'N_fert',
'P', 'G', 'net_balance'))
plot_dt_long[,variable_labels := factor(variable, levels = c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'),
labels = c(expression("Fertilizer (N kg " * ha^"-1" *yr^"-1"* ")"),
expression("L ("*'%'*" change)"),
expression("Corn Yield (kg N " * ha^"-1" *yr^"-1"* ")"),
expression("Farm profits ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Gov. collections ($ " * ha^"-1" * yr^"-1"* ")"),
expression("Net balance ($ " * ha^"-1" * yr^"-1"* ")")))]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L_change', 'Y_corn')]
hline_dt <- data.table(unique(plot_dt_long1[,.(variable, variable_labels)]))
hline_dt[variable == 'Y_corn', y_line := baselevel_Y_corn*0.95]
hline_dt[variable == 'Y_corn', y_label := '95% baselevel']
# hline_dt[variable == 'L_change', y_line := 0]
#----
# ADD letters outside plot (go down) https://stackoverflow.com/questions/12409960/ggplot2-annotate-outside-of-plot
ann_text <- plot_dt_long[,.(x = min(policy_val)-4,
value = max(value)), by = .(variable, variable_labels)]
#Sort in the right order
ann_text <- ann_text[match(c('N_fert', 'L_change', 'Y_corn', 'P', 'G', 'net_balance'), ann_text$variable),]
ann_text[,lab := c("a)", "b)", "c)", "d)", "e)", "f)")]
#----
(plot_1 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = 16, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long1$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(unique(plot_dt$policy_val)), max(unique(plot_dt$policy_val))), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('Fee on L ($/kg)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
legend.position = "none",
plot.margin = unit(c(2,1,1,1), "lines"))
)
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L_change', 'Y_corn')]
(plot_2 <- ggplot() +
# geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
# scale_colour_manual(values = c("black", "brown"))+
geom_line(data = plot_dt_long2, aes(x = policy_val, y = value, color = NMS)) +
# scale_linetype_manual(values = c("dashed", "solid"))+
# geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
# geom_text(data = hline_dt, aes(x = 18, y = y_line, label =y_label ))+
scale_color_manual(values=c("royalblue2", "tomato3"))+
geom_text(data = ann_text[variable %in% unique(plot_dt_long2$variable)], aes(y = value, x = x, label = lab),
hjust = 0, size = 8) +
coord_cartesian(xlim = c(min(unique(plot_dt$policy_val)), max(unique(plot_dt$policy_val))), # This focuses the x-axis on the range of interest
clip = 'off') + # This keeps the labels from disappearing
facet_wrap(variable_labels~.,
ncol = 1,
labeller = label_parsed,
scales="free",
strip.position = "left") +
# scale_x_continuous(breaks = sort(unique(plot_dt$policy_val)), labels = sort(unique(plot_dt$policy_val))) +
xlab('Fee on L ($/kg)')+
theme_bw()+
theme(panel.grid = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
panel.spacing = unit(1.5, "lines"),
# axis.title.x=element_blank(),
axis.title.y=element_blank(),
plot.margin = unit(c(2,1,1,1), "lines"))
)
grid.arrange(plot_1, plot_2, nrow = 1)
ggsave(plot = grid.arrange(plot_1, plot_2, nrow = 1),
filename = "./n_policy_box/Data/figures/fee_all_vars.jpg", width = 979/300*3, height = 1042/300*3,
units = 'in')
#==========================================================================
# W MAXIMIZATION
w_dt <- perfomances_dt4[policy_name != 'subs',.SD[W == max(W)], by = .(policy_name, NMS)][order(-W)] #peak in W
w_dt[,policy_val := as.numeric(policy_val )]
w_dt[policy_name == 'nred', policy_val := (1-as.numeric(policy_val ))*100]
w_dt[policy_name == 'nred', policy_name := 'target']
baselevel_dt <- perfomances_dt4[policy == 'ratio_6' & NMS == 1]
baselevel_dt[,policy_name := 'base-level']
baselevel_dt[,policy_val := '-']
baselevel_dt
w_dt <- rbind(w_dt ,baselevel_dt) %>% .[order(-W)]
latex_table_dt <- w_dt[,c('policy_name', 'policy_val','NMS', 'N_fert', 'Y_corn', 'L_change', 'P', 'G', 'E', 'W')]
cols_1 <- c('N_fert', 'Y_corn', 'L_change', 'P', 'G', 'E', 'W')
latex_table_dt[,(cols_1) := round(.SD,1), .SDcols=cols_1]
latex_table_dt[,Y_corn := round(Y_corn, 0)]
setnames(latex_table_dt, c('policy_name', 'policy_val', 'N_fert', 'Y_corn', 'L_change', 'P', 'G', 'E', 'W'),
c('policy', 'level', 'N rate (kg/ha)', 'Y_corn (kg/ha)', 'L change (%)', 'Profits ($/ha)', 'G ($/ha)', 'E ($/ha)', 'W ($/ha)'))
library('xtable') #attach xtable before it is used below
latex_table_xt <- xtable(latex_table_dt, type = "latex", auto = TRUE, label = 'tab:w_maximization',
                      caption = 'Indicators for the level that maximized W for each policy and NMS combination, ordered by their W.
                      The base-level system is also shown as a benchmark')
#make L in %. Change N red name to L reduction target
print(latex_table_xt, file = "./n_policy_box/Data/figures/w_maximization.tex", include.rownames=FALSE)
#---------------------------------------------------------------------------
# COMPARE A SAMPLE OF THE DIFFERENT MODELS THAT WOULD GET US TO THE 15%
# REDUCTION TARGET FROM CURRENT SITUATION (-15% by 2025, -45% by 2035)
unique(perfomances_dt4$policy)
policies_f <- c("fee_0", "ratio_9", "fee_8", 'nred_0.85', 'nred_0.7')
NMSs_f <- c('static', 'dynamic')
table_dt <- perfomances_dt4[policy %in% policies_f & NMS %in% NMSs_f]
table_dt[policy == 'fee_0' & NMS == 'static', order := 1]
table_dt[policy == 'fee_0' & NMS == 'dynamic', order := 2] #science
table_dt[policy == 'yr_0.9' & NMS == 'static', order := 3] #ecological model
table_dt[policy == 'ratio_9' & NMS == 'static', order := 4] #tax
table_dt[policy == 'fee_8' & NMS == 'static', order := 5] #fee
table_dt[policy == 'nred_0.85' & NMS == 'dynamic', order := 6] #ecological model + science
table_dt[policy == 'ratio_9' & NMS == 'dynamic', order := 7] #tax+science
table_dt[policy == 'fee_8' & NMS == 'dynamic', order := 8] #fee+science
table_dt[policy == 'nred_0.7' & NMS == 'dynamic', order := 9] #ecological model strong + science
table_dt <- table_dt[order(order)]
#---------------------------------------------------------------------------
# BEST OPTION 1: FARMERS FOCUSED. Given a yield restriction of 90%, what is the largest N leaching reduction we can get while hurting farmers the least,
# ignoring the externality cost (we are already sending less N, that's it my friends)
table_dt <- perfomances_dt4[ NMS %in% NMSs_f]
baselevel_Lleach <- table_dt[policy == 'fee_0' & NMS == 1, L]
baselevel_Y_corn <- table_dt[policy == 'fee_0' & NMS == 1, Y_corn ]
baselevel_nfert <- table_dt[policy == 'fee_0' & NMS == 1, N_fert ]
table_dt[,Y_corn_red := round((Y_corn / baselevel_Y_corn),2)]
table_dt <- table_dt[Y_corn_red > 0.9] #remove those that decrease yield too much
table_dt[,abatement := baselevel_Lleach - L]
table_dt[,abat_prop := round((abatement)/ baselevel_Lleach,2)]
target_abat_prop <- table_dt[order(-abat_prop)] %>% .[1:10, abat_prop] %>% mean()
table_dt <- table_dt[abat_prop > (target_abat_prop - 0.01)]
table_dt[,soc_benefits := P + G] #farm profits plus government collections (externality not counted)
table_dt <- table_dt[order(-soc_benefits)]
table_dt
#---------------------------------------------------------------------------
# BEST OPTION 2: considering a cost of the externality, what NMS would maximize the welfare of society
NMSs_f <- c('static','dynamic','4')
table_dt <- perfomances_dt4[ NMS %in% NMSs_f]
baselevel_n <- table_dt[policy == 'fee_0' & NMS == 1, L]
table_dt[,abatement := baselevel_n - L]
table_dt[,abat_prop := round((abatement)/ baselevel_n,2)]
baselevel_Y_corn <- table_dt[policy == 'fee_0' & NMS == 1, Y_corn ]
table_dt[,Y_corn_red := round((Y_corn / baselevel_Y_corn),2)]
table_dt[,soc_benefits := P + G]
target_n <- baselevel_n * (1-0.45) #to accomplish the 45% reduction goal
table_dt[,externatility := ifelse((L - target_n) > 0, (L -target_n)*Pe_total,0)] #externalities are pay for each kg above the 45% target
table_dt[,soc_welfare := soc_benefits - externatility ]
table_dt <- table_dt[order(-soc_welfare)][1:40]
table_dt
baselevel_benefits <- table_dt[policy == 'fee_0' & NMS == 1, soc_benefits]
table_dt[,abat_cost := (soc_benefits - baselevel_benefits)/abatement]
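# abat_cost: change in farm + government benefits per kg of N leaching abated relative to the base level (negative values are a cost per kg abated)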
# DT[order(match(y, as.numeric(k)))]
10800000 * 0.404686 * 42 / 1e6 #millions to spend in RS and recovering
1-(33/45)
#---------------------------------------------------------------------------
# Get RMSE
# install.packages('mlr')
library(mlr)
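# RMSE below takes the ex post EONR (NMS 12 rate, N_fert_12) as the truth for each observation; overpred/subpred are the shares of over- and under-predictions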
rmse_dt <- perfomances_dt[stringr::str_detect(string = perfomances_dt$policy, pattern = 'ratio'),
.(Y_corn = mean(Y_corn),
L = mean(L),
N_fert = mean(N_fert),
N_fert_min = min(N_fert),
N_fert_max = max(N_fert),
P = mean(P),
# cor = cor(N_fert_12, N_fert),
RMSE = mlr::measureRMSE(truth = N_fert_12, response = N_fert),
overpred = sum(overpred)/.N,
subpred = sum(subpred)/.N,
angulo = sum(angulo)/.N), by = .( NMS, policy)][order(-P)]
rmse_dt[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
rmse_dt[,policy_name := lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1])]
rmse_dt <- rmse_dt[ NMS %in% c('static','dynamic','3','4','5')]
plot_1 <- ggplot(rmse_dt)+
geom_line(aes(x = policy_val, y = RMSE, colour = NMS)) +
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/rmse.jpg", width = 5, height = 5,
units = 'in')
plot_1 <- ggplot(rmse_dt)+
geom_line(aes(x = policy_val, y = overpred, colour = NMS))+
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_2 <- ggplot(rmse_dt)+
geom_line(aes(x = policy_val, y = subpred, colour = NMS))+
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
grid.arrange(plot_1 , plot_2)
#---------------------------------------------------------------------------
# BOXPLOT OF EXPOST RATES
boxplot_dt <- perfomances_dt[stringr::str_detect(string = perfomances_dt$policy, pattern = 'ratio') & NMS ==12]
boxplot_dt[,policy_val := as.numeric(str_extract(policy,pattern = '[0-9.]+'))]
boxplot_dt[,policy_name := lapply(policy, function(x) str_split(x, pattern = '_')[[1]][1])]
class(boxplot_dt$policy_val)
boxplot_dt[,policy_val := factor(policy_val, levels = 1:15)]
plot_1 <- ggplot(boxplot_dt)+
geom_boxplot(aes(x = policy_val, y = N_fert)) +
#scale_x_continuous(breaks = seq(1,15,1), labels = seq(1,15,1)) +
xlab('N:Corn price ratio')+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/boxplot_NMS12.jpg", width = 5, height = 5,
units = 'in')
plot_1 <- ggplot(boxplot_dt[policy_val %in% c(2,8,15)])+
geom_density(aes(x = N_fert, colour = policy_val)) +
#scale_x_continuous(breaks = seq(1,15,1), labels = seq(1,15,1)) +
xlab('N:Corn price ratio')+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/density_NMS12.jpg", width = 5, height = 5, #filename assumed: a distinct name so the boxplot saved above is not overwritten
       units = 'in')
#---------------------------------------------------------------------------
#Value of information
# MAKE A MAP OF ECONOMIC VALUE OF INFORMATION SS
profits_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(1,2,4,5)]
ggplot(profits_dt)+
geom_line(aes(x = policy_val, y = P, colour = NMS))
value_long_dt <- data.table()
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(1,2)]
value_dt[NMS == 1, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_info']
value_long_dt <- rbind(value_long_dt, value_dt)
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(2,4)]
value_dt[NMS == 2, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_ss']
value_long_dt <- rbind(value_long_dt, value_dt)
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(2,3)]
value_dt[NMS == 2, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_tech']
value_long_dt <- rbind(value_long_dt, value_dt)
value_dt <- perfomances_dt4[policy_name == 'ratio' & NMS %in% c(4,5)]
value_dt[NMS == 4, P := -P]
value_dt <- value_dt[, .(P = sum(P)), by = .(policy_val)]
value_dt[, variable := 'val_tech_ss']
value_long_dt <- rbind(value_long_dt, value_dt)
plot_1 <- ggplot(value_long_dt)+
geom_line(aes(x = policy_val, y = P, colour = variable), show.legend = F) +
# scale_y_continuous(breaks = seq(1,10,1), labels = seq(1,10,1)) +
facet_grid(variable~., scales = "free" ) +
scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N:Corn price ratio')+
geom_vline(xintercept = Pn/Pc, linetype = 'dashed', color = 'grey', size = 1)+
ylab('Value ($/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/valueISST_by_ratio.jpg", width = 10, height = 10,
units = 'in')
#==========================================================================
# nred CHART #===============================================================
#==========================================================================
unique(perfomances_dt4$policy_name)
plot_dt <- perfomances_dt4[policy_name == 'nred' & NMS %in% c('1_ok','dynamic','3','4','5') ]
plot_dt[,soc_benefits := P + G]
target_n <- baselevel_n * (1-0.45) #to accomplish the 45% reduction goal
plot_dt[,externatility := ifelse((L - target_n) > 0, (L -target_n)*Pe_total,0)]
plot_dt[,soc_welfare := soc_benefits - externatility ]
plot_dt[order(-soc_welfare)][1:40]
plot_dt_long <- melt(plot_dt, id.vars = c('policy_val', 'NMS'), measure.vars = c('Y_corn', 'L', 'N_fert',
'P', 'externatility','soc_welfare'))
plot_dt_long[variable == 'N_fert', plot_name := 'a) N Rate']
plot_dt_long[variable == 'L', plot_name := 'b) N Leaching']
plot_dt_long[variable == 'Y_corn', plot_name := 'c) Yield']
plot_dt_long[variable == 'P', plot_name := 'd) Profits']
# plot_dt_long[variable == 'G', plot_name := 'e) Tax revenue']
plot_dt_long[variable == 'externatility', plot_name := 'f) externality']
plot_dt_long[variable == 'soc_welfare', plot_name := 'g) soc_welfare']
plot_dt_long[order(variable)]
plot_dt_long1 <- plot_dt_long[variable %in% c('N_fert', 'L', 'Y_corn')]
hline_dt <- data.table(plot_name = unique(plot_dt_long1$plot_name))
hline_dt[plot_name == 'c) Yield', y_line := baselevel_Y_corn*0.95]
hline_dt[plot_name == 'c) Yield', y_label := '95% baselevel']
plot_1 <- ggplot()+
geom_line(data = plot_dt_long1, aes(x = policy_val, y = value, colour = NMS)) +
geom_hline(data = hline_dt, aes(yintercept = y_line), linetype = 'dashed', color = 'grey', size = 1)+
geom_text(data = hline_dt, aes(x = .95, y = y_line, label =y_label ))+
facet_grid(plot_name~., scales = "free") +
# scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N reduction level')+
geom_vline(xintercept = 1, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/nred_all_vars_part1.jpg", width = 10, height = 10,
units = 'in')
plot_dt_long2 <- plot_dt_long[!variable %in% c('N_fert', 'L', 'Y_corn')]
plot_1 <- ggplot(plot_dt_long2)+
geom_line(aes(x = policy_val, y = value, colour = NMS)) +
facet_grid(plot_name~., scales = "free") +
# scale_x_continuous(breaks = seq(1,20,1), labels = seq(1,20,1)) +
xlab('N reduction level')+
geom_vline(xintercept = 1, linetype = 'dashed', color = 'grey', size = 1)+
# ylab('Yield (kg/ha)')+
theme_bw()+
theme(panel.grid = element_blank())
plot_1
ggsave(plot = plot_1, filename = "./n_policy_box/Data/figures/nred_all_vars_part2.jpg",
width = 10, height = 10,
units = 'in')
#==========================================================================
# FEE CHART
plot_dt <- perfomances_dt4[policy_name == 'fee' & NMS == 'static']
plot_dt1 <- melt(plot_dt, id.vars = 'policy_val', measure.vars = c('Y_corn', 'L', 'N_fert', 'P', 'G'))
plot_dt2 <- melt(plot_dt[policy_val == 6], id.vars = 'policy_val', measure.vars = c('Y_corn', 'L', 'N_fert', 'P', 'G'))
plot_dt3 <- merge(plot_dt1, plot_dt2[,.(variable, value_max = value)], by = c('variable'))
plot_dt3[, value_rel := value/value_max]
ggplot(plot_dt3) +
geom_line(aes(x = policy_val, y = value_rel, colour = variable))
#---------------------------------------------------------------------------
# cols <- c('cor', 'overpred', 'RMSE_MAE')
# rmse_dt[, (cols) := lapply(.SD, function(x) round(x, 2)), .SDcols = cols]
#
# cols <- c( 'RMSE', 'MAE', 'RMSE_MAE')
# rmse_dt[, (cols) := lapply(.SD, function(x) round(x,1)), .SDcols = cols]
# rmse_dt[,NMS := factor(NMS, levels= c('static', 'dynamic', '3','4', '5', '6', '7', '8', '9', '10', '11', '12'))]
# (p1 <- ggplot(rmse_dt, aes(x = NMS, y = RMSE))+
# geom_bar(stat="identity") )
# rmse_dt[,NMS := as.integer(NMS)]
# rmse_dt <- rmse_dt[order(NMS)]
# perfomances_dt4[,NMS := as.integer(NMS)]
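# Build the state-level summary table: drop the corn area column, round the
# outcome variables, rename them to readable labels and export as a LaTeX table
# via xtable.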
latex_table_dt <- perfomances_dt4[,-'corn_avg_ha']
cols_1 <- c('Y_corn', 'L', 'leach_ext', 'N_fert', 'P')
latex_table_dt[,(cols_1) := round(.SD,1), .SDcols=cols_1]
latex_table_dt[,Y_corn := round(Y_corn, 0)]
setnames(latex_table_dt, c('Y_corn', 'L', 'leach_ext', 'N_fert', 'P'),
c('Yield', 'N leaching', 'N leach ext', 'N rate', 'Profits'))
library('xtable')
print(xtable(latex_table_dt, type = "latex", auto = TRUE, label = 'tab:state_output',
caption = 'Results for the State of Illinois.
Aggregated considering the area planted to corn for each cell, using eq. \ref{eq_I_state}'),
file = "./n_policy_box/Data/figures/state_output.tex", include.rownames=FALSE)
?print.xtable
?xtable
latex_table_dt[NMS==2, ]$Profits - latex_table_dt[NMS==1, ]$Profits
latex_table_dt[NMS==4, ]$Profits - latex_table_dt[NMS==1, ]$Profits #Value of information
latex_table_dt[NMS==5, ]$Profits - latex_table_dt[NMS==4, ]$Profits #Ex-ante Value of T
latex_table_dt[NMS==12, ]$Profits - latex_table_dt[NMS==11, ]$Profits #Ex-post Value of T
latex_table_dt[NMS==2, ]$'N leaching' - latex_table_dt[NMS==1, ]$'N leaching'
latex_table_dt[NMS==4, ]$'N leaching' - latex_table_dt[NMS==1, ]$'N leaching' #EB of information
latex_table_dt[NMS==5, ]$'N leaching' - latex_table_dt[NMS==4, ]$'N leaching' #EB of T
-(latex_table_dt[NMS==4, 'N leaching'] - latex_table_dt[NMS==1, 'N leaching'])/latex_table_dt[NMS==1, 'N leaching'] #% Decrease in N leaching
-(latex_table_dt[NMS==4, 'N leach ext'] - latex_table_dt[NMS==1, 'N leach ext'])/latex_table_dt[NMS==1, 'N leach ext'] #% Decrease in N leaching extreme
-(latex_table_dt[NMS==4, 'N rate'] - latex_table_dt[NMS==1, 'N rate'])/latex_table_dt[NMS==1, 'N rate'] #% Decrease in N use
#=====================================================================================================================
# MRTN vs Minimum NMS
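# Compare the regional minimum-NMS predictions with published MRTN rates for
# the soybean previous-crop case; MRTN values in lb N/ac are converted to kg/ha
# (x 1.12) before merging by region.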
reg_NMS_stuff <- readRDS( "./n_policy_box/Data/files_rds/reg_NMS_stuff.rds")
NMS_minimum_regional <- reg_NMS_stuff$NMS_minimum_regional
rm(reg_NMS_stuff)
mrtn_dt <- data.table(region = c(3,3,2,2,1,1),
prev_crop = c(0,1,0,1,0,1),
MRTN_Rate_lbN_ac = c(161, 200, 175, 193,187, 192))
mrtn_dt <- mrtn_dt[prev_crop == 0]
mrtn_dt[,MRTN_rate := round(MRTN_Rate_lbN_ac * 1.12,0)] #1 pound per acre = 1.12 kilograms per hectare
NMS_minimum_regional2 <- merge(NMS_minimum_regional, mrtn_dt[,-c('prev_crop','MRTN_Rate_lbN_ac')], by = c('region'))
# NMS_minimum_regional2[,prev_crop := ifelse(prev_crop == 0, 'Soybean', 'Corn')]
NMS_minimum_regional2[,region := ifelse(region == 1, '1_South', ifelse(region == 2, '2_Central', '3_North'))]
setnames(NMS_minimum_regional2, 'eonr_pred', 'NMS1_rate')
NMS_minimum_regional2[order(-region)]
print.xtable(xtable(NMS_minimum_regional2, type = "latex", auto = TRUE,
label = 'tab:NMS1',
caption = 'NMS 1 predictions paired with MRTN recommendations for the same region'),
file = "./n_policy_box/Data/figures/NMS1.tex", include.rownames=FALSE)
#=====================================================================================================================
#-----------------------------------------VALUE OF INFORMATION--------------------------------------------------------
#=====================================================================================================================
# MAKE A MAP OF TOTAL LEACHING WITH NMS 1 AND REDUCTION WITH NMS 4
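# Per-hectare leaching is scaled to cell totals using the corn area of each
# cell; eb_cell is the change in total leaching from NMS 1 to NMS 4, so
# negative values indicate a reduction.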
value_dt <- perfomances_dt4[NMS %in% c(1, 4), .(id_10, NMS, L, corn_avg_ha, leach_ext)]
value_dt[, L_cell := L * corn_avg_ha]
value_dt[, leach_ext_cell := leach_ext * corn_avg_ha]
value_dt <- dcast(value_dt, id_10 ~ NMS, value.var = c('L_cell', 'leach_ext_cell', 'L'))
value_dt[,L_4 := NULL]
# setnames(value_dt, c('static', '4'), c('L_m1', 'L_m2'))
#make one negative
value_dt[, eb_cell := L_cell_4-L_cell_1] #Environmental Benefit
value_sf <- merge(grid10_tiles_sf7, value_dt[,.(id_10, L_cell_1,eb_cell, leach_ext_cell_1, L_1)], by = 'id_10', all.x = T)
(p1 <- tm_shape(value_sf) + tm_polygons(c('corn_avg_ha'),
n =10,
title = c("Corn area (ha/cell)"),
style ="cont",
# border.col = 'black',
palette = "Greys")+
tm_layout(panel.labels = 'a)',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend =F,
legend.width = 1,
legend.position = c('left', 'bottom')))
tm_shape(value_sf) + tm_polygons(c('L_1')) #Show me: leaching by ha
breaks_n <- c(50000,100000,200000,300000,400000)
(p2 <- tm_shape(value_sf) + tm_polygons(c('L_cell_1'),
breaks = breaks_n,
title = c("N Leaching (kg/cell)"),
style ="cont",
colorNA = 'white',
palette = "Greys")+
tm_layout(panel.labels = 'b)',
legend.text.size = 0.7,
main.title.size = 1.2,
legend.position = c('left', 'bottom'))) #Leaching with MRTN (baseline_characterization_map)
#---------------------------------------------------------------------------------------------------
# MAKE A MAP OF RMSE (In what areas are the NMSs more off?) -----
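# For each cell, NMS 1 and NMS 4 recommendations are scored against the ex-post
# rates of NMS 12 (N_fert_12): RMSE and MAE of the recommended N rate plus the
# share of under- and over-predictions.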
rmse_map_dt <- perfomances_dt[NMS %in% c(1,4) ,.(RMSE = mlr::measureRMSE(truth = N_fert_12, response = N_fert),
MAE = mlr::measureMAE(truth = N_fert_12, response = N_fert),
subpred = sum(subpred)/.N,
overpred = sum(overpred)/.N), by = .(id_10, NMS)]
rmse_map_dt2 <- dcast(rmse_map_dt, id_10 ~ NMS, value.var = c('RMSE', 'MAE', 'subpred', 'overpred'))
rmse_map_sf <- merge(grid10_tiles_sf7, rmse_map_dt2, by = 'id_10', all.x = T)
tm_shape(rmse_map_sf) + tm_polygons(c('RMSE_1', 'RMSE_4', 'overpred_4', 'subpred_4'))
#---------------------------------------------------------------------------------------------------
# MAKE A MAP OF EONR for NMS 1, 4, 12 -----
rates_map_dt <- perfomances_dt3[NMS %in% c(1,4, 12)]
rates_map_dt2 <- dcast(rates_map_dt, id_10 ~ NMS, value.var = c('L','N_fert', 'P', 'Y_corn'))
rates_map_sf <- merge(grid10_tiles_sf7, rates_map_dt2, by = 'id_10', all.x = T)
rates_map_sf <- merge(grid10_tiles_sf7, rates_map_dt[,.(id_10, NMS, Y_corn, L, N_fert, P)], by = 'id_10', all = T)
empty_cells_sf <- rates_map_sf[is.na(rates_map_sf$NMS),]
rates_map_sf2 <- rates_map_sf[!is.na(rates_map_sf$NMS),]
for(NMS_n in c(1,4,12)){
rates_map_sf2 <- rbind(rates_map_sf2, empty_cells_sf %>% mutate(NMS = NMS_n))
}
rates_map_sf3 <- melt(rates_map_sf2, id.vars = c("id_10", "geometry", 'NMS'), measure.vars = c("Y_corn", "L", "N_fert", "P"))
rates_map_sf3 <- st_sf(rates_map_sf3)
nrow(grid10_tiles_sf7)*3
rates_map_sf3$NMS <- factor(rates_map_sf3$NMS, levels = c(1,4,12))
tm_shape(rates_map_sf3) + tm_polygons('value')+
tm_facets(c("NMS", "variable"), ncol = 3, free.scales= T, as.layers = T)
(p1 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'Y_corn',]) +
tm_polygons('value',
title = c("Y_corn (kg/ha)"),
palette = "Greys",
colorNA = 'white')+
tm_facets(c("NMS"), free.scales = F, as.layers = T) +
tm_layout(legend.outside = F))
(p2 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'L',]) +
tm_polygons('value',
n= 6,
title = c("N Leaching (kg/ha)"),
palette = "Greys",
colorNA = 'white')+
tm_facets(c("NMS"), free.scales = F, as.layers = T) +
tm_layout(legend.outside = F,
panel.label.height = 0
))
rates_map_sf4 <- rates_map_sf3
rates_map_sf4[ rates_map_sf4$variable == 'N_fert' & rates_map_sf4$NMS == 1 & rates_map_sf4$value == 180 &
!(is.na(rates_map_sf4$value)),]$value <- 0
rates_map_sf4[ rates_map_sf4$variable == 'N_fert' & rates_map_sf4$NMS == 1 & !(is.na(rates_map_sf4$value)),]$value %>% table()
(p3 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'N_fert',]) +
tm_polygons('value',
# n=10,
palette = "Greys",
title = c("N Fert (kg/ha)"),
colorNA = 'white') +
tm_facets(c("NMS"), free.scales = T) +
tm_layout(legend.outside = F,
panel.label.height = 0,
legend.position = c('left', 'bottom')
))
(p4 <- tm_shape(rates_map_sf3[rates_map_sf3$variable == 'P',]) +
tm_polygons('value',
#n = 10,
title = c("P ($/ha)"),
palette = "Greys",
colorNA = 'white')+
tm_facets(c("NMS"), free.scales = F, as.layers = T) +
tm_layout(legend.outside = F,
panel.label.height = 0
))
tmap_save(tmap_arrange(p1,p2,p3,p4, ncol = 1), "./n_policy_box/Data/figures/appendix1_map.jpg",
width = 10, height = 15,
units = 'in')
#---------------------------------------------------------------------------------------------------
# MAKE A MAP OF ECONOMIC VALUE OF INFORMATION SS
value_dt <- perfomances_dt4[NMS %in% c(1,4)]
#make one negative
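# Differences between the two systems are obtained by negating the NMS 1 rows
# and summing by cell, i.e. every aggregated variable below equals NMS 4 minus
# NMS 1.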
value_dt[NMS == 1, Y_corn := -Y_corn]
value_dt[NMS == 1, L := -L]
value_dt[NMS == 1, leach_ext := -leach_ext]
value_dt[NMS == 1, N_fert := -N_fert]
value_dt[NMS == 1, P := -P]
# Add values by group
value_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P)), by = .(id_10)]
# baseline_leaching_dt <- perfomances_dt3[NMS == 1, .(id_10, baseline_leach = L)]
# value_dt <- merge(value_dt, baseline_leaching_dt, by = 'id_10')
#
# ggplot(data = value_dt, aes(x = baseline_leach, y = P)) +
# geom_point() + geom_smooth()
value_sf <- merge(value_sf, value_dt[,.(id_10, L,P)], by = 'id_10', all.x = T)
hist(value_sf$eb_cell)
breaks_n <- c(-60000,-40000,-10000,0)
(p3 <- tm_shape(value_sf) + tm_polygons(c('eb_cell'),
# textNA="Not VR area",
title = expression(paste('EB'^'I-SS-ex ante', '(kg/cell)')),
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "-Greys",
colorNA = 'white', midpoint = -10000)+
tm_layout(panel.labels = expression(paste('c)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
                                            legend.position = c('left', 'bottom'))) #Environmental benefit of NMS 4
value_sf <- dplyr::mutate(value_sf, P_r = round(P, 0))
breaks_n <- c(-20,0,10,20, 30,40)
# (p4 <- tm_shape(value_sf, bbox = st_bbox(value_sf)) + tm_polygons(c('P'),
# title = expression(paste('V'^'I-SS-ex ante', '($/ha)')),
# breaks = breaks_n,
# style ="cont", palette = "Greys", colorNA = 'white', midpoint = 0)+
# tm_layout(panel.labels = 'd)',
# legend.text.size = 0.7,
# main.title.size = 1.2,
# legend.position = c('left', 'bottom'))) #Economic Value
(p4 <- tm_shape(value_sf) + tm_polygons(c('P'),
# textNA="Not VR area",
title = expression(paste('V'^'I-SS-ex ante', '($/ha)')),
breaks = breaks_n,
border.col = 'black',
# style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 0)+
                         tm_layout(panel.labels = expression(paste('d)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom'))) #Economic V of NMS 4
tmap_save(tmap_arrange(p1, p2, p3, p4, ncol = 2) , "./n_policy_box/Data/figures/information_characterization_map.jpg",
width = 10, height = 10,
units = 'in')
#=====================================================================================================================
#-----------------------------------------VALUE OF TECHNOLOGY--------------------------------------------------------
#=====================================================================================================================
#1) MAKE A MAP OF VALUE TECHNOLOGY (EX POST VALUE)
#Select the two NMSs of interest
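# Ex-post value of the technology: negate the NMS 11 rows and sum by cell, so
# the aggregated P below equals NMS 12 minus NMS 11.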
value_dt <- perfomances_dt3[NMS %in% c(11,12)]
#make one negative
value_dt[NMS == 11, Y_corn := -Y_corn]
value_dt[NMS == 11, L := -L]
value_dt[NMS == 11, leach_ext := -leach_ext]
value_dt[NMS == 11, N_fert := -N_fert]
value_dt[NMS == 11, P := -P]
# Add values by group
value_post_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P)), by = .(id_10)]
value_post_dt[order(-P)]
value_sf <- merge(grid10_tiles_sf7, value_post_dt, by = 'id_10', all.x = T)
breaks_n <- c(min(value_post_dt$P), 5,10,15, 20, max(value_post_dt$P))
(p1 <- tm_shape(value_sf) + tm_polygons(c('P'),
# textNA="Not VR area",
#title = expression(paste('VR market', '($/cell)')),
title = expression(paste('V'^'T-ex post', '($/ha)')),
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 10)+
tm_layout(panel.labels = expression(paste('a)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom')))
#---------------------------------------------------------------------------
#2) MAKE A MAP OF VALUE TECHNOLOGY (EX ANTE VALUE)
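# Ex-ante value of the technology: same negate-and-sum pattern, here NMS 5
# minus NMS 4.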
value_dt <- perfomances_dt3[NMS %in% c(4,5)]
#make one negative
value_dt[NMS == 4, Y_corn := -Y_corn]
value_dt[NMS == 4, L := -L]
value_dt[NMS == 4, leach_ext := -leach_ext]
value_dt[NMS == 4, N_fert := -N_fert]
value_dt[NMS == 4, P := -P]
# Add values by group
value_ante_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P)), by = .(id_10)]
value_sf <- merge(grid10_tiles_sf7, value_ante_dt, by = 'id_10', all.x = T)
# value_sf$P[is.na(value_sf$P)] <- 0
# (p2 <- tm_shape(value_sf) + tm_polygons(c('P'), n =10)+
# tm_layout(legend.text.size = 0.7,
# main.title = paste('Ex-ante'),
# main.title.position = "center",
# main.title.size = 1.2))
breaks_n <- c(floor(min(value_sf$P, na.rm = T)),0, 5,ceiling(max(value_sf$P, na.rm = T)))
(p2 <- tm_shape(value_sf) + tm_polygons(c('P'),
# textNA="Not VR area",
#title = expression(paste('VR market', '($/cell)')),
title = expression(paste('V'^'T-ex ante', '($/ha)')),
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 5) +
tm_layout(panel.labels = expression(paste('b)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom')))
value_comp_dt <- merge(value_ante_dt[,.(id_10, P_ante = P)], value_post_dt[,.(id_10, P_post = P)], by = 'id_10')
ggplot(data = value_comp_dt, aes(x= P_post, y = P_ante)) +
geom_point() +
geom_smooth()
# tmap_mode("view")
#
# tm_basemap("OpenStreetMap.DE") +
# tm_shape(value_sf) + tm_polygons(c('P'), n =10)+
# tm_layout(legend.text.size = 0.7,
# main.title = paste('EX-ANTE'),
# main.title.position = "center",
# main.title.size = 1.2)
#
# ---------------------------------------------------------------------------
# 3) MAKE A MAP OF TECHNOLOGY MARKET CAP BY CELL
# MAKE A MAP OF VALUE TECHNOLOGY (EX ANTE VALUE)
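# The per-hectare ex-ante value of the technology (NMS 5 minus NMS 4) is kept
# only where it exceeds 2 $/ha and multiplied by the corn area of the cell to
# approximate a market value per cell.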
value_dt <- perfomances_dt3[NMS %in% c(4,5)]
#make one negative
value_dt[NMS == 4, Y_corn := -Y_corn]
value_dt[NMS == 4, L := -L]
value_dt[NMS == 4, leach_ext := -leach_ext]
value_dt[NMS == 4, N_fert := -N_fert]
value_dt[NMS == 4, P := -P]
# Add values by group
value_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
leach_ext = sum(leach_ext),
N_fert = sum(N_fert),
P = sum(P),
corn_avg_ha = mean(corn_avg_ha)), by = .(id_10)]
value_dt2 <- value_dt[P > 2] #
value_dt2[,mkt_value := P * corn_avg_ha]
value_dt2[,.(sum(mkt_value))]
value_sf <- merge(grid10_tiles_sf7, value_dt2[,.(id_10, mkt_value)], by = 'id_10', all.x = T) %>%
dplyr::mutate(NMS = 'ex_ante')
sum(value_sf$mkt_value, na.rm = TRUE)
# (p3 <- tm_shape(value_sf) +
# tm_polygons("mkt_value", textNA="Not VR area", title="VR Value (USD/Cell)", n = 10) +
# tm_layout(legend.text.size = 0.7,
# main.title = paste('VR Market Value'),
# main.title.position = "center",
# main.title.size = 1.2))
breaks_n <- c(0, 5000,10000,15000,20000, 30000,40000,50000)
(p3 <- tm_shape(value_sf) + tm_polygons(c('mkt_value'), textNA="Not VR area",
title = expression(paste('Mkt value ($/cell)')),
# title = "",
breaks = breaks_n,
border.col = 'black',
#style ="cont",
palette = "Greys", colorNA = 'white', midpoint = 20000)+
                       tm_layout(panel.labels = expression(paste('c)')),
# main.title = 'f',
main.title.position = c(0,0),
legend.text.size = 0.7,
main.title.size = 1.2,
title.snap.to.legend = F,
legend.width = 1,
legend.position = c('left', 'bottom')))
tmap_arrange(p1, p2, p3, nrow = 1)
tmap_save(tmap_arrange(p1, p2, p3, nrow = 1), "./n_policy_box/Data/figures/techonology_characterization_map.jpg",
width = 10, height = 5, units = 'in')
st_write(value_sf, "./n_policy_box/Data/shapefiles/vr_cell_value_sf.shp", delete_dsn = TRUE)
#---------------------------------------------------------------------------
# 4) MAKE A MAP OF TECHNOLOGY MARKET CAP BY FIELD (for QGIS)
# AGGREGATE THE DATA TO CELL X Z LEVEL CONSIDERING THE AREA
names(perfomances_dt)
do_not_aggregate = c('id_10', 'id_field','region','NMS', 'tech')
do_aggregate = c("Y_corn", "L", "N_fert","P")
perfomances_field_dt <- aggregate_by_area(data_dt = perfomances_dt, variables = do_aggregate,
weight = 'area_ha', by_c = do_not_aggregate) #cell x z level (mukey and field are out)
# MAKE A MAP OF VALUE TECHNOLOGY (EX ANTE VALUE)
value_dt <- perfomances_field_dt[NMS %in% c(4,5)]
#make one negative
value_dt[NMS == 4, Y_corn := -Y_corn]
value_dt[NMS == 4, L := -L]
value_dt[NMS == 4, N_fert := -N_fert]
value_dt[NMS == 4, P := -P]
# Add values by group
value_dt <- value_dt[, .(Y_corn = sum(Y_corn),
L = sum(L),
N_fert = sum(N_fert),
n_policy = sum(P)), by = .(id_10, id_field)]
# value_dt <- value_dt[P > 3] #considering a cost of VR Cost of 3 usd
# value_dt[,mkt_value := P * corn_avg_ha]
# value_dt[,.(sum(mkt_value))]
value_sf <- merge(grid10_fields_sf2, value_dt[,.(id_10, id_field, n_policy)], by = c('id_10', 'id_field'), all.x = T) %>%
dplyr::mutate(NMS = 'ex_ante')
value_sf <- value_sf[!is.na(value_sf$n_policy),]
(p <- tm_shape(value_sf) +
tm_polygons("n_policy", textNA="Not VR area", title="VR Value (USD/Cell)", n = 10) +
tm_layout(legend.text.size = 0.7,
main.title = paste('VR Market Value'),
main.title.position = "center",
main.title.size = 1.2))
st_write(value_sf, "./n_policy_box/Data/shapefiles/vr_field_value_sf.shp", delete_dsn = TRUE)
#---------------------------------------------------------------------------
# MAKE A MAP OF THE BEST NMS
#Select the two NMSs of interest
best_NMS_dt <- perfomances_dt3[NMS %in% 1:10]
# best_NMS_dt[NMS == 5, P := P-3]
best_NMS_dt <- best_NMS_dt[,.SD[P==max(P)], by = id_10]
best_NMS_dt[,.N, by = .(NMS)][order(-N)]
value_sf <- merge(grid10_tiles_sf7, best_NMS_dt[,.(id_10, NMS)],
by = 'id_10', all = T)
value_sf <- dplyr::mutate(value_sf, NMS = ifelse(NMS <6, NA, NMS))
(p <- tm_shape(value_sf) + tm_polygons(c('NMS'), n =10)+
tm_text('NMS')+
tm_layout(legend.text.size = 0.7,
main.title = paste('Best NMS by cell'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/best_NMS_map.jpg")
#==============================================================================================================
#==============================================================================================================
#============================= YIELD CURVE EXAMPLE ==============================
#==============================================================================================================
#==============================================================================================================
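# Yield-response example for a single cell/mukey: simulated yield curves for
# the evaluation weather years (z) are drawn as lines, and the N rates chosen
# by each NMS are overlaid as points; NMS 12 marks the ex-post EONR.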
yc_yearly_dt3 <- readRDS("./n_policy_box/Data/files_rds/yc_yearly_dt3.rds")
reg_NMS_stuff <- readRDS( "./n_policy_box/Data/files_rds/reg_NMS_stuff.rds")
training_z <- reg_NMS_stuff$training_z
rm(reg_NMS_stuff)
# tile_n = 10
cell_n = 765#755#763#765
mukey_n = 242997
testing_set_dt <- perfomances_dt[id_10 == cell_n]
testing_set_dt[,mean(Y_corn), by = mukey]
testing_set_plot <- testing_set_dt[mukey == mukey_n]
testing_set_plot[,NMS := factor(NMS, levels= c('static', 'dynamic', '3','4', '5', '6', '7', '8', '9', '10', '11', '12'))]
ic_field_plot <- yc_yearly_dt3[mukey == mukey_n & id_10 == cell_n ] %>% .[!z %in% training_z ]
# testing_set_plot[,z := gsub(pattern = 'A', replacement = 'z', x = z)]
# ic_field_plot[,z := gsub(pattern = 'A', replacement = 'z', x = z)]
# library(RColorBrewer)
# n <- 12
# qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
# col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
# colors_sample =sample(col_vector, n)
# pie(rep(1,n), colors_sample)
colors_sample=c( "#7570B3", "#FFED6F", "#666666", "#7FC97F", "#386CB0", "#B3B3B3", "#FFFFCC", "#A65628", "#F4CAE4", "#E41A1C", "#E6AB02", "black")
# Y plot with Y_corn at eonr
z_labels <- ic_field_plot[N_fert == max(ic_field_plot$N_fert), .(N_fert, Y_corn, z)][order(-Y_corn)]
z_labels[seq(1, nrow(z_labels), by = 2), N_fert := N_fert - 50]
ggplot() +
geom_point(data = testing_set_plot, aes(x = N_fert, y = Y_corn, colour = NMS, size = NMS)) +
geom_line(data = ic_field_plot, aes(x = N_fert, y = Y_corn, group=z), show.legend = FALSE) +
scale_size_manual(values=c(rep(2, 11), 4)) +
scale_color_manual(values=colors_sample)+
ylab('Yield (kg/ha)')+
xlab('N rate (kg/ha)')+
geom_text(data = z_labels, aes(x = N_fert, y = Y_corn, label = z))+
theme_bw()+
theme(panel.grid = element_blank())
# (plot_n1 <- ggplot() +
# geom_point(data = testing_set_plot[z == z_n & NMS == '12' & prev_crop == 1],
# aes(x = N_fert, y = Y_corn , size = NMS)) +
# geom_line(data = ic_field_plot[z == z_n & prev_crop == 1], aes(x = N_fert, y = Y_corn, linetype = "Yield")) +
# geom_point(data = testing_set_plot[z == z_n & NMS == '12' & prev_crop == 1],
# aes(x = N_fert, y = L*150, size = NMS)) +
# geom_line(data = ic_field_plot[z == z_n & prev_crop == 1], aes(x = N_fert, y = L*150, linetype = "N Leaching")) +
# # scale_size_manual(values=c(rep(2, 11), 4)) +
# ## scale_color_manual(values=colors_sample)+
# labs(y = 'Yield (kg/ha)',
# x = 'N rate (kg/ha)',
# colour = "Parameter")+
# scale_y_continuous(sec.axis = sec_axis(~./150, name = "N leaching (kg/ha)"))+
# scale_linetype_manual(values = c("dashed", "solid"))+
# scale_size_manual(values = 4,
# labels = expression(paste('EONR'^'ex post')))+
# #geom_text(data = z_labels, aes(x = N_fert, y = Y_corn, label = z))+
# theme_bw()+
# guides(linetype = guide_legend(order=2),
# size = guide_legend(order=1)) +
# theme(legend.title = element_blank(),
# legend.position = c(0.85, 0.15),
# panel.grid = element_blank())+
# annotate("text", x=300, y=11500, label= "a)", size = 10) )
ic_field_plot$z
ggplot(ic_field_plot, aes(x= N_fert, y = L, color = z)) +
geom_line()
z_n = 23
ic_field_plot2 <- melt(ic_field_plot[z == z_n ], id.vars = 'N_fert', measure.vars = c('Y_corn', 'L'))
ic_field_plot2[variable == 'L', value := value * 150]
testing_set_plot2 <- melt(testing_set_plot[z == z_n & NMS == '12'], id.vars = 'N_fert', measure.vars = c('Y_corn', 'L'))
testing_set_plot2[variable == 'L', value := value * 150]
(plot_n1 <- ggplot() +
geom_line(data = ic_field_plot2, aes(x = N_fert, y = value, linetype = variable, colour = variable))+
scale_color_manual(values=c('black', 'black', 'black'),
labels = c(bquote (paste('EONR'^'ex post')), 'Yield', 'N leaching'))+
geom_point(data = testing_set_plot2, aes(x = N_fert, y = value, colour = 'EONR')) +
guides( linetype = FALSE,
colour = guide_legend(override.aes = list(shape = c(16, NA, NA),
linetype = c("blank", "solid", "dotted"))))+
labs(y = 'Yield (kg/ha)',
x = 'N rate (kg/ha)',
colour = "Variable") +
scale_y_continuous(sec.axis = sec_axis(~./150, name = "N leaching (kg/ha)"))+
theme_bw() +
theme(legend.title = element_blank(),
legend.position = c(0.85, 0.15),
panel.grid = element_blank())+
annotate("text", x=300, y=15000, label= "a)", size = 10) )
summary(testing_set_plot$Y_corn)
exclude_z = testing_set_plot[Y_corn == min(Y_corn)]$z[1]
(plot_n2 <- ggplot() +
geom_line(data = ic_field_plot[ !(z == exclude_z) ], aes(x = N_fert, y = Y_corn, group = z), show.legend = F) +
geom_point(data = testing_set_plot[NMS == '12' & !(z == exclude_z) ], aes(x = N_fert, y = Y_corn , shape = 'EONR'), size = 2) +
scale_shape_manual( values = 16,
labels = c(bquote (paste('EONR'^'ex post')))) +
# scale_color_manual(values=colors_sample)+
# scale_size_manual(values=c(rep(2, 11), 4)) +
## scale_color_manual(values=colors_sample)+
labs(y = 'Yield (kg/ha)',
x = 'N rate (kg/ha)')+
#scale_y_continuous(sec.axis = sec_axis(~./150, name = "N Leaching (kg/ha)"))+
#scale_linetype_manual(values = c("solid", "dashed"))+
#geom_text(data = z_labels, aes(x = N_fert, y = Y_corn, label = z))+
theme_bw()+
theme(legend.title = element_blank(),
legend.position = c(0.85, 0.15),
panel.grid = element_blank())+
guides(color = FALSE) + #remove legend for color
annotate("text", x=300, y=15000, label= "b)", size = 10))
summary(testing_set_plot[NMS == '12']$N_fert)
perfomances_champaign_dt <- perfomances_dt[id_10 %in% unique(dplyr::filter(grid10_tiles_sf7, county_name == 'Champaign')$id_10) & NMS == 12]
(plot_n3 <- ggplot() +
geom_density(data = perfomances_champaign_dt, aes( x= N_fert, y = ..density..), alpha = 0.4)+
labs(x = expression(paste('EONR'^'ex post', '(kg/ha)')))+
theme_bw()+
theme(panel.grid = element_blank(),
legend.position = c(0.85, .7))+
scale_fill_manual(name = "Previous crop", labels = c("Soybean", "Corn"), values = c('#696969', '#D3D3D3'))+
scale_linetype_manual(values=c("twodash", "dotted"))+
guides(linetype = FALSE)+
annotate("text", x=300, y=0.009, label= "c)", size = 10) )
(plot_n4 <- ggplot() +
geom_density(data = perfomances_champaign_dt, aes( x= L, y = ..density..), alpha = 0.4)+
labs(x = 'N leaching (kg/ha)')+
theme_bw()+
xlim(c(0, 150))+
theme(panel.grid = element_blank(),
legend.position = c(0.85, 0.7))+
scale_fill_manual(name = "Previous crop", labels = c("Soybean", "Corn"), values = c('#696969', '#D3D3D3'))+
scale_linetype_manual(values=c("twodash", "dotted"))+
guides(linetype = FALSE)+
annotate("text", x=140, y=0.025, label= "d)", size = 10) )
grid.arrange(plot_n1, plot_n2, nrow = 1)
grid.arrange(plot_n1, plot_n2, plot_n3, plot_n4, nrow = 2)
ggsave(grid.arrange(plot_n1, plot_n2, nrow = 1),
filename = "./n_policy_box/Data/figures/yield_curve_example.jpg", width = 10, height = 4, units = 'in')
grid.arrange(grid.arrange(plot_n1, plot_n2, nrow=1), plot_n3, nrow = 2)
#---------------------------------------------------------------------------
# MAKE A MAP OF THE BEST NMS
#Select the two NMSs of interest
best_NMS_dt <- perfomances_dt3[NMS %in% 1:10]
best_NMS_dt <- best_NMS_dt[,.SD[P==max(P)], by = id_10]
best_NMS_dt <- merge(best_NMS_dt, perfomances_dt3[NMS == 1, .(id_10, P_1 = P)], by = 'id_10')
best_NMS_dt[,P_improve := P-P_1]
best_NMS_dt[,.N, by = .(region, NMS)]
best_NMS_sf <- merge(grid10_tiles_sf7, best_NMS_dt, by = 'id_10', all.x = T)
(p <- tm_shape(best_NMS_sf) + tm_polygons(c('NMS','P_improve'), n =10)+
tm_text('NMS')+
tm_layout(legend.text.size = 0.7,
main.title = paste('VALUE OF TECHNOLOGY AND INFORMATION'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/value_t_i.jpg")
#---------------------------------------------------------------------------
# PLOT WEIRD CASE
yc_yearly_dt <- readRDS('./n_policy_box/Data/files_rds/yc_yearly_dt.rds')
length(unique(yc_yearly_dt$id_10))
yc_yearly_dt3 <- readRDS('./n_policy_box/Data/files_rds/yc_yearly_dt3.rds')
length(unique(yc_yearly_dt3$id_10))
length(unique(full_fields_dt2$id_10))
id_10_n <- n_policy_expost_dt[order(-L2)][1,]$id_10
yc_yearly_dt <- readRDS('./n_policy_box/Data/files_rds/yc_yearly_dt.rds')
one_field_dt <- data.table(grid10_soils_sf6[grid10_soils_sf6$id_10 == id_10_n,])
mukey_n <- one_field_dt[area_ha == max(area_ha)][1,] %>% .[,.(id_10, mukey)]
ic_field_dt <- filter_dt_in_dt(yc_yearly_dt , filter_dt = mukey_n, return_table = TRUE)
ic_field_dt[,prev_crop := ifelse(prev_crop == 'MSM', 0, 1)]
ic_field_dt[, P := Y_corn * Pc - N_fert * Pn]
performance_set_dt <- filter_dt_in_dt(perfomances_dt , filter_dt = mukey_n, return_table = TRUE)
performance_set_dt[,NMS := as.character(NMS)]
performance_set_dt[prev_crop == 0 & NMS != 11, .N, by = .(NMS, z)]
# P plot with P at eonr
(plot_n <- ggplot() +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS != 11], aes(x = N_fert, y = P, colour = NMS)) +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS == 11], aes(x = N_fert, y = P), size = 3, show.legend = FALSE) +
geom_line(data = ic_field_dt[prev_crop == 0], aes(x = N_fert, y = P, group=interaction(z)), show.legend = FALSE) +
ggtitle(paste('P plot with P at eonr', mukey_n$mukey)))
ggsave(plot_n, filename = "./n_policy_box/Data/figures/yield_curve_example.jpg")
# Lo3 plot with leaching at eonr
(plot_n <- ggplot() +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS != 11], aes(x = N_fert, y = L2, colour = NMS)) +
geom_point(data = performance_set_dt[prev_crop == 0 & NMS == 11], aes(x = N_fert, y = L2), size = 3, show.legend = FALSE) +
geom_line(data = ic_field_dt[prev_crop == 0], aes(x = N_fert, y = L2, group=interaction(z)), show.legend = FALSE) +
ggtitle(paste('Lo3 plot with leaching at eonr', mukey_n$mukey)))
ggsave(plot_n, filename = "./n_policy_box/Data/figures/leaching_curve_example.jpg")
#---------------------------------------------------------------------------
# CALCULATE STATE TOTAL VARIABLES
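# Scale per-hectare outcomes to state totals: multiply by the corn area of each
# cell, divide by 1000 and sum over cells. The lines below are rough checks
# against NASS statistics (about 25.4 kg per bushel of corn and 0.4046 ha per
# acre).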
perfomances_dt4 <- copy(perfomances_dt4)
do_aggregate = c("Y_corn", "L2", "leach_ext", "N_fert","P")
perfomances_dt4[,(do_aggregate) := (.SD * corn_avg_ha/1000), .SDcols=do_aggregate]
state_total_production_dt <- perfomances_dt4[, lapply(.SD, function(x) sum(x)), .SDcols= do_aggregate]
2.2 * 10^9 * 25.4 /1000 #IL production in tons https://www.nass.usda.gov/Statistics_by_State/Illinois/Publications/Current_News_Release/2018/20180112-IL_Annual_Crop_Production.pdf
10.95 * 10^6 *0.4046#IL harvested area in ha
201 * 25.4/0.4046 #IL Yield
#---------------------------------------------------------------------------
# SELECT THE WORST YEAR FOR LEACHING AND SEE THE BENEFIT THERE
all_perfomances_dt2 <- aggregate_by_area(data_dt = all_perfomances_dt, variables = do_aggregate,
weight = 'area_ha', by_c = c('id_10', 'z'))
extreme_year_dt <- all_perfomances_dt2[, .SD[ nL1 == max( nL1)], by = .(id_10)]
extreme_year_dt[, lapply(.SD, function(x) mean(x)), .SDcols= do_aggregate]
eonr_mukey_dt <- yc_yearly_dt[, .SD[ P == max( P)], by = .(id_10, mukey, z)]
1804.744/1814.46
all_perfomances_dt[id_10 == 5]
all_perfomances_dt[id_10 == 5, .(Y_corn_1 = sum(Y_corn_1), area_ha = sum(area_ha))]
# MAKE A DT
economics_field_dt <- merge(n_regional_noss_dt2, n_regional_ss_dt2) %>% merge(eonr_ur_dt2) %>% merge(eonr_vr_dt2)
economics_field_dt[,area_ha := sum(area_dt$area_ha)]
economics_field_dt[,val_ss_ha := (P_reg_ss - P_reg_no_ss)/area_ha]
economics_field_dt[,val_info_ha := (P_ur - P_reg_no_ss)/area_ha]
economics_field_dt[,val_tech_ha := (P_vr - P_ur)/area_ha]
economics_field_dt[,nval_ss_ha := (nleach_reg_ss - nleach_reg_no_ss)/area_ha]
economics_field_dt[,nval_info_ha := (nleach_ur - nleach_reg_no_ss)/area_ha]
economics_field_dt[,nval_tech_ha := (nleach_vr - nleach_ur)/area_ha]
cols <- names(economics_field_dt)[sapply(economics_field_dt,is.numeric)]
economics_field_dt[,(cols) := round(.SD,3), .SDcols=cols]
economics_field_dt <- cbind(fields_seq_tmp, economics_field_dt)
economics_field_dt[,mukey_count := nrow(area_dt)]
economics_ls[[j]] <- economics_field_dt
}
economics_dt <- rbindlist(economics_ls)
saveRDS(economics_dt, './n_policy_box/Data/files_rds/economics_dt.rds')
#---------------------------------------------------------------------------
# MAKE A MAP OF VALUE OF I AND T
val_map_dt <- economics_dt[,.(val_ss_ha = mean(val_ss_ha),
val_info_ha = mean(val_info_ha),
val_tech_ha = mean(val_tech_ha)), by = id_10]
grid10_value_sf <- left_join(grid10_tiles_sf, val_map_dt, by = 'id_10')
(p <- tm_shape(grid10_value_sf) + tm_polygons(c('val_ss_ha','val_info_ha', 'val_tech_ha'))+
tm_layout(legend.text.size = 0.7,
main.title = paste('VALUE OF TECHNOLOGY AND INFORMATION'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/value_t_i.jpg")
#---------------------------------------------------------------------------
# MAKE A MAP OF VALUE OF I AND T FOR LEACHING
val_map_dt <- economics_dt[,.(nval_ss_ha = mean(nval_ss_ha),
nval_info_ha = mean(nval_info_ha),
nval_tech_ha = mean(nval_tech_ha)), by = id_10]
grid10_value_sf <- left_join(grid10_tiles_sf, val_map_dt, by = 'id_10')
(p <- tm_shape(grid10_value_sf) + tm_polygons(c('nval_ss_ha','nval_info_ha', 'nval_tech_ha'))+
tm_layout(legend.text.size = 0.7,
main.title = paste('VALUE OF TECHNOLOGY AND INFORMATION'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/nvalue_t_i.jpg")
#---------------------------------------------------------------------------
# MAKE A MAP OF PROFITS
economics_dt
prof_map_dt <- economics_dt[,P_ur_ha := P_ur / area_ha]
prof_map_dt <- prof_map_dt[,.(P_ur_ha = mean(P_ur_ha)), by = id_10]
grid10_value_sf <- left_join(grid10_value_sf, prof_map_dt, by = 'id_10')
(p <- tm_shape(grid10_value_sf) + tm_polygons('P_ur_ha')+
tm_layout(legend.text.size = 0.7,
main.title = paste('AVERAGE UR PROFITS $/HA'),
main.title.position = "center",
main.title.size = 1.2))
tmap_save(p, "./n_policy_box/Data/figures/profits.jpg")
#---------------------------------------------------------------------------
# MAKE BOXPLOT
dat.m <- melt(economics_dt,id.vars= c('id_10', 'id_field', 'z'), measure.vars=c('val_ss_ha','val_info_ha', 'val_tech_ha'))
(p <- ggplot(dat.m) +
geom_boxplot(aes(x=variable, y=value, color=variable)) +
# scale_color_discrete(name = "REGION") +
ggtitle('Value of information and technology')+
theme_bw() +
theme(axis.text=element_text(size=12),
axis.title=element_text(size=14),
#legend.position='bottom',
panel.grid = element_blank(),
strip.background = element_blank(),
legend.text.align = 0,
legend.position = "none",
legend.title = element_blank(),
strip.text = element_blank()))
ggsave(p, filename = "./n_policy_box/Data/figures/value_boxplot.jpg")
|
#' Plot of the time series of daily flux estimates and the sample values for the days that were sampled
#'
#' This plot is useful for visual examination of the ability of the WRTDS, or other model, to fit the
#' data, as seen in a time-series perspective.
#'
#' @param startYear numeric specifying the starting date (expressed as decimal years, for example 1989.0) for the plot
#' @param endYear numeric specifying the ending date for the plot
#' @param localSample string specifying the name of the data frame that contains the concentration data, default name is Sample
#' @param localDaily string specifying the name of the data frame that contains the flow data, default name is Daily
#' @param localINFO string specifying the name of the data frame that contains the metadata, default name is INFO
#' @param tinyPlot logical variable, if TRUE plot is designed to be short and wide, default is FALSE.
#' @param fluxUnit number representing an entry in the pre-defined fluxUnit class array. \code{\link{fluxConst}}
#' @param fluxMax number specifying the maximum value to be used on the vertical axis, default is NA (which allows it to be set automatically by the data)
#' @param printTitle logical variable if TRUE title is printed, if FALSE title is not printed (this is best for a multi-plot figure)
#' @param \dots arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)
#' @keywords graphics water-quality statistics
#' @export
#' @examples
#' Sample <- exSample
#' Daily <- exDaily
#' INFO <- exINFO
#' plotFluxTimeDaily(2001,2009)
plotFluxTimeDaily<-function (startYear, endYear, localSample = Sample, localDaily = Daily,
localINFO = INFO, tinyPlot = FALSE, fluxUnit = 3, fluxMax = NA,
printTitle = TRUE, ...) {
################################################################################
# I plan to make this a method, so we don't have to repeat it in every function:
if (is.numeric(fluxUnit)){
fluxUnit <- fluxConst[shortCode=fluxUnit][[1]]
} else if (is.character(fluxUnit)){
fluxUnit <- fluxConst[fluxUnit][[1]]
}
################################################################################
# if (tinyPlot)
# par(mar = c(5, 4, 1, 1))
# else par(mar = c(5, 4, 4, 2) + 0.1)
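  # unitFactor converts kg/day into the requested flux unit; the 86.40 factor
  # turns concentration (mg/L) times discharge (m^3/s) into kg/day.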
fluxFactor <- fluxUnit@unitFactor*86.40
subSample <- subset(localSample, DecYear >= startYear)
subSample <- subset(subSample, DecYear <= endYear)
subDaily <- subset(localDaily, DecYear >= startYear)
subDaily <- subset(subDaily, DecYear <= endYear)
xSample <- subSample$DecYear
xDaily <- subDaily$DecYear
#xLimits <- c(startYear, endYear)
#xTicks <- pretty(xLimits, n = 5)
#numXTicks <- length(xTicks)
#xLeft <- xTicks[1]
#xRight <- xTicks[numXTicks]
yLow <- subSample$ConcLow*subSample$Q*fluxFactor
yHigh <- subSample$ConcHigh*subSample$Q*fluxFactor
Uncen <- subSample$Uncen
#yAll <- c(subDaily$ConcDay*subDaily$Q*fluxFactor, subSample$ConcHigh*subSample$Q*fluxFactor)
#maxYHigh <- if (is.na(fluxMax))
# 1.05 * max(yAll)
#else fluxMax
#yTicks <- yPretty(maxYHigh)
#yTop <- yTicks[length(yTicks)]
plotTitle <- if (printTitle)
paste(localINFO$shortName, "\n", localINFO$paramShortName,
"\n", "Observed and Estimated Flux versus Time")
else ""
###################################
yBottom <- 0 #Not specified within script, added under assumption that it's always zero based on ylim definition in this function
par(mar = c(5,6,5,2))
xInfo <- generalAxis(x=xSample, minVal=startYear, maxVal=endYear, tinyPlot=tinyPlot)
yInfo <- generalAxis(x=yHigh, minVal=yBottom, maxVal=fluxMax, tinyPlot=tinyPlot)
if (tinyPlot) {
yLab <- fluxUnit@unitExpressTiny
}
else {
yLab <- fluxUnit@unitExpress
}
genericEGRETDotPlot(x=xSample, y=yHigh,
xlim = c(xInfo$bottom, xInfo$top), ylim = c(yInfo$bottom, yInfo$top),
xTicks=xInfo$ticks, yTicks=yInfo$ticks,
ylab = yLab,
plotTitle=plotTitle, ...
)
lines(xDaily, subDaily$ConcDay*subDaily$Q*fluxFactor)
censoredSegments(yBottom=yInfo$bottom,yLow=yLow,yHigh=yHigh,x=xSample,Uncen=Uncen)
par(mar = c(5, 4, 4, 2) + 0.1)
}
|
/R/plotFluxTimeDaily.R
|
permissive
|
ayan-usgs/EGRET
|
R
| false | false | 4,290 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FullMap2bedGraph.R
\name{FullMap2bedGraph}
\alias{FullMap2bedGraph}
\title{FullMap2BedGraph}
\usage{
FullMap2bedGraph(
path.name,
scaling.factor = 1,
out.dir = getwd(),
chrom = 1:18,
mode = "base"
)
}
\arguments{
\item{path.name}{Character string defining the pathname to the fullmap file. Files must be in the format SAMPLENAME.FullMap.txt}
\item{scaling.factor}{a scaling factor used to boost the values. This is useful in calibration, for example. Defaults to 1.}
\item{out.dir}{A character string defining the pathname to the output directory. Defaults to current working directory.}
\item{chrom}{A vector defining the chromosomes to process. Defaults to all chromosomes, including chromosome 3, 17 and 18, which may cause compatibility issues with UCSC.}
\item{mode}{Can be "break" or "base", depending on whether you want to output the position of the DNA break (between bases), or the position of the base which is covalently linked to the protein.}
}
\description{
Converts FullMaps downloaded from GEO (SPECIFICALLY), into bedGraphs which can be uploaded as custom tracks to UCSC.
}
\author{
Will Gittens
}
|
/man/FullMap2bedGraph.Rd
|
permissive
|
WHG1990/CCTools
|
R
| false | true | 1,208 |
rd
|
require(FNN)
require(magrittr) # provides %>% and divide_by(), which gen_estimate() relies on
gen_estimate <- function(indices,
train_resp){
estimates <- c()
for(i in seq(1,nrow(indices),1)) {
lapply(indices[i,],
function(x){train_resp[x,]$MSRP}) %>%
Reduce("+", .) %>%
divide_by(length(indices[i,])) -> result
estimates <-c(estimates, result)
}
return(estimates)
}
calc_KNN_error <- function(k_val,
train_pred,
test_pred,
label,
train_resp,
test_res) {
k <- FNN::knn(train_pred,
test_pred,
label,
k = k_val,
algorithm="cover_tree")
indices <- attr(k, "nn.index")
estimates <- gen_estimate(indices,
train_resp)
accum_error <- 0
for(j in seq(1,length(estimates),1)) {
disturbance <- ((estimates[j] - test_res$MSRP[j]) ^ 2 )
accum_error <- accum_error + disturbance
}
return(accum_error)
}
|
/code/calc_KNN_error.R
|
permissive
|
Benardi/cbr_msrp
|
R
| false | false | 1,042 |
r
|
#' Format p value
#'
#' This function allows you to format p values, including printing very small ones as <.001
#' @param raw_p Number to be treated as p value
#' @param d Decimal places used for p value.
#' @param equal Symbol to include if not <
#' @export
#' @examples
#' format_pval(0.0001)
#' format_pval(0.95,equal = "")
#'
format_pval <- function(raw_p, d=3,equal="="){
p_string= ifelse(raw_p < (1/10^d),paste0("<0.",paste(rep(0,d-1),collapse=""),"1"),paste(equal,sprintf(paste0("%.",d,"f"),raw_p)))
return(p_string)
}
|
/R/format_pval.R
|
no_license
|
Oromendia/oromendia
|
R
| false | false | 518 |
r
|
#!/usr/bin/Rscript
library(maps)
library(rgdal)
library(leaflet)
library(geojsonio)
library(sp)
library(GISTools)
dt = read.delim("filter.txt", sep = "|", header = T)
dt = dt[,2:dim(dt)[2]]
dt = dt[2:dim(dt)[1],]
rownames(dt) = 1:length(dt[,1])
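# Clean the coordinate strings: strip the ' E'/' W' and ' N'/' S' suffixes and
# make southern latitudes negative before converting to numeric.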
dt$Lon = gsub(pattern = " E", replacement = "",dt$Lon)
dt$Lon = gsub(pattern = " W", replacement = "",dt$Lon)
indexS = grep(dt$Lat, pattern = "S")
dt$Lat = gsub(pattern = " S", replacement = "",dt$Lat)
dt$Lat = gsub(pattern = " N", replacement = "",dt$Lat)
dt$Lat[indexS] = as.numeric(dt$Lat[indexS])*(-1)
kata_kata = paste0(dt$Origin.Time..GMT., " pada Kedalaman ", dt$Depth, " Dengan M ", dt$Mag, " di Wilayah ", dt$Remarks)
TT = as.POSIXct(dt$Origin.Time..GMT., tz = "UTC")
TW = TT
TWita = as.POSIXct(as.character(TW), tz = "WITA")
jso = data.frame(lon = dt$Lon, lat = dt$Lat,Time = TWita, Mag = dt$Mag, Depth = dt$Depth, Status = dt$Status,
TypeMag = dt$TypeMag, Remarks = dt$Remarks)
if(any(is.na(dt$Lon) | is.na(dt$Lat))){
i_lon = c(which(is.na(as.numeric(dt$Lon))))
i_lat = which(is.na(as.numeric(dt$Lat) ))
iii = 1:dim(jso)[1]
jso = jso[!iii %in% c(i_lon,i_lat),]
}
iii = 1:dim(jso)[1]
i_lon = which(as.numeric(as.character(dt$Lon)) > 90 & as.numeric(as.character(dt$Lon)) < 150)
jso = jso[iii %in% c(i_lon),]
i_lat = which(as.numeric(as.character(jso$lat)) > -15 & as.numeric(as.character(jso$lat)) < 15)
jso = jso[iii %in% c(i_lat),]
jsobackup = jso
jso = head(jso, 200L)
oke = cbind(as.numeric(as.character(jso$lon)),as.numeric(as.character(jso$lat)))
okebackup = cbind(as.numeric(as.character(jsobackup$lon)),as.numeric(as.character(jsobackup$lat)))
json <- SpatialPointsDataFrame(oke, jso[, 3:dim(jso)[2]])
jsonback <- SpatialPointsDataFrame(okebackup, jsobackup[, 3:dim(jsobackup)[2]])
head(jsobackup)
geojsonio::geojson_write(file = "~/tews/data_mag.geojson",(jsonback))
kata_kata = paste0("<h2>",jso$Time, " pada dedalaman ", jso$Depth, " dengan M ", jso$Mag, " di Wilayah ", jso$Remarks, "</h2>")
df.20 <- jso
getColor <- function(quakes) {
sapply(as.numeric(quakes$Mag), function(Mag) {
if(Mag <= 4) {
"green"
} else if(Mag <= 5) {
"orange"
} else {
"red"
} })
}
colll = getColor(jso)
icons <- awesomeIcons(
icon = 'ios-close',
iconColor = 'black',
library = 'ion',
markerColor = getColor(jso)
)
pal = colorNumeric(
palette = "Greens",
domain = json$Mag
)
library(leaflet.extras)
icon.pop <- pulseIcons(color = ifelse(as.numeric(json$Mag) < 4,'blue','red'),
heartbeat = ifelse(as.numeric(json$Depth) > 50 ,'2','0.7'))
icon.pop$color[which(json$Mag >= 5)] = "red"
icon.pop$color[which(json$Mag < 5)] = "blue"
json$Depth = gsub(pattern = " km",replacement = "", json$Depth)
icon.pop$heartbeat[which(as.numeric(json$Depth) >= 50)] = '1.5'
icon.pop$heartbeat[which(as.numeric(json$Depth) < 50)] = '0.7'
kata_kata = paste0("<h2>",json$Time, " pada kedalaman ", json$Depth, " km dengan M ", json$Mag, " di Wilayah ", json$Remarks, "</h2>")
html_legend <- paste0("<h2 style='font-size:1em; line-height: 0.1'>
<center><img src='map_quake_files/png/RDCA.png'> M >5  
<img src='map_quake_files/png/jadi.png' height='36px' width='30px'></center>
<h4 style='font-size:1em; color:black; text-align: center; '>
", "BBMKG IV Makassar","</h4>")
dfq = cbind(as.numeric(as.character(jsobackup$lon)),
as.numeric(as.character(jsobackup$lat)))
dfq = data.frame(dfq)
colnames(dfq) = c('lng', 'lat')
icon.pop1=list()
for(i in 1:length(icon.pop)){
icon.pop1[[i]] = icon.pop[[i]][1]
}
names(icon.pop1) = names(icon.pop)
icon.pop2=list()
for(i in 1:length(icon.pop)){
icon.pop2[[i]] = icon.pop[[i]][2:length(icon.pop[[i]])]
}
names(icon.pop2) = names(icon.pop)
# leaflet(data.frame(dfq)) %>%
# addProviderTiles(providers$CartoDB.Positron) %>%
# addWebGLHeatmap(lng=~lng, lat=~lat,size=dim(dfq)[1])
map = leaflet(data.frame(dfq)) %>%
addWebGLHeatmap(lng=~lng, lat=~lat,size=dim(dfq)[1]) %>%
addProviderTiles("Esri") %>% setView(lng = 122, lat = -1, zoom = 5) %>%
addPulseMarkers(lng = oke[2:dim(oke)[1],1], lat = oke[2:dim(oke)[1],2],
label = as.character(json$Remarks[2:dim(json)[1]]),
labelOptions = rep(labelOptions(noHide = F),nrow(jso)),
icon = icon.pop2,popup = as.character(kata_kata[2:length(kata_kata)]),
clusterOptions = markerClusterOptions()) %>%
addControl(html = html_legend, position = "bottomleft") %>%
addPulseMarkers(lng = oke[1,1], lat = oke[1,2],
label = paste0("Last event: ",as.character(json$Remarks[1])),
# labelOptions = rep(labelOptions(noHide = T),nrow(jso)),
icon = icon.pop1, popup = as.character(kata_kata[1]),
# label = output_VHR$Kecamatan,
labelOptions =
labelOptions(noHide = T,
style = list("font-weight" = "bold", padding = "3px 8px"),
textsize = "15px",
direction = "auto")
)
# library(leaflet.minicharts)
# ?leaflet.minicharts::popupArgs
library(htmlwidgets)
saveWidget(file = "map_quake.html", map, selfcontained = F)
#
#
# data("eco2mixBalance")
# bal <- eco2mixBalance
# leaflet() %>% addTiles() %>%
# addFlows(
# as.numeric(bal$lng0), as.numeric(bal$lat0), as.numeric(bal$lng1), as.numeric(bal$lat1),
# flow = bal$balance,
# time = bal$month,
# color = "navy"
# )
|
/tewss/convertToGeojson.R
|
no_license
|
yosiknorman/yosiknorman.github.io
|
R
| false | false | 5,614 |
r
|
# This file contains the data preparation and estimation functions for temporal
# or cross-sectional network autocorrelation models. Written by Philip Leifeld.
# display version number and date when the package is loaded
.onAttach <- function(libname, pkgname) {
desc <- packageDescription(pkgname, libname)
packageStartupMessage(
'Package: tnam\n',
'Version: ', desc$Version, '\n',
'Date: ', desc$Date, '\n',
'Authors: Philip Leifeld (Eawag and University of Bern)\n',
' Skyler J. Cranmer (The Ohio State University)\n'
)
}
# function which aggregates data for glm analysis
tnamdata <- function(formula, center.y = FALSE) {
# parse the formula
if (class(formula) != "formula") {
stop("'formula' must be a formula object.")
}
lhs <- deparse(formula[[2]]) # name of the response variable
lhs <- eval(parse(text = lhs)) # get the actual response data
rhs <- paste0(deparse(formula[[3]]), collapse = "") # rhs of formula
rhs <- gsub("\\s+", " ", rhs) # get rid of redundant spaces
rhs <- strsplit(rhs, " \\+ ")[[1]] # parse separate formula elements
# create data frame with response variable, time, and nodes
time <- numeric()
node <- character()
response <- numeric()
if (class(lhs) == "list") {
for (i in 1:length(lhs)) {
if (!is.numeric(lhs[[i]])) {
stop(paste("The response variable should be numeric or a list of",
"numerics or a data frame with one time point per column."))
}
if (is.null(names(lhs[[i]])) || length(names(lhs[[i]])) !=
length(lhs[[i]])) {
stop(paste("The outcome variable must have node labels if multiple",
"time points are present."))
}
node <- c(node, names(lhs[[i]]))
time <- c(time, rep(i, length(lhs[[i]])))
if (center.y == TRUE) {
lhs[[i]] <- lhs[[i]] - mean(lhs[[i]], na.rm = TRUE)
}
response <- c(response, lhs[[i]])
}
} else if (class(lhs) == "data.frame") {
for (i in 1:ncol(lhs)) {
if (!is.numeric(lhs[, i])) {
stop(paste("The response variable should be numeric or a list of",
"numerics or a data frame with one time point per column."))
}
if (is.null(rownames(lhs)) || length(rownames(lhs)) != nrow(lhs)) {
stop(paste("The outcome variable must have node labels if multiple",
"time points are present."))
}
node <- c(node, rownames(lhs))
time <- c(time, rep(i, nrow(lhs)))
if (center.y == TRUE) {
lhs[, i] <- lhs[, i] - mean(lhs[, i], na.rm = TRUE)
}
response <- c(response, lhs[, i])
}
} else if (!is.numeric(lhs)) {
stop("Data type of the response variable could not be recognized.")
} else {
response <- lhs
if (center.y == TRUE) {
response <- response - mean(response, na.rm = TRUE)
}
time <- rep(1, length(lhs))
node <- as.character(1:length(lhs))
}
dat <- data.frame(response = response, time = time, node = node)
# compute results according to rhs
resultlist <- list()
for (i in 1:length(rhs)) {
result <- eval(parse(text = rhs[i]))
resultlist[[i]] <- result
}
# check compatibility of labels
for (i in 1:length(resultlist)) {
for (j in 1:length(resultlist)) {
itime <- length(unique(resultlist[[i]]$time))
jtime <- length(unique(resultlist[[j]]$time))
if ((itime > 1 || jtime > 1) && i < j) {
inters <- length(intersect(resultlist[[i]]$node, resultlist[[j]]$node))
if (inters == 0) {
stop(paste("Model terms", i, "and", j, "do not have any",
"intersecting node labels. Please attach names, row names, or",
"vertex names to the 'y' or 'networks' argument."))
}
}
}
}
# take care of the lags
for (i in 1:length(resultlist)) {
lag.i <- attributes(resultlist[[i]])$lag
if (is.null(lag.i) || length(lag.i) == 0) {
lag.i <- 0
}
resultlist[[i]]$time <- resultlist[[i]]$time + lag.i
}
# merge results with response variable and take care of lags
for (i in 1:length(resultlist)) {
dat <- merge(dat, resultlist[[i]], by = c("time", "node"), all.x = TRUE,
all.y = FALSE)
colnames(dat)[3] <- "response"
dat$node <- as.character(dat$node)
if (ncol(resultlist[[i]]) == 4) {
dat <- dat[, -ncol(dat)]
}
}
dat <- dat[, c(3, 1, 2, 4:ncol(dat))]
return(dat)
}
# temporal network autocorrelation model
tnam <- function(formula, family = gaussian, re.node = FALSE,
re.time = FALSE, time.linear = FALSE, time.quadratic = FALSE,
center.y = FALSE, na.action = na.omit, ...) {
# prepare the data frame
dat <- tnamdata(formula, center.y = center.y)
# check if GLM is appropriate
if (re.node == FALSE && re.time == FALSE && length(unique(dat$time)) > 1) {
warning(paste("Different time points are available. You might want to use",
"a mixed effects model using arguments 're.time' and/or 're.node'."))
}
# take care of the node variable: keep as random effect or remove
if (re.node == TRUE && length(unique(dat$time)) > 1) {
glmest.node <- FALSE
} else {
dat <- dat[, -3]
glmest.node <- TRUE
}
# take care of the time variable
if (time.linear == TRUE && time.quadratic == TRUE && re.time == TRUE) {
# T-T-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
dat <- dat[, -2] # remove linear effect
warning(paste("Arguments 're.time' and 'time.linear' cannot be used",
"together. Omitting the linear time effect."))
warning(paste("Arguments 're.time' and 'time.quadratic' cannot be used",
"together. Omitting the quadratic time effect."))
} else {
glmest.time <- TRUE
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
} else if (time.linear == TRUE && time.quadratic == TRUE &&
re.time == FALSE) {
# T-T-F
glmest.time <- TRUE
if (length(unique(dat$time)) > 1) {
dat$time.squared <- dat$time^2
} else {
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
} else if (time.linear == TRUE && time.quadratic == FALSE &&
re.time == FALSE) {
# T-F-F
glmest.time <- TRUE
if (length(unique(dat$time)) > 1) {
# OK; do not modify anything
} else {
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
} else if (time.linear == FALSE && time.quadratic == FALSE &&
re.time == FALSE) {
# F-F-F
dat <- dat[, -2] # remove linear effect
glmest.time <- TRUE
} else if (time.linear == FALSE && time.quadratic == TRUE &&
re.time == FALSE) {
# F-T-F
glmest.time <- TRUE
if (length(unique(dat$time)) > 1) {
dat$time.squared <- dat$time^2 # create quadratic effect
} else {
message("Time effects are ignored because only one time step is present.")
}
dat <- dat[, -2] # remove linear effect
} else if (time.linear == FALSE && time.quadratic == TRUE &&
re.time == TRUE) {
# F-T-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
message(paste("Arguments 're.time' and 'time.quadratic' cannot be used",
"together. Omitting the quadratic time effect."))
} else {
glmest.time <- TRUE
message("Time effects are ignored because only one time step is present.")
}
dat <- dat[, -2] # remove linear effect
} else if (time.linear == FALSE && time.quadratic == FALSE &&
re.time == TRUE) {
# F-F-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
} else {
glmest.time <- TRUE
message("Time effects are ignored because only one time step is present.")
}
dat <- dat[, -2] # remove linear effect
} else if (time.linear == TRUE && time.quadratic == FALSE &&
re.time == TRUE) {
# T-F-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
dat <- dat[, -2] # remove linear effect
warning(paste("Arguments 're.time' and 'time.linear' cannot be used",
"together. Omitting the linear time effect."))
} else {
glmest.time <- TRUE
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
}
if (glmest.node == FALSE || glmest.time == FALSE) {
glmest <- FALSE
} else {
glmest <- TRUE
}
# estimate!
if (glmest == TRUE) { # GLM is necessary; no random effects
model <- glm(dat, family = family, na.action = na.action, ...)
} else { # mixed-effects model (lme4) is required; random effects present
if (is.character(family)) {
family <- get(family, mode = "function", envir = parent.frame(2))
} else if (is.function(family)) {
family <- family()
}
if (isTRUE(all.equal(family, gaussian()))) { # gaussian link: use lmer
if (re.node == TRUE && re.time == TRUE) {
model <- lme4::lmer(response ~ . - re.time - node + (1|re.time) +
(1|node), data = dat, na.action = na.action, ...)
} else if (re.node == TRUE && re.time == FALSE) {
model <- lme4::lmer(response ~ . - node + (1|node), data = dat,
na.action = na.action, ...)
} else if (re.node == FALSE && re.time == TRUE) {
model <- lme4::lmer(response ~ . -re.time + (1|re.time), data = dat,
na.action = na.action, ...)
}
} else {
if (re.node == TRUE && re.time == TRUE) { # other link function: glmer
model <- lme4::glmer(response ~ . - re.time - node + (1|re.time) +
(1|node), data = dat, family = family, na.action = na.action, ...)
} else if (re.node == TRUE && re.time == FALSE) {
model <- lme4::glmer(response ~ . - node + (1|node), data = dat,
family = family, na.action = na.action, ...)
} else if (re.node == FALSE && re.time == TRUE) {
model <- lme4::glmer(response ~ . - re.time + (1|re.time), data = dat,
family = family, na.action = na.action, ...)
}
}
}
return(model)
}
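# ------------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original file).
# Assumptions: the package's term functions (e.g. 'netlag') are available, and
# 'y_list' (a list of named numeric outcome vectors, one per time step) and
# 'nw_list' (a matching list of network matrices) are hypothetical objects the
# user has prepared -- they are not defined anywhere in this file.
#
# m <- tnam(y_list ~ netlag(y_list, nw_list, lag = 1),
#           re.time = TRUE, center.y = TRUE)
# summary(m)
# ------------------------------------------------------------------------------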
|
/tnam/R/tnam.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 10,467 |
r
|
# This file contains the data preparation and estimation functions for temporal
# or cross-sectional network autocorrelation models. Written by Philip Leifeld.
# display version number and date when the package is loaded
.onAttach <- function(libname, pkgname) {
desc <- packageDescription(pkgname, libname)
packageStartupMessage(
'Package: tnam\n',
'Version: ', desc$Version, '\n',
'Date: ', desc$Date, '\n',
'Authors: Philip Leifeld (Eawag and University of Bern)\n',
' Skyler J. Cranmer (The Ohio State University)\n'
)
}
# function which aggregates data for glm analysis
tnamdata <- function(formula, center.y = FALSE) {
# parse the formula
if (class(formula) != "formula") {
stop("'formula' must be a formula object.")
}
lhs <- deparse(formula[[2]]) # name of the response variable
lhs <- eval(parse(text = lhs)) # get the actual response data
rhs <- paste0(deparse(formula[[3]]), collapse = "") # rhs of formula
rhs <- gsub("\\s+", " ", rhs) # get rid of redundant spaces
rhs <- strsplit(rhs, " \\+ ")[[1]] # parse separate formula elements
# create data frame with response variable, time, and nodes
time <- numeric()
node <- character()
response <- numeric()
if (class(lhs) == "list") {
for (i in 1:length(lhs)) {
if (!is.numeric(lhs[[i]])) {
stop(paste("The response variable should be numeric or a list of",
"numerics or a data frame with one time point per column."))
}
if (is.null(names(lhs[[i]])) || length(names(lhs[[i]])) !=
length(lhs[[i]])) {
stop(paste("The outcome variable must have node labels if multiple",
"time points are present."))
}
node <- c(node, names(lhs[[i]]))
time <- c(time, rep(i, length(lhs[[i]])))
if (center.y == TRUE) {
lhs[[i]] <- lhs[[i]] - mean(lhs[[i]], na.rm = TRUE)
}
response <- c(response, lhs[[i]])
}
} else if (class(lhs) == "data.frame") {
for (i in 1:ncol(lhs)) {
if (!is.numeric(lhs[, i])) {
stop(paste("The response variable should be numeric or a list of",
"numerics or a data frame with one time point per column."))
}
if (is.null(rownames(lhs)) || length(rownames(lhs)) != nrow(lhs)) {
stop(paste("The outcome variable must have node labels if multiple",
"time points are present."))
}
node <- c(node, rownames(lhs))
time <- c(time, rep(i, nrow(lhs)))
if (center.y == TRUE) {
lhs[, i] <- lhs[, i] - mean(lhs[, i], na.rm = TRUE)
}
response <- c(response, lhs[, i])
}
} else if (!is.numeric(lhs)) {
stop("Data type of the response variable could not be recognized.")
} else {
response <- lhs
if (center.y == TRUE) {
response <- response - mean(response, na.rm = TRUE)
}
time <- rep(1, length(lhs))
node <- as.character(1:length(lhs))
}
dat <- data.frame(response = response, time = time, node = node)
# compute results according to rhs
resultlist <- list()
for (i in 1:length(rhs)) {
result <- eval(parse(text = rhs[i]))
resultlist[[i]] <- result
}
# check compatibility of labels
for (i in 1:length(resultlist)) {
for (j in 1:length(resultlist)) {
itime <- length(unique(resultlist[[i]]$time))
jtime <- length(unique(resultlist[[j]]$time))
if ((itime > 1 || jtime > 1) && i < j) {
inters <- length(intersect(resultlist[[i]]$node, resultlist[[j]]$node))
if (inters == 0) {
stop(paste("Model terms", i, "and", j, "do not have any",
"intersecting node labels. Please attach names, row names, or",
"vertex names to the 'y' or 'networks' argument."))
}
}
}
}
# take care of the lags
for (i in 1:length(resultlist)) {
lag.i <- attributes(resultlist[[i]])$lag
if (is.null(lag.i) || length(lag.i) == 0) {
lag.i <- 0
}
resultlist[[i]]$time <- resultlist[[i]]$time + lag.i
}
# merge results with response variable and take care of lags
for (i in 1:length(resultlist)) {
dat <- merge(dat, resultlist[[i]], by = c("time", "node"), all.x = TRUE,
all.y = FALSE)
colnames(dat)[3] <- "response"
dat$node <- as.character(dat$node)
if (ncol(resultlist[[i]]) == 4) {
dat <- dat[, -ncol(dat)]
}
}
dat <- dat[, c(3, 1, 2, 4:ncol(dat))]
return(dat)
}
# temporal network autocorrelation model
tnam <- function(formula, family = gaussian, re.node = FALSE,
re.time = FALSE, time.linear = FALSE, time.quadratic = FALSE,
center.y = FALSE, na.action = na.omit, ...) {
# prepare the data frame
dat <- tnamdata(formula, center.y = center.y)
# check if GLM is appropriate
if (re.node == FALSE && re.time == FALSE && length(unique(dat$time)) > 1) {
warning(paste("Different time points are available. You might want to use",
"a mixed effects model using arguments 're.time' and/or 're.node'."))
}
# take care of the node variable: keep as random effect or remove
if (re.node == TRUE && length(unique(dat$time)) > 1) {
glmest.node <- FALSE
} else {
dat <- dat[, -3]
glmest.node <- TRUE
}
# take care of the time variable
if (time.linear == TRUE && time.quadratic == TRUE && re.time == TRUE) {
# T-T-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
dat <- dat[, -2] # remove linear effect
warning(paste("Arguments 're.time' and 'time.linear' cannot be used",
"together. Omitting the linear time effect."))
warning(paste("Arguments 're.time' and 'time.quadratic' cannot be used",
"together. Omitting the quadratic time effect."))
} else {
glmest.time <- TRUE
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
} else if (time.linear == TRUE && time.quadratic == TRUE &&
re.time == FALSE) {
# T-T-F
glmest.time <- TRUE
if (length(unique(dat$time)) > 1) {
dat$time.squared <- dat$time^2
} else {
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
} else if (time.linear == TRUE && time.quadratic == FALSE &&
re.time == FALSE) {
# T-F-F
glmest.time <- TRUE
if (length(unique(dat$time)) > 1) {
# OK; do not modify anything
} else {
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
} else if (time.linear == FALSE && time.quadratic == FALSE &&
re.time == FALSE) {
# F-F-F
dat <- dat[, -2] # remove linear effect
glmest.time <- TRUE
} else if (time.linear == FALSE && time.quadratic == TRUE &&
re.time == FALSE) {
# F-T-F
glmest.time <- TRUE
if (length(unique(dat$time)) > 1) {
dat$time.squared <- dat$time^2 # create quadratic effect
} else {
message("Time effects are ignored because only one time step is present.")
}
dat <- dat[, -2] # remove linear effect
} else if (time.linear == FALSE && time.quadratic == TRUE &&
re.time == TRUE) {
# F-T-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
message(paste("Arguments 're.time' and 'time.quadratic' cannot be used",
"together. Omitting the quadratic time effect."))
} else {
glmest.time <- TRUE
message("Time effects are ignored because only one time step is present.")
}
dat <- dat[, -2] # remove linear effect
} else if (time.linear == FALSE && time.quadratic == FALSE &&
re.time == TRUE) {
# F-F-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
} else {
glmest.time <- TRUE
message("Time effects are ignored because only one time step is present.")
}
dat <- dat[, -2] # remove linear effect
} else if (time.linear == TRUE && time.quadratic == FALSE &&
re.time == TRUE) {
# T-F-T
if (length(unique(dat$time)) > 1) {
glmest.time <- FALSE
dat$re.time <- dat$time # add RE
dat <- dat[, -2] # remove linear effect
warning(paste("Arguments 're.time' and 'time.linear' cannot be used",
"together. Omitting the linear time effect."))
} else {
glmest.time <- TRUE
dat <- dat[, -2] # remove linear effect
message("Time effects are ignored because only one time step is present.")
}
}
if (glmest.node == FALSE || glmest.time == FALSE) {
glmest <- FALSE
} else {
glmest <- TRUE
}
# estimate!
if (glmest == TRUE) { # GLM is necessary; no random effects
model <- glm(dat, family = family, na.action = na.action, ...)
} else { # mixed-effects model (lme4) is required; random effects present
if (is.character(family)) {
family <- get(family, mode = "function", envir = parent.frame(2))
} else if (is.function(family)) {
family <- family()
}
if (isTRUE(all.equal(family, gaussian()))) { # gaussian link: use lmer
if (re.node == TRUE && re.time == TRUE) {
model <- lme4::lmer(response ~ . - re.time - node + (1|re.time) +
(1|node), data = dat, na.action = na.action, ...)
} else if (re.node == TRUE && re.time == FALSE) {
model <- lme4::lmer(response ~ . - node + (1|node), data = dat,
na.action = na.action, ...)
} else if (re.node == FALSE && re.time == TRUE) {
model <- lme4::lmer(response ~ . -re.time + (1|re.time), data = dat,
na.action = na.action, ...)
}
} else {
if (re.node == TRUE && re.time == TRUE) { # other link function: glmer
model <- lme4::glmer(response ~ . - re.time - node + (1|re.time) +
(1|node), data = dat, family = family, na.action = na.action, ...)
} else if (re.node == TRUE && re.time == FALSE) {
model <- lme4::glmer(response ~ . - node + (1|node), data = dat,
family = family, na.action = na.action, ...)
} else if (re.node == FALSE && re.time == TRUE) {
model <- lme4::glmer(response ~ . - re.time + (1|re.time), data = dat,
family = family, na.action = na.action, ...)
}
}
}
return(model)
}
|
allH3K27ac <- as.matrix(read.table("allEncodeDnasePeaks.H3K27ac_only.sorted.merged.distal.H3K27ac_percentiles.numbers.for_clustering.txt",sep="\t", header=TRUE, row.names=NULL))
#allH3K27ac <- as.data.frame(read.table("test.txt",sep="\t", header=TRUE, row.names=NULL))
cat("read file\n")
kmeans_results <- kmeans(allH3K27ac, centers=5)
m <- cbind(allH3K27ac, kmeans_results$cluster)
write.table(m, file="allEncodeDnasePeaks.H3K27ac_only.sorted.merged.distal.clusters.txt", sep="\t", quote=FALSE, row.names=FALSE, col.names=TRUE)
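# Note (added sketch, not in the original script): kmeans() starts from random
# centers, so cluster assignments can differ between runs; for reproducibility
# one would typically fix the seed and use multiple restarts, e.g.:
# set.seed(42)
# kmeans_results <- kmeans(allH3K27ac, centers = 5, nstart = 25)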
|
/clusterH3K27acProfiles.R
|
no_license
|
sowmyaiyer/encode_elements
|
R
| false | false | 530 |
r
|
allH3K27ac <- as.matrix(read.table("allEncodeDnasePeaks.H3K27ac_only.sorted.merged.distal.H3K27ac_percentiles.numbers.for_clustering.txt",sep="\t", header=TRUE, row.names=NULL))
#allH3K27ac <- as.data.frame(read.table("test.txt",sep="\t", header=TRUE, row.names=NULL))
cat("read file\n")
kmeans_results <- kmeans(allH3K27ac, centers=5)
m <- cbind(allH3K27ac, kmeans_results$cluster)
write.table(m, file="allEncodeDnasePeaks.H3K27ac_only.sorted.merged.distal.clusters.txt", sep="\t", quote=FALSE, row.names=FALSE, col.names=TRUE)
|
library(shiny)
ui <- fluidPage(
h2(textOutput("txt")),
verbatimTextOutput("sum1")
)
server <- function(input, output, session) {
output$txt = renderText({
"mtcars를 사용한 회귀분석"
})
output$sum1 = renderPrint({
summary(lm(mpg~wt + qsec, data=mtcars))
})
}
shinyApp(ui, server)
|
/s1~s10/s_test10.R
|
no_license
|
bjh1646/R_data
|
R
| false | false | 312 |
r
|
library(shiny)
ui <- fluidPage(
h2(textOutput("txt")),
verbatimTextOutput("sum1")
)
server <- function(input, output, session) {
output$txt = renderText({
"mtcars를 사용한 회귀분석"
})
output$sum1 = renderPrint({
summary(lm(mpg~wt + qsec, data=mtcars))
})
}
shinyApp(ui, server)
|
# make sigma (gridded prior uncertainty) vector for domain
# author: Lewis Kunik
## prerequisite scripts:
## none (but prior_uncert.nc must be present in the include/ directory)
##
## output files:
## sigma.rds - file containing a vector of length (#cells * #times)
## with values of prior uncertainty for every timestep
# load package dependencies
library(ncdf4)
# run dependent scripts
source("config.r")
# ~~~~~~~~~~~~~~~~~~~~~~~ create time bins ~~~~~~~~~~~~~~~~~~~~~~~#
# establish the markers for the time-bin cutoffs (includes an end-timestamp after
# the last flux hour for the purpose of the cut() function)
time_bins <- seq(from = flux_start_POSIX, to = flux_end_POSIX, by = flux_t_res) #flux_t_res defined in config.r
# ~~~~~~~~~~~~~~~~~~~~~~~ load in prior uncertainty file ~~~~~~~~~~~~~~~~~~~~~~~#
print("Loading prior uncertainty file")
# load ncdf file from outer filepath
nc_sigma <- nc_open(sigma_file)
# get lon, lat, time, and flux from the netcdf file
nc_lat <- round(ncvar_get(nc_sigma, "lat"), round_digs)
nc_lon <- round(ncvar_get(nc_sigma, "lon"), round_digs)
nc_time <- ncvar_get(nc_sigma, "time")
nc_uncert <- ncvar_get(nc_sigma, "uncertainty")
nc_close(nc_sigma)
# convert time field from seconds-since-epoch to POSIX time
class(nc_time) <- c("POSIXt", "POSIXct")
attributes(nc_time)$tzone <- "UTC"
# NOTE: uncertainty vals are intended to have dimensions (lon x lat x time)
# get the number of lon/lat in the uncertainty array
nlon <- length(nc_lon)
nlat <- length(nc_lat)
ntime <- length(nc_time)
# in case the time field has length = 1
if(ntime == 1)
nc_uncert <- array(nc_uncert, dim = c(nlon, nlat, ntime))
# ~~~~~~~~~~~~~~ obtain avg uncertainties for each timestep ~~~~~~~~~~~~~~ #
# break up emiss times into time bins corresponding to inversion flux times
times_cut_all <- as.POSIXct(cut(nc_time, breaks = time_bins), tz = "UTC")
# filter for NA values
if (any(is.na(times_cut_all))) {
times_cut <- times_cut_all[-(which(is.na(times_cut_all)))] #remove NA values
} else {
times_cut <- times_cut_all #keep as is
}
# this is the number of timesteps covered by the prior uncertainty
nbins <- length(unique(times_cut))
# Average the uncertainties to obtain mean vals for each time bin
uncert_bins <- array(0, dim = c(nlon, nlat, nbins))
for (ii in 1:nbins) {
ibin <- which(times_cut_all == unique(times_cut)[ii]) #get indices of timesteps corresponding to this bin
uncert_arr <- array(nc_uncert[, , ibin], dim = c(nlon, nlat, length(ibin))) #this is needed in case t_res is hourly
uncert_bins[, , ii] <- apply(uncert_arr, FUN = mean, MARGIN = c(1, 2)) #aggregate over bin's timesteps
}
# ~~~~~~~~~~~~~~ mask grid to include only domain cells ~~~~~~~~~~~~~~ #
# get lonlat pairs for the sigma file
lonlat_sigma <- expand.grid(nc_lon, nc_lat)
# load in lonlat_domain file specified in config.r
lonlat_domain <- readRDS(lonlat_domain_file)
# note which grid cells are in the domain
iDomain <- apply(lonlat_domain, FUN = function(x) which((lonlat_sigma[, 1] == x[1]) &
(lonlat_sigma[, 2] == x[2])), MARGIN = 1)
# subset for the domain cells
sigma_mat <- apply(uncert_bins, FUN = function(x) x[iDomain], MARGIN = 3)
# convert to vector format
sigma <- as.vector(sigma_mat)
# ~~~~~~~~~~~~~~~~~~~~ Save prior uncertainty file ~~~~~~~~~~~~~~~~~~~~~~~#
print("Saving formatted prior emission uncertainty to sigma file")
filepath <- paste0(out_path, "sigma.rds")
saveRDS(sigma, filepath)
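# Illustrative read-back for downstream scripts (sketch, not in the original;
# assumes the same out_path defined in config.r):
# sigma <- readRDS(paste0(out_path, "sigma.rds"))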
|
/inversion/src/make_sigma.r
|
no_license
|
lkunik/bayesian-inversion-R
|
R
| false | false | 3,476 |
r
|
# make sigma (gridded prior uncertainty) vector for domain
# author: Lewis Kunik
## prerequisite scripts:
## none (but prior_uncert.nc must be present in the include/ directory)
##
## output files:
## sigma.rds - file containing a vector of length (#cells * #times)
## with values of prior uncertainty for every timestep
# load package dependencies
library(ncdf4)
# run dependent scripts
source("config.r")
# ~~~~~~~~~~~~~~~~~~~~~~~ create time bins ~~~~~~~~~~~~~~~~~~~~~~~#
# establish the markers for the time-bin cutoffs (includes an end-timestamp after
# the last flux hour for the purpose of the cut() function)
time_bins <- seq(from = flux_start_POSIX, to = flux_end_POSIX, by = flux_t_res) #flux_t_res defined in config.r
# ~~~~~~~~~~~~~~~~~~~~~~~ load in prior uncertainty file ~~~~~~~~~~~~~~~~~~~~~~~#
print("Loading prior uncertainty file")
# load ncdf file from outer filepath
nc_sigma <- nc_open(sigma_file)
# get lon, lat, time, and flux from the netcdf file
nc_lat <- round(ncvar_get(nc_sigma, "lat"), round_digs)
nc_lon <- round(ncvar_get(nc_sigma, "lon"), round_digs)
nc_time <- ncvar_get(nc_sigma, "time")
nc_uncert <- ncvar_get(nc_sigma, "uncertainty")
nc_close(nc_sigma)
# convert time field from seconds-since-epoch to POSIX time
class(nc_time) <- c("POSIXt", "POSIXct")
attributes(nc_time)$tzone <- "UTC"
# NOTE: uncertainty vals are intended to have dimensions (lon x lat x time)
# get the number of lon/lat in the uncertainty array
nlon <- length(nc_lon)
nlat <- length(nc_lat)
ntime <- length(nc_time)
# in case the time field has length = 1
if(ntime == 1)
nc_uncert <- array(nc_uncert, dim = c(nlon, nlat, ntime))
# ~~~~~~~~~~~~~~ obtain avg uncertainties for each timestep ~~~~~~~~~~~~~~ #
# break up emiss times into time bins corresponding to inversion flux times
times_cut_all <- as.POSIXct(cut(nc_time, breaks = time_bins), tz = "UTC")
# filter for NA values
if (any(is.na(times_cut_all))) {
times_cut <- times_cut_all[-(which(is.na(times_cut_all)))] #remove NA values
} else {
times_cut <- times_cut_all #keep as is
}
# this is the number of timesteps covered by the prior uncertainty
nbins <- length(unique(times_cut))
# Average the uncertainties to obtain mean vals for each time bin
uncert_bins <- array(0, dim = c(nlon, nlat, nbins))
for (ii in 1:nbins) {
ibin <- which(times_cut_all == unique(times_cut)[ii]) #get indices of timesteps corresponding to this bin
uncert_arr <- array(nc_uncert[, , ibin], dim = c(nlon, nlat, length(ibin))) #this is needed in case t_res is hourly
uncert_bins[, , ii] <- apply(uncert_arr, FUN = mean, MARGIN = c(1, 2)) #aggregate over bin's timesteps
}
# ~~~~~~~~~~~~~~ mask grid to include only domain cells ~~~~~~~~~~~~~~ #
# get lonlat pairs for the sigma file
lonlat_sigma <- expand.grid(nc_lon, nc_lat)
# load in lonlat_domain file specified in config.r
lonlat_domain <- readRDS(lonlat_domain_file)
# note which grid cells are in the domain
iDomain <- apply(lonlat_domain, FUN = function(x) which((lonlat_sigma[, 1] == x[1]) &
(lonlat_sigma[, 2] == x[2])), MARGIN = 1)
# subset for the domain cells
sigma_mat <- apply(uncert_bins, FUN = function(x) x[iDomain], MARGIN = 3)
# convert to vector format
sigma <- as.vector(sigma_mat)
# ~~~~~~~~~~~~~~~~~~~~ Save prior uncertainty file ~~~~~~~~~~~~~~~~~~~~~~~#
print("Saving formatted prior emission uncertainty to sigma file")
filepath <- paste0(out_path, "sigma.rds")
saveRDS(sigma, filepath)
|
X_train <- read.table("../UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("../UCI HAR Dataset/train/y_train.txt")
X_test <- read.table("../UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("../UCI HAR Dataset/test/y_test.txt")
features <- read.table("../UCI HAR Dataset/features.txt")
subject_train <- read.table("../UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table("../UCI HAR Dataset/test/subject_test.txt")
activity_labels <-read.table("../UCI HAR Dataset/activity_labels.txt")
subject <- rbind(subject_train,subject_test)
X_tot <- rbind(X_train,X_test)
## Use the given feature names as our column headers
colnames(X_tot) <- features[,2]
## Stick the training and testing labels together
y_tot <- rbind(y_train,y_test)
## Stick the labels and the measurements together
tot <- cbind(y_tot,X_tot)
## Stick the measurement + labels together with the subjects
df <- cbind(subject,tot)
## Use the given activity labels on our measurements
df[,2] <- factor(df[,2],levels=1:6,labels=activity_labels[,2])
## Include nice labels for the first two columns as well
colnames(df)[1] <- "Subject"
colnames(df)[2] <- "Label"
## Only mean and std measurements:
df[,c("tBodyAcc-mean()-X","tBodyAcc-mean()-Y","tBodyAcc-mean()-Z","tBodyAcc-std()-X","tBodyAcc-std()-Y","tBodyAcc-std()-Z")]
library(plyr)
df_tidy <- ddply(df, .(Subject,Label), colwise(mean))
## Write our new tidy data file to a file
write.table(df_tidy, "tidy.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
jessems/gettingdata
|
R
| false | false | 1,444 |
r
|
X_train <- read.table("../UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("../UCI HAR Dataset/train/y_train.txt")
X_test <- read.table("../UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("../UCI HAR Dataset/test/y_test.txt")
features <- read.table("../UCI HAR Dataset/features.txt")
subject_train <- read.table("../UCI HAR Dataset/train/subject_train.txt")
subject_test <- read.table("../UCI HAR Dataset/test/subject_test.txt")
activity_labels <-read.table("../UCI HAR Dataset/activity_labels.txt")
subject <- rbind(subject_train,subject_test)
X_tot <- rbind(X_train,X_test)
## Use the given feature names as our column headers
colnames(X_tot) <- features[,2]
## Stick the training and testing labels together
y_tot <- rbind(y_train,y_test)
## Stick the labels and the measurements together
tot <- cbind(y_tot,X_tot)
## Stick the measurement + labels together with the subjects
df <- cbind(subject,tot)
## Use the given activity labels on our measurements
df[,2] <- factor(df[,2],levels=1:6,labels=activity_labels[,2])
## Include nice labels for the first two columns as well
colnames(df)[1] <- "Subject"
colnames(df)[2] <- "Label"
## Only mean and std measurements:
df[,c("tBodyAcc-mean()-X","tBodyAcc-mean()-Y","tBodyAcc-mean()-Z","tBodyAcc-std()-X","tBodyAcc-std()-Y","tBodyAcc-std()-Z")]
library(plyr)
df_tidy <- ddply(df, .(Subject,Label), colwise(mean))
## Write our new tidy data file to a file
write.table(df_tidy, "tidy.txt", row.names = FALSE)
|
#
# Predict salary (Pounds) based on job descriptions from online postings
# Simple model using 200K training set results in MAE of 13,010
# Increasing training set made MAE bigger but this is probably overfitting with small sets
#
# Variables:
# Id, numeric counter
# Title - job description, often including location
# FullDescription - detailed job description
# LocationRaw -
# LocationNormalized - city
# ContractType - full_time, part_time, NA (almost always full-time according to boxplots)
# ContractTime - permanent, contract, NULL (boxplots very different)
# Company - recruiting company or actual?
# Category - job category, e.g. engineering **
# SalaryRaw - salary, often as range (pounds)
# SalaryNormalized - salaray as actual number (pounds)
# SourceName - URL (of job posting?)
require('plyr')
require('DAAG')
require('ggplot2')
require('tm')
mae <- function(x,y) {
if (length(x) != length(y) ) {
print("Vectors not equal. Exiting ...")
}
mean(abs((x - y)))
}
mse <- function(x , y) {
if (length(x) != length(y) ) {
print("Vectors not equal. Exiting ...")
}
mean( (x-y)^2 )
}
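# Quick illustrative check of the two metrics (made-up numbers, not from the data):
# mae(c(100, 200), c(110, 190)) # -> 10
# mse(c(100, 200), c(110, 190)) # -> 100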
# --------------------------------------------------------------------------------------------------------
# Partition data into folds & run a set of linear regression models with lm
# Returns a list of k-fold average MAE for each model
# --------------------------------------------------------------------------------------------------------
model.nfold <- function(n, data, locations=NULL) {
#
err.rates <- data.frame() # initialize results object
for (k in 1:n) {
#
# Split the data into training (train.pct %) and test (1 - train.pct %) data
#
if (!missing(locations)) {
top_20cities <- sort(table(data$City),decreasing=TRUE)[1:20]
locations$top20_flag <- (locations$city1 %in% top_20cities)
locations_short <- locations[locations$top20_flag,]
data <- merge(data,locations_short,by.x = "City", by.y="city1")
data$city <- as.numeric(as.factor(data$City))
data$city2 <- as.numeric(as.factor(data$city2))
print(head(locations$top20_flag))
}
train.index <- sample(1:N, train.pct * N) # Create random sample of records (training set)
train.data <- data[train.index, ] # Split into train/test
test.data <- data[-train.index, ] # note use of neg index...different than Python!
#
# Perform fit for various values of k
#
lm1 <- lm(Salary ~ 1 , data=train.data) #
lm2 <- lm(Salary ~ city + category + ctime + ctype + company, data=train.data)
lm3 <- lm(Salary ~ title + city + category + ctime + ctype + company, data=train.data)
lm4 <- lm(Salary ~ title + city + category + ctime + ctype + company + city:category,
data=train.data)
lm5 <- lm(Salary ~ title + city + category + ctime + ctype + company + city:category + company:category,
data=train.data)
lm6 <- lm(Salary ~ title + city + category + ctime + ctype + company + company:category,
data=train.data)
list_of_models <- list(lm1,lm2,lm3,lm4,lm5,lm6)
for (m in 1:6) {
salary.lm <- list_of_models[[m]]
if (!missing(locations)) {
salary.lm <- lm(Salary ~ title + city + city2 + category + ctime + ctype + company,
data=train.data) #
}
# Calculate an error metric for this fold k and add to err.rates dataframe
test.predict <- predict(salary.lm, test.data)
this.error <- mae(test.data$Salary, test.predict)
err.rates <- rbind(err.rates, cbind(m,this.error))
}
}
return(err.rates)
}
# --------------------------------------------------------------------------------------------------------
# Build a document-term matrix from the job titles (tm package), then
# partition data into n folds & run linear regression including title keywords
# - returns a data frame of per-fold MAE values
# --------------------------------------------------------------------------------------------------------
get_dtm <- function(data) {
src <- DataframeSource(data.frame(data$Title)) # Using just Title for simplicity
c <- Corpus(src)
c <- tm_map(c, stripWhitespace)
c <- tm_map(c, tolower)
c <- tm_map(c, removeWords, stopwords("english"))
# Create a dtm where each "document" is the rowid
dtm <- DocumentTermMatrix(c)
}
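# Illustrative check of the resulting document-term matrix (sketch only; uses
# tm::inspect() and the 'train_all' data frame loaded further below):
# dtm_titles <- get_dtm(train_all)
# inspect(dtm_titles[1:5, 1:10])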
textmodel.nfold <- function(n, data, locations=NULL) {
# Create a dtm where each "document" is the rowid and look at the Title field
dtm <- get_dtm(data)
# Each of these words becomes a column containing a binary flag
# This list came from a perusal of titles for each quantile
data <- cbind(data, as.matrix(dtm[, job_titles]))
err.rates <- data.frame() # initialize results object
for (k in 1:n) {
# Split the data into training (train.pct %) and test (1 - train.pct %) data
train.index <- sample(1:N, train.pct * N) # Create random sample of records (training set)
train.data <- data[train.index, ] # Split into train/test
test.data <- data[-train.index, ] # note use of neg index...different than Python!
# Could use paste() and as.formula() here
salary.lm <- lm(Salary ~ title + city + category + ctime + ctype + company + company:category +
teacher + chef + support + assistant+ administrator + operator + technician +
analyst + consultant + engineer + engineering + trainee + care + manager + nurse + developer,
data=train.data)
# Calculate an error metric for this fold k and add to err.rates dataframe
test.predict <- predict(salary.lm, test.data)
this.error <- mae(test.data$Salary, test.predict)
err.rates <- rbind(err.rates, this.error)
}
return(err.rates)
}
# --------------------------------------------------------------------------------------------------------
# Main script: load the data, code the categorical variables, evaluate the
# models with n-fold cross-validation, then fit the final model and write the
# submission file
# --------------------------------------------------------------------------------------------------------
setwd("/Volumes/DATA/robert/Desktop/Projects/GA/salary")
train_all <- read.csv('data/train.csv')
test <- read.csv('data/test.csv')
loc_tree <- read.csv('data/Location_Tree2.csv',header=FALSE,
col.names=c('uk','country','city1','city2','city3','city4','city5' ))
job_titles <- c('teacher','chef','support','assistant','administrator',
'operator','technician','analyst', 'consultant',
'engineer','engineering', 'trainee',
'care', 'manager','nurse','developer')
N <- nrow(train_all) # size of data
train.pct <- .7 # Use 70% of our data as a training set
# Rename some columns to make life easier
train_all <- rename(train_all, c("LocationNormalized" = "City"))
train_all <- rename(train_all, c("SalaryNormalized" = "Salary"))
test <- rename(test, c("LocationNormalized" = "City"))
# Code the categories by taking levels - R does it automatically but it's a lot faster to code
train_all$title <- as.numeric(as.factor(train_all$Title))
train_all$city <- as.numeric(as.factor(train_all$City))
train_all$category <- as.numeric(as.factor(train_all$Category))
train_all$ctime <- as.numeric(as.factor(train_all$ContractTime))
train_all$ctype <- as.numeric(as.factor(train_all$ContractType))
train_all$company <- as.numeric(as.factor(train_all$Company))
test$title <- as.numeric(as.factor(test$Title))
test$city <- as.numeric(as.factor(test$City))
test$category <- as.numeric(as.factor(test$Category))
test$ctime <- as.numeric(as.factor(test$ContractTime))
test$ctype <- as.numeric(as.factor(test$ContractType))
test$company <- as.numeric(as.factor(test$Company))
#
# Make some initial plots to
#
#ggplot(data=train_all,aes(x=company,y=Salary)) + geom_point() # some variability
#ggplot(data=train_all,aes(x=category,y=Salary)) + geom_point() # ***
#ggplot(data=train_all,aes(x=ctype,y=Salary)) + geom_point()
#ggplot(data=train_all,aes(x=city,y=Salary)) + geom_point()
#
#ggplot(data=train_all,aes(x=ctime,y=Salary)) + geom_point() # minor
#ggplot(data=train_all,aes(x=category,y=company,color=Salary)) + geom_point() + geom_jitter()
# --------------------------------------------------------------------------------------------------------
# Run the linear model
# with and without the location tree
#
# Run using the tm package to parse some text-heavy columns to create additional columns to use in the model
# - With anything bigger than the smallest training set, takes a long time on desktop and laptop
#
# Generally model 5 works best. Adding interaction terms in model 6 makes things slightly worse
# --------------------------------------------------------------------------------------------------------
print("Evaluating linear models with MAE, 10 fold CV")
errors <- model.nfold(10,train_all)
model_errors <- aggregate(errors, by=errors['m'], FUN=mean)
print(model_errors)
#print("Evaluating linear models and location tree with MAE, 10 fold CV")
#errors <- model.nfold(2,train_all,loc_tree)
#model_errors <- aggregate(errors, by=errors['m'], FUN=mean)
#print(model_errors)
print("Evaluating with some key job titles, 10 fold CV")
errors <- textmodel.nfold(10,train_all[1:5000,])
print(colMeans(errors))
# Finally, we need to work with the actual test data
# We should train our final model with all the training data #train_all
train_dtm <- get_dtm(train_all)
test_dtm <- get_dtm(test)
train_all <- cbind(train_all, as.matrix(train_dtm[,job_titles]))
test <- cbind(test, as.matrix(test_dtm[, job_titles]))
print("final model")
finalmodel <- lm(Salary ~ title + city + category + ctime + ctype + company + company:category +
teacher + chef+ support+ assistant+
administrator + operator + technician + analyst + consultant + engineer + engineering + trainee +
care + manager + nurse + developer,
data=train_all)
print("predictions")
predictions <- predict(finalmodel, test)
# What are these predictions going to be?
# Put the submission together and write it to a file
print("writing submission file")
submission <- data.frame(Id=test$Id, Salary=predictions)
write.csv(submission, "my_submission.csv", row.names=FALSE)
print("done...")
|
/salary/kaggle.R
|
no_license
|
rmauriello/GA
|
R
| false | false | 10,584 |
r
|
#
# Predict salary (Pounds) based on job descriptions from online postings
# Simple model using 200K training set results in MAE of 13,010
# Increasing training set made MAE bigger but this is probably overfitting with small sets
#
# Variables:
# Id, numeric counter
# Title - job description, often including location
# FullDescription - detailed job description
# LocationRaw -
# LocationNormalized - city
# ContractType - full_time, part_time, NA (almost always full-time according to boxplots)
# ContractTime - permanent, contract, NULL (boxplots very different)
# Company - recruiting company or actual?
# Category - job category, e.g. engineering **
# SalaryRaw - salary, often as range (pounds)
# SalaryNormalized - salaray as actual number (pounds)
# SourceName - URL (of job posting?)
require('plyr')
require('DAAG')
require('ggplot2')
require('tm')
mae <- function(x,y) {
if (length(x) != length(y) ) {
print("Vectors not equal. Exiting ...")
}
mean(abs((x - y)))
}
mse <- function(x , y) {
if (length(x) != length(y) ) {
print("Vectors not equal. Exiting ...")
}
mean( (x-y)^2 )
}
# --------------------------------------------------------------------------------------------------------
# Partition data into folds & run a set of linear regression models with lm
# Returns a list of k-fold average MAE for each model
# --------------------------------------------------------------------------------------------------------
model.nfold <- function(n, data, locations=NULL) {
#
err.rates <- data.frame() # initialize results object
for (k in 1:n) {
#
# Split the data into training (train.pct %) and test (1 - train.pct %) data
#
if (!missing(locations)) {
top_20cities <- sort(table(data$City),decreasing=TRUE)[1:20]
locations$top20_flag <- (locations$city1 %in% top_20cities)
locations_short <- locations[locations$top20_flag,]
data <- merge(data,locations_short,by.x = "City", by.y="city1")
data$city <- as.numeric(as.factor(data$City))
data$city2 <- as.numeric(as.factor(data$city2))
print(head(locations$top20_flag))
}
train.index <- sample(1:N, train.pct * N) # Create random sample of records (training set)
train.data <- data[train.index, ] # Split into train/test
test.data <- data[-train.index, ] # note use of neg index...different than Python!
#
# Perform fit for various values of k
#
lm1 <- lm(Salary ~ 1 , data=train.data) #
lm2 <- lm(Salary ~ city + category + ctime + ctype + company, data=train.data)
lm3 <- lm(Salary ~ title + city + category + ctime + ctype + company, data=train.data)
lm4 <- lm(Salary ~ title + city + category + ctime + ctype + company + city:category,
data=train.data)
lm5 <- lm(Salary ~ title + city + category + ctime + ctype + company + city:category + company:category,
data=train.data)
lm6 <- lm(Salary ~ title + city + category + ctime + ctype + company + company:category,
data=train.data)
list_of_models <- list(lm1,lm2,lm3,lm4,lm5,lm6)
for (m in 1:6) {
salary.lm <- list_of_models[[m]]
if (!missing(locations)) {
salary.lm <- lm(Salary ~ title + city + city2 + category + ctime + ctype + company,
data=train.data) #
}
# Calculate an error metric for this fold k and add to err.rates dataframe
test.predict <- predict(salary.lm, test.data)
this.error <- mae(test.data$Salary, test.predict)
err.rates <- rbind(err.rates, cbind(m,this.error))
}
}
return(err.rates)
}
# --------------------------------------------------------------------------------------------------------
# Build a document-term matrix from the job titles (tm package), then
# partition data into n folds & run linear regression including title keywords
# - returns a data frame of per-fold MAE values
# --------------------------------------------------------------------------------------------------------
get_dtm <- function(data) {
src <- DataframeSource(data.frame(data$Title)) # Using just Title for simplicity
c <- Corpus(src)
c <- tm_map(c, stripWhitespace)
c <- tm_map(c, tolower)
c <- tm_map(c, removeWords, stopwords("english"))
# Create a dtm where each "document" is the rowid
dtm <- DocumentTermMatrix(c)
}
textmodel.nfold <- function(n, data, locations=NULL) {
# Create a dtm where each "document" is the rowid and look at the Title field
dtm <- get_dtm(data)
# Each of these words becomes a column containing a binary flag
# This list came from a perusal of titles for each quantile
data <- cbind(data, as.matrix(dtm[, job_titles]))
err.rates <- data.frame() # initialize results object
for (k in 1:n) {
# Split the data into training (train.pct %) and test (1 - train.pct %) data
train.index <- sample(1:N, train.pct * N) # Create random sample of records (training set)
train.data <- data[train.index, ] # Split into train/test
test.data <- data[-train.index, ] # note use of neg index...different than Python!
# Could use paste() and as.formula() here
salary.lm <- lm(Salary ~ title + city + category + ctime + ctype + company + company:category +
teacher + chef + support + assistant+ administrator + operator + technician +
analyst + consultant + engineer + engineering + trainee + care + manager + nurse + developer,
data=train.data)
# Calculate an error metric for this fold k and add to err.rates dataframe
test.predict <- predict(salary.lm, test.data)
this.error <- mae(test.data$Salary, test.predict)
err.rates <- rbind(err.rates, this.error)
}
return(err.rates)
}
# --------------------------------------------------------------------------------------------------------
# Main script: load the data, code the categorical variables, evaluate the
# models with n-fold cross-validation, then fit the final model and write the
# submission file
# --------------------------------------------------------------------------------------------------------
setwd("/Volumes/DATA/robert/Desktop/Projects/GA/salary")
train_all <- read.csv('data/train.csv')
test <- read.csv('data/test.csv')
loc_tree <- read.csv('data/Location_Tree2.csv',header=FALSE,
col.names=c('uk','country','city1','city2','city3','city4','city5' ))
job_titles <- c('teacher','chef','support','assistant','administrator',
'operator','technician','analyst', 'consultant',
'engineer','engineering', 'trainee',
'care', 'manager','nurse','developer')
N <- nrow(train_all) # size of data
train.pct <- .7 # Use 70% of our data as a training set
# Rename some columns to make life easier
train_all <- rename(train_all, c("LocationNormalized" = "City"))
train_all <- rename(train_all, c("SalaryNormalized" = "Salary"))
test <- rename(test, c("LocationNormalized" = "City"))
# Code the categories by taking levels - R does it automatically but it's a lot faster to code
train_all$title <- as.numeric(as.factor(train_all$Title))
train_all$city <- as.numeric(as.factor(train_all$City))
train_all$category <- as.numeric(as.factor(train_all$Category))
train_all$ctime <- as.numeric(as.factor(train_all$ContractTime))
train_all$ctype <- as.numeric(as.factor(train_all$ContractType))
train_all$company <- as.numeric(as.factor(train_all$Company))
test$title <- as.numeric(as.factor(test$Title))
test$city <- as.numeric(as.factor(test$City))
test$category <- as.numeric(as.factor(test$Category))
test$ctime <- as.numeric(as.factor(test$ContractTime))
test$ctype <- as.numeric(as.factor(test$ContractType))
test$company <- as.numeric(as.factor(test$Company))
#
# Make some initial plots to
#
#ggplot(data=train_all,aes(x=company,y=Salary)) + geom_point() # some variability
#ggplot(data=train_all,aes(x=category,y=Salary)) + geom_point() # ***
#ggplot(data=train_all,aes(x=ctype,y=Salary)) + geom_point()
#ggplot(data=train_all,aes(x=city,y=Salary)) + geom_point()
#
#ggplot(data=train_all,aes(x=ctime,y=Salary)) + geom_point() # minor
#ggplot(data=train_all,aes(x=category,y=company,color=Salary)) + geom_point() + geom_jitter()
# --------------------------------------------------------------------------------------------------------
# Run the linear model
# with and without the location tree
#
# Run using the tm package to parse some text-heavy columns to create additional columns to use in the model
# - With anything bigger than the smallest training set, takes a long time on desktop and laptop
#
# Generally model 5 works best. Adding interaction terms in model 6 makes things slightly worse
# --------------------------------------------------------------------------------------------------------
print("Evaluating linear models with MAE, 10 fold CV")
errors <- model.nfold(10,train_all)
model_errors <- aggregate(errors, by=errors['m'], FUN=mean)
print(model_errors)
#print("Evaluating linear models and location tree with MAE, 10 fold CV")
#errors <- model.nfold(2,train_all,loc_tree)
#model_errors <- aggregate(errors, by=errors['m'], FUN=mean)
#print(model_errors)
print("Evaluating with some key job titles, 10 fold CV")
errors <- textmodel.nfold(10,train_all[1:5000,])
print(colMeans(errors))
# Finally, we need to work with the actual test data
# We should train our final model with all the training data #train_all
train_dtm <- get_dtm(train_all)
test_dtm <- get_dtm(test)
train_all <- cbind(train_all, as.matrix(train_dtm[,job_titles]))
test <- cbind(test, as.matrix(test_dtm[, job_titles]))
print("final model")
finalmodel <- lm(Salary ~ title + city + category + ctime + ctype + company + company:category +
teacher + chef+ support+ assistant+
administrator + operator + technician + analyst + consultant + engineer + engineering + trainee +
care + manager + nurse + developer,
data=train_all)
print("predictions")
predictions <- predict(finalmodel, test)
# What are these predictions going to be?
# Put the submission together and write it to a file
print("writing submission file")
submission <- data.frame(Id=test$Id, Salary=predictions)
write.csv(submission, "my_submission.csv", row.names=FALSE)
print("done...")
|
# detSim_camkii() is provided by this repository's R package; loading it here is
# an assumption (the original script relied on it being attached already)
library(CalciumModelsLibrary)
# Simulation parameters (Vector)
sim_params <- list(timestep = 0.1,
endTime = 200)
# Model Parameters (List)
model_params <- list(vols = c(vol = 5e-15),
init_conc = c(W_I = 40,
W_B = 0,
W_P = 0,
W_T = 0,
W_A = 0),
params = c(totalC = 40))
# Create calcium input signal (unit: concentration nmol/l):
# increase Ca from 50 to 600 at 100s, hold for 40s, then drop to 50 again
# (from Dupont_camkii.cps)
x <- seq(0, 400, 1)
y <- append(rep(0, 99), rep(600, 41))
y <- append(y, rep(50, 261))
input_df <- data.frame("time" = x, "Ca" = y)
# Sine(baseline, amp, period, phase, duration, resolution)
#input_df <- as.data.frame(OscillatorGenerator::Sine(0, 350, 20, 5, 200, 0.01))
colnames(input_df) <- c("time", "Ca")
start.time <- as.numeric(Sys.time())*1000
# Simulate model
output <- detSim_camkii(input_df, sim_params, model_params)
end.time <- as.numeric(Sys.time())*1000
time.taken <- end.time - start.time
cat(time.taken)
# Plot output
par(mar = c(5,5,2,5))
colnames(output) <- c("time", "calcium", "W_I", "W_B", "W_P", "W_T", "W_A")
plot(output$time, output$calcium, col="blue", xlim = c(90,160), ylim = c(0,55), type="l", xlab="time [s]", ylab="CamKII [nmol/l]")
lines(output$time, output$W_I, col="black", type = "l")
lines(output$time, output$W_B, col="red", type="l")
lines(output$time, output$W_P, col="green", type="l")
lines(output$time, output$W_T, col="cyan", type="l")
lines(output$time, output$W_A, col="orange", type="l")
axis(side = 4)
mtext(side = 4, line = 3, 'calcium [a.u]')
legend("topright", legend=c("calcium", "W_I", "W_B", "W_P", "W_T", "W_A"),
col=c("blue", "black", "red", "green", "cyan", "orange"),
lty=c(1,1))
|
/material/test_ode_camkii.R
|
no_license
|
jpahle/CalciumModelsLibrary
|
R
| false | false | 1,887 |
r
|
# detSim_camkii() is provided by this repository's R package; loading it here is
# an assumption (the original script relied on it being attached already)
library(CalciumModelsLibrary)
# Simulation parameters (Vector)
sim_params <- list(timestep = 0.1,
endTime = 200)
# Model Parameters (List)
model_params <- list(vols = c(vol = 5e-15),
init_conc = c(W_I = 40,
W_B = 0,
W_P = 0,
W_T = 0,
W_A = 0),
params = c(totalC = 40))
# Create calcium input signal (unit: concentration nmol/l):
# increase Ca from 50 to 600 at 100s, hold for 40s, then drop to 50 again
# (from Dupont_camkii.cps)
x <- seq(0, 400, 1)
y <- append(rep(0, 99), rep(600, 41))
y <- append(y, rep(50, 261))
input_df <- data.frame("time" = x, "Ca" = y)
# Sine(baseline, amp, period, phase, duration, resolution)
#input_df <- as.data.frame(OscillatorGenerator::Sine(0, 350, 20, 5, 200, 0.01))
colnames(input_df) <- c("time", "Ca")
start.time <- as.numeric(Sys.time())*1000
# Simulate model
output <- detSim_camkii(input_df, sim_params, model_params)
end.time <- as.numeric(Sys.time())*1000
time.taken <- end.time - start.time
cat(time.taken)
# Plot output
par(mar = c(5,5,2,5))
colnames(output) <- c("time", "calcium", "W_I", "W_B", "W_P", "W_T", "W_A")
plot(output$time, output$calcium, col="blue", xlim = c(90,160), ylim = c(0,55), type="l", xlab="time [s]", ylab="CamKII [nmol/l]")
lines(output$time, output$W_I, col="black", type = "l")
lines(output$time, output$W_B, col="red", type="l")
lines(output$time, output$W_P, col="green", type="l")
lines(output$time, output$W_T, col="cyan", type="l")
lines(output$time, output$W_A, col="orange", type="l")
axis(side = 4)
mtext(side = 4, line = 3, 'calcium [a.u]')
legend("topright", legend=c("calcium", "W_I", "W_B", "W_P", "W_T", "W_A"),
col=c("blue", "black", "red", "green", "cyan", "orange"),
lty=c(1,1))
|
library(cluster)
library(reshape2)
library(ggplot2)
plotAggregate <- function(statesList,df1,xlabc,ylabc,mainc,colc){
dataFrame1<-df1[which(df1$StateName %in% statesList),2:3]
dataFrame1<-aggregate(dataFrame1,by=list("yearTrend"=dataFrame1$variable),mean)
plot(dataFrame1$yearTrend,dataFrame1$value,type="b",xlab=xlabc,ylab = ylabc,main=mainc,col=colc)
abline(h=50,lty=2)
}
plotqq <- function(statesList,df1,xlabc,ylabc,mainc,colc){
dataFrame1<-df1[which(df1$StateName %in% statesList),2:3]
#dataFrame1<-aggregate(dataFrame1,by=list("yearTrend"=dataFrame1$variable),mean)
#qqnorm(dataFrame1$value,xlab=xlabc,ylab = ylabc,main=mainc,col=colc)
#qqline(dataFrame1$value)
hist(dataFrame1$value,xlab=xlabc,ylab = ylabc,main=mainc,col=colc)
}
data("votes.repub")
df<-as.data.frame(votes.repub[,26:30])
colnames(df)<-c(1956,1960,1964,1968,1972)
df["StateName"]<-rownames(df)
df1<-melt(df,id.vars = "StateName")
df1$StateName<-as.factor(df1$StateName)
df1$variable<-as.integer(as.character(df1$variable))
df1$value[is.na(df1$value)]<-0
NorthEast=c("Connecticut","Delaware","Maine","Massachusetts","New Hampshire","New Jersey","New York","Pennsylvania","Rhode Island","Vermont")
MaEc<-c("Kentucky","Maryland","North Carolina","South Carolina","Tennessee","Virginia","West Virginia")
South<-c("Alabama","Arkansas","Florida","Georgia","Louisiana","Mississippi","Oklahoma","Texas")
midwest<-c("Illinois","Indiana","Iowa","Kansas","Michigan","Minnesota","Missouri","Nebraska","Ohio","Wisconsin")
rockies<-c("Colorado","Idaho","Montana","North Dakota","South Dakota","Utah","Wyoming")
west<-c("Alaska","Arizona","California","Hawaii","Nevada","New Mexico","Oregon","Washington")
par(mfrow=c(2,3))
plotAggregate(NorthEast,df1,"Years","Votes Percentage","North East Region",2)
plotAggregate(MaEc,df1,"Years","Votes Percentage","Mid Atlantic/East Central Region",3)
plotAggregate(South,df1,"Years","Votes Percentage","Southern Region",4)
plotAggregate(midwest,df1,"Years","Votes Percentage","Mid West Region",2)
plotAggregate(rockies,df1,"Years","Votes Percentage","Rockies Region",3)
plotAggregate(west,df1,"Years","Votes Percentage","West Region",4)
par(mfrow=c(2,3))
plotqq(NorthEast,df1,"Years","Votes Percentage","North East Region",2)
plotqq(MaEc,df1,"Years","Votes Percentage","Mid Atlantic/East Central Region",3)
plotqq(South,df1,"Years","Votes Percentage","Southern Region",4)
plotqq(midwest,df1,"Years","Votes Percentage","Mid West Region",2)
plotqq(rockies,df1,"Years","Votes Percentage","Rockies Region",3)
plotqq(west,df1,"Years","Votes Percentage","West Region",4)
qqnorm(df1$value,main="QQ plot for the entire dataset")
qqline(df1$value)
# Build the Region factor used for facetting below (the qplot call referenced a
# Region column that was never created)
region_list <- list("North East"=NorthEast,"Mid Atlantic/East Central"=MaEc,"South"=South,
                    "Mid West"=midwest,"Rockies"=rockies,"West"=west)
df1$Region <- NA
for (r in names(region_list)) df1$Region[df1$StateName %in% region_list[[r]]] <- r
df1$Region <- as.factor(df1$Region)
qplot(variable,value,data=df1,facets = Region~.,color=StateName,geom="line")+theme(legend.position="bottom")+guides(colour=guide_legend(nrow=5))+ggtitle("Votes by State by Region")
|
/Homework 2/votesgroupBy.R
|
no_license
|
ganesh91/STAT-S670-Exploratory-Data-Analysis
|
R
| false | false | 2,838 |
r
|
library(cluster)
library(reshape2)
library(ggplot2)
plotAggregate <- function(statesList,df1,xlabc,ylabc,mainc,colc){
dataFrame1<-df1[which(df1$StateName %in% statesList),2:3]
dataFrame1<-aggregate(dataFrame1,by=list("yearTrend"=dataFrame1$variable),mean)
plot(dataFrame1$yearTrend,dataFrame1$value,type="b",xlab=xlabc,ylab = ylabc,main=mainc,col=colc)
abline(h=50,lty=2)
}
plotqq <- function(statesList,df1,xlabc,ylabc,mainc,colc){
dataFrame1<-df1[which(df1$StateName %in% statesList),2:3]
#dataFrame1<-aggregate(dataFrame1,by=list("yearTrend"=dataFrame1$variable),mean)
#qqnorm(dataFrame1$value,xlab=xlabc,ylab = ylabc,main=mainc,col=colc)
#qqline(dataFrame1$value)
hist(dataFrame1$value,xlab=xlabc,ylab = ylabc,main=mainc,col=colc)
}
data("votes.repub")
df<-as.data.frame(votes.repub[,26:30])
colnames(df)<-c(1956,1960,1964,1968,1972)
df["StateName"]<-rownames(df)
df1<-melt(df,id.vars = "StateName")
df1$StateName<-as.factor(df1$StateName)
df1$variable<-as.integer(as.character(df1$variable))
df1$value[is.na(df1$value)]<-0
NorthEast=c("Connecticut","Delaware","Maine","Massachusetts","New Hampshire","New Jersey","New York","Pennsylvania","Rhode Island","Vermont")
MaEc<-c("Kentucky","Maryland","North Carolina","South Carolina","Tennessee","Virginia","West Virginia")
South<-c("Alabama","Arkansas","Florida","Georgia","Louisiana","Mississippi","Oklahoma","Texas")
midwest<-c("Illinois","Indiana","Iowa","Kansas","Michigan","Minnesota","Missouri","Nebraska","Ohio","Wisconsin")
rockies<-c("Colorado","Idaho","Montana","North Dakota","South Dakota","Utah","Wyoming")
west<-c("Alaska","Arizona","California","Hawaii","Nevada","New Mexico","Oregon","Washington")
par(mfrow=c(2,3))
plotAggregate(NorthEast,df1,"Years","Votes Percentage","North East Region",2)
plotAggregate(MaEc,df1,"Years","Votes Percentage","Mid Atlantic/East Central Region",3)
plotAggregate(South,df1,"Years","Votes Percentage","Southern Region",4)
plotAggregate(midwest,df1,"Years","Votes Percentage","Mid West Region",2)
plotAggregate(rockies,df1,"Years","Votes Percentage","Rockies Region",3)
plotAggregate(west,df1,"Years","Votes Percentage","West Region",4)
par(mfrow=c(2,3))
plotqq(NorthEast,df1,"Years","Votes Percentage","North East Region",2)
plotqq(MaEc,df1,"Years","Votes Percentage","Mid Atlantic/East Central Region",3)
plotqq(South,df1,"Years","Votes Percentage","Southern Region",4)
plotqq(midwest,df1,"Years","Votes Percentage","Mid West Region",2)
plotqq(rockies,df1,"Years","Votes Percentage","Rockies Region",3)
plotqq(west,df1,"Years","Votes Percentage","West Region",4)
qqnorm(df1$value,main="QQ plot for the entire dataset")
qqline(df1$value)
# derive a Region label for each state so the faceted plot below works (df1 has no Region column otherwise)
df1$Region <- ifelse(df1$StateName %in% NorthEast, "North East", ifelse(df1$StateName %in% MaEc, "Mid Atlantic/East Central", ifelse(df1$StateName %in% South, "South", ifelse(df1$StateName %in% midwest, "Mid West", ifelse(df1$StateName %in% rockies, "Rockies", "West")))))
qplot(variable,value,data=df1,facets = Region~.,color=StateName,geom="line")+theme(legend.position="bottom")+guides(colour=guide_legend(nrow=5))+ggtitle("Votes by State by Region")
|
context("Sensitivity")
# ------------------------------------------------------------------------------
lst <- data_altman()
pathology <- lst$pathology
path_tbl <- lst$path_tbl
pred_ch <- quote(scan)
test_that('Two class', {
expect_equal(
sens(pathology, truth = "pathology", estimate = "scan")[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, estimate = scan, truth = pathology)[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, pathology, !! pred_ch)[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, pathology, scan)[[".estimate"]],
231/258
)
expect_equal(
sens(path_tbl)[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, truth = pathology, estimate = "scan_na")[[".estimate"]],
230/256
)
expect_equal(
sens(as.matrix(path_tbl))[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, pathology, scan_na, na_rm = FALSE)[[".estimate"]],
NA_real_
)
})
# ------------------------------------------------------------------------------
multi_ex <- data_three_by_three()
micro <- data_three_by_three_micro()
test_that('Three class', {
# sens = recall
expect_equal(
sens(multi_ex, estimator = "macro")[[".estimate"]],
macro_metric(recall_binary)
)
expect_equal(
sens(multi_ex, estimator = "macro_weighted")[[".estimate"]],
macro_weighted_metric(recall_binary)
)
expect_equal(
sens(multi_ex, estimator = "micro")[[".estimate"]],
with(micro, sum(tp) / sum(tp + fp))
)
})
|
/tests/testthat/test-class-sens.R
|
no_license
|
jyuu/yardstick
|
R
| false | false | 1,531 |
r
|
context("Sensitivity")
# ------------------------------------------------------------------------------
lst <- data_altman()
pathology <- lst$pathology
path_tbl <- lst$path_tbl
pred_ch <- quote(scan)
test_that('Two class', {
expect_equal(
sens(pathology, truth = "pathology", estimate = "scan")[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, estimate = scan, truth = pathology)[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, pathology, !! pred_ch)[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, pathology, scan)[[".estimate"]],
231/258
)
expect_equal(
sens(path_tbl)[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, truth = pathology, estimate = "scan_na")[[".estimate"]],
230/256
)
expect_equal(
sens(as.matrix(path_tbl))[[".estimate"]],
231/258
)
expect_equal(
sens(pathology, pathology, scan_na, na_rm = FALSE)[[".estimate"]],
NA_real_
)
})
# ------------------------------------------------------------------------------
multi_ex <- data_three_by_three()
micro <- data_three_by_three_micro()
test_that('Three class', {
# sens = recall
expect_equal(
sens(multi_ex, estimator = "macro")[[".estimate"]],
macro_metric(recall_binary)
)
expect_equal(
sens(multi_ex, estimator = "macro_weighted")[[".estimate"]],
macro_weighted_metric(recall_binary)
)
expect_equal(
sens(multi_ex, estimator = "micro")[[".estimate"]],
with(micro, sum(tp) / sum(tp + fp))
)
})
|
# transform ndvi data to 0-1 range
layers <- raster::brick("data/raster/stack/ndvi_17.gri")
# transform values
for(i in 1:raster::nlayers(layers)){
print(i)
temp_vals = (raster::values(layers[[i]])-10000) / 10000
raster::values(layers[[i]]) <- temp_vals
}
rm(temp_vals)
# write to disk rowwise
# create empty brick
b <- raster::brick(layers, values=FALSE)
b <- raster::writeStart(b,
filename= "data/raster/stack/ndvi_17_transform.gri",
format="raster",
overwrite=TRUE)
tr <- raster::blockSize(b)
for (i in 1:tr$n) {
v <- raster::getValuesBlock(layers , row=tr$row[i], nrows=tr$nrows[i])
b <- raster::writeValues(b, v, tr$row[i])
}
b <- raster::writeStop(b)
rm(v, b)
gc()
|
/scripts/jobs/2017_transform.R
|
no_license
|
EricKrg/ml_forest-health
|
R
| false | false | 758 |
r
|
# transform ndvi data to 0-1 range
layers <- raster::brick("data/raster/stack/ndvi_17.gri")
# transform values
for(i in 1:raster::nlayers(layers)){
print(i)
temp_vals = (raster::values(layers[[i]])-10000) / 10000
raster::values(layers[[i]]) <- temp_vals
}
rm(temp_vals)
# write to disk rowwise
# create empty brick
b <- raster::brick(layers, values=FALSE)
b <- raster::writeStart(b,
filename= "data/raster/stack/ndvi_17_transform.gri",
format="raster",
overwrite=TRUE)
tr <- raster::blockSize(b)
for (i in 1:tr$n) {
v <- raster::getValuesBlock(layers , row=tr$row[i], nrows=tr$nrows[i])
b <- raster::writeValues(b, v, tr$row[i])
}
b <- raster::writeStop(b)
rm(v, b)
gc()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.R
\name{create_future_rt}
\alias{create_future_rt}
\title{Construct the Required Future Rt assumption}
\usage{
create_future_rt(future_rt = "latest", delay = 0)
}
\arguments{
\item{future_rt}{A character string or integer. This argument indicates how to set future Rt values. Supported
options are to project using the Rt model ("project"), to use the latest estimate based on partial data ("latest"),
or to use the latest estimate based on data that is over 50\% complete ("estimate"). If an integer is supplied then the Rt estimate
from this many days into the future (or past if negative) will be used forwards in time.}
\item{delay}{Numeric mean delay}
}
\value{
A list containing a logical called fixed and an integer called from
}
\description{
Construct the Required Future Rt assumption
}
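\examples{
\dontrun{
# Illustrative sketch (added example, not from the package documentation):
# the returned list indicates whether future Rt values are held fixed and
# from which day offset they are taken.
create_future_rt(future_rt = "latest", delay = 0)
create_future_rt(future_rt = 3, delay = 2)
}
}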
|
/man/create_future_rt.Rd
|
permissive
|
DrJohan/EpiNow2
|
R
| false | true | 885 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create.R
\name{create_future_rt}
\alias{create_future_rt}
\title{Construct the Required Future Rt assumption}
\usage{
create_future_rt(future_rt = "latest", delay = 0)
}
\arguments{
\item{future_rt}{A character string or integer. This argument indicates how to set future Rt values. Supported
options are to project using the Rt model ("project"), to use the latest estimate based on partial data ("latest"),
or to use the latest estimate based on data that is over 50\% complete ("estimate"). If an integer is supplied then the Rt estimate
from this many days into the future (or past if negative) will be used forwards in time.}
\item{delay}{Numeric mean delay}
}
\value{
A list containing a logical called fixed and an integer called from
}
\description{
Construct the Required Future Rt assumption
}
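\examples{
\dontrun{
# Illustrative sketch (added example, not from the package documentation):
# the returned list indicates whether future Rt values are held fixed and
# from which day offset they are taken.
create_future_rt(future_rt = "latest", delay = 0)
create_future_rt(future_rt = 3, delay = 2)
}
}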
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Patch-Class.R
\name{get_femalePop_Patch}
\alias{get_femalePop_Patch}
\title{Get female Population}
\usage{
get_femalePop_Patch()
}
\description{
Return females (nGenotypes X nGenotypes matrix)
}
|
/fuzzedpackages/MGDrivE/man/get_femalePop_Patch.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | true | 274 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Patch-Class.R
\name{get_femalePop_Patch}
\alias{get_femalePop_Patch}
\title{Get female Population}
\usage{
get_femalePop_Patch()
}
\description{
Return females (nGenotypes X nGenotypes matrix)
}
|
library(geozoning)
### Name: pointsSp
### Title: pointsSp
### Aliases: pointsSp
### ** Examples
data(resZTest)
K=resZTest
Z=K$zonePolygone
plotZ(Z)
pointsSp(Z[[1]])
|
/data/genthat_extracted_code/geozoning/examples/pointsSp.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 172 |
r
|
library(geozoning)
### Name: pointsSp
### Title: pointsSp
### Aliases: pointsSp
### ** Examples
data(resZTest)
K=resZTest
Z=K$zonePolygone
plotZ(Z)
pointsSp(Z[[1]])
|
library(MASS)
library(reshape2)
library(sqldf)
rm(list=ls(all=TRUE))
#par
setwd("/root/devel/proc_hom/db/exports/hgt-par/family/data")
par_mtx <- as.matrix(read.csv("hgt-par-family-raxml-75-regular-tr-rl-mtx.csv", sep=",",header=FALSE))
rownames(par_mtx) <- 1:23
colnames(par_mtx) <- 1:23
par_df <- melt(par_mtx)
colnames(par_df) <- c("x", "y","val")
#com
setwd("/root/devel/proc_hom/db/exports/hgt-com/family/data")
com_mtx <- as.matrix(read.csv("hgt-com-family-raxml-75-regular-tr-rl-mtx.csv", sep=",",header=FALSE))
rownames(com_mtx) <- 1:23
colnames(com_mtx) <- 1:23
com_df <- melt(com_mtx)
colnames(com_df) <- c("x", "y","val")
#tot
setwd("/root/devel/proc_hom/db/exports/hgt-tot/family/data")
tot_mtx <- as.matrix(read.csv("hgt-tot-family-raxml-75-regular-tr-rl-mtx.csv", sep=",",header=FALSE))
rownames(tot_mtx) <- 1:23
colnames(tot_mtx) <- 1:23
tot_df <- melt(tot_mtx)
colnames(tot_df) <- c("x", "y","val")
# best from total
bst_tot <- sqldf("
select x,y
from tot_df
order by val desc
limit 20")
#print(bst_tot)
#join with com and par
bst_df = sqldf("
select bst_tot.x,
bst_tot.y,
tot_df.val as tot_val,
par_df.val as par_val,
com_df.val as com_val
from bst_tot
join tot_df on tot_df.x = bst_tot.x and
tot_df.y = bst_tot.y
join par_df on par_df.x = bst_tot.x and
par_df.y = bst_tot.y
join com_df on com_df.x = bst_tot.x and
com_df.y = bst_tot.y
")
print(bst_df)
setwd("/root/devel/proc_hom/db/exports/synth/sp-tr/data")
write.csv(format(bst_df, scientific=FALSE), file = "sp-tr21.csv")
|
/Chapter4/Supplementary/proc_hom/db/exports/synth/sp-tr/work/sqldf_matrix.R
|
permissive
|
dunarel/dunphd-thesis
|
R
| false | false | 1,587 |
r
|
library(MASS)
library(reshape2)
library(sqldf)
rm(list=ls(all=TRUE))
#par
setwd("/root/devel/proc_hom/db/exports/hgt-par/family/data")
par_mtx <- as.matrix(read.csv("hgt-par-family-raxml-75-regular-tr-rl-mtx.csv", sep=",",header=FALSE))
rownames(par_mtx) <- 1:23
colnames(par_mtx) <- 1:23
par_df <- melt(par_mtx)
colnames(par_df) <- c("x", "y","val")
#com
setwd("/root/devel/proc_hom/db/exports/hgt-com/family/data")
com_mtx <- as.matrix(read.csv("hgt-com-family-raxml-75-regular-tr-rl-mtx.csv", sep=",",header=FALSE))
rownames(com_mtx) <- 1:23
colnames(com_mtx) <- 1:23
com_df <- melt(com_mtx)
colnames(com_df) <- c("x", "y","val")
#tot
setwd("/root/devel/proc_hom/db/exports/hgt-tot/family/data")
tot_mtx <- as.matrix(read.csv("hgt-tot-family-raxml-75-regular-tr-rl-mtx.csv", sep=",",header=FALSE))
rownames(tot_mtx) <- 1:23
colnames(tot_mtx) <- 1:23
tot_df <- melt(tot_mtx)
colnames(tot_df) <- c("x", "y","val")
# best from total
bst_tot <- sqldf("
select x,y
from tot_df
order by val desc
limit 20")
#print(bst_tot)
#join with com and par
bst_df = sqldf("
select bst_tot.x,
bst_tot.y,
tot_df.val as tot_val,
par_df.val as par_val,
com_df.val as com_val
from bst_tot
join tot_df on tot_df.x = bst_tot.x and
tot_df.y = bst_tot.y
join par_df on par_df.x = bst_tot.x and
par_df.y = bst_tot.y
join com_df on com_df.x = bst_tot.x and
com_df.y = bst_tot.y
")
print(bst_df)
setwd("/root/devel/proc_hom/db/exports/synth/sp-tr/data")
write.csv(format(bst_df, scientific=FALSE), file = "sp-tr21.csv")
|
#' @include internal.R
NULL
#' Randomly prioritize conservation projects under expected weighted species
#' richness
#'
#' Generate random solutions for the 'Project Prioritization Protocol'
#' problem (Joseph, Maloney & Possingham 2009) with species weights.
#' Although conservation projects should, ideally, not be funded based on random
#' allocations, it can be useful to compare the effectiveness of solutions to
#' random decisions in order to evaluate their effectiveness.
#' \strong{When informing conservation actions, it is strongly recommended to
#' use the \code{\link{ppp_exact_spp_solution}} method because it can identify
#' optimal funding schemes with a guarantee.}
#'
#' @inheritParams help
#'
#' @inherit ppp_random_phylo_solution details
#'
#' @seealso For other methods for generating solutions for the 'Project
#' Prioritization Protocol' problem using phylogenetic data, see
#' \code{\link{ppp_heuristic_spp_solution}}
#' \code{\link{ppp_exact_spp_solution}}, and
#' \code{\link{ppp_manual_spp_solution}}.
#' To visualize the effectiveness of a particular solution, see
#' \code{\link{ppp_plot_spp_solution}}.
#'
#' @references
#' Faith DP (2008) Threatened species and the potential loss of
#' phylogenetic diversity: conservation scenarios based on estimated extinction
#' probabilities and phylogenetic risk analysis. \emph{Conservation Biology},
#' \strong{22}: 1461--1470.
#'
#' Joseph LN, Maloney RF & Possingham HP (2009) Optimal allocation of
#' resources among threatened species: A project prioritization protocol.
#' \emph{Conservation Biology}, \strong{23}, 328--338.
#'
#' @examples
#' # set seed for reproducibility
#' set.seed(500)
#'
#' # load built-in data
#' data(sim_project_data, sim_action_data, sim_species_data)
#'
#' # print simulated project data
#' print(sim_project_data)
#'
#' # print simulated action data
#' print(sim_action_data)
#'
#' # print simulated species data
#' print(sim_species_data)
#'
#' # generate 10 random solutions that meet a budget of 300
#' s1 <- ppp_random_spp_solution(sim_project_data, sim_action_data,
#' sim_species_data, 300, "name", "success",
#' "name", "cost", "name", "weight",
#' number_solutions = 10)
#'
#' # print solutions
#' print(s1)
#'
#' # plot first solution
#' ppp_plot_spp_solution(sim_project_data, sim_action_data, sim_species_data,
#' s1, "name", "success", "name", "cost", "name",
#' "weight", n = 1)
#'
#' # view histogram of the objective value
#' hist(s1$obj, xlab = "Expected weighted species richness")
#'
#' # view histogram of their costs
#' hist(s1$cost, xlab = "Solution cost ($)")
#' @export
ppp_random_spp_solution <- function(x, y, spp, budget,
project_column_name,
success_column_name,
action_column_name,
cost_column_name,
species_column_name,
weight_column_name = NULL,
locked_in_column_name = NULL,
locked_out_column_name = NULL,
number_solutions = 1L) {
# assertions
  ## coerce spp to tibble if just a regular data.frame
if (inherits(spp, "data.frame") && !inherits(spp, "tbl_df"))
spp <- tibble::as_tibble(spp)
## assert that parameters are valid
assertthat::assert_that(inherits(spp, "tbl_df"),
ncol(spp) > 0, nrow(spp) > 0,
assertthat::is.string(species_column_name),
assertthat::has_name(spp, species_column_name),
assertthat::noNA(spp[[species_column_name]]),
inherits(spp[[species_column_name]],
c("character", "factor")),
isTRUE(all(spp[[species_column_name]] %in% names(x))),
anyDuplicated(spp[[species_column_name]]) == 0)
if (!is.null(weight_column_name)) {
assertthat::assert_that(assertthat::is.string(weight_column_name),
assertthat::has_name(spp, weight_column_name),
is.numeric(spp[[weight_column_name]]),
assertthat::noNA(spp[[weight_column_name]]))
w <- spp[[weight_column_name]]
} else {
w <- rep(1, nrow(spp))
}
  ## coerce factor species names to character
  if (is.factor(spp[[species_column_name]]))
    spp[[species_column_name]] <- as.character(spp[[species_column_name]])
# generate random solutions
out <- ppp_random_phylo_solution(
x = x, y = y, tree = star_phylogeny(spp[[species_column_name]], w),
budget = budget, project_column_name = project_column_name,
success_column_name = success_column_name,
action_column_name = action_column_name,
cost_column_name = cost_column_name,
locked_in_column_name = locked_in_column_name,
locked_out_column_name = locked_out_column_name,
number_solutions = number_solutions)
# return output
out
}
|
/R/ppp_random_spp_solution.R
|
no_license
|
prioritizr/optimalppp
|
R
| false | false | 5,219 |
r
|
#' @include internal.R
NULL
#' Randomly prioritize conservation projects under expected weighted species
#' richness
#'
#' Generate random solutions for the 'Project Prioritization Protocol'
#' problem (Joseph, Maloney & Possingham 2009) with species weights.
#' Although conservation projects should, ideally, not be funded based on random
#' allocations, it can be useful to compare the effectiveness of solutions to
#' random decisions in order to evaluate their effectiveness.
#' \strong{When informing conservation actions, it is strongly recommended to
#' use the \code{\link{ppp_exact_spp_solution}} method because it can identify
#' optimal funding schemes with a guarantee.}
#'
#' @inheritParams help
#'
#' @inherit ppp_random_phylo_solution details
#'
#' @seealso For other methods for generating solutions for the 'Project
#' Prioritization Protocol' problem using phylogenetic data, see
#' \code{\link{ppp_heuristic_spp_solution}}
#' \code{\link{ppp_exact_spp_solution}}, and
#' \code{\link{ppp_manual_spp_solution}}.
#' To visualize the effectiveness of a particular solution, see
#' \code{\link{ppp_plot_spp_solution}}.
#'
#' @references
#' Faith DP (2008) Threatened species and the potential loss of
#' phylogenetic diversity: conservation scenarios based on estimated extinction
#' probabilities and phylogenetic risk analysis. \emph{Conservation Biology},
#' \strong{22}: 1461--1470.
#'
#' Joseph LN, Maloney RF & Possingham HP (2009) Optimal allocation of
#' resources among threatened species: A project prioritization protocol.
#' \emph{Conservation Biology}, \strong{23}, 328--338.
#'
#' @examples
#' # set seed for reproducibility
#' set.seed(500)
#'
#' # load built-in data
#' data(sim_project_data, sim_action_data, sim_species_data)
#'
#' # print simulated project data
#' print(sim_project_data)
#'
#' # print simulated action data
#' print(sim_action_data)
#'
#' # print simulated species data
#' print(sim_species_data)
#'
#' # generate 10 random solutions that meet a budget of 300
#' s1 <- ppp_random_spp_solution(sim_project_data, sim_action_data,
#' sim_species_data, 300, "name", "success",
#' "name", "cost", "name", "weight",
#' number_solutions = 10)
#'
#' # print solutions
#' print(s1)
#'
#' # plot first solution
#' ppp_plot_spp_solution(sim_project_data, sim_action_data, sim_species_data,
#' s1, "name", "success", "name", "cost", "name",
#' "weight", n = 1)
#'
#' # view histogram of the objective value
#' hist(s1$obj, xlab = "Expected weighted species richness")
#'
#' # view histogram of their costs
#' hist(s1$cost, xlab = "Solution cost ($)")
#' @export
ppp_random_spp_solution <- function(x, y, spp, budget,
project_column_name,
success_column_name,
action_column_name,
cost_column_name,
species_column_name,
weight_column_name = NULL,
locked_in_column_name = NULL,
locked_out_column_name = NULL,
number_solutions = 1L) {
# assertions
  ## coerce spp to tibble if just a regular data.frame
if (inherits(spp, "data.frame") && !inherits(spp, "tbl_df"))
spp <- tibble::as_tibble(spp)
## assert that parameters are valid
assertthat::assert_that(inherits(spp, "tbl_df"),
ncol(spp) > 0, nrow(spp) > 0,
assertthat::is.string(species_column_name),
assertthat::has_name(spp, species_column_name),
assertthat::noNA(spp[[species_column_name]]),
inherits(spp[[species_column_name]],
c("character", "factor")),
isTRUE(all(spp[[species_column_name]] %in% names(x))),
anyDuplicated(spp[[species_column_name]]) == 0)
if (!is.null(weight_column_name)) {
assertthat::assert_that(assertthat::is.string(weight_column_name),
assertthat::has_name(spp, weight_column_name),
is.numeric(spp[[weight_column_name]]),
assertthat::noNA(spp[[weight_column_name]]))
w <- spp[[weight_column_name]]
} else {
w <- rep(1, nrow(spp))
}
  ## coerce factor species names to character
  if (is.factor(spp[[species_column_name]]))
    spp[[species_column_name]] <- as.character(spp[[species_column_name]])
# generate random solutions
out <- ppp_random_phylo_solution(
x = x, y = y, tree = star_phylogeny(spp[[species_column_name]], w),
budget = budget, project_column_name = project_column_name,
success_column_name = success_column_name,
action_column_name = action_column_name,
cost_column_name = cost_column_name,
locked_in_column_name = locked_in_column_name,
locked_out_column_name = locked_out_column_name,
number_solutions = number_solutions)
# return output
out
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_wm.R
\name{setup-wm}
\alias{setup-wm}
\alias{sim_wm}
\alias{path_sim_wm}
\alias{force_path}
\title{Setup a WM simulation before running Antares}
\usage{
sim_wm(date_prev, start_prev_hebdo, path_inputs = path_sim_wm(),
n_mcyears = 2040, type_load = "prevu", load_offset_options = NULL,
dispo_pump = c(3520, 3520, 3520, 3520, 3520, 3520, 3520),
simulation_source = NULL, simulation_dest = NULL,
copy_study = TRUE, flow_based = FALSE, opts = NULL)
path_sim_wm(path_dir = "inputs", meteologica = "meteologica",
cnes = "cnes", planning = "planning", forfait_oa = "forfait_oa",
ntc = "ntc", ntc_tp = "ntc_tp", capa_hydro = "capa_hydro",
hydro = "hydro", eco2mix = "eco2mix")
force_path(...)
}
\arguments{
\item{date_prev}{Date of simulation.}
\item{start_prev_hebdo}{Date of forecasts, with format \code{\%Y-\%m-\%d}.}
\item{path_inputs}{A \code{list} with path to inputs directories, obtained with \code{path_sim_wm}.}
\item{n_mcyears}{Number of MC years in the study, default to \code{2040}.}
\item{type_load}{Forecast to use \code{"prevu"}, \code{"premis"} or \code{"offset"}.}
\item{load_offset_options}{DEPRECATED. Peak and off-peak if \code{type_load = "offset"}, see \code{\link{offset_opts}}.}
\item{dispo_pump}{Pumpage availability.}
\item{simulation_source}{Path to source simulation for creating Hydro for other areas.
If provided a copy of this simulation will be performed.}
\item{simulation_dest}{Name of the directory where to copy the study. Warning: content of directory will be deleted!}
\item{copy_study}{Make a copy of the whole study before modifying Antares inputs.}
\item{flow_based}{Logical, is this a flow-based study? If \code{TRUE}, NTC steps are ignored.}
\item{opts}{List of simulation parameters returned by the function
\code{antaresRead::setSimulationPath}}
\item{path_dir}{Main directory where inputs are.}
\item{meteologica}{Sub-directory containing meteologica files.}
\item{cnes}{Sub-directory containing CNES files.}
\item{planning}{Sub-directory containing planning files.}
\item{forfait_oa}{Sub-directory containing forfait OA files.}
\item{ntc}{Sub-directory containing NTC files.}
\item{ntc_tp}{Sub-directory containing NTC transparency files.}
\item{capa_hydro}{Sub-directory containing Hydraulic capacity transparency files.}
\item{hydro}{Sub-directory containing hydro files.}
\item{eco2mix}{Sub-directory containing eco2mix files.}
\item{...}{Character vectors indicating path to a directory,
use this to specify a path outside the \code{"path_dir"} directory.}
}
\value{
For \code{path_sim_wm} a named \code{list}.
}
\description{
Setup a WM simulation before running Antares
Set path to inputs for WM simulation
}
\examples{
\dontrun{
library(antaresRead)
library(antaresWeeklyMargin)
opts <- setSimulationPath("path/to/original/study")
sim_wm(
date_prev = "2018-06-21",
start_prev_hebdo = "2018-06-23",
simulation_dest = "test_case_WM",
type_load = "prevu",
path_inputs = path_sim_wm( # Path to input data
path_dir = "../inputs",
meteologica = "meteologica",
cnes = "cnes",
planning = "planning",
forfait_oa = "forfait_oa",
ntc = "ntc",
ntc_tp = "ntc_tp",
capa_hydro = "capa_hydro",
hydro = "hydro"
),
opts = opts
)
}
}
|
/man/setup-wm.Rd
|
no_license
|
rte-antares-rpackage/antaresWeeklyMargin
|
R
| false | true | 3,375 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_wm.R
\name{setup-wm}
\alias{setup-wm}
\alias{sim_wm}
\alias{path_sim_wm}
\alias{force_path}
\title{Setup a WM simulation before running Antares}
\usage{
sim_wm(date_prev, start_prev_hebdo, path_inputs = path_sim_wm(),
n_mcyears = 2040, type_load = "prevu", load_offset_options = NULL,
dispo_pump = c(3520, 3520, 3520, 3520, 3520, 3520, 3520),
simulation_source = NULL, simulation_dest = NULL,
copy_study = TRUE, flow_based = FALSE, opts = NULL)
path_sim_wm(path_dir = "inputs", meteologica = "meteologica",
cnes = "cnes", planning = "planning", forfait_oa = "forfait_oa",
ntc = "ntc", ntc_tp = "ntc_tp", capa_hydro = "capa_hydro",
hydro = "hydro", eco2mix = "eco2mix")
force_path(...)
}
\arguments{
\item{date_prev}{Date of simulation.}
\item{start_prev_hebdo}{Date of forecasts, with format \code{\%Y-\%m-\%d}.}
\item{path_inputs}{A \code{list} with path to inputs directories, obtained with \code{path_sim_wm}.}
\item{n_mcyears}{Number of MC years in the study, default to \code{2040}.}
\item{type_load}{Forecast to use \code{"prevu"}, \code{"premis"} or \code{"offset"}.}
\item{load_offset_options}{DEPRECATED. Peak and off-peak if \code{type_load = "offset"}, see \code{\link{offset_opts}}.}
\item{dispo_pump}{Pumpage availability.}
\item{simulation_source}{Path to source simulation for creating Hydro for other areas.
If provided a copy of this simulation will be performed.}
\item{simulation_dest}{Name of the directory where to copy the study. Warning: content of directory will be deleted!}
\item{copy_study}{Make a copy of the whole study before modifying Antares inputs.}
\item{flow_based}{Logical, is this a flow-based study? If \code{TRUE}, NTC steps are ignored.}
\item{opts}{List of simulation parameters returned by the function
\code{antaresRead::setSimulationPath}}
\item{path_dir}{Main directory where inputs are.}
\item{meteologica}{Sub-directory containing meteologica files.}
\item{cnes}{Sub-directory containing CNES files.}
\item{planning}{Sub-directory containing planning files.}
\item{forfait_oa}{Sub-directory containing forfait OA files.}
\item{ntc}{Sub-directory containing NTC files.}
\item{ntc_tp}{Sub-directory containing NTC transparency files.}
\item{capa_hydro}{Sub-directory containing Hydraulic capacity transparency files.}
\item{hydro}{Sub-directory containing hydro files.}
\item{eco2mix}{Sub-directory containing eco2mix files.}
\item{...}{Character vectors indicating path to a directory,
use this to specify a path outside the \code{"path_dir"} directory.}
}
\value{
For \code{path_sim_wm} a named \code{list}.
}
\description{
Setup a WM simulation before running Antares
Set path to inputs for WM simulation
}
\examples{
\dontrun{
library(antaresRead)
library(antaresWeeklyMargin)
opts <- setSimulationPath("path/to/original/study")
sim_wm(
date_prev = "2018-06-21",
start_prev_hebdo = "2018-06-23",
simulation_dest = "test_case_WM",
type_load = "prevu",
path_inputs = path_sim_wm( # Path to input data
path_dir = "../inputs",
meteologica = "meteologica",
cnes = "cnes",
planning = "planning",
forfait_oa = "forfait_oa",
ntc = "ntc",
ntc_tp = "ntc_tp",
capa_hydro = "capa_hydro",
hydro = "hydro"
),
opts = opts
)
}
}
|
# created both functions in the style of the example functions
# since it is kind of the same data structure / structural problem
# as the example was, but a different computation
makeCacheMatrix <- function(x = matrix()) {
xInv <- NULL
set <- function(y) {
x <<- y
xInv <<- NULL
}
get <- function() x
setInv <- function(inv) xInv <<- inv
getInv <- function() xInv
# return new defined functions.
list(set = set,
get = get,
setInv = setInv,
getInv = getInv)
}
# compute the inverse of the matrix, but only if there is no cached version of the matrix.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xInv <- x$getInv()
if (!is.null(xInv)) {
message("getting cached data")
return(xInv)
}
matrix <- x$get()
xInv <- solve(matrix, ...)
x$setInv(xInv)
xInv
}
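# --- illustrative usage sketch (added example; not part of the original assignment) ---
# The second cacheSolve() call should return the cached inverse and print the
# "getting cached data" message instead of recomputing it.
example_matrix <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
print(cacheSolve(example_matrix))  # computes the inverse and caches it
print(cacheSolve(example_matrix))  # retrieves the cached inverse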
|
/cachematrix.R
|
no_license
|
smichalak/ProgrammingAssignment2
|
R
| false | false | 913 |
r
|
# created both functions in the style of the example functions
# since it is kind of the same data structure / structural problem
# as the example was, but a different computation
makeCacheMatrix <- function(x = matrix()) {
xInv <- NULL
set <- function(y) {
x <<- y
xInv <<- NULL
}
get <- function() x
setInv <- function(inv) xInv <<- inv
getInv <- function() xInv
# return new defined functions.
list(set = set,
get = get,
setInv = setInv,
getInv = getInv)
}
# compute the inverse of the matrix, but only if there is no cached version of the matrix.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
xInv <- x$getInv()
if (!is.null(xInv)) {
message("getting cached data")
return(xInv)
}
matrix <- x$get()
xInv <- solve(matrix, ...)
x$setInv(xInv)
xInv
}
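# --- illustrative usage sketch (added example; not part of the original assignment) ---
# The second cacheSolve() call should return the cached inverse and print the
# "getting cached data" message instead of recomputing it.
example_matrix <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
print(cacheSolve(example_matrix))  # computes the inverse and caches it
print(cacheSolve(example_matrix))  # retrieves the cached inverse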
|
install.packages("uuid")
install.packages('telegram')
install.packages("cronR")
|
/InstallPackages.R
|
no_license
|
charlos123/bot
|
R
| false | false | 80 |
r
|
install.packages("uuid")
install.packages('telegram')
install.packages("cronR")
|
require(DAAG)
attach(possum)
#1
hist(age)
colnames(possum)
sex
hist(age, seq(0,9,1.5))
plot(density(age,na.rm=TRUE))
#2
hist(earconch)
boxplot(earconch~sex)
#3
pairs(~age+hdlngth+skullw+totlngth+taill+footlgth+earconch+eye+chest+belly, data=possum)
plot(hdlngth,skullw)
points(mean(hdlngth),mean(skullw),pch=2,col="red")
|
/HW5/HW5_script.R
|
no_license
|
brisenodaniel/Data_Viz
|
R
| false | false | 326 |
r
|
require(DAAG)
attach(possum)
#1
hist(age)
colnames(possum)
sex
hist(age, seq(0,9,1.5))
plot(density(age,na.rm=TRUE))
#2
hist(earconch)
boxplot(earconch~sex)
#3
pairs(~age+hdlngth+skullw+totlngth+taill+footlgth+earconch+eye+chest+belly, data=possum)
plot(hdlngth,skullw)
points(mean(hdlngth),mean(skullw),pch=2,col="red")
|
##Shapefile prep for reef fish management areas
library(rgdal)
## Shallow water grouper closure
setwd("X:/Data_John/shiny/reeffishmanagementareas/swg")
lineSWG <- readOGR("SWG_ln.shp", layer="SWG_ln")
pointSWG <- readOGR("SWG_pt.shp", layer="SWG_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/gulf_reefll_seasonal")
polyLongLine <- readOGR("Gulf_ReefLL_seasonal_po.shp", layer="Gulf_ReefLL_seasonal_po")
pointLongLine <- readOGR("Gulf_ReefLL_seasonal_pt.shp", layer="Gulf_ReefLL_seasonal_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/longline_buoy")
polyLongLineBuoy <- readOGR("longline_buoy_po.shp", layer="longline_buoy_po")
pointLongLineBuoy <- readOGR("longline_buoy_pt.shp", layer="longline_buoy_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/reef_stressed")
polyReefStressed <- readOGR("reef_stressed_po.shp", layer="reef_stressed_po")
pointReefStresssed <- readOGR("reef_stressed_pt.shp", layer="reef_stressed_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/NorthernAndSouthern")
SWGOpen <- readOGR("NorthernSGrouper.shp", layer="NorthernSGrouper")
SWGClosed <- readOGR("SouthernSGrouper.shp", layer="SouthernSGrouper")
library(taRifx.geo)
polySWG <- rbind(SWGOpen, SWGClosed)
setwd("X:/Data_John/shiny/reeffishmanagementareas")
save.image("ReefFishManagement.RData")
|
/reeffishmanagementv030/shapefileprep.R
|
no_license
|
claire-roberts/Reef-Fish-Management-Areas
|
R
| false | false | 1,314 |
r
|
##Shapefile prep for reef fish management areas
library(rgdal)
## Shallow water grouper closure
setwd("X:/Data_John/shiny/reeffishmanagementareas/swg")
lineSWG <- readOGR("SWG_ln.shp", layer="SWG_ln")
pointSWG <- readOGR("SWG_pt.shp", layer="SWG_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/gulf_reefll_seasonal")
polyLongLine <- readOGR("Gulf_ReefLL_seasonal_po.shp", layer="Gulf_ReefLL_seasonal_po")
pointLongLine <- readOGR("Gulf_ReefLL_seasonal_pt.shp", layer="Gulf_ReefLL_seasonal_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/longline_buoy")
polyLongLineBuoy <- readOGR("longline_buoy_po.shp", layer="longline_buoy_po")
pointLongLineBuoy <- readOGR("longline_buoy_pt.shp", layer="longline_buoy_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/reef_stressed")
polyReefStressed <- readOGR("reef_stressed_po.shp", layer="reef_stressed_po")
pointReefStresssed <- readOGR("reef_stressed_pt.shp", layer="reef_stressed_pt")
setwd("X:/Data_John/shiny/reeffishmanagementareas/NorthernAndSouthern")
SWGOpen <- readOGR("NorthernSGrouper.shp", layer="NorthernSGrouper")
SWGClosed <- readOGR("SouthernSGrouper.shp", layer="SouthernSGrouper")
library(taRifx.geo)
polySWG <- rbind(SWGOpen, SWGClosed)
setwd("X:/Data_John/shiny/reeffishmanagementareas")
save.image("ReefFishManagement.RData")
|
#setwd("~/Desktop/text analysis")
#setwd("D:/Dati/vito.tranquillo/Desktop/Rprojects/text mining elisabetta")
library(tm)
library(wordcloud)
library(ggplot2)
library(ggthemes)
library(igraph)
library(qdap)
library(dendextend)
library(circlize)
library(Rgraphviz)
library(graph)
#library(tidyverse)
library(tidytext)
library(topicmodels)
library(dplyr)
library(tidyr)
#library(data.table)
rm(list=ls())
tryTolower<-function(x){
y=NA
try_error=tryCatch(tolower(x), error=function(e) e)
if(!inherits(try_error, 'error'))
y=tolower(x)
return(y)
}
clean.corpus<-function(corpus){
corpus<-tm_map(corpus, content_transformer(tryTolower))
corpus<-tm_map(corpus, removeWords, custom.stopwords)
corpus<-tm_map(corpus, removePunctuation)
corpus<-tm_map(corpus, stripWhitespace)
corpus<-tm_map(corpus,removeNumbers)
return(corpus)
}
df<-read.csv('dati.csv', header = T, sep = ";", stringsAsFactors=FALSE)
##################Q10###########
q10<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q10)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"good","appropriate","available","fresh","adequate","presence", "access","enough")
corpus <- VCorpus(DataframeSource(q10))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "litter", replacement="bedding")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q10.m<-as.matrix(tdm)
dim(tdm.q10.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q10.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
ggplot(freq.df, aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
# tdm.q10.m #matrice
#
#
#
# #adjacency matrix
# tdm.q10.m[tdm.q10.m>=1] <- 1
# tdm2<-tdm.q10.m%*%t(tdm.q10.m)
#
# tdm2[5:10,5:10]
# tdm.g <- graph.adjacency(tdm2, weighted=TRUE, mode="undirected")
# tdm.g<-simplify(tdm.g)
#
# V(tdm.g)$label <- V(tdm.g)$name
# V(tdm.g)$degree <- degree(tdm.g)
#
#
# layout1 <- layout.fruchterman.reingold(tdm.g)
#
# plot(tdm.g, layout=layout1, vertex.size=20,
# vertex.label.color="darkred")
######associations############
associations<-findAssocs(tdm,'floors', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=floors), data=associations, size=1)+
  theme_gdocs()+geom_text(aes(x=floors, label=floors),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
# scratch notes (kept commented out): example of indexing a matrix by its dimnames
# X <- matrix(runif(100), ncol = 10)
# dimnames(X) <- list(rows = LETTERS[1:10], cols = LETTERS[1:10])
# X["B", ]
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
############################################Q11#######################################
q11<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q11)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"good","adequate","enough","feeding","food","feed")
corpus <- VCorpus(DataframeSource(q11))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "body", replacement = "score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "score", replacement = "body_score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "forage", replacement = "roughage")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "hay", replacement = "roughage")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q11.m<-as.matrix(tdm)
dim(tdm.q11.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q11.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=200, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'roughage', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=roughage), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=roughage, label=roughage),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
#############################################Q12#######################################
q12<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q12)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"look", "alert","healthy","shiny","bright","signs","feet","good")
corpus <- VCorpus(DataframeSource(q12))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "body", replacement = "score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "score", replacement = "body_score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "hair", replacement = "coat")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q12.m<-as.matrix(tdm)
dim(tdm.q12.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q12.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'coat', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=coat), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=coat, label=coat),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
############################################Q13#######################################
q13<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q13)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"environment", "around","animals","behaviour", "normal")
corpus <- VCorpus(DataframeSource(q13))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "herd", replacement = "group")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "social", replacement = "interaction")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q13.m<-as.matrix(tdm)
dim(tdm.q13.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q13.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'group', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=group), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=group, label=group),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
############################################Q14#######################################
q14<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q14)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"back","people","herd","behaviours","abnormal","animal","presence","herd","excessive","behaviour")
corpus <- VCorpus(DataframeSource(q14))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "stereotypical", replacement = "stereotypies")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "dance", replacement = "weaving")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "bear", replacement = "weaving")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q14.m<-as.matrix(tdm)
dim(tdm.q14.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q14.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'weaving', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=weaving), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=weaving, label=weaving),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
##################TIDY#######################
######bi-gram Q10#####
data("stop_words")
q10<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q10)
my_stop_words<-rbind(data.frame(word=c("horses", "horse", "min", "exits", "etc", "condition", "conditions",
                       "good","appropriate","available","fresh","adequate","presence", "access","enough"),
                       lexicon="custom"), stop_words)
q10bgr<-q10 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q10bgr %>%
count(ngram, sort = TRUE)
q10bsep<-q10bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q10bsepf <- q10bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q10bsepf<-na.omit(q10bsepf)
bigram_graph <- q10bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
ggraph(bigram_graph, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###bi-gram Q11###
data("stop_words")
q11<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q11)
my_stop_words<-rbind(data.frame(word=c("horses", "horse", "box", "min",
                       "animals", "etc"), lexicon="custom"), stop_words)
q11bgr<-q11 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q11bgr %>%
count(ngram, sort = TRUE)
q11bsep<-q11bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q11bsepf <- q11bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q11bsepf<-na.omit(q11bsepf)
bigram_graph <- q11bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###bi-gram Q12###
data("stop_words")
q12<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q12)
my_stop_words<-rbind(data.frame(word=c("horses", "horse", "box", "min",
                       "animals", "etc"), lexicon="custom"), stop_words)
q12bgr<-q12 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q12bgr %>%
count(ngram, sort = TRUE)
q12bsep<-q12bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q12bsepf <- q12bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q12bsepf<-na.omit(q12bsepf)
bigram_graph <- q12bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###Q13-bigram###
data("stop_words")
q13<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q13)
my_stop_words<-rbind(data.frame(word=c("horses", "horse", "box", "min",
                       "animals", "etc"), lexicon="custom"), stop_words)
q13bgr<-q13%>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q13bgr %>%
count(ngram, sort = TRUE)
q13bsep<-q13bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q13bsepf <- q13bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q13bsepf<-na.omit(q13bsepf)
bigram_graph <- q13bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###Q14-bigram###
data("stop_words")
q14<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q14)
my_stop_words<-rbind(data.frame(word=c("horses", "horse", "box", "min",
                       "animals", "etc"), lexicon="custom"), stop_words)
q14bgr<-q14 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q14bgr %>%
count(ngram, sort = TRUE)
q14bsep<-q14bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q14bsepf <- q14bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q14bsepf<-na.omit(q14bsepf)
bigram_graph <- q14bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###############TOPIC MODELING###############
dtm<-DocumentTermMatrix(corpus, control=list(weighting=weightTf))
rowTotals <- apply(dtm , 1, sum)
dtm<-dtm.new<- dtm[rowTotals> 0, ]
q10lda <- LDA(dtm, k = 2, control = list(seed = 1234))
q10topics <- tidy(q10lda, matrix = "beta")
ap_top_terms <- q10topics %>%
group_by(topic) %>%
top_n(20, beta) %>%
ungroup() %>%
arrange(topic, -beta)
ap_top_terms %>%
mutate(term = reorder(term, beta)) %>%
mutate(topic = paste0("topic", topic)) %>%
ggplot(aes(term, beta)) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
beta_spread <- q10topics %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1 > .001 | topic2 > .001) %>%
mutate(log_ratio = log2(topic2 / topic1)) %>%
group_by(log_ratio < 0) %>%
top_n(15, abs(log_ratio)) %>%
ungroup() %>%
mutate(word = reorder(term, log_ratio))
ggplot(beta_spread, aes(word, log_ratio))+
  geom_col()
# arrange(topic, -log_ratio) %>%
# mutate(term = reorder(term, log_ratio)) %>%
# ggplot(aes(term, log_ratio)) +
#geom_col() +
#coord_flip()
beta_spread %>%
group_by(log_ratio < 0) %>%
top_n(15, abs(log_ratio)) %>%
ungroup() %>%
mutate(word = reorder(term, log_ratio)) %>%
ggplot(aes(word, log_ratio)) +
geom_col(show.legend = FALSE) +
coord_flip() +
ylab("log odds ratio")
|
/newcode.r
|
no_license
|
TMax66/textmining
|
R
| false | false | 25,475 |
r
|
#setwd("~/Desktop/text analysis")
#setwd("D:/Dati/vito.tranquillo/Desktop/Rprojects/text mining elisabetta")
library(tm)
library(wordcloud)
library(ggplot2)
library(ggthemes)
library(igraph)
library(qdap)
library(dendextend)
library(circlize)
library(Rgraphviz)
library(graph)
#library(tidyverse)
library(tidytext)
library(topicmodels)
library(dplyr)
library(tidyr)
#library(data.table)
rm(list=ls())
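# helper: tolower() wrapper that returns NA instead of failing on problematic encodings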
tryTolower<-function(x){
y=NA
try_error=tryCatch(tolower(x), error=function(e) e)
if(!inherits(try_error, 'error'))
y=tolower(x)
return(y)
}
clean.corpus<-function(corpus){
corpus<-tm_map(corpus, content_transformer(tryTolower))
corpus<-tm_map(corpus, removeWords, custom.stopwords)
corpus<-tm_map(corpus, removePunctuation)
corpus<-tm_map(corpus, stripWhitespace)
corpus<-tm_map(corpus,removeNumbers)
return(corpus)
}
df<-read.csv('dati.csv', header = T, sep = ";", stringsAsFactors=FALSE)
##################Q10###########
q10<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q10)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"good","appropriate","available","fresh","adequate","presence", "access","enough")
corpus <- VCorpus(DataframeSource(q10))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "litter", replacement="bedding")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q10.m<-as.matrix(tdm)
dim(tdm.q10.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q10.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
ggplot(freq.df, aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
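# plot() on a TermDocumentMatrix draws a term-correlation graph (uses the Rgraphviz/graph packages loaded above)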
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
# tdm.q10.m #matrice
#
#
#
# #adjacency matrix
# tdm.q10.m[tdm.q10.m>=1] <- 1
# tdm2<-tdm.q10.m%*%t(tdm.q10.m)
#
# tdm2[5:10,5:10]
# tdm.g <- graph.adjacency(tdm2, weighted=TRUE, mode="undirected")
# tdm.g<-simplify(tdm.g)
#
# V(tdm.g)$label <- V(tdm.g)$name
# V(tdm.g)$degree <- degree(tdm.g)
#
#
# layout1 <- layout.fruchterman.reingold(tdm.g)
#
# plot(tdm.g, layout=layout1, vertex.size=20,
# vertex.label.color="darkred")
######associations############
associations<-findAssocs(tdm,'floors', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
  geom_point(aes(x=floors), data=associations, size=1)+
  theme_gdocs()+geom_text(aes(x=floors, label=floors),
                          colour="darkred", hjust=-.25, size=3)+
  theme(text=element_text(size=8),
        axis.title.y = element_blank())
X["nome",]
esempio
X<-matrix(runif(100), ncol=10)
>dimnames(X)<-list(righe=LETTERS[1:10], colonne=LETTERS[1:10])
X
X["B",]
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
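# dend.change(): helper that colours each dendrogram leaf label by its cluster assignment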
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
############################################Q11#######################################
q11<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q11)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"good","adequate","enough","feeding","food","feed")
corpus <- VCorpus(DataframeSource(q11))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "body", replacement = "score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "score", replacement = "body_score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "forage", replacement = "roughage")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "hay", replacement = "roughage")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q11.m<-as.matrix(tdm)
dim(tdm.q11.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q11.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=200, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'roughage', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=roughage), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=roughage, label=roughage),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
#############################################Q12#######################################
q12<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q12)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"look", "alert","healthy","shiny","bright","signs","feet","good")
corpus <- VCorpus(DataframeSource(q12))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "body", replacement = "score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "score", replacement = "body_score")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "hair", replacement = "coat")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q12.m<-as.matrix(tdm)
dim(tdm.q12.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q12.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'coat', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=coat), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=coat, label=coat),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
############################################Q13#######################################
q13<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q13)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"environment", "around","animals","behaviour", "normal")
corpus <- VCorpus(DataframeSource(q13))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "herd", replacement = "group")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "social", replacement = "interaction")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q13.m<-as.matrix(tdm)
dim(tdm.q13.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q13.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
set.seed(500)
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'group', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=group), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=group, label=group),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
############################################Q14#######################################
q14<-data.frame(doc_id=seq(1:nrow(df)),text=df$Q14)
custom.stopwords<-c(stopwords('english'), "horses", "horse", "min", "exits", "etc", "condition", "conditions",
"back","people","herd","behaviours","abnormal","animal","presence","herd","excessive","behaviour")
corpus <- VCorpus(DataframeSource(q14))
corpus<-clean.corpus(corpus)
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleanliness", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleaning", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "cleansing", replacement = "clean")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "mangers", replacement = "manger")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "stereotypical", replacement = "stereotypies")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "dance", replacement = "weaving")
corpus<-tm_map(corpus, content_transformer(gsub), pattern = "bear", replacement = "weaving")
tdm<-TermDocumentMatrix(corpus, control=list(weighting=weightTf))
tdm<-removeSparseTerms(tdm, sparse=0.99)
tdm.q14.m<-as.matrix(tdm)
dim(tdm.q14.m)
#####count-based evaluation#####
term.freq<-rowSums(tdm.q14.m)
freq.df<-data.frame(word=names(term.freq), frequency=term.freq)
freq.df<-freq.df[order(freq.df[,2], decreasing=T),]
freq.df$word<-factor(freq.df$word, levels=unique(as.character(freq.df$word)))
ggplot(freq.df[1:20,], aes(x=word, y=frequency))+geom_bar(stat = "identity", fill='darkred')+
coord_flip()+theme_gdocs()+geom_text(aes(label=frequency), colour="white",hjust=1.25, size=5.0)
#####word clouds#########
wordcloud(freq.df$word, freq.df$frequency, max.words=100, colors=c('black','darkred'))
###########NETWORK#############
freq.term<-findFreqTerms(tdm, lowfreq = 10)
plot(tdm, term=freq.term, corThreshold = 0.2,weighting=T)
######associations############
associations<-findAssocs(tdm,'weaving', 0.2)
associations<-as.data.frame(associations)
associations$terms<-row.names(associations)
associations$terms<-factor(associations$terms, levels = associations$terms)
ggplot(associations, aes(y=terms))+
geom_point(aes(x=weaving), data=associations, size=1)+
theme_gdocs()+geom_text(aes(x=weaving, label=weaving),
colour="darkred", hjust=-.25, size=3)+
theme(text=element_text(size=8),
axis.title.y = element_blank())
tdm2<-removeSparseTerms(tdm, sparse=0.975)
hc<-hclust(dist(tdm2, method = "euclidean"), method="complete")
plot(hc, yaxt="n",main="", hang=0.5, cex=0.6)
rect.hclust(hc,k=6)
dend.change<-function(n){
if(is.leaf(n))
{
a<-attributes(n)
labCol<-labelColors[clusMember[which(
names(clusMember)==a$label)]]
attr(n, "nodePar") <-c(a$nodePar, lab.col=labCol)
}
n
}
hcd<-as.dendrogram(hc)
clusMember<-cutree(hc, 6)
labelColors<-c('darkgrey', 'darkred', 'black', '#bada55', "lightblue", "red")
clusDendro<-dendrapply(hcd, dend.change)
plot(clusDendro, main='title', type='triangle', yaxt='n')
hcd<-color_labels(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
hcd<-color_branches(hcd, 4, col=c('#bada55', 'darkgrey', 'darkred', 'black'))
circlize_dendrogram(hcd, labels_track_height = 0.5, dend_track_height = 0.4)
##################TIDY#######################
######bi-gram Q10#####
data("stop_words")
q10<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q10)
my_stop_words<-rbind(c("horses", "horse", "min", "exits", "etc", "condition", "conditions",
"good","appropriate","available","fresh","adequate","presence", "access","enough"), stop_words)
q10bgr<-q10 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q10bgr %>%
count(ngram, sort = TRUE)
q10bsep<-q10bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q10bsepf <- q10bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q10bsepf<-na.omit(q10bsepf)
bigram_graph <- q10bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
ggraph(bigram_graph, layout = "fr") +
geom_edge_link() +
geom_node_point() +
geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###bi-gram Q11###
data("stop_words")
q11<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q11)
my_stop_words<-rbind(c("horses", "horse", "box", "min",
"animals", "etc"), stop_words)
q11bgr<-q11 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q11bgr %>%
count(ngram, sort = TRUE)
q11bsep<-q11bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q11bsepf <- q11bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q11bsepf<-na.omit(q11bsepf)
bigram_graph <- q11bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###bi-gram Q12###
data("stop_words")
q12<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q12)
my_stop_words<-rbind(c("horses", "horse", "box", "min",
"animals", "etc"), stop_words)
q12bgr<-q12 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q12bgr %>%
count(ngram, sort = TRUE)
q12bsep<-q12bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q12bsepf <- q12bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q12bsepf<-na.omit(q12bsepf)
bigram_graph <- q12bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###Q13-bigram###
data("stop_words")
q13<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q13)
my_stop_words<-rbind(c("horses", "horse", "box", "min",
"animals", "etc"), stop_words)
q13bgr<-q13%>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q13bgr %>%
count(ngram, sort = TRUE)
q13bsep<-q13bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q13bsepf <- q13bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q13bsepf<-na.omit(q13bsepf)
bigram_graph <- q13bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###Q14-bigram###
data("stop_words")
q14<-data_frame(doc_id=seq(1:nrow(df)),text=df$Q14)
my_stop_words<-rbind(c("horses", "horse", "box", "min",
"animals", "etc"), stop_words)
q14bgr<-q14 %>%
unnest_tokens(ngram, text, token = "ngrams", n = 2)
q14bgr %>%
count(ngram, sort = TRUE)
q14bsep<-q14bgr %>%
separate(ngram, c("word1", "word2"), sep = " ")
q14bsepf <- q14bsep %>%
filter(!word1 %in% my_stop_words$word) %>%
filter(!word2 %in% my_stop_words$word) %>%
count(word1, word2, sort = TRUE)
q14bsepf<-na.omit(q14bsepf)
bigram_graph <- q14bsepf %>%
filter(n>1) %>%
graph_from_data_frame()
library(ggraph)
set.seed(2017)
# ggraph(bigram_graph, layout = "fr") +
# geom_edge_link() +
# geom_node_point() +
# geom_node_text(aes(label = name), vjust = 1, hjust = 1)
a <- grid::arrow(type = "closed", length = unit(.15, "inches"))
ggraph(bigram_graph, layout = "fr") +
geom_edge_link(aes(edge_alpha = n), show.legend = FALSE,
arrow = a, end_cap = circle(.07, 'inches')) +
geom_node_point(color = "lightblue", size = 5) +
geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
theme_void()
###############TOPIC MODELING###############
dtm<-DocumentTermMatrix(corpus, control=list(weighting=weightTf))
rowTotals <- apply(dtm , 1, sum)
dtm<-dtm.new<- dtm[rowTotals> 0, ]
q10lda <- LDA(dtm, k = 2, control = list(seed = 1234))
q10topics <- tidy(q10lda, matrix = "beta")
ap_top_terms <- q10topics %>%
group_by(topic) %>%
top_n(20, beta) %>%
ungroup() %>%
arrange(topic, -beta)
ap_top_terms %>%
mutate(term = reorder(term, beta)) %>%
mutate(topic = paste0("topic", topic)) %>%
ggplot(aes(term, beta)) +
geom_col(show.legend = FALSE) +
facet_wrap(~ topic, scales = "free") +
coord_flip()
beta_spread <- q10topics %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1 > .001 | topic2 > .001) %>%
mutate(log_ratio = log2(topic2 / topic1)) %>%
group_by(log_ratio < 0) %>%
top_n(15, abs(log_ratio)) %>%
ungroup() %>%
mutate(word = reorder(term, log_ratio))
ggplot(beta_spread, aes(word, log_ratio)) +
  geom_col()
# arrange(topic, -log_ratio) %>%
# mutate(term = reorder(term, log_ratio)) %>%
# ggplot(aes(term, log_ratio)) +
#geom_col() +
#coord_flip()
beta_spread %>%
group_by(log_ratio < 0) %>%
top_n(15, abs(log_ratio)) %>%
ungroup() %>%
mutate(word = reorder(term, log_ratio)) %>%
ggplot(aes(word, log_ratio)) +
geom_col(show.legend = FALSE) +
coord_flip() +
ylab("log odds ratio")
|
library(GenomicRanges)  # provides GRanges()/IRanges() used below
env_bdgdiff = new.env()
# note: input_dir and wd are assumed to be defined by the calling script
# source("H:/projects/Apps/diff_marks_v2/scripts/enrichment_testing.R")
evalq(envir = env_bdgdiff, {
setwd(input_dir)
files = dir(path = "bdgdiff_short_len/", pattern = "common", full.names = T)
all_bdgdiff_tables = list()
wrap.read.table = function(file){
test = read.table(file, sep = "\n", nrows = 2)
if(nrow(test) == 1){
warning(paste("no results for:", file))
blank_gr = GRanges(lr = numeric())
return(blank_gr)
}
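    # first line is a track header (hence skip = 1); column 5 is read in as the log-likelihood ratio 'lr'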
tab = read.table(file, skip = 1)
gr = GRanges(seqnames = tab[,1], ranges = IRanges(start = tab[,2], end = tab[,3]), lr = tab[,5])
return(gr)
}
for(f in files){
name = basename(f)
name = sub("diff_", "", name)
name = sub("_c3.0_common.bed", "", name)
print(name)
cl = strsplit(name, "_")[[1]][1]
d1 = strsplit(name, "_")[[1]][2]
d2 = strsplit(name, "_")[[1]][6]
m = strsplit(name, "_")[[1]][3]
top_name = paste(cl, m, sep = "_")
bot_name = paste("from", d1, "to", d2, sep = "_")
rbot_name = paste("from", d2, "to", d1, sep = "_")
common_res = wrap.read.table(f)
d1_res = wrap.read.table(sub("common.bed", "cond1.bed", f))
d2_res = wrap.read.table(sub("common.bed", "cond2.bed", f))
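    # store the comparison in both directions; 'up'/'down' are swapped for the reverse contrast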
if(is.null(all_bdgdiff_tables[[top_name]])) all_bdgdiff_tables[[top_name]] = list()
all_bdgdiff_tables[[top_name]][[bot_name]] = list("common" = common_res, "down" = d1_res, "up" = d2_res)
all_bdgdiff_tables[[top_name]][[rbot_name]] = list("common" = common_res, "down" = d2_res, "up" = d1_res)
}
#sort names for neatness
all_bdgdiff_tables = lapply(all_bdgdiff_tables, function(x){
o = order(names(x))
return(x[o])
})
#filter out regions that aren't wide enough
all_bdgdiff_tables = lapply(all_bdgdiff_tables, function(group){
lapply(group, function(pair){
lapply(pair, function(direction){
keep = width(direction) > 250
return(direction[keep])
})
})
})
#create bg+up/down peaks by combining up/down with bg
all_bdgdiff_tables = lapply(all_bdgdiff_tables, function(group){
lapply(group, function(pair){
pair$up_bg = c(pair$common, pair$up)
pair$down_bg = c(pair$common, pair$down)
return(pair)
})
})
setwd(wd)
})
#compare up/down to common+up/down
all_bdgdiff_tables = env_bdgdiff$all_bdgdiff_tables
|
/module_STEIN_bdgdiff_ChIPseq.R
|
no_license
|
jrboyd/paper3_v3
|
R
| false | false | 2,378 |
r
|
library(GenomicRanges)  # provides GRanges()/IRanges() used below
env_bdgdiff = new.env()
# note: input_dir and wd are assumed to be defined by the calling script
# source("H:/projects/Apps/diff_marks_v2/scripts/enrichment_testing.R")
evalq(envir = env_bdgdiff, {
setwd(input_dir)
files = dir(path = "bdgdiff_short_len/", pattern = "common", full.names = T)
all_bdgdiff_tables = list()
wrap.read.table = function(file){
test = read.table(file, sep = "\n", nrows = 2)
if(nrow(test) == 1){
warning(paste("no results for:", file))
blank_gr = GRanges(lr = numeric())
return(blank_gr)
}
tab = read.table(file, skip = 1)
gr = GRanges(seqnames = tab[,1], ranges = IRanges(start = tab[,2], end = tab[,3]), lr = tab[,5])
return(gr)
}
for(f in files){
name = basename(f)
name = sub("diff_", "", name)
name = sub("_c3.0_common.bed", "", name)
print(name)
cl = strsplit(name, "_")[[1]][1]
d1 = strsplit(name, "_")[[1]][2]
d2 = strsplit(name, "_")[[1]][6]
m = strsplit(name, "_")[[1]][3]
top_name = paste(cl, m, sep = "_")
bot_name = paste("from", d1, "to", d2, sep = "_")
rbot_name = paste("from", d2, "to", d1, sep = "_")
common_res = wrap.read.table(f)
d1_res = wrap.read.table(sub("common.bed", "cond1.bed", f))
d2_res = wrap.read.table(sub("common.bed", "cond2.bed", f))
if(is.null(all_bdgdiff_tables[[top_name]])) all_bdgdiff_tables[[top_name]] = list()
all_bdgdiff_tables[[top_name]][[bot_name]] = list("common" = common_res, "down" = d1_res, "up" = d2_res)
all_bdgdiff_tables[[top_name]][[rbot_name]] = list("common" = common_res, "down" = d2_res, "up" = d1_res)
}
#sort names for neatness
all_bdgdiff_tables = lapply(all_bdgdiff_tables, function(x){
o = order(names(x))
return(x[o])
})
#filter out regions that aren't wide enough
all_bdgdiff_tables = lapply(all_bdgdiff_tables, function(group){
lapply(group, function(pair){
lapply(pair, function(direction){
keep = width(direction) > 250
return(direction[keep])
})
})
})
#create bg+up/down peaks by combining up/down with bg
all_bdgdiff_tables = lapply(all_bdgdiff_tables, function(group){
lapply(group, function(pair){
pair$up_bg = c(pair$common, pair$up)
pair$down_bg = c(pair$common, pair$down)
return(pair)
})
})
setwd(wd)
})
#compare up/down to common+up/down
all_bdgdiff_tables = env_bdgdiff$all_bdgdiff_tables
|
context("test-generate_text.R")
test_that("generate_text returns text string",
{
simresult=DSAIRM::simulate_basicbacteria_ode()
result = vector("list", 1)
result[[1]]$dat = simresult$ts
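  # result is a list of result lists; each element carries $dat plus optional $maketext / $showtext settings used by generate_text()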
#no maketext provided, should be character of length 1
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
expect_length( DSAIRM::generate_text(result), 1)
#maketext false is same as above
result[[1]]$maketext = FALSE
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
expect_length( DSAIRM::generate_text(result), 1)
#should now produce text
result[[1]]$maketext = TRUE
#should both be of class html and character
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
result[[1]]$maketext = FALSE
result[[1]]$showtext = 'Hello'
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
})
|
/tests/testthat/test-generate_text.R
|
no_license
|
HulyaKocyigit/DSAIRM
|
R
| false | false | 1,263 |
r
|
context("test-generate_text.R")
test_that("generate_text returns text string",
{
simresult=DSAIRM::simulate_basicbacteria_ode()
result = vector("list", 1)
result[[1]]$dat = simresult$ts
#no maketext provided, should be character of length 1
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
expect_length( DSAIRM::generate_text(result), 1)
#maketext false is same as above
result[[1]]$maketext = FALSE
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
expect_length( DSAIRM::generate_text(result), 1)
#should now produce text
result[[1]]$maketext = TRUE
#should both be of class html and character
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
result[[1]]$maketext = FALSE
result[[1]]$showtext = 'Hello'
expect_is( DSAIRM::generate_text(result), "html" )
expect_is( DSAIRM::generate_text(result), "character" )
})
|
#numeric [Integer and Double, default is double]
num <- 10
num
class(num)
typeof(num)
num2 <- 10.75
num2
class(num2)
typeof(num2)
#Integer [convert value to Integer explicitly]
i = as.integer(10)
i
class(i)
typeof(i)
#numeric data type functions
#abs() to find absolute value
print("absolute value : ")
abs(num2)
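# ceiling() rounds up, floor() rounds down, trunc() drops the fractional part,
# round(x, n) rounds to n digits, exp(x) returns e^x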
ceiling(num2)
floor(num2)
sqrt(2)
trunc(num2)
round(num2)
round(num2,1)
round(num2,2)
exp(2)
|
/basic_datastructures/numeric_data_types.R
|
no_license
|
khandura/R_Basics
|
R
| false | false | 416 |
r
|
#numeric [Integer and Double, default is double]
num <- 10
num
class(num)
typeof(num)
num2 <- 10.75
num2
class(num2)
typeof(num2)
#Integer [convert value to Integer explicitly]
i = as.integer(10)
i
class(i)
typeof(i)
#numeric data type functions
#abs() to find absolute value
print("absolute value : ")
abs(num2)
ceiling(num2)
floor(num2)
sqrt(2)
trunc(num2)
round(num2)
round(num2,1)
round(num2,2)
exp(2)
|
library(atlantistools)
### Name: plot_add_polygon_overview
### Title: Add spatial representation of polygon layout to a ggplot2
### object.
### Aliases: plot_add_polygon_overview
### ** Examples
d <- system.file("extdata", "setas-model-new-trunk", package = "atlantistools")
bgm_as_df <- convert_bgm(bgm = file.path(d, "VMPA_setas.bgm"))
p <- plot_line(preprocess$physics, wrap = NULL)
p <- custom_grid(p, grid_x = "polygon", grid_y = "variable")
grob <- plot_add_polygon_overview(p, bgm_as_df)
gridExtra::grid.arrange(grob)
|
/data/genthat_extracted_code/atlantistools/examples/plot_add_polygon_overview.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 536 |
r
|
library(atlantistools)
### Name: plot_add_polygon_overview
### Title: Add spatial representation of polygon layout to a ggplot2
### object.
### Aliases: plot_add_polygon_overview
### ** Examples
d <- system.file("extdata", "setas-model-new-trunk", package = "atlantistools")
bgm_as_df <- convert_bgm(bgm = file.path(d, "VMPA_setas.bgm"))
p <- plot_line(preprocess$physics, wrap = NULL)
p <- custom_grid(p, grid_x = "polygon", grid_y = "variable")
grob <- plot_add_polygon_overview(p, bgm_as_df)
gridExtra::grid.arrange(grob)
|
# import useful libraries
library("tidyverse")
# set working directory
setwd("/Users/michael/Clinical_Priority/data/OneDrive_1_2-15-2021")
# read in csv
labs <- read_csv("labs_clean.csv") # look at
labs_neph_hosp <- read_csv("labs_neph_hosp.csv")
labs_icu_hosp <- read_csv("labs_icu_hosp.csv")
labs_non_neph_non_icu_hosp <- read_csv("labs_non_neph_non_icu_hosp.csv")
labs_non_neph_non_icu_out <- read_csv("labs_non_neph_non_icu_out.csv")
labs_neph_out <- read_csv("labs_neph_out.csv")
# ------------------------------------------------------------------------------
# Acuity Score Calculations
# ------------------------------------------------------------------------------
# Random sampling of timestamp
set.seed(2)
labs_random <- labs %>%
group_by(ENCRYPTED_PAT_MRN_ID) %>%
sample_n(1) %>%
select(ENCRYPTED_PAT_MRN_ID, EVENT_TS)
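# the randomly drawn timestamp per patient serves as the index time for the 24-hour lab window built below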
# Rename the timestamp to a new name for joining
labs_random <- labs_random %>%
rename(TS = EVENT_TS)
# join the random timestamp to original labs dataframe
labs_z <- left_join(labs, labs_random)
# create new column with difference between random date per patient
# and date per lab (in seconds)
labs_z <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID, EVENT_NAME) %>%
mutate(date_minimum = abs(EVENT_TS - TS),
date_range = EVENT_TS - TS)
# find the lab date per lab closest to random date per patient
labs_z_min <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID, EVENT_NAME) %>%
filter(date_minimum == min(date_minimum))
# filter out for only 12 hours plus or minus random date per patient
# to create a 24-hour window
labs_z_min <- labs_z_min %>%
filter(date_minimum <= 43200) # 43,200 seconds is a 12 hr window
# reassign dataframe to labs_z to calculate acuity
labs_z <- labs_z_min %>%
mutate(NORMAL_MEAN = (NORMAL_HIGH + NORMAL_LOW) / 2)
# add column of z-scores per latest lab results
labs_z <- labs_z %>%
mutate(z_score = abs((RESULT_VALUE - NORMAL_MEAN) / ((NORMAL_HIGH - NORMAL_MEAN) / 2)))
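# z_score = deviation from the normal-range midpoint, scaled by half the distance from the midpoint to the upper limit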
# Acuity Score Dataframe
# create dataframe of acuity score (z-score sums per patient)
labs_z_score <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID) %>%
summarise(acuity_score = sum(z_score, na.rm = T))
# normalize acuity score by dividing all values by the max acuity score
# find max acuity score
acuity_max <- max(labs_z_score$acuity_score)
# create column with values scaled to 100 max
labs_z_score <- labs_z_score %>%
mutate(acuity_score_norm = (acuity_score / acuity_max) * 100)
# create dataframe with patient id and last encounter type
encounter_type <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID) %>%
filter(EVENT_TS == unique(max(EVENT_TS))) %>%
select(ENCRYPTED_PAT_MRN_ID, ENCOUNTER_TYPE, EVENT_TS)
# eliminate duplicate entries
encounter_type <- unique(encounter_type)
labs_acuity <- left_join(labs_z_score, encounter_type)
# write csv of z scores
write_csv(labs_acuity, "z_score_labs.csv")
# ------------------------------------------------------------------------------
# Stats Summary Table
# ------------------------------------------------------------------------------
# 1) Load lines 1-17 and lines 61-73 from Clinical_Priority_Visualization file
# to get labs_score and acuity_median and CoT_median
labs_score <- labs_score %>%
mutate(category = ifelse(log_acuity < acuity_median & log_CoT < CoT_median, 1,
ifelse(log_acuity >= acuity_median & log_CoT < CoT_median, 2,
ifelse(log_acuity >= acuity_median & log_CoT >= CoT_median, 3, 4))))
# make the category column into factors
labs_score$category <- as.factor(labs_score$category)
# label the factors
levels(labs_score$category) <- c("Well", "Chronically Sick", "Acutely Sick", "Improved")
# create a summary; finds sum of each factor
labs_summary_count <- labs_score %>%
group_by(ENCOUNTER_TYPE, category) %>%
summarise(count = n())
# write csv file
write_csv(labs_summary_count, "labs_count_encounter.csv")
# create a column categorizing encounter type as inpatient or outpatient
labs_score <- labs_score %>%
mutate(in_vs_outpatient = ifelse(ENCOUNTER_TYPE == "ICU_VISIT_HOSPITAL" |
ENCOUNTER_TYPE == "NEPH_VISIT_HOSPITAL" |
ENCOUNTER_TYPE == "NON_NEPH_NON_ICU_HOSPITAL", 1, 2))
# factor in_vs_oupatient column and label
labs_score$in_vs_outpatient <- as.factor(labs_score$in_vs_outpatient)
levels(labs_score$in_vs_outpatient) <- c("Inpatient", "Outpatient")
# summarize column; find sum of categories
labs_summary_count_in_vs_out <- labs_score %>%
group_by(in_vs_outpatient, category) %>%
summarise(count = n())
# write csv
write_csv(labs_summary_count_in_vs_out, "labs_count_in_vs_out.csv")
# statistical summary of timeframe of lab draws
time_summary_acuity <- labs_z %>%
group_by(ENCOUNTER_TYPE) %>%
summarise(median_time = median(date_range),
min_time = min(date_range),
max_time = max(date_range))
# write csv
write_csv(time_summary_acuity, "time_summary_acuity.csv")
|
/Clinical_Priority.R
|
no_license
|
michael-mokiao/Clinical-Priority
|
R
| false | false | 5,080 |
r
|
# import useful libraries
library("tidyverse")
# set working directory
setwd("/Users/michael/Clinical_Priority/data/OneDrive_1_2-15-2021")
# read in csv
labs <- read_csv("labs_clean.csv") # look at
labs_neph_hosp <- read_csv("labs_neph_hosp.csv")
labs_icu_hosp <- read_csv("labs_icu_hosp.csv")
labs_non_neph_non_icu_hosp <- read_csv("labs_non_neph_non_icu_hosp.csv")
labs_non_neph_non_icu_out <- read_csv("labs_non_neph_non_icu_out.csv")
labs_neph_out <- read_csv("labs_neph_out.csv")
# ------------------------------------------------------------------------------
# Acuity Score Calculations
# ------------------------------------------------------------------------------
# Random sampling of timestamp
set.seed(2)
labs_random <- labs %>%
group_by(ENCRYPTED_PAT_MRN_ID) %>%
sample_n(1) %>%
select(ENCRYPTED_PAT_MRN_ID, EVENT_TS)
# Rename the timestamp to a new name for joining
labs_random <- labs_random %>%
rename(TS = EVENT_TS)
# join the random timestamp to original labs dataframe
labs_z <- left_join(labs, labs_random)
# create new column with difference between random date per patient
# and date per lab (in seconds)
labs_z <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID, EVENT_NAME) %>%
mutate(date_minimum = abs(EVENT_TS - TS),
date_range = EVENT_TS - TS)
# find the lab date per lab closest to random date per patient
labs_z_min <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID, EVENT_NAME) %>%
filter(date_minimum == min(date_minimum))
# filter out for only 12 hours plus or minus random date per patient
# to create a 24-hour window
labs_z_min <- labs_z_min %>%
filter(date_minimum <= 43200) # 43,200 seconds is a 12 hr window
# reassign dataframe to labs_z to calculate acuity
labs_z <- labs_z_min %>%
mutate(NORMAL_MEAN = (NORMAL_HIGH + NORMAL_LOW) / 2)
# add column of z-scores per latest lab results
labs_z <- labs_z %>%
mutate(z_score = abs((RESULT_VALUE - NORMAL_MEAN) / ((NORMAL_HIGH - NORMAL_MEAN) / 2)))
# Acuity Score Dataframe
# create dataframe of acuity score (z-score sums per patient)
labs_z_score <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID) %>%
summarise(acuity_score = sum(z_score, na.rm = T))
# normalize acuity score by dividing all values by the max acuity score
# find max acuity score
acuity_max <- max(labs_z_score$acuity_score)
# create column with values scaled to 100 max
labs_z_score <- labs_z_score %>%
mutate(acuity_score_norm = (acuity_score / acuity_max) * 100)
# create dataframe with patient id and last encounter type
encounter_type <- labs_z %>%
group_by(ENCRYPTED_PAT_MRN_ID) %>%
filter(EVENT_TS == unique(max(EVENT_TS))) %>%
select(ENCRYPTED_PAT_MRN_ID, ENCOUNTER_TYPE, EVENT_TS)
# eliminate duplicate entries
encounter_type <- unique(encounter_type)
labs_acuity <- left_join(labs_z_score, encounter_type)
# write csv of z scores
write_csv(labs_acuity, "z_score_labs.csv")
# ------------------------------------------------------------------------------
# Stats Summary Table
# ------------------------------------------------------------------------------
# 1) Load lines 1-17 and lines 61-73 from Clinical_Priority_Visualization file
# to get labs_score and acuity_median and CoT_median
labs_score <- labs_score %>%
mutate(category = ifelse(log_acuity < acuity_median & log_CoT < CoT_median, 1,
ifelse(log_acuity >= acuity_median & log_CoT < CoT_median, 2,
ifelse(log_acuity >= acuity_median & log_CoT >= CoT_median, 3, 4))))
# make the category column into factors
labs_score$category <- as.factor(labs_score$category)
# label the factors
levels(labs_score$category) <- c("Well", "Chronically Sick", "Acutely Sick", "Improved")
# create a summary; finds sum of each factor
labs_summary_count <- labs_score %>%
group_by(ENCOUNTER_TYPE, category) %>%
summarise(count = n())
# write csv file
write_csv(labs_summary_count, "labs_count_encounter.csv")
# create a column categorizing encounter type as inpatient or outpatient
labs_score <- labs_score %>%
mutate(in_vs_outpatient = ifelse(ENCOUNTER_TYPE == "ICU_VISIT_HOSPITAL" |
ENCOUNTER_TYPE == "NEPH_VISIT_HOSPITAL" |
ENCOUNTER_TYPE == "NON_NEPH_NON_ICU_HOSPITAL", 1, 2))
# factor in_vs_oupatient column and label
labs_score$in_vs_outpatient <- as.factor(labs_score$in_vs_outpatient)
levels(labs_score$in_vs_outpatient) <- c("Inpatient", "Outpatient")
# summarize column; find sum of categories
labs_summary_count_in_vs_out <- labs_score %>%
group_by(in_vs_outpatient, category) %>%
summarise(count = n())
# write csv
write_csv(labs_summary_count_in_vs_out, "labs_count_in_vs_out.csv")
# statistical summary of timeframe of lab draws
time_summary_acuity <- labs_z %>%
group_by(ENCOUNTER_TYPE) %>%
summarise(median_time = median(date_range),
min_time = min(date_range),
max_time = max(date_range))
# write csv
write_csv(time_summary_acuity, "time_summary_acuity.csv")
|
## Time Series ARIMA and MSARIMA Models in R: Box-Jenkins Methodology
rm(list=ls())
cat("\014")
install.packages("tseries")
install.packages("forecast")
install.packages("plm")
install.packages("Formula")
install.packages("tcltk")
install.packages("uroot")
install.packages("pdR")
install.packages("stats")
library(tseries)
library(forecast)
library(plm)
library(Formula)
library(tcltk)
library(uroot)
library(pdR)
library(stats)
options (scipen=99999)
##data2=read.csv("C:/Users/kakali/Desktop/PA/ARIMA/CW/hourlydd.csv") # Read the file
##data3=read.csv("C:/Users/kakali/Desktop/PA/ARIMA/CW/sensex-oil-exrt.csv")# Read the file
##temp<-na.omit(df$Searchvolume)
##Identification Stage
#Plots: Graphs, ACF & PACF and Unit Root Tests
###GOOGLE SEARCH VOLUME WEEKLY
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
windows()
tsdisplay(df$SearchVolume, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$SearchVolume)-1)^(1/3) #k= number of lags
k
adf.test(df$SearchVolume, alternative="stationary", k=5) #Trend Non-stationarity
adf.test(df$SearchVolume, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$SearchVolume,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$SearchVolume,1))-1)^(1/3))
k
adf.test(diff(df$SearchVolume,1), alternative="stationary", k=5)
adf.test(diff(df$SearchVolume,4), alternative="stationary", k=5)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$SearchVolume,4), lag=50) #Graph, ACF, PACF
##Estimation
fit1 <- Arima(df$SearchVolume, order=c(0,0,1), seasonal = list(order = c(1, 1, 1), period=4))
fit1
summary(fit1)
fit2 <- Arima(df$SearchVolume, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=4))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=16, fitdf=4, type="Ljung")
b2<-tsdiag(fit1)
b2
##MONTHLY PEAK ELECTRICITY DEMAND
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
windows()
tsdisplay(df$Demand, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$Demand)-1)^(1/3)
k
adf.test(df$Demand, alternative="stationary", k=4)
adf.test(df$Demand, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$Demand,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$Demand,12))-1)^(1/3))
k
adf.test(diff(df$Demand,1), alternative="stationary", k=4)
adf.test(diff(df$Demand,12), alternative="stationary", k=12)
adf.test(diff(diff(df$Demand,12),1), alternative="stationary", k=4)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$Demand,12), lag=50) #stationary
##Estimation
fit1 <- Arima(df$Demand, order=c(0,0,1), seasonal = list(order = c(1, 1, 1), period=12))
fit1
summary(fit1)
fit2 <- Arima(df$Demand, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=12))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=16, fitdf=4, type="Ljung")
b2<-tsdiag(fit1)
b2
##MONTHLY FOREIGN TOURIST DEMAND
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
tsdisplay(df$FTD, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$FTD)-1)^(1/3)
k
adf.test(df$FTD, alternative="stationary", k=4)
adf.test(df$FTD, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$FTD,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$FTD,1))-1)^(1/3))
k
adf.test(diff(df$FTD,1), alternative="stationary", k=4)
adf.test(diff(df$FTD,12), alternative="stationary", k=24)
adf.test(diff(diff(df$FTD,12),1), alternative="stationary", k=24)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$FTD,12), lag=50) #Graph, ACF, PACF
tsdisplay(diff(diff(df$FTD,12),1), lag=50) #stationary
######DAILY SENSEX PRICE-------------------
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
tsdisplay(df$sensex, lag=50) #Graph, ACF, PACF
tsdisplay(df$oil, lag=50) #Graph, ACF, PACF
tsdisplay(df$ex, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$sensex)-1)^(1/3)
k
adf.test(df$sensex, alternative="stationary", k=6)
adf.test(df$sensex, alternative="stationary", k=15)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$sensex,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$sensex,1))-1)^(1/3))
k
adf.test(diff(df$sensex,1), alternative="stationary", k=5)
adf.test(diff(df$sensex,5), alternative="stationary", k=15)
adf.test(diff(diff(df$sensex,5),1), alternative="stationary", k=20)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$sensex,5), lag=50)
tsdisplay(diff(diff(df$sensex,5),1),lag=50)
##Estimation
fit1 <- Arima(df$sensex, order=c(0,1,1), seasonal = list(order = c(2, 1, 0), period=5))
fit1
summary(fit1)
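# note: fit2 below still refers to df$searchvolume from the weekly search-volume section, not the sensex series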
fit2 <- Arima(df$searchvolume, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=4))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=5, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=10, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=20, fitdf=2, type="Ljung")
windows()
b2<-tsdiag(fit1)
###US Electricity Sales
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
windows()
tsdisplay(df$Sales, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$Sales)-1)^(1/3) #k= number of lags
k
adf.test(df$Sales, alternative="stationary", k=7) #Trend Non-stationarity
adf.test(df$Sales, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
#tsdisplay(diff(df$Sales,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$Sales,1))-1)^(1/3))
k
#adf.test(diff(df$Sales,1), alternative="stationary", k=6)
adf.test(diff(df$Sales,12), alternative="stationary", k=12)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(diff(df$Sales,12),1), lag=50) #Graph, ACF, PACF
##Estimation
fit1 <- Arima(df$Sales, order=c(1,1,1), seasonal = list(order = c(1, 1, 1), period=12))
fit1
summary(fit1)
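# note: fit2 below again refers to df$searchvolume rather than df$Sales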
fit2 <- Arima(df$searchvolume, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=4))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=5, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=10, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=20, fitdf=2, type="Ljung")
windows()
b2<-tsdiag(fit1)
##Simple Forecast
fcast.fit1 <- forecast(fit1, h=30)
fcast.fit1
plot(fcast.fit1)
##Out-of-Sample Forecast
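# hold out the last 12 observations (rows 277-288) and refit the model on the remaining data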
hold <- window(ts(df$Sales), start=277);
fit1_no_holdout <- Arima(ts(df$Sales[-c(277:288)]), order=c(1,1,1), seasonal = list(order = c(1, 1, 1), period = 12))
fit1_fcast_no_holdout <- forecast(fit1_no_holdout,h=12)
plot(fit1_fcast_no_holdout, main=" ")
lines(ts(df$Sales))
summary(fit1_fcast_no_holdout)
##Out-of-sample forecast accuracy
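# absolute percentage errors on the holdout; mean() below gives the MAPE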
fcst_hold<-((abs(hold-fit1_fcast_no_holdout$mean)/hold))*100
mean(fcst_hold)
##auto.arima
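# auto.arima() selects the orders automatically; d = NA / D = NA lets it choose the differencing via unit-root tests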
fit1.auto<-auto.arima(df$Sales, d=NA, D=NA, seasonal = TRUE)
fit1.auto
summary(fit1.auto)
##Diagnostics
res <- residuals(fit1.auto)
tsdisplay(res)
Box.test(res, lag=16, fitdf=5, type="Ljung")
windows()
b1<-tsdiag(fit1.auto)
b1
fcast.auto <- forecast(fit1.auto, h=30)
plot(fcast.auto)
###HOURLY ELECTRICITY DEMAND------------------
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
tsdisplay(df$MW, lag=50) #stationary
##Unit Root Tests: Trend -----------
k = trunc(length(df$MW)-1)^(1/3)
k
adf.test(df$MW, alternative="stationary", k=10)
adf.test(df$MW, alternative="stationary", k=24)
##If lshort is TRUE, then the truncation lag parameter is set to
##trunc(4*(n/100)^0.25), otherwise trunc(12*(n/100)^0.25) is used.
pp.test(df$MW, alternative="stationary", lshort=TRUE)
adf.test(diff(df$MW,24), alternative="stationary", k=24)
tsdisplay(df$MW, lag=50) #stationary
tsdisplay(diff(df$MW,24), lag=50) #stationary
##Estimation
fit1 <- Arima(df$MW, order=c(1,0,2), seasonal = list(order = c(1, 0, 1), period=24))
fit1
summary(fit1)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=5, fitdf=4, type="Ljung")
Box.test(residuals(fit1), lag=10, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=20, fitdf=2, type="Ljung")
windows()
b2<-tsdiag(fit1)
|
/PRA/(modified)arima_identification_estimation_diagnostics_forecast.R
|
no_license
|
souviksamanta95/R_Personal
|
R
| false | false | 8,924 |
r
|
## Time Series ARIMA and MSARIMA Models in R: Box-Jenkins Methodology
rm(list=ls())
cat("\014")
install.packages("tseries")
install.packages("forecast")
install.packages("plm")
install.packages("Formula")
install.packages("tcltk")
install.packages("uroot")
install.packages("pdR")
install.packages("stats")
library(tseries)
library(forecast)
library(plm)
library(Formula)
library(tcltk)
library(uroot)
library(pdR)
library(stats)
options (scipen=99999)
##data2=read.csv("C:/Users/kakali/Desktop/PA/ARIMA/CW/hourlydd.csv") # Read the file
##data3=read.csv("C:/Users/kakali/Desktop/PA/ARIMA/CW/sensex-oil-exrt.csv")# Read the file
##temp<-na.omit(df$Searchvolume)
##Identification Stage
#Plots: Graphs, ACF & PACF and Unit Root Tests
###GOOGLE SEARCH VOLUME WEEKLY
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
windows()
tsdisplay(df$SearchVolume, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$SearchVolume)-1)^(1/3) #k= number of lags
k
adf.test(df$SearchVolume, alternative="stationary", k=5) #Trend Non-stationarity
adf.test(df$SearchVolume, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$SearchVolume,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$SearchVolume,1))-1)^(1/3))
k
adf.test(diff(df$SearchVolume,1), alternative="stationary", k=5)
adf.test(diff(df$SearchVolume,4), alternative="stationary", k=5)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$SearchVolume,4), lag=50) #Graph, ACF, PACF
##Estimation
fit1 <- Arima(df$SearchVolume, order=c(0,0,1), seasonal = list(order = c(1, 1, 1), period=4))
fit1
summary(fit1)
fit2 <- Arima(df$SearchVolume, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=4))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=16, fitdf=4, type="Ljung")
b2<-tsdiag(fit1)
b2
##MONTHLY PEAK ELECTRICITY DEMAND
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
windows()
tsdisplay(df$Demand, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$Demand)-1)^(1/3)
k
adf.test(df$Demand, alternative="stationary", k=4)
adf.test(df$Demand, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$Demand,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$Demand,12))-1)^(1/3))
k
adf.test(diff(df$Demand,1), alternative="stationary", k=4)
adf.test(diff(df$Demand,12), alternative="stationary", k=12)
adf.test(diff(diff(df$Demand,12),1), alternative="stationary", k=4)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$Demand,12), lag=50) #stationary
##Estimation
fit1 <- Arima(df$Demand, order=c(0,0,1), seasonal = list(order = c(1, 1, 1), period=12))
fit1
summary(fit1)
fit2 <- Arima(df$Demand, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=12))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=16, fitdf=4, type="Ljung")
b2<-tsdiag(fit1)
b2
##MONTHLY FOREIGN TOURIST DEMAND
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
tsdisplay(df$FTD, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$FTD)-1)^(1/3)
k
adf.test(df$FTD, alternative="stationary", k=4)
adf.test(df$FTD, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$FTD,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$FTD,1))-1)^(1/3))
k
adf.test(diff(df$FTD,1), alternative="stationary", k=4)
adf.test(diff(df$FTD,12), alternative="stationary", k=24)
adf.test(diff(diff(df$FTD,12),1), alternative="stationary", k=24)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$FTD,12), lag=50) #Graph, ACF, PACF
tsdisplay(diff(diff(df$FTD,12),1), lag=50) #stationary
######DAILY SENSEX PRICE-------------------
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
tsdisplay(df$sensex, lag=50) #Graph, ACF, PACF
tsdisplay(df$oil, lag=50) #Graph, ACF, PACF
tsdisplay(df$ex, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc(length(df$sensex)-1)^(1/3)
k
adf.test(df$sensex, alternative="stationary", k=6)
adf.test(df$sensex, alternative="stationary", k=15)
##Identification: First Difference/Trend Differencing
tsdisplay(diff(df$sensex,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$sensex,1))-1)^(1/3))
k
adf.test(diff(df$sensex,1), alternative="stationary", k=5)
adf.test(diff(df$sensex,5), alternative="stationary", k=15)
adf.test(diff(diff(df$sensex,5),1), alternative="stationary", k=20)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(df$sensex,5), lag=50)
tsdisplay(diff(diff(df$sensex,5),1),lag=50)
##Estimation
fit1 <- Arima(df$sensex, order=c(0,1,1), seasonal = list(order = c(2, 1, 0), period=5))
fit1
summary(fit1)
fit2 <- Arima(df$searchvolume, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=4))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=5, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=10, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=20, fitdf=2, type="Ljung")
windows()
b2<-tsdiag(fit1)
###US Electricity Sales
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
windows()
tsdisplay(df$Sales, lag=50) #Graph, ACF, PACF
##Unit Root Tests: Trend -----------
k = trunc((length(df$Sales)-1)^(1/3)) #k= number of lags
k
adf.test(df$Sales, alternative="stationary", k=7) #Trend Non-stationarity
adf.test(df$Sales, alternative="stationary", k=12)
##Identification: First Difference/Trend Differencing
#tsdisplay(diff(df$Sales,1), lag=50) #Graph, ACF, PACF
k = trunc((length(diff(df$Sales,1))-1)^(1/3))
k
#adf.test(diff(df$Sales,1), alternative="stationary", k=6)
adf.test(diff(df$Sales,12), alternative="stationary", k=12)
##Identification: SEASONAL: Seasonal Differencing
tsdisplay(diff(diff(df$Sales,12),1), lag=50) #Graph, ACF, PACF
##Estimation
fit1 <- Arima(df$Sales, order=c(1,1,1), seasonal = list(order = c(1, 1, 1), period=12))
fit1
summary(fit1)
fit2 <- Arima(df$Sales, order=c(3,0,1), seasonal = list(order = c(0, 1, 1), period=12))
fit2
summary(fit2)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=5, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=10, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=20, fitdf=2, type="Ljung")
windows()
b2<-tsdiag(fit1)
##Simple Forecast
fcast.fit1 <- forecast(fit1, h=30)
fcast.fit1
plot(fcast.fit1)
##Out-of-Sample Forecast
hold <- window(ts(df$Sales), start=277);
fit1_no_holdout <- Arima(ts(df$Sales[-c(277:288)]), order=c(1,1,1), seasonal = list(order = c(1, 1, 1), period = 12))
fit1_fcast_no_holdout <- forecast(fit1_no_holdout,h=12)
plot(fit1_fcast_no_holdout, main=" ")
lines(ts(df$Sales))
summary(fit1_fcast_no_holdout)
##Out-of-sample forecast accuracy
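##MAPE over the 12-month holdout: mean of |actual - forecast| / actual * 100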
fcst_hold<-((abs(hold-fit1_fcast_no_holdout$mean)/hold))*100
mean(fcst_hold)
##auto.arima
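##auto.arima searches over ARIMA orders and keeps the model with the lowest AICc;
##with d=NA and D=NA the differencing orders are chosen by unit-root/seasonality tests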
fit1.auto<-auto.arima(df$Sales, d=NA, D=NA, seasonal = TRUE)
fit1.auto
summary(fit1.auto)
##Diagnostics
res <- residuals(fit1.auto)
tsdisplay(res)
Box.test(res, lag=16, fitdf=5, type="Ljung")
windows()
b1<-tsdiag(fit1.auto)
b1
fcast.auto <- forecast(fit1.auto, h=30)
plot(fcast.auto)
###HOURLY ELECTRICITY DEMAND------------------
data<-read.csv(file.choose(),header=TRUE)
df<-data
nrow(df)
ncol(df)
head(df)
View(df)
#Plots: Graphs, ACF and PACF-------
##Identification: TREND: Level
tsdisplay(df$MW, lag=50) #stationary
##Unit Root Tests: Trend -----------
k = trunc((length(df$MW)-1)^(1/3))
k
adf.test(df$MW, alternative="stationary", k=10)
adf.test(df$MW, alternative="stationary", k=24)
##If lshort is TRUE, then the truncation lag parameter is set to
##trunc(4*(n/100)^0.25), otherwise trunc(12*(n/100)^0.25) is used.
pp.test(df$MW, alternative="stationary", lshort=TRUE)
adf.test(diff(df$MW,24), alternative="stationary", k=24)
tsdisplay(df$MW, lag=50) #stationary
tsdisplay(diff(df$MW,24), lag=50) #stationary
##Estimation
fit1 <- Arima(df$MW, order=c(1,0,2), seasonal = list(order = c(1, 0, 1), period=24))
fit1
summary(fit1)
##Diagnostics
tsdisplay(residuals(fit1))
Box.test(residuals(fit1), lag=5, fitdf=4, type="Ljung")
Box.test(residuals(fit1), lag=10, fitdf=2, type="Ljung")
Box.test(residuals(fit1), lag=20, fitdf=2, type="Ljung")
windows()
b2<-tsdiag(fit1)
|
library(tidyverse)
## Read CSV
memory.limit(size=35000)
setwd("C:/Users/Owner/Dropbox/Trademark Rebrand Project")
Trademarks <- read_csv("C:/Users/Owner/Dropbox/Trademark Rebrand Project/Trademarks 1884-2017 (Reduced).csv")
EntityRef <- read_csv("c:/Users/Owner/Dropbox/Trademark Rebrand Project/EntityRef.csv")
## Preliminary Analysis
Trademarks$filing_dt <- as.Date(Trademarks$filing_dt, format="%Y-%m-%d")
Trademarks$registration_dt <- as.Date(Trademarks$registration_dt, format="%Y-%m-%d")
Trademarks$Year <- format(Trademarks$filing_dt, "%Y")
# Bar plot
PriorRegBar <- Trademarks %>%
filter(own_entity_cd == 3) %>%
group_by(Year, prior_regs) %>%
summarise(n=n()) %>%
ggplot() + aes(x=Year, y = n, fill = prior_regs) + geom_bar(stat="identity") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + xlab("Year") + ylab("Number of Prior Marks") + ggtitle("Barplot of Relative Proportions of Corporate Trademark Prior Marks 1884-2017")
# Scatter Plot
PriorRegScatter <- Trademarks %>%
  filter(own_entity_cd == 3) %>%
group_by(Year, prior_regs) %>%
summarise(n=n()) %>%
  ggplot() + aes(x=Year, y=n) + geom_point() + geom_line(color="red") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + xlab("Year") + ylab("Number of Prior Marks") + ggtitle("Scatterplot of Corporate Trademark Prior Marks 1884-2017")
|
/R Scripts/Approximate Regular Expression Matching Script.R
|
no_license
|
Akesari12/TrademarkRebrand
|
R
| false | false | 1,566 |
r
|
library(tidyverse)
## Read CSV
memory.limit(size=35000)
setwd("C:/Users/Owner/Dropbox/Trademark Rebrand Project")
Trademarks <- read_csv("C:/Users/Owner/Dropbox/Trademark Rebrand Project/Trademarks 1884-2017 (Reduced).csv")
EntityRef <- read_csv("c:/Users/Owner/Dropbox/Trademark Rebrand Project/EntityRef.csv")
## Preliminary Analysis
Trademarks$filing_dt <- as.Date(Trademarks$filing_dt, format="%Y-%m-%d")
Trademarks$registration_dt <- as.Date(Trademarks$registration_dt, format="%Y-%m-%d")
Trademarks$Year <- format(Trademarks$filing_dt, "%Y")
# Bar plot
PriorRegBar <- Trademarks %>%
filter(own_entity_cd == 3) %>%
group_by(Year, prior_regs) %>%
summarise(n=n()) %>%
ggplot() + aes(x=Year, y = n, fill = prior_regs) + geom_bar(stat="identity") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + xlab("Year") + ylab("Number of Prior Marks") + ggtitle("Barplot of Relative Proportions of Corporate Trademark Prior Marks 1884-2017")
# Scatter Plot
PriorRegScatter <- Trademarks %>%
  filter(own_entity_cd == 3) %>%
group_by(Year, prior_regs) %>%
summarise(n=n()) %>%
  ggplot() + aes(x=Year, y=n) + geom_point() + geom_line(color="red") + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + xlab("Year") + ylab("Number of Prior Marks") + ggtitle("Scatterplot of Corporate Trademark Prior Marks 1884-2017")
|
#!/usr/bin/env Rscript
options(stringsAsFactors=FALSE)
options("scipen"=100, "digits"=4)
##################
# OPTION PARSING
##################
suppressPackageStartupMessages(library("optparse"))
option_list <- list(
make_option(c("-i", "--input"), action = "store", type="character", default=NULL,
help="$TMP/collapsed_frags.bed", metavar="character"),
make_option(c("-m", "--MM"), action = "store", type="numeric", default=1,
help="$UMI_MM", metavar="character"),
make_option(c("-c", "--core"), action = "store", type="numeric", default=1,
help="number of cores", metavar="character"),
make_option(c("-o", "--out"), action = "store", type="character", default="average.txt",
help="$TMP/reads.filtered.3.bed", metavar="character")
)
opt_parser <- OptionParser(
usage = "%prog [options]",
option_list=option_list,
description = "UMI collapsing"
)
arguments <- parse_args(opt_parser, positional_arguments = TRUE)
opt <- arguments$options
#------------
# LIBRARIES
#------------
suppressPackageStartupMessages(library("rtracklayer"))
suppressPackageStartupMessages(library("parallel"))
suppressPackageStartupMessages(library("stringdist"))
# print options
cat("\nRunning UMI collapsing\n")
opt
#------------
# Prepare data
#------------
test_big <- import.bed(opt$input)
test_big$ID <- paste(test_big@seqnames, test_big@ranges@start, test_big@ranges@start+test_big@ranges@width-1, test_big@strand, sep="_")
lvl <- names(sort(table(test_big$ID)))
test_big$ID <- factor(test_big$ID, levels = rev(lvl))
test_big_sorted <- test_big[order(test_big$ID)]
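# f3(): greedy UMI collapsing. Repeatedly take the first remaining barcode,
# count every barcode within opt$MM Hamming distance of it, store that count,
# and drop the matched barcodes until none are left.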
f3 = function(bar,c=1){
keep <- vector("numeric",length(bar))
names(keep) = names(bar)
while (length(bar)>0) {
rmv = which(stringdist(bar[1],bar,method="hamming",nthread =c)<=opt$MM)
keep[names(bar)[1]] = length(rmv)
bar = bar[-rmv]
}
return(keep)
}
#------------
# Run
#------------
# Calculate the number of cores
no_cores <- opt$core
# Initiate cluster
cl <- makeCluster(no_cores)
clusterExport(cl, "stringdist")
clusterExport(cl, "f3")
clusterExport(cl, "opt")
results <- unlist(parLapply(cl, split(test_big_sorted$name,test_big_sorted$ID),
function(x){
names(x) <- 1:length(x)
f3(x)
})
)
# Finish
stopCluster(cl)
test_big_sorted$counts <- as.numeric(results)
# print to see top
test_big_sorted
test_big_sorted <- test_big_sorted[test_big_sorted$counts>0]
test_big_sorted
out <- as.data.frame(test_big_sorted)
out$start <- out$start-1
out$name <- paste(out$name, out$score, sep="_")
write.table(out[,c(1:3,6,9,5)], opt$out, sep="\t", row.names = F, col.names = F, quote = F)
sessionInfo()
|
/GenomeWide_UMISTARRseq/STARRseq_UMI_collapsing.R
|
permissive
|
vahidelyasi/DeepSTARR
|
R
| false | false | 2,820 |
r
|
#!/usr/bin/env Rscript
options(stringsAsFactors=FALSE)
options("scipen"=100, "digits"=4)
##################
# OPTION PARSING
##################
suppressPackageStartupMessages(library("optparse"))
option_list <- list(
make_option(c("-i", "--input"), action = "store", type="character", default=NULL,
help="$TMP/collapsed_frags.bed", metavar="character"),
make_option(c("-m", "--MM"), action = "store", type="numeric", default=1,
help="$UMI_MM", metavar="character"),
make_option(c("-c", "--core"), action = "store", type="numeric", default=1,
help="number of cores", metavar="character"),
make_option(c("-o", "--out"), action = "store", type="character", default="average.txt",
help="$TMP/reads.filtered.3.bed", metavar="character")
)
opt_parser <- OptionParser(
usage = "%prog [options]",
option_list=option_list,
description = "UMI collapsing"
)
arguments <- parse_args(opt_parser, positional_arguments = TRUE)
opt <- arguments$options
#------------
# LIBRARIES
#------------
suppressPackageStartupMessages(library("rtracklayer"))
suppressPackageStartupMessages(library("parallel"))
suppressPackageStartupMessages(library("stringdist"))
# print options
cat("\nRunning UMI collapsing\n")
opt
#------------
# Prepare data
#------------
test_big <- import.bed(opt$input)
test_big$ID <- paste(test_big@seqnames, test_big@ranges@start, test_big@ranges@start+test_big@ranges@width-1, test_big@strand, sep="_")
lvl <- names(sort(table(test_big$ID)))
test_big$ID <- factor(test_big$ID, levels = rev(lvl))
test_big_sorted <- test_big[order(test_big$ID)]
f3 = function(bar,c=1){
keep <- vector("numeric",length(bar))
names(keep) = names(bar)
while (length(bar)>0) {
rmv = which(stringdist(bar[1],bar,method="hamming",nthread =c)<=opt$MM)
keep[names(bar)[1]] = length(rmv)
bar = bar[-rmv]
}
return(keep)
}
#------------
# Run
#------------
# Calculate the number of cores
no_cores <- opt$core
# Initiate cluster
cl <- makeCluster(no_cores)
clusterExport(cl, "stringdist")
clusterExport(cl, "f3")
clusterExport(cl, "opt")
results <- unlist(parLapply(cl, split(test_big_sorted$name,test_big_sorted$ID),
function(x){
names(x) <- 1:length(x)
f3(x)
})
)
# Finish
stopCluster(cl)
test_big_sorted$counts <- as.numeric(results)
# print to see top
test_big_sorted
test_big_sorted <- test_big_sorted[test_big_sorted$counts>0]
test_big_sorted
out <- as.data.frame(test_big_sorted)
out$start <- out$start-1
out$name <- paste(out$name, out$score, sep="_")
write.table(out[,c(1:3,6,9,5)], opt$out, sep="\t", row.names = F, col.names = F, quote = F)
sessionInfo()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Main_Functions_and_Methods.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{Binomial Cumulative Main Function}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{number of trials - numeric}
\item{prob}{probability of each trial (more than 0 and less than 1)}
}
\value{
a data.frame with two classes: c("bincum", "data.frame")
}
\description{
computes a data frame with the probability distribution along with cumulative probability: successes in the first column, prob in the second
}
\examples{
bin_cumulative(trials = 5, prob = 0.5)
}
|
/Workout03Binomial/Workout03Binomial/man/bin_cumulative.Rd
|
no_license
|
stat133-sp19/hw-stat133-madisontagg
|
R
| false | true | 634 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Main_Functions_and_Methods.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{Binomial Cumulative Main Function}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{number of trials - numeric}
\item{prob}{probability of each trial (more than 0 and less than 1)}
}
\value{
a data.frame with two classes: c("bincum", "data.frame")
}
\description{
computes a data frame with the probability distribution along with cumulative probability: successes in the first column, prob in the second
}
\examples{
bin_cumulative(trials = 5, prob = 0.5)
}
|
# -*- coding: utf-8 -*-
#' Created on Fri Apr 13 15:38:28 2018
#' R version 3.4.3 (2017-11-30)
#'
#' @group Group 2, DM2 2018 Semester 2
#' @author: Martins T.
#' @author: Mendes R.
#' @author: Santos R.
#'
# Libs --------------------------------------------------------------------
options(warn=-1)
source("src/packages.r")
include_packs(c("dygraphs","d3heatmap","rockchalk","forcats","rJava",
"xlsxjars","xlsx","tidyverse","stringi","stringr","ggcorrplot",
"sm","lubridate","magrittr","ggplot2","openxlsx","RColorBrewer",
"psych","treemap","data.table","pROC","class",'gmodels','klaR',
"C50","caret",'gmodels',"DMwR","recipes","epiR","pubh"))
# Load data ---------------------------------------------------------------
#Load normalized data xlsx
source("src/wrangling.r")
numericDataset <- xlsx::read.xlsx('datasets/numericDataset.xlsx',1, header= TRUE)
temp<-numericDataset
numericDataset<-temp
numericDataset$isChurn <- ifelse(numericDataset$isChurn==1,"Yes","No")
#Save test_labels
numericDataset$isChurn <- as.factor(numericDataset$isChurn)
set.seed(375)
# ## now using SMOTE to create a more "balanced problem"
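# SMOTE settings: perc.over = 300 creates three synthetic minority ("Yes") cases per
# original minority case by nearest-neighbour interpolation; perc.under = 0 adds no
# extra majority cases, and the oversampled data is appended to the original below.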
newData <- DMwR::SMOTE(isChurn ~ ., numericDataset, perc.over = 300,perc.under=0)
# count(newData$isChurn)
#
numericDataset<-rbind(numericDataset,newData)
# count(numericDataset$isChurn)
#glmStepAIC
#Setting up CV
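# 10-fold cross-validation repeated 10 times; class probabilities and resampled
# predictions are kept so an ROC curve can be computed afterwards.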
ControlParamteres <- trainControl(method="repeatedcv",
number=10,
repeats=10,
#sampling = "up",
#verboseIter = FALSE,
# metric = "ROC",
savePredictions = TRUE,
classProbs = TRUE)
#regLogistic
log_CV <- train(isChurn~.,
data=numericDataset,
method = "glm",
preProc=c("center", "scale"),
trControl = ControlParamteres)
log_CV
pred <- predict(log_CV)
#predict prob, no response w/ factors
numericDataset$prob <-predict(log_CV, type = "prob")[2]
g <- roc(isChurn ~ prob$Yes, data =numericDataset )
dev.off(); plot(g, col = 4, lty = 1,
main = "ROC", asp = NA,
xlab = "Specificity (%)",
ylab = "Sensitivity (%)",
print.auc=TRUE, type = 'l', ps=1000)
legend(.35,.75,
legend=c("48 Variables"),
col=c("4"),
lwd=2, ncol = 1,bty = "n", cex=1.3)
#confusionMatrix(pred, obs, positive = NULL,...), not the inverse
cm <- caret::confusionMatrix(pred,numericDataset$isChurn, positive = 'Yes',
dnn=c("Pred","Actual"))
draw_confusion_matrix_cv(cm)
lmFuncs$fit<-function (x, y, first, last, ...){
tmp <- as.data.frame(x)
tmp$y <- y
glm(y ~ ., data = tmp,family=binomial)
}
#note: with this fit() override, lmFuncs fits a logistic regression (glm with binomial family) instead of lm
rfe_controller <- rfeControl(functions=lmFuncs, method="repeatedcv",
rerank = FALSE,repeats = 10, verbose = FALSE)
subsets <- c(5:15)
predictorss<-normalizedDataset[,1:48]
predictorss<- predictorss[,sample(ncol(predictorss))]
log_Profiler <- rfe(predictorss,normalizedDataset[,49],
size=subsets, rfeControl = rfe_controller)
log_Profiler
plot(log_Profiler, type = c("g", "o"), col="blue")
ga_ctrl <- gafsControl(functions = rfGA,
method = "cv")
# , repeats = 1)
## Use the same random number seed as the RFE process
## so that the same CV folds are used for the external
## resampling.
set.seed(10)
rf_ga <- gafs(x = normalizedDataset[,1:48], y = normalizedDataset[,49],
iters = 15,
method = "glm",
gafsControl = ga_ctrl)
rf_ga
plot(rf_ga, type = c("g", "o"), col="blue")
pubh::contingency(isChurn~Age, data = normalizedDataset, method = "cohort.count")
|
/bin/CV.r
|
permissive
|
tmartins1996/r-binary-classification
|
R
| false | false | 3,983 |
r
|
# -*- coding: utf-8 -*-
#' Created on Fri Apr 13 15:38:28 2018
#' R version 3.4.3 (2017-11-30)
#'
#' @group Group 2, DM2 2018 Semester 2
#' @author: Martins T.
#' @author: Mendes R.
#' @author: Santos R.
#'
# Libs --------------------------------------------------------------------
options(warn=-1)
source("src/packages.r")
include_packs(c("dygraphs","d3heatmap","rockchalk","forcats","rJava",
"xlsxjars","xlsx","tidyverse","stringi","stringr","ggcorrplot",
"sm","lubridate","magrittr","ggplot2","openxlsx","RColorBrewer",
"psych","treemap","data.table","pROC","class",'gmodels','klaR',
"C50","caret",'gmodels',"DMwR","recipes","epiR","pubh"))
# Load data ---------------------------------------------------------------
#Load normalized data xlsx
source("src/wrangling.r")
numericDataset <- xlsx::read.xlsx('datasets/numericDataset.xlsx',1, header= TRUE)
temp<-numericDataset
numericDataset<-temp
numericDataset$isChurn <- ifelse(numericDataset$isChurn==1,"Yes","No")
#Save test_labels
numericDataset$isChurn <- as.factor(numericDataset$isChurn)
set.seed(375)
# ## now using SMOTE to create a more "balanced problem"
newData <- DMwR::SMOTE(isChurn ~ ., numericDataset, perc.over = 300,perc.under=0)
# count(newData$isChurn)
#
numericDataset<-rbind(numericDataset,newData)
# count(numericDataset$isChurn)
#glmStepAIC
#Setting up CV
ControlParamteres <- trainControl(method="repeatedcv",
number=10,
repeats=10,
#sampling = "up",
#verboseIter = FALSE,
# metric = "ROC",
savePredictions = TRUE,
classProbs = TRUE)
#regLogistic
log_CV <- train(isChurn~.,
data=numericDataset,
method = "glm",
preProc=c("center", "scale"),
trControl = ControlParamteres)
log_CV
pred <- predict(log_CV)
#predict prob, no response w/ factors
numericDataset$prob <-predict(log_CV, type = "prob")[2]
g <- roc(isChurn ~ prob$Yes, data =numericDataset )
dev.off(); plot(g, col = 4, lty = 1,
main = "ROC", asp = NA,
xlab = "Specificity (%)",
ylab = "Sensitivity (%)",
print.auc=TRUE, type = 'l', ps=1000)
legend(.35,.75,
legend=c("48 Variables"),
col=c("4"),
lwd=2, ncol = 1,bty = "n", cex=1.3)
#confusionMatrix(pred, obs, positive = NULL,...), not the inverse
cm <- caret::confusionMatrix(pred,numericDataset$isChurn, positive = 'Yes',
dnn=c("Pred","Actual"))
draw_confusion_matrix_cv(cm)
lmFuncs$fit<-function (x, y, first, last, ...){
tmp <- as.data.frame(x)
tmp$y <- y
glm(y ~ ., data = tmp,family=binomial)
}
#note: with this fit() override, lmFuncs fits a logistic regression (glm with binomial family) instead of lm
rfe_controller <- rfeControl(functions=lmFuncs, method="repeatedcv",
rerank = FALSE,repeats = 10, verbose = FALSE)
subsets <- c(5:15)
predictorss<-normalizedDataset[,1:48]
predictorss<- predictorss[,sample(ncol(predictorss))]
log_Profiler <- rfe(predictorss,normalizedDataset[,49],
size=subsets, rfeControl = rfe_controller)
log_Profiler
plot(log_Profiler, type = c("g", "o"), col="blue")
ga_ctrl <- gafsControl(functions = rfGA,
method = "cv")
# , repeats = 1)
## Use the same random number seed as the RFE process
## so that the same CV folds are used for the external
## resampling.
set.seed(10)
rf_ga <- gafs(x = normalizedDataset[,1:48], y = normalizedDataset[,49],
iters = 15,
method = "glm",
gafsControl = ga_ctrl)
rf_ga
plot(rf_ga, type = c("g", "o"), col="blue")
pubh::contingency(isChurn~Age, data = normalizedDataset, method = "cohort.count")
|
\encoding{UTF-8}
\name{generate.data.miss}
\alias{generate.data.miss}
\title{
Generate the dataset with missing values
}
\description{
The function generates a dataset with missing values from an input dataset with complete values. It is mainly intended for testing purposes.
The result is a \dQuote{data.frame} which corresponds to the input data.frame or matrix, with missing values inserted. The percentage of missing values is supplied as an input parameter. The processed dataset can be used in the missing value imputation algorithm \dQuote{input_miss} or for any other purpose.
}
\usage{
generate.data.miss(data,percent=5,filename=NULL)
}
\arguments{
\item{data}{a dataset, a matrix of feature values for several cases, the last column is for the class labels. Class labels could be numerical or character values. This dataset has no missing values}
\item{percent}{a numerical value for the percent of the missing values to be inserted into the dataset.}
\item{filename}{a character name of the output file to save the dataset with missing values.}
}
\details{
This function's main job is to generate the dataset with missing values from the input dataset with all the values. See the \dQuote{Value} section to this page for more details.
Data can be provided in matrix form, where the rows correspond to cases with feature values and class label. The columns contain the values of individual features and the last column must contain class labels. The maximal number of class labels equals 10.
The class label features and all the nominal features must be defined as factors.
}
\value{
A returned data.frame corresponds to the input dataset with inserted missing values.
}
\references{
McShane LM, Radmacher MD, Freidlin B, Yu R, Li MC, Simon R. Methods for assessing reproducibility of clustering patterns observed in analyses of microarray data. Bioinformatics. 2002 Nov;18(11):1462-9.
}
\seealso{
\code{\link{input_miss}}, \code{\link{select.process}},
\code{\link{classifier.loop}}
}
\examples{
# example
data(leukemia72_2)
percent =5
f.name=NULL #file name to include
out=generate.data.miss(data=leukemia72_2,percent=percent,filename=f.name)
}
\keyword{feature selection}
\keyword{classification}
\keyword{missing values}
|
/fuzzedpackages/Biocomb/man/generate.data.miss.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false | false | 2,359 |
rd
|
\encoding{UTF-8}
\name{generate.data.miss}
\alias{generate.data.miss}
\title{
Generate the dataset with missing values
}
\description{
The function generates a dataset with missing values from an input dataset with complete values. It is mainly intended for testing purposes.
The result is a \dQuote{data.frame} which corresponds to the input data.frame or matrix, with missing values inserted. The percentage of missing values is supplied as an input parameter. The processed dataset can be used in the missing value imputation algorithm \dQuote{input_miss} or for any other purpose.
}
\usage{
generate.data.miss(data,percent=5,filename=NULL)
}
\arguments{
\item{data}{a dataset, a matrix of feature values for several cases, the last column is for the class labels. Class labels could be numerical or character values. This dataset has no missing values}
\item{percent}{a numerical value for the percent of the missing values to be inserted into the dataset.}
\item{filename}{a character name of the output file to save the dataset with missing values.}
}
\details{
This function's main job is to generate the dataset with missing values from the input dataset with all the values. See the \dQuote{Value} section to this page for more details.
Data can be provided in matrix form, where the rows correspond to cases with feature values and class label. The columns contain the values of individual features and the last column must contain class labels. The maximal number of class labels equals 10.
The class label features and all the nominal features must be defined as factors.
}
\value{
A returned data.frame corresponds to the input dataset with inserted missing values.
}
\references{
McShane LM, Radmacher MD, Freidlin B, Yu R, Li MC, Simon R. Methods for assessing reproducibility of clustering patterns observed in analyses of microarray data. Bioinformatics. 2002 Nov;18(11):1462-9.
}
\seealso{
\code{\link{input_miss}}, \code{\link{select.process}},
\code{\link{classifier.loop}}
}
\examples{
# example
data(leukemia72_2)
percent =5
f.name=NULL #file name to include
out=generate.data.miss(data=leukemia72_2,percent=percent,filename=f.name)
}
\keyword{feature selection}
\keyword{classification}
\keyword{missing values}
|
#NEW FINDLINE APP USING METHODS FROM FINEGENE
source("../global.R")
#source('adjust-pheno.R')
library(ggplot2)
dbInfo = read.table('../../dbInfo.txt')
#### Define server logic required to summarize and view the selected dataset
shinyServer(function(input, output, session) {
allData <- reactive({
cdata <- session$clientData
tmp <- strsplit(cdata$url_search,"&")[[1]]
tmp <- tmp[grep("line",tmp)]
if(length(tmp)>0) {
url_line=strsplit(tmp,"=")[[1]][2]
updateTextInput(session=session,inputId="line",value=url_line)
}
con = dbConnect(MySQL(),dbname=toString(dbInfo[[1]]),user=toString(dbInfo[[2]]),password=toString(dbInfo[[3]]))
# lines <- unique(dbGetQuery(con,"SELECT idAccession FROM Accession"))
query <- paste("SELECT O.value, Ph.name, Pl.Accession_idAccession, T.name, E.name, F.name, Pl.idIndividualPlant",
" FROM Observation O",
" JOIN IndividualPlant Pl ON O.IndividualPlant_idIndividualPlant = Pl.idIndividualPlant",
" JOIN Phenotype Ph ON O.Phenotype_idPhenotype = Ph.idPhenotype",
" JOIN Experiment E ON Pl.Experiment_idExperiment = E.idExperiment",
" JOIN Facility F ON F.idFacility = Pl.Facility_idFacility",
" JOIN Treatment T ON O.Treatment_idTreatment = T.idTreatment",
sep="")
obstbl <- dbGetQuery(con,query)
names(obstbl) <- c("value","phenotype","line","treatment","experiment","facility","individualPlant")
if (dim(obstbl)[1]>0)
{
ret <- obstbl
ret <- ret[complete.cases(ret),]
ret$line <- ifelse(ret$line%in%c("COL70000","SALK_CS60000","CS70000"),"CS70000",ret$line)
} else {
ret <- NULL
}
cons<-dbListConnections(MySQL())
for(con in cons)
dbDisconnect(con)
# allData <- melt(ret, id = c('line','experiment','treatment','facility','phenotype','individualPlant'))
# allData <- cast(allData, line+experiment+treatment+facility+individualPlant ~ phenotype)
# lineNames <- unique(allData$line)
# allData
ret
})
allLineNames <- reactive({
ln <- unique(allData()$line)
if (length(ln)>0) {ln} else {""}
})
focalLines <- reactive({
con = dbConnect(MySQL(),dbname=toString(dbInfo[[1]]),user=toString(dbInfo[[2]]),password=toString(dbInfo[[3]]))
query = paste("SELECT idAccession FROM Accession")
### WHERE Accession_idAccession='",input$line,"' or Accession_idAccession='",input$line2,"'",sep="")
acc <- unique(unlist(c(dbGetQuery(con,query))))
lines=c(input$line,input$line2)[which(c(input$line,input$line2) %in% acc)]
print("in focalLines")
print(query)
print(length(acc))
print(str(acc))
print(c(input$line,input$line2))
print(lines)
print(acc[acc=="SALK_024526C"])
print("that was the 'lines' variable")
cons<-dbListConnections(MySQL())
for(con in cons)
dbDisconnect(con)
if (length(lines)>0) {lines} else {NA}
})
poss <- function() #checks for phenotypes associated with line(s) and only gives options to examine those (used in renderUI)
{
df <- allData()
phenos <- sort(unique(c(df$phenotype[df$line %in% focalLines()])))
if (length(phenos)>0) {phenos} else {""}
}
# Renders the User interface for selecting which phenotype to use
output$phenos = renderUI({
selectizeInput("phenos", "Choose phenotype(s):",
choices = c(poss()),selected = poss()[poss()=="fruitnum"],multiple=TRUE)
})
# This gives the warning message if the line is not in the DB, or links to it if it is
output$msg1 <- renderUI({
df = allData()
url.root <- "http://arabidopsis.org/servlets/Search?type=germplasm&search_action=search&pageNum=1&search=Submit+Query&germplasm_type=individual_line&taxon=1&name_type_1=gene_name&method_1=2&name_1=&name_type_2=germplasm_phenotype&method_2=1&name_2=&name_type_3=germplasm_stock_name&method_3=4&name_3="
if(input$line %in% df$line)
HTML(paste0("Go to TAIR for First Line: <a href='",url.root,input$line,"' target='_blank'>",input$line,"</a>"))
else
paste('Line ', input$line, " is not found in the database. Please try a different line.", sep='')
})
output$msg2 <- renderUI({
if (input$line2 == '') {
' '
}
else{
df = allData()
url.root <- "http://arabidopsis.org/servlets/Search?type=germplasm&search_action=search&pageNum=1&search=Submit+Query&germplasm_type=individual_line&taxon=1&name_type_1=gene_name&method_1=2&name_1=&name_type_2=germplasm_phenotype&method_2=1&name_2=&name_type_3=germplasm_stock_name&method_3=4&name_3="
if(input$line2 %in% df$line)
HTML(paste0("Go to TAIR for Second Line: <a href='",url.root,input$line2,"' target='_blank'>",input$line2,"</a>"))
else
paste('Line ', input$line2, " is not found in the database. Please try a different line.", sep='')
}
})
### This function builds the data behind the plots and tables: it filters to the selected phenotypes, applies the chosen correction/scaling, and optionally collapses to line means.
buildFinalData = function() {
df <- allData()
print(names(df))
df <- df[df$phenotype %in% input$phenos,]
df <- df[!is.na(df$value),] #don't mess with NAs
names(df)[which(names(df)=="phenotype")]="variable"
df$meta.experiment=df$experiment
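# Apply the user-selected correction from the adjustPhenotypes helpers
# ("phyt", "all" or "col"), then optionally rescale within experiment/facility.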
if (input$correct == "phyt")
{
df = adjustPhenotypes::phytcorrect(df, pheno=input$phenos, c("experiment","facility"), lineid='line')
print("made it past phytcorrect")
}
if (input$correct == "all")
df = adjustPhenotypes::allcorrect(df, pheno=input$phenos, c("experiment","facility"), lineid='line')
if (input$correct == "col")
df = adjustPhenotypes::colcorrect(df, pheno=input$phenos, c("experiment","facility"), lineid='line')
if (input$scale==TRUE)
df = adjustPhenotypes::scalePhenos(df, classifier=c("experiment","facility"), lineid='line')
names(df)[which(names(df)=="variable")]="phenotype"
if (input$linemeans == 'yes') { #get means per line instead of actual observations
if (input$collapse=="yes")
{
df <- df%>%group_by(line,experiment,phenotype)%>%summarise(value=mean(value,na.rm=T))
} else {
df <- df%>%group_by(line,experiment,facility,phenotype)%>%summarise(value=mean(value,na.rm=T))
}
}
df
}
# This renders the histograms
output$hist = renderPlot({
df <- buildFinalData()
linedf = df[df$line %in% focalLines(),]
lineSub <- unique(linedf[,c("experiment","phenotype")])
print(focalLines())
df <- merge(df,lineSub,all.x=F)
print(dim(df))
print(names(df))
if (input$collapse=="yes")
{
ggplot(data = df, aes(value)) +
geom_histogram() +
scale_colour_brewer(type="qual", palette=8) +
geom_vline(data = linedf, aes(xintercept = value, color = line), linetype = 'solid', show_guide = T) +
facet_wrap(~ phenotype , scales = 'free')
}
else
{
ggplot(data = df, aes(value)) +
geom_histogram() +
scale_colour_brewer(type="qual", palette=8) +
geom_vline(data = linedf, aes(xintercept = value, color = line), linetype = 'solid', show_guide = T) +
facet_wrap(~ phenotype + experiment , scales = 'free')
}
})
# This renders the interactive data table of the selected values
output$overview = renderDataTable({
df = buildFinalData()
inputLines = focalLines()
df <- df[df$line%in%inputLines,]
df <- df[!is.na(df$value),]
df
} , options = list(lengthMenu = c(50, 100, 500), pageLength = 50))
# This renders the data download link
output$downloadData <- downloadHandler(
filename = function() {
paste("linedata",Sys.Date(),".csv",sep="")
},
content = function(file) {
df = buildFinalData()
inputLines = focalLines()
df <- df[df$line%in%inputLines,]
df <- df[!is.na(df$value),]
write.csv(file=file,df)
}
)
# This renders the PDF download link -- needs some work.
output$downloadPDF <- downloadHandler(
filename = function() {
paste("linePDF",Sys.Date(),".png",sep="")
},
content = function(file) {
df = allData()
hist = buildHist(df)
device <- function(..., width, height) grDevices::png(..., width = width, height = height, res = 600, units = "in")
ggsave(file, plot = hist, device = device)
}
)
})
|
/findline/server.R
|
no_license
|
stranda/unpak-shiny
|
R
| false | false | 8,545 |
r
|
#NEW FINDLINE APP USING METHODS FROM FINEGENE
source("../global.R")
#source('adjust-pheno.R')
library(ggplot2)
dbInfo = read.table('../../dbInfo.txt')
#### Define server logic required to summarize and view the selected dataset
shinyServer(function(input, output, session) {
allData <- reactive({
cdata <- session$clientData
tmp <- strsplit(cdata$url_search,"&")[[1]]
tmp <- tmp[grep("line",tmp)]
if(length(tmp)>0) {
url_line=strsplit(tmp,"=")[[1]][2]
updateTextInput(session=session,inputId="line",value=url_line)
}
con = dbConnect(MySQL(),dbname=toString(dbInfo[[1]]),user=toString(dbInfo[[2]]),password=toString(dbInfo[[3]]))
# lines <- unique(dbGetQuery(con,"SELECT idAccession FROM Accession"))
query <- paste("SELECT O.value, Ph.name, Pl.Accession_idAccession, T.name, E.name, F.name, Pl.idIndividualPlant",
" FROM Observation O",
" JOIN IndividualPlant Pl ON O.IndividualPlant_idIndividualPlant = Pl.idIndividualPlant",
" JOIN Phenotype Ph ON O.Phenotype_idPhenotype = Ph.idPhenotype",
" JOIN Experiment E ON Pl.Experiment_idExperiment = E.idExperiment",
" JOIN Facility F ON F.idFacility = Pl.Facility_idFacility",
" JOIN Treatment T ON O.Treatment_idTreatment = T.idTreatment",
sep="")
obstbl <- dbGetQuery(con,query)
names(obstbl) <- c("value","phenotype","line","treatment","experiment","facility","individualPlant")
if (dim(obstbl)[1]>0)
{
ret <- obstbl
ret <- ret[complete.cases(ret),]
ret$line <- ifelse(ret$line%in%c("COL70000","SALK_CS60000","CS70000"),"CS70000",ret$line)
} else {
ret <- NULL
}
cons<-dbListConnections(MySQL())
for(con in cons)
dbDisconnect(con)
# allData <- melt(ret, id = c('line','experiment','treatment','facility','phenotype','individualPlant'))
# allData <- cast(allData, line+experiment+treatment+facility+individualPlant ~ phenotype)
# lineNames <- unique(allData$line)
# allData
ret
})
allLineNames <- reactive({
ln <- unique(allData()$line)
if (length(ln)>0) {ln} else {""}
})
focalLines <- reactive({
con = dbConnect(MySQL(),dbname=toString(dbInfo[[1]]),user=toString(dbInfo[[2]]),password=toString(dbInfo[[3]]))
query = paste("SELECT idAccession FROM Accession")
### WHERE Accession_idAccession='",input$line,"' or Accession_idAccession='",input$line2,"'",sep="")
acc <- unique(unlist(c(dbGetQuery(con,query))))
lines=c(input$line,input$line2)[which(c(input$line,input$line2) %in% acc)]
print("in focalLines")
print(query)
print(length(acc))
print(str(acc))
print(c(input$line,input$line2))
print(lines)
print(acc[acc=="SALK_024526C"])
print("that was the 'lines' variable")
cons<-dbListConnections(MySQL())
for(con in cons)
dbDisconnect(con)
if (length(lines)>0) {lines} else {NA}
})
poss <- function() #checks for phenotypes associated with line(s) and only gives options to examine those (used in renderUI)
{
df <- allData()
phenos <- sort(unique(c(df$phenotype[df$line %in% focalLines()])))
if (length(phenos)>0) {phenos} else {""}
}
# Renders the User interface for selecting which phenotype to use
output$phenos = renderUI({
selectizeInput("phenos", "Choose phenotype(s):",
choices = c(poss()),selected = poss()[poss()=="fruitnum"],multiple=TRUE)
})
# This gives the warning message if the line is not in the DB, or links to it if it is
output$msg1 <- renderUI({
df = allData()
url.root <- "http://arabidopsis.org/servlets/Search?type=germplasm&search_action=search&pageNum=1&search=Submit+Query&germplasm_type=individual_line&taxon=1&name_type_1=gene_name&method_1=2&name_1=&name_type_2=germplasm_phenotype&method_2=1&name_2=&name_type_3=germplasm_stock_name&method_3=4&name_3="
if(input$line %in% df$line)
HTML(paste0("Go to TAIR for First Line: <a href='",url.root,input$line,"' target='_blank'>",input$line,"</a>"))
else
paste('Line ', input$line, " is not found in the database. Please try a different line.", sep='')
})
output$msg2 <- renderUI({
if (input$line2 == '') {
' '
}
else{
df = allData()
url.root <- "http://arabidopsis.org/servlets/Search?type=germplasm&search_action=search&pageNum=1&search=Submit+Query&germplasm_type=individual_line&taxon=1&name_type_1=gene_name&method_1=2&name_1=&name_type_2=germplasm_phenotype&method_2=1&name_2=&name_type_3=germplasm_stock_name&method_3=4&name_3="
if(input$line2 %in% df$line)
HTML(paste0("Go to TAIR for Second Line: <a href='",url.root,input$line2,"' target='_blank'>",input$line2,"</a>"))
else
paste('Line ', input$line2, " is not found in the database. Please try a different line.", sep='')
}
})
### This function builds the data behind the plots and tables: it filters to the selected phenotypes, applies the chosen correction/scaling, and optionally collapses to line means.
buildFinalData = function() {
df <- allData()
print(names(df))
df <- df[df$phenotype %in% input$phenos,]
df <- df[!is.na(df$value),] #don't mess with NAs
names(df)[which(names(df)=="phenotype")]="variable"
df$meta.experiment=df$experiment
if (input$correct == "phyt")
{
df = adjustPhenotypes::phytcorrect(df, pheno=input$phenos, c("experiment","facility"), lineid='line')
print("made it past phytcorrect")
}
if (input$correct == "all")
df = adjustPhenotypes::allcorrect(df, pheno=input$phenos, c("experiment","facility"), lineid='line')
if (input$correct == "col")
df = adjustPhenotypes::colcorrect(df, pheno=input$phenos, c("experiment","facility"), lineid='line')
if (input$scale==TRUE)
df = adjustPhenotypes::scalePhenos(df, classifier=c("experiment","facility"), lineid='line')
names(df)[which(names(df)=="variable")]="phenotype"
if (input$linemeans == 'yes') { #get means per line instead of actual observations
if (input$collapse=="yes")
{
df <- df%>%group_by(line,experiment,phenotype)%>%summarise(value=mean(value,na.rm=T))
} else {
df <- df%>%group_by(line,experiment,facility,phenotype)%>%summarise(value=mean(value,na.rm=T))
}
}
df
}
# This renders the histograms
output$hist = renderPlot({
df <- buildFinalData()
linedf = df[df$line %in% focalLines(),]
lineSub <- unique(linedf[,c("experiment","phenotype")])
print(focalLines())
df <- merge(df,lineSub,all.x=F)
print(dim(df))
print(names(df))
if (input$collapse=="yes")
{
ggplot(data = df, aes(value)) +
geom_histogram() +
scale_colour_brewer(type="qual", palette=8) +
geom_vline(data = linedf, aes(xintercept = value, color = line), linetype = 'solid', show_guide = T) +
facet_wrap(~ phenotype , scales = 'free')
}
else
{
ggplot(data = df, aes(value)) +
geom_histogram() +
scale_colour_brewer(type="qual", palette=8) +
geom_vline(data = linedf, aes(xintercept = value, color = line), linetype = 'solid', show_guide = T) +
facet_wrap(~ phenotype + experiment , scales = 'free')
}
})
# This renders the interactive data table of the selected values
output$overview = renderDataTable({
df = buildFinalData()
inputLines = focalLines()
df <- df[df$line%in%inputLines,]
df <- df[!is.na(df$value),]
df
} , options = list(lengthMenu = c(50, 100, 500), pageLength = 50))
# This renders the data download link
output$downloadData <- downloadHandler(
filename = function() {
paste("linedata",Sys.Date(),".csv",sep="")
},
content = function(file) {
df = buildFinalData()
inputLines = focalLines()
df <- df[df$line%in%inputLines,]
df <- df[!is.na(df$value),]
write.csv(file=file,df)
}
)
# This renders the PDF download link -- needs some work.
output$downloadPDF <- downloadHandler(
filename = function() {
paste("linePDF",Sys.Date(),".png",sep="")
},
content = function(file) {
df = allData()
hist = buildHist(df)
device <- function(..., width, height) grDevices::png(..., width = width, height = height, res = 600, units = "in")
ggsave(file, plot = hist, device = device)
}
)
})
|
#' @export
#' @rdname sql_build
sql_optimise <- function(x, con = NULL, ...) {
UseMethod("sql_optimise")
}
#' @export
sql_optimise.sql <- function(x, con = NULL, ...) {
# Can't optimise raw SQL
x
}
#' @export
sql_optimise.query <- function(x, con = NULL, ...) {
# Default to no optimisation
x
}
#' @export
sql_optimise.select_query <- function(x, con = NULL, ...) {
if (!inherits(x$from, "select_query")) {
return(x)
}
from <- sql_optimise(x$from)
# If all outer clauses are executed after the inner clauses, we
# can drop them down a level
outer <- select_query_clauses(x)
inner <- select_query_clauses(from)
if (min(outer) > max(inner)) {
from[as.character(outer)] <- x[as.character(outer)]
from
} else {
x
}
}
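# For example, an outer select_query whose inner query only has clauses that come
# earlier in the evaluation order (e.g. WHERE inside, ORDER BY outside) collapses
# into a single query instead of a nested subquery.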
# Helpers for testing -----------------------------------------------------
#' @export
sql_optimise.tbl_sql <- function(x, con = NULL, ...) {
if (is.null(con)) {
con <- con_acquire(x$src)
on.exit(con_release(x$src, con), add = TRUE)
}
sql_optimise(sql_build(x$ops, con, ...), con = con, ...)
}
#' @export
sql_optimise.tbl_lazy <- function(x, con = NULL, ...) {
sql_optimise(sql_build(x$ops, con = NULL, ...), con = NULL, ...)
}
|
/R/sql-optimise.R
|
permissive
|
davharris/dbplyr
|
R
| false | false | 1,213 |
r
|
#' @export
#' @rdname sql_build
sql_optimise <- function(x, con = NULL, ...) {
UseMethod("sql_optimise")
}
#' @export
sql_optimise.sql <- function(x, con = NULL, ...) {
# Can't optimise raw SQL
x
}
#' @export
sql_optimise.query <- function(x, con = NULL, ...) {
# Default to no optimisation
x
}
#' @export
sql_optimise.select_query <- function(x, con = NULL, ...) {
if (!inherits(x$from, "select_query")) {
return(x)
}
from <- sql_optimise(x$from)
# If all outer clauses are executed after the inner clauses, we
# can drop them down a level
outer <- select_query_clauses(x)
inner <- select_query_clauses(from)
if (min(outer) > max(inner)) {
from[as.character(outer)] <- x[as.character(outer)]
from
} else {
x
}
}
# Helpers for testing -----------------------------------------------------
#' @export
sql_optimise.tbl_sql <- function(x, con = NULL, ...) {
if (is.null(con)) {
con <- con_acquire(x$src)
on.exit(con_release(x$src, con), add = TRUE)
}
sql_optimise(sql_build(x$ops, con, ...), con = con, ...)
}
#' @export
sql_optimise.tbl_lazy <- function(x, con = NULL, ...) {
sql_optimise(sql_build(x$ops, con = NULL, ...), con = NULL, ...)
}
|
helio.plot2 <- function (c, cv = 1, xvlab = c$xlab, yvlab = c$ylab, x.name = "X Variables",
y.name = "Y Variables", lab.cex = 1, wid.fact = 0.75, main = "Helio Plot",
sub = paste("Canonical Variate", cv, sep = ""), zero.rad = 30,
range.rad = 20, name.padding = 5, name.cex = 1.5, axis.circ = c(-1,
1), x.group = rep(0, dim(c$xstructcorr)[1]), y.group = rep(0,
dim(c$ystructcorr)[1]), type = "correlation")
{
plot.new()
plot.window(c(-100, 100), c(-100, 100))
if (type == "correlation") {
xdat <- c$xstructcorr
ydat <- c$ystructcorr
}
else if (type == "variance") {
xdat <- c$xstructcorrsq
ydat <- c$ystructcorrsq
}
else if (type == "loadings") {
xdat <- c$xcoef
ydat <- c$ycoef
}
else stop(paste("Plot type ", type, " not supported.\n",
sep = ""))
ir <- zero.rad - range.rad
mr <- zero.rad
or <- zero.rad + range.rad
nr <- zero.rad + range.rad + name.padding
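# ir/mr/or/nr: inner, middle (zero), outer and name-label radii; bars are drawn
# from the middle circle outward for positive values and inward for negative ones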
lines(c(0, 0), c(-90, 90))
lines(mr * sin(2 * pi * ((0:100)/100)), mr * cos(2 * pi *
((0:100)/100)), lty = 1)
if (!is.null(axis.circ))
for (i in 1:length(axis.circ)) lines((mr + range.rad *
axis.circ[i]) * sin(2 * pi * ((0:100)/100)), (mr +
range.rad * axis.circ[i]) * cos(2 * pi * ((0:100)/100)),
lty = 3)
text(-50, 95, label = x.name, cex = name.cex)
text(50, 95, label = y.name, cex = name.cex)
nx <- dim(xdat)[1]
ny <- dim(ydat)[1]
for (i in 1:nx) {
if (xdat[i, cv] > 0)
bcol <- 1
else bcol <- NA
bang <- (-pi/(nx + 1)) * i
binc <- pi/(max(nx, ny) + 1) * wid.fact/2
bwinc <- ir * sin(binc)
bx <- vector()
bx[1] <- mr * sin(bang) - bwinc * cos(-bang)
bx[2] <- (mr + range.rad * xdat[i, cv]) * sin(bang) -
bwinc * cos(-bang)
bx[3] <- (mr + range.rad * xdat[i, cv]) * sin(bang) +
bwinc * cos(-bang)
bx[4] <- mr * sin(bang) + bwinc * cos(-bang)
by <- vector()
by[1] <- mr * cos(bang) - bwinc * sin(-bang)
by[2] <- (mr + range.rad * xdat[i, cv]) * cos(bang) -
bwinc * sin(-bang)
by[3] <- (mr + range.rad * xdat[i, cv]) * cos(bang) +
bwinc * sin(-bang)
by[4] <- mr * cos(bang) + bwinc * sin(-bang)
polygon(bx, by, col = bcol, lty = 1)
text(nr * sin(bang), nr * cos(bang), label = xvlab[i],
srt = (3 * pi/2 - bang) * (360/(2 * pi)), pos = 2,
cex = lab.cex)
}
for (i in 1:ny) {
if (ydat[i, cv] > 0)
bcol <- 1
else bcol <- NA
bang <- (pi/(ny + 1)) * i
binc <- pi/(max(nx, ny) + 1) * wid.fact/2
bwinc <- ir * sin(binc)
bx <- vector()
bx[1] <- mr * sin(bang) - bwinc * cos(-bang)
bx[2] <- (mr + range.rad * ydat[i, cv]) * sin(bang) -
bwinc * cos(-bang)
bx[3] <- (mr + range.rad * ydat[i, cv]) * sin(bang) +
bwinc * cos(-bang)
bx[4] <- mr * sin(bang) + bwinc * cos(-bang)
by <- vector()
by[1] <- mr * cos(bang) - bwinc * sin(-bang)
by[2] <- (mr + range.rad * ydat[i, cv]) * cos(bang) -
bwinc * sin(-bang)
by[3] <- (mr + range.rad * ydat[i, cv]) * cos(bang) +
bwinc * sin(-bang)
by[4] <- mr * cos(bang) + bwinc * sin(-bang)
polygon(bx, by, col = bcol, lty = 1)
text(nr * sin(bang), nr * cos(bang), label = yvlab[i],
srt = (pi/2 - bang) * (360/(2 * pi)), pos = 4, cex = lab.cex)
}
if ((!is.null(x.group)) & (max(x.group) > 0)) {
for (i in unique(x.group)) if (i > 0) {
gvect <- (x.group %in% i) * (1:length(x.group))
gvect <- gvect[gvect > 0]
minang <- min(gvect) * (-pi/(nx + 1))
maxang <- max(gvect) * (-pi/(nx + 1))
lines(((or + nr)/2) * sin((((0:100)/100) * (maxang -
minang) + minang)), ((or + nr)/2) * cos((((0:100)/100) *
(maxang - minang) + minang)), lty = 1)
lines(c(((or + nr)/2) * sin(minang), nr * sin(minang)),
c(((or + nr)/2) * cos(minang), nr * cos(minang)),
lty = 1)
lines(c(((or + nr)/2) * sin(maxang), nr * sin(maxang)),
c(((or + nr)/2) * cos(maxang), nr * cos(maxang)),
lty = 1)
}
}
if ((!is.null(y.group)) & (max(y.group) > 0)) {
for (i in unique(y.group)) if (i > 0) {
gvect <- (y.group %in% i) * (1:length(y.group))
gvect <- gvect[gvect > 0]
minang <- min(gvect) * (pi/(ny + 1))
maxang <- max(gvect) * (pi/(ny + 1))
lines(((or + nr)/2) * sin((((0:100)/100) * (maxang -
minang) + minang)), ((or + nr)/2) * cos((((0:100)/100) *
(maxang - minang) + minang)), lty = 1)
lines(c(((or + nr)/2) * sin(minang), nr * sin(minang)),
c(((or + nr)/2) * cos(minang), nr * cos(minang)),
lty = 1)
lines(c(((or + nr)/2) * sin(maxang), nr * sin(maxang)),
c(((or + nr)/2) * cos(maxang), nr * cos(maxang)),
lty = 1)
}
}
title(main = main, sub = sub)
}
|
/helioplot2.r
|
no_license
|
gabrakadabra/Rtoolbox
|
R
| false | false | 5,310 |
r
|
helio.plot2 <- function (c, cv = 1, xvlab = c$xlab, yvlab = c$ylab, x.name = "X Variables",
y.name = "Y Variables", lab.cex = 1, wid.fact = 0.75, main = "Helio Plot",
sub = paste("Canonical Variate", cv, sep = ""), zero.rad = 30,
range.rad = 20, name.padding = 5, name.cex = 1.5, axis.circ = c(-1,
1), x.group = rep(0, dim(c$xstructcorr)[1]), y.group = rep(0,
dim(c$ystructcorr)[1]), type = "correlation")
{
plot.new()
plot.window(c(-100, 100), c(-100, 100))
if (type == "correlation") {
xdat <- c$xstructcorr
ydat <- c$ystructcorr
}
else if (type == "variance") {
xdat <- c$xstructcorrsq
ydat <- c$ystructcorrsq
}
else if (type == "loadings") {
xdat <- c$xcoef
ydat <- c$ycoef
}
else stop(paste("Plot type ", type, " not supported.\n",
sep = ""))
ir <- zero.rad - range.rad
mr <- zero.rad
or <- zero.rad + range.rad
nr <- zero.rad + range.rad + name.padding
lines(c(0, 0), c(-90, 90))
lines(mr * sin(2 * pi * ((0:100)/100)), mr * cos(2 * pi *
((0:100)/100)), lty = 1)
if (!is.null(axis.circ))
for (i in 1:length(axis.circ)) lines((mr + range.rad *
axis.circ[i]) * sin(2 * pi * ((0:100)/100)), (mr +
range.rad * axis.circ[i]) * cos(2 * pi * ((0:100)/100)),
lty = 3)
text(-50, 95, label = x.name, cex = name.cex)
text(50, 95, label = y.name, cex = name.cex)
nx <- dim(xdat)[1]
ny <- dim(ydat)[1]
for (i in 1:nx) {
if (xdat[i, cv] > 0)
bcol <- 1
else bcol <- NA
bang <- (-pi/(nx + 1)) * i
binc <- pi/(max(nx, ny) + 1) * wid.fact/2
bwinc <- ir * sin(binc)
bx <- vector()
bx[1] <- mr * sin(bang) - bwinc * cos(-bang)
bx[2] <- (mr + range.rad * xdat[i, cv]) * sin(bang) -
bwinc * cos(-bang)
bx[3] <- (mr + range.rad * xdat[i, cv]) * sin(bang) +
bwinc * cos(-bang)
bx[4] <- mr * sin(bang) + bwinc * cos(-bang)
by <- vector()
by[1] <- mr * cos(bang) - bwinc * sin(-bang)
by[2] <- (mr + range.rad * xdat[i, cv]) * cos(bang) -
bwinc * sin(-bang)
by[3] <- (mr + range.rad * xdat[i, cv]) * cos(bang) +
bwinc * sin(-bang)
by[4] <- mr * cos(bang) + bwinc * sin(-bang)
polygon(bx, by, col = bcol, lty = 1)
text(nr * sin(bang), nr * cos(bang), label = xvlab[i],
srt = (3 * pi/2 - bang) * (360/(2 * pi)), pos = 2,
cex = lab.cex)
}
for (i in 1:ny) {
if (ydat[i, cv] > 0)
bcol <- 1
else bcol <- NA
bang <- (pi/(ny + 1)) * i
binc <- pi/(max(nx, ny) + 1) * wid.fact/2
bwinc <- ir * sin(binc)
bx <- vector()
bx[1] <- mr * sin(bang) - bwinc * cos(-bang)
bx[2] <- (mr + range.rad * ydat[i, cv]) * sin(bang) -
bwinc * cos(-bang)
bx[3] <- (mr + range.rad * ydat[i, cv]) * sin(bang) +
bwinc * cos(-bang)
bx[4] <- mr * sin(bang) + bwinc * cos(-bang)
by <- vector()
by[1] <- mr * cos(bang) - bwinc * sin(-bang)
by[2] <- (mr + range.rad * ydat[i, cv]) * cos(bang) -
bwinc * sin(-bang)
by[3] <- (mr + range.rad * ydat[i, cv]) * cos(bang) +
bwinc * sin(-bang)
by[4] <- mr * cos(bang) + bwinc * sin(-bang)
polygon(bx, by, col = bcol, lty = 1)
text(nr * sin(bang), nr * cos(bang), label = yvlab[i],
srt = (pi/2 - bang) * (360/(2 * pi)), pos = 4, cex = lab.cex)
}
if ((!is.null(x.group)) & (max(x.group) > 0)) {
for (i in unique(x.group)) if (i > 0) {
gvect <- (x.group %in% i) * (1:length(x.group))
gvect <- gvect[gvect > 0]
minang <- min(gvect) * (-pi/(nx + 1))
maxang <- max(gvect) * (-pi/(nx + 1))
lines(((or + nr)/2) * sin((((0:100)/100) * (maxang -
minang) + minang)), ((or + nr)/2) * cos((((0:100)/100) *
(maxang - minang) + minang)), lty = 1)
lines(c(((or + nr)/2) * sin(minang), nr * sin(minang)),
c(((or + nr)/2) * cos(minang), nr * cos(minang)),
lty = 1)
lines(c(((or + nr)/2) * sin(maxang), nr * sin(maxang)),
c(((or + nr)/2) * cos(maxang), nr * cos(maxang)),
lty = 1)
}
}
if ((!is.null(y.group)) & (max(y.group) > 0)) {
for (i in unique(y.group)) if (i > 0) {
gvect <- (y.group %in% i) * (1:length(y.group))
gvect <- gvect[gvect > 0]
minang <- min(gvect) * (pi/(ny + 1))
maxang <- max(gvect) * (pi/(ny + 1))
lines(((or + nr)/2) * sin((((0:100)/100) * (maxang -
minang) + minang)), ((or + nr)/2) * cos((((0:100)/100) *
(maxang - minang) + minang)), lty = 1)
lines(c(((or + nr)/2) * sin(minang), nr * sin(minang)),
c(((or + nr)/2) * cos(minang), nr * cos(minang)),
lty = 1)
lines(c(((or + nr)/2) * sin(maxang), nr * sin(maxang)),
c(((or + nr)/2) * cos(maxang), nr * cos(maxang)),
lty = 1)
}
}
title(main = main, sub = sub)
}
|
context("Test API response columns are renamed correctly")
unnamed_data <- read.table(system.file("extdata", "example_api_data_unnamed.txt", package = "scrobbler"), header = TRUE)
test_renamed_data <- rename_api_response(unnamed_data)
verified_renamed_data <- read.table(system.file("extdata", "example_api_data_renamed.txt", package = "scrobbler"), header = TRUE)
test_that("Rename API table is successful", {
expect_equal(test_renamed_data, verified_renamed_data)
})
|
/tests/testthat/test_df_col_renames.R
|
no_license
|
condwanaland/scrobbler
|
R
| false | false | 475 |
r
|
context("Test API response columns are renamed correctly")
unnamed_data <- read.table(system.file("extdata", "example_api_data_unnamed.txt", package = "scrobbler"), header = TRUE)
test_renamed_data <- rename_api_response(unnamed_data)
verified_renamed_data <- read.table(system.file("extdata", "example_api_data_renamed.txt", package = "scrobbler"), header = TRUE)
test_that("Rename API table is successful", {
expect_equal(test_renamed_data, verified_renamed_data)
})
|
cleanData<-function(){
traindata <- read.table("./train/X_train.txt")
traindata_labels <- read.table("./train/y_train.txt")
subject_train <- read.table("./train/subject_train.txt") # Loading the Training data and also subject and activity labels
testdata <- read.table("./test/X_test.txt")
testdata_labels <- read.table("./test/y_test.txt")
subject_test <- read.table("./test/subject_test.txt") # Loading the Testing data and also subject and activity labels
myData <- rbind(traindata,testdata) #merging training and test measurements
myLabels <- rbind(traindata_labels,testdata_labels) # merging training labels and testing labels
mySubject <- rbind(subject_train,subject_test) # merging training subject id and testing subject id
variables <- read.table("./features.txt") #reading the feature names
variable_names <- variables$V2
colnames(myData) <- variable_names
cut_mean_std <- grep("[Mm][Ee][Aa][Nn]|[Ss][Tt][Dd]"
,colnames(myData)) # Extracting only the mean and std measurements
myData_meanstd <- myData[,cut_mean_std]
names <- colnames(myData_meanstd)
names = gsub("tBodyAcc","linearaccbody(Time)",names)
names = gsub("tGravityAcc","gravityaccbody(Time)",names)
names = gsub("Mag","magnitude",names)
names = gsub("tBodyGyro","angvelocitybody(Time)",names) # Renaming variable names for better understanding
names = gsub("fBodyAcc","linearaccbody(Frequency)",names)
names = gsub("fBodyGyro","angvelocitybody(Frequency)",names)
names = gsub("fBodyBodyGyro","BodyBodyangvelocity(Frequency)",names)
names = gsub("fBodyBodyAcc","BodyBodylinearacc(Frequency)",names)
colnames(myData_meanstd)= names
cleaned_data <- myData_meanstd
cleaned_data$"Activity Label" <- myLabels
cleaned_data$"Subject No." <- mySubject
#cleaned_data = cleaned_data[,1:88]
return (cleaned_data)
}
createDataset<-function(dataset){
col_names = colnames(dataset)
data_split <- split(dataset,dataset$"Subject No.") #Split Dataset according to Subject
func <- function(data=data_split){
m <- split(data,data$"Activity Label")
l <- sapply(m,colMeans) #function for taking mean of each activity
return (l)}
gdata <- sapply(data_split,func) #calculating mean for each activity for each subject
for (i in 1:30){
n <- rbind(gdata[1:88,i],gdata[89:176,i],gdata[177:264,i],gdata[265:352,i],gdata[353:440,i],gdata[441:528,i])
#creating variable for storing data for each subject
assign(paste0("n",i),n)
}
new_dataset <- rbind(n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11,n12,n13,n14,n15,n16,n17,n18,n19,n20,n21,n22,n23,n24,n25,n26,n27,n28,n29,n30)
  new_dataset <- data.frame(new_dataset) #merging data of all subjects to create one dataset
colnames(new_dataset) <- col_names
activity_lables <- c("SUBJECT IS WALKING ON LEVELLED SURFACE",
"SUBJECT IS WALKING UPHILL OR UPSTAIRS",
"SUBJECT IS WALKING DOWNHILL OR DOWNSTAIRS", #Descriptive activity labels
"SUBJECT IS SITTING",
"SUBJECT IS JUST STANDING",
"SUBJECT IS LAYING OR RESTING")
new_dataset$"Activity Label Description" <- new_dataset$"Activity Label"
for (i in 1:6){
d_labels <- gsub(i,activity_lables[i],new_dataset$"Activity Label Description")
new_dataset$"Activity Label Description" <- d_labels
}
#new_dataset$"Activity Label" <- descriptive_labels
write.csv(new_dataset,"./tidy_dataset.csv")
write.table(new_dataset,"./tidy_dataset.txt",row.names = FALSE) #writing data to text file
}
|
/runAnalysis.R
|
no_license
|
ShantamGautam/Getting-and-Cleaning-Data-Course-Project
|
R
| false | false | 3,812 |
r
|
pkgs <- c("data.table", "forecast", "tseries", "kableExtra", "reshape2", "InfoTrad", "mFilter", "tidyverse", "qrmtools", "tidyquant",
"stargazer", "qrmtools", "gridExtra", "tbl2xts", "highfrequency", "pinbasic")
# install.packages(pkgs) #install
pkgs <- c(pkgs, "timeDate", "readxl")
sapply(pkgs, library, character.only = T)
pin_names <- file.info(list.files
(path = getwd(),
pattern = "[0-9]{4}.csv",
full.names = T))
pin_files <- rownames(pin_names)
sell_list <- vector("list", length = nrow(pin_names))
i <- 1
code_list <- vector("list", length = 11*252)
r <- 1
for (pf in pin_files) {
pin_data <- read.csv(pf)
LBD <- unique(pin_data[, "LogicalBusinessDay", drop = T])
LBD <- as.character(LBD)
for (dates in LBD) {
sells_only <- pin_data %>%
filter(BuySellIndicator == "S") %>%
filter(LogicalBusinessDay == dates)
code <- t(sells_only[, 1, drop = F])
code <- code[1, ]
code_list[[r]] <- code
r <- r + 1
}
}
dfs <- ""
for (df in code_list) {
dfs <- c(dfs, df)
}
tickers <- unique(dfs)
sells <- matrix(NA, nrow = 252 * 11, ncol = length(tickers))
sells <- data.frame(sells)
colnames(sells) <- tickers
trade_dates <- matrix(NA, nrow = 252 * 11, ncol = 1)
colnames(trade_dates) <- "Date"
sells <- cbind(trade_dates, sells)
r <- 1
for (pf in pin_files) {
pin_data <- read.csv(pf)
LBD <- unique(pin_data[, "LogicalBusinessDay", drop = T])
LBD <- as.character(LBD)
for (dates in LBD) {
sells_only <- pin_data %>%
filter(BuySellIndicator == "S") %>%
filter(LogicalBusinessDay == dates)
temp <- t(sells_only[, 1, drop = F])
if(nchar(temp[1])==0){
temp[1] <- colnames(sells)[2]
}
sells[r, temp] <- t(sells_only[, "CountOfTrades", drop = F])
sells[r, 1] <- dates
r <- r + 1
}
}
code_file <- sub("Annual Pins", "pinestimation", paste0(getwd(), "/", "ALL COMPANIES.xlsx"))
codes <- read_xlsx(code_file, sheet = "Sheet2")[1]
codes <- t(codes)
test <- codes %in% colnames(sells)
companies <- codes[test==TRUE]
final_data <- sells[, companies, drop = F]
final_data <- cbind(sells[, 1, drop = FALSE], final_data)
final_data <- final_data[1:2749, , drop = FALSE]
write.csv(final_data, file = "sells.csv", row.names = FALSE)
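# A small sanity-check sketch before re-running the loops above: each yearly
# "[0-9]{4}.csv" file is assumed to expose the three columns relied on here
# (LogicalBusinessDay, BuySellIndicator, CountOfTrades); uncomment to verify.
# stopifnot(all(c("LogicalBusinessDay", "BuySellIndicator", "CountOfTrades") %in%
#                 colnames(read.csv(pin_files[1]))))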
/Data/Sell Data.R | no_license | mrRlover/PIN-Estimation | R | 2,376 bytes
/Mineração & Siderurgia e Papel & Celulose X Ibovespa.R | no_license | lourencotadeu/Minera-oSiderurgia-PapelCelulose-Ibovespa | R | 1,829 bytes (file content not captured in this dump)
####DATI E PACCHETTI####
library("here")
source(here("ANALYSIS", "scripts", 'pacchetti.r'))#<-carica le librerie
source(here("ANALYSIS", "scripts",'funzioni.r'))#<-carica funzioni in-built
source(here("ANALYSIS", "scripts",'dataset.r'))#<-carica lo script di preparazione dei dataset da usare per le analisi
options(mc.cores = parallel::detectCores())
#QUADRO GENERALE####
#Territorio####
###numero comuni da cui provengono i campioni
AMR_com %>%
filter(x=="TRUE") %>%
group_by(comune) %>%
tally()%>%
dim()
####MAPPA CAMPIONAMENTI (fig.1)
#(uso i dati con tutti i comuni da cui è
#### stato fatto il campionamento non quello con i dati dell'istat che sono di meno...###
source('mappe.r')#<-carica le mappe
AMR_com<-AMR_com %>%
drop_na(identificazione) %>%
group_by(comune)%>%
dplyr::summarise("n"=n()) %>%
as.data.frame()
com<-subset(comuni, comuni@data$NOME_COM %in% AMR_com$comune)
com@data<-merge(com@data, AMR_com, by.x = "NOME_COM", by.y = "comune")
mybins=c(1,5,10,15,20,30)
mypalette = colorBin( palette="OrRd",
domain=com@data$n, na.color="transparent", bins=mybins, reverse = F)
map1<-leaflet(data=regione) %>% addTiles() %>%
addPolygons(data=com,
fillColor = ~mypalette(n),
fillOpacity = 0.9,stroke=FALSE, weight = 10) %>%
addLegend( pal=mypalette, values= com@data$n, opacity=0.9, title = "N.Campioni", position = "bottomleft" )%>%
addPolygons(data=province, fill=F, color="") %>%
addPolygons(data=prov, fill=F, color="blue", weight=1, opacity=1) %>%
addPolygons(data=com, fill=F, color="black", weight=1, opacity=1)
# %>% x aggiungere il titolo per presentazione....
# addControl(title, position = "topleft", className="map-title")
mapshot(map1, file = "map1.png")
############CARATTERISTICHE DEL TERRITORIO
##tabella 1##
options(digits = 3)
AMR_istat %>%
select(sup, `denpop(abkmq)`,mediana,hapasc,aziende, capi ) %>% # select variables to summarise
summarise_each(funs(min = min(.,na.rm=T),
q25 = quantile(., 0.25, na.rm = T),
median = median(.,na.rm=T),
q75 = quantile(., 0.75, na.rm=T),
max = max(.,na.rm=T),
mean = mean(.,na.rm=T),
sd = sd(.,na.rm=T)))%>%
gather(stat, val) %>%
separate(stat, into = c("var", "stat"), sep = "_") %>%
spread(stat, val) %>%
select(var, min, q25, median, q75, max, mean, sd) %>%
mutate("ord"=c(5,6,2,4,3,1)) %>%
mutate("Parametri"= plyr::revalue(var, c("aziende"="Aziende con pascolo",
"capi"="Capi al pascolo", "mediana"="Altitudine mediana", "sup"="Superficie (Kmq)",
`denpop(abkmq)`="Densità di popolazione (Ab/Kmq)",
"hapasc"="Superficie al pascolo (ettari)"
))) %>%
arrange(ord) %>%
select(Parametri, 2:8) %>%
kable("latex", booktabs = TRUE, caption = "Caratterizzazione territoriale e demografica dei comuni di provenienza dei campioni") %>%
kable_styling()
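# summarise_each()/funs() are deprecated in current dplyr releases; a sketch of the same
# per-variable summary with the modern across() interface (same columns and statistics):
# AMR_istat %>%
#   select(sup, `denpop(abkmq)`, mediana, hapasc, aziende, capi) %>%
#   summarise(across(everything(),
#                    list(min = ~min(., na.rm = TRUE),
#                         q25 = ~quantile(., 0.25, na.rm = TRUE),
#                         median = ~median(., na.rm = TRUE),
#                         q75 = ~quantile(., 0.75, na.rm = TRUE),
#                         max = ~max(., na.rm = TRUE),
#                         mean = ~mean(., na.rm = TRUE),
#                         sd = ~sd(., na.rm = TRUE))))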
AMR_istat %>%
filter(x=="TRUE") %>%
group_by(urb) %>%
summarise(n())
AMR_istat %>%
filter(x=="TRUE") %>%
group_by(montano) %>%
summarise(n()) %>%
adorn_totals()
###Fauna Selvatica#####
#numero campioni di feci
AMR_com %>%
filter(x=="TRUE") %>%
group_by(IDcamp) %>%
tally() %>% summarise(n=sum(n))
#numero di specie
length(unique(as.character(AMR$SPECIE)))##there is an NA to drop....
#tabella 2
AMR_com%>%
filter(x=="TRUE") %>%
group_by("Gruppo"=Specieagg, SPECIE) %>%
summarise(n=n()) %>%
mutate(prop=round(100*(n/670), 2)) %>%
adorn_totals(where = "row") %>%
kable("latex",caption = "Distribuzione del numero di campioni di feci in base alle differenti specie di fauna selvatica di origine, raggruppati per il gruppo-specie di appartenenza", booktabs = T, longtable = T) %>%
kable_styling(latex_options = c("repeat_header"))
#tabella 2b<-campioni by montanità####
AMR_istat %>%
mutate(urb=ifelse(urb==1, "densPop alta",
ifelse(urb==2,"densPop media", "densPop scarsa" ))) %>%
filter(x=="TRUE") %>%
group_by(Specieagg, montano) %>%
summarise(N=n()) %>%
pivot_wider(names_from = montano, values_from = N, values_fill = list(N=0)) %>%
adorn_totals(where = "col") %>%
adorn_totals(where = "row") %>%
kable("latex", booktabs = TRUE, caption = "Distribuzione del numero di campioni di feci in base alle caratteristiche territoriali di provenienza per i differenti gruppo-specie di appartenenza (NM= Non Montani, P= Parzialmente Montani, T= Totalmente Montani)") %>%
kable_styling()
#tabella 2c<-campioni by urbanizzazione
AMR_istat %>%
mutate(urb=ifelse(urb==1, "densPop alta",
ifelse(urb==2,"densPop media", "densPop scarsa" ))) %>%
filter(x=="TRUE") %>%
group_by(Specieagg, urb) %>%
summarise(N=n()) %>%
pivot_wider(names_from = urb, values_from = N, values_fill = list(N=0)) %>%
adorn_totals(where = "col") %>%
adorn_totals(where = "row") %>%
select("Gruppo Specie"=Specieagg, "UA"= "densPop alta", "UM" = "densPop media", "UB" = "densPop scarsa", "Totale"= Total) %>%
kable("latex", caption = "Distribuzione del numero di campioni di feci in base al grado di urbanizzazione dei comuni di provenienza per i differenti gruppo-specie di appartenenza (UA= Urbanizzazione ALta, UM= Urbanizzazione Media, UB= Urbanizzazione Bassa)",
booktabs = TRUE) %>%
kable_styling()
##Esami Microbiologici#####
options(knitr.kable.NA = 0)
#tabella 3
AMR %>%
group_by(identificazione) %>%
filter(identificazione!="Non identificabile") %>%
tally() %>% arrange(desc(n)) %>%
mutate("prop(%)"=round(100*prop.table(n),2)) %>%
adorn_totals(where = c("row")) %>%
kable("latex", caption = "Distribuzione del numero e proprorzione di ceppi della famiglia Enterobacteriacee suddivise per genere, isolati dai 670 campioni di feci analizzati",
booktabs = TRUE, longtable = T) %>%
kable_styling(latex_options = c("repeat_header"))
##Antibiogrammi####
ab<-AMR %>%
filter(identificazione!="Non identificabile") %>%
dplyr::select(-15,-18,-19)
ab[,13:19]<-apply(ab[,13:19], 2 , funz)
ab<-ab %>%
mutate(MDR = rowSums(.[13:19]))###antibiogram length###
ab$R<- ifelse(ab$MDR==0, 0, 1)
ab$MR<-ifelse(ab$MDR==0, "S",
ifelse(ab$MDR>=1 & ab$MDR<3, "R", "MR"))
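# funz() is sourced from funzioni.r (not shown here); judging from the commented-out
# definition further down in this script, it presumably recodes the antibiogram letters
# so that "R" -> 1 and "S" -> 0:
# funz <- function(x) {
#   abs(as.numeric(as.factor(x)) - 2)   # levels "R","S" -> 1,2 -> |1-2| = 1, |2-2| = 0
# }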
#figura 2-ab length
ab %>%
drop_na(MDR) %>%
ggplot(aes(x=as.factor(MDR)))+geom_bar(aes(fill=MR))+
labs(x="numero di resistenze al panel di antibiotici",
y="numero ceppi")+
theme_ipsum_rc()+ theme(legend.title = element_blank())+
scale_fill_brewer(labels = c("Ceppi multiresistenti", "Ceppi Resistenti", "Ceppi Suscettibili"), direction = -1)
#PREVALENZA DI WILD ANIMALS PORTATORI DI R E MR - ALL DATA 671 campioni..####
##....
#data aggregated per faecal sample, to estimate the prevalence of animals carrying
#at least one strain resistant to at least one antibiotic
#steps: 1. recode each antibiotic result to 1/0 (1 = R, 0 = S)
#       2. sum those values for each strain to get the number of resistances per strain
#       3. create a variable R flagging whether the strain carries at least one resistance
#       4. aggregate the dataset by IDcamp and sum the R values: values >= 1
#          mean the sample contained at least one strain with at least one resistance
#       5. from the original database, prepare a dataset excluding strains without identification
#       6. join the two data frames (which must have the same number of rows) on the shared
#          IDcamp variable and create a variable RSelv equal to S when Res is 0 and R when
#          Res >= 1. The resulting data frame, Prev, holds the data used to analyse the
#          prevalence of animals carrying resistant strains...
#step1
Psel<-AMR %>%
# filter(x=="TRUE") %>%
filter(identificazione!="Non identificabile") %>%
dplyr::select(-15,-18,-19)
Psel[,13:19]<-apply(Psel[,13:19], 2 , funz)
#step2
Psel<-Psel %>%
mutate(MDR = rowSums(.[13:19]))###antibiogram length###
#step3
Psel$R<- ifelse(Psel$MDR==0, 0, 1)
#step4
Psel<-Psel %>%
drop_na(R) %>%
group_by(IDcamp)%>%
summarise(Res=sum(R))
#step5
pAMR<-AMR %>%
filter(x=="TRUE") %>%
filter(identificazione!="Non identificabile")
#step6
Prev<-pAMR %>%
left_join(Psel) %>%
mutate(RSelv=ifelse(Res==0,"S","R"))
## prevalenza resistenza in specie...####
options(digits=2)
mr<-Prev%>%
select(Specieagg, RSelv)%>%
group_by(Specieagg, RSelv) %>%
#drop_na(RSelv) %>%
tally() %>%
pivot_wider(names_from = RSelv, values_from = n, values_fill = list(n = 0)) %>%
data.frame() %>%
mutate(N=rowSums(.[2:3],na.rm = TRUE))
options(digits=2)
Rhpd <- binom.bayes(
x = mr$R, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8]) %>%
# arrange(desc(mean))
Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd)
R <- Rhpd %>%
arrange(desc(mean)) %>%
mutate(Specieagg = unique(factor(Specieagg)))
R %>%
select("Gruppo Specie" = Specieagg, -method, "R"=x, "N"=n, "Prevalenza"=mean, "inf-HPD"=lower, "sup-HPD"=upper, -shape1, -shape2, -sig) %>%
kable("latex", booktabs = T,
caption = "Stime della prevalenza di campioni resistenti suddivisi per gruppo-specie: R= numero di campioni resistenti, N= numero campioni esaminati, Prevalenza = media della distribuzione beta, inf-HPD= valore inferiore dell'intervallo HPD, sup-HPD = valore superiore dell'intervallo HPD") %>%
kable_styling()
z <- binom.bayes.densityplot(R)
z+facet_wrap(~Specieagg)+xlab("Prevalenza")
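# binom.bayes() (binom package) puts a Beta prior on each prevalence; assuming the
# package default Jeffreys prior Beta(0.5, 0.5) (not set explicitly above), the
# posterior for x resistant samples out of n is Beta(x + 0.5, n - x + 0.5) and the
# interval reported with type = "highest" is the 95% HPD region. Rough hand check
# for the first group (equal-tailed, so the bounds differ slightly from the HPD):
# (mr$R[1] + 0.5) / (mr$N[1] + 1)                                  # posterior mean
# qbeta(c(0.025, 0.975), mr$R[1] + 0.5, mr$N[1] - mr$R[1] + 0.5)   # credible interval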
#grafico bayesian density (figura 3)
# prev <-Prev %>%
# mutate(prev=ifelse(RSelv=="S", 0, 1))
#
# modn <- stan_glm(prev~ 1, data=prev,family=binomial(link="logit"))
# modp<-stan_glm(prev~ Specieagg, data=prev,family=binomial(link="logit"))
#
# t<-emmeans(modp, ~Specieagg)
#
# t %>% as.data.frame() %>%
# mutate(emmean=invlogit(emmean),
# lower.HPD = invlogit(lower.HPD),
# upper.HPD = invlogit(upper.HPD)) %>%
# select("Gruppo-Specie" = Specieagg, "Prevalenza"=emmean, "HPD-inf"= lower.HPD, "HPD-sup"= upper.HPD ) %>%
# arrange(desc(Prevalenza)) %>%
# kable("latex" ) %>%
# kable_styling()
# mod<-glm(prev~ Specieagg, data=prev,family=binomial(link="logit"))
# p<-gather_emmeans_draws(t)
# p %>%
# mutate("prev"=invlogit(.value))%>%
# group_by(Specieagg) %>%
# summarise(m=mean(prev),
# sd=sd(prev))
# ggplot(aes(x = prev, y=Specieagg,fill = Specieagg)) +
# geom_density_ridges(panel_scaling=FALSE)+
# theme_ridges()+
# scale_fill_brewer(palette = 7) +
# theme_ridges() + theme(legend.position = "NULL")+labs(x="Prevalenza",
# y="")
#prevalenza multi-resistenza###################
mPsel<-AMR %>%
filter(identificazione!="Non identificabile") %>%
dplyr::select(-15,-18,-19)
mPsel[,13:19]<-apply(mPsel[,13:19], 2 , funz)
#step2
mPsel<-mPsel %>%
mutate(MDR = rowSums(.[13:19]))###antibiogram length###
#step3
mPsel$MR<- ifelse(mPsel$MDR==0 |mPsel$MDR<=2, 0, 1)
mPsel$R<- ifelse(mPsel$MDR==0, 0, 1)
mPsel<-mPsel %>%
drop_na(R) %>%
drop_na(MR) %>%
group_by(IDcamp)%>%
summarise(Res=sum(R),
MRes=sum(MR))
#step5
pAMR<-AMR %>%
filter(x=="TRUE") %>%
filter(identificazione!="Non identificabile")
#step6
mPrev<-pAMR %>%
left_join(mPsel) %>%
mutate(MRSelv=ifelse(MRes==0,"Sr","MR"))
binom.bayes(
x = 92, n = 670)
options(digits=2)
Mr<-mPrev%>%
select(Specieagg, MRSelv)%>%
group_by(Specieagg, MRSelv) %>%
drop_na(MRSelv) %>%
tally() %>%
pivot_wider(names_from = MRSelv, values_from = n, values_fill = list(n = 0)) %>%
data.frame() %>%
mutate(N=rowSums(.[2:3],na.rm = TRUE))
options(digits=2)
MRhpd <- binom.bayes(
x = Mr$MR, n = Mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
MRhpd<- cbind("Specieagg"=Mr[, 1], MRhpd)
MR <- MRhpd %>%
arrange(desc(mean)) %>%
mutate(Specieagg = unique(factor(Specieagg)))
MR %>%
select(-method, "MR"=x, "N"=n, "Prevalenza"=mean, "inf-HPD"=lower, "sup-HPD"=upper, -shape1, -shape2, -sig) %>%
kable("latex", caption = "Stime della prevalenza di campioni multi-resistenti suddivisi per gruppo-specie: MR= numero di campioni resistenti, N= numero campioni esaminati, Prevalenza = media della distribuzione beta, inf-HPD= valore inferiore dell'intervallo HPD, sup-HPD = valore superiore dell'intervallo HPD)",
booktabs = T) %>%
kable_styling()
z <- binom.bayes.densityplot(MR)
z+facet_wrap(~Specieagg)+xlab("Prevalenza")
#bayesian density
#grafico bayesian density (figura 3)
# mprev <-mPrev %>%
# mutate(prev=ifelse(MRSelv=="Sr", 0, 1))
#
# modp<-stan_glm(prev~ Specieagg, data=mprev,family=binomial(link="logit"))
#
# t<-emmeans(modp, ~Specieagg)
# p<-gather_emmeans_draws(t)
#
# p %>%
# mutate("prev"=logit2prob(.value))%>%
# ggplot(aes(x = prev, y=Specieagg,fill = Specieagg)) +
# geom_density_ridges(panel_scaling=TRUE)+
# theme_ridges()+
# scale_fill_brewer(palette = 7) +
# theme_ridges() + theme(legend.position = "NULL")+labs(x="Prevalenza MR Intervalli di Credibilità Bayesiani 95%",y="")
#
#PREVALENZA R E MR IN WILDLIFE BY TERRITORIO E SPECIE####
#USO DATASET ComAMRsel 640 RIGHE
#-prevalenza R by URB
RbyT<-ComAMRsel %>%
mutate(urb=ifelse(urb==1, "densPop alta",
ifelse(urb==2,"densPop media", "densPop scarsa" ))) %>%
drop_na(Res) %>%
select(Specieagg, montano, Res, MRes) %>%
group_by(Specieagg, montano, Res) %>%
summarise(n=n()) %>%
pivot_wider(names_from = Res, values_from = n, values_fill = list(n=0)) %>%
mutate(N=S+R) %>%
data.frame()
hpd <- binom.bayes(
x = RbyT$R, n = RbyT$N, type = "highest", conf.level = 0.95, tol = 1e-9)
Rhpd<- cbind("Specieagg"=RbyT[, 1], hpd[,6:8])
Rhpd<-cbind(RbyT, Rhpd[,-1])
#Bayesian multilevel model R~(1|comune)+ Specie+ pascolo+ urbanizzazione####
#code in the bayesmod.R file
#CARATTERIZZAZIONE FENOTIPICA DELL'ANTIBIOTICO-RESISTENZA DEGLI ISOLATI BATTERICI####
#Antibiotico-resistenza byAB <- tabella 4####
z <-AMR %>%
filter(identificazione!="Non identificabile") %>%
select(Specieagg,identificazione, 13,14,16,17,20:22) %>%
pivot_longer(cols=3:9, names_to = "antibiotico") %>%
group_by(antibiotico,value) %>%
drop_na(value) %>%
tally() %>%
pivot_wider(names_from = value,values_from = n) %>%
mutate("N"=R+S,
"prop" = round(100*(R/N),2)) %>%
arrange(desc(prop))
options(digits=2)
Rhpd <- binom.bayes(
x = z$R, n = z$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8]) %>%
# arrange(desc(mean))
Rhpd<- cbind("Antibiotico"=z[, 1], Rhpd[])
R <- Rhpd[-8,] %>%
arrange(desc(mean)) %>%
mutate(Antibiotico = unique(factor(antibiotico)))
R %>%
select(Antibiotico,-method, "R"=x, "N"=n, "Prevalenza"=mean, "inf-HPD"=lower, "sup-HPD"=upper, -shape1, -shape2, -sig, -antibiotico) %>%
kable("latex", caption = "Stime bayesiane della prevalenza di ceppi resistenti ai differenti antibiotici", booktabs = TRUE) %>%
kable_styling()
pz <- binom.bayes.densityplot(R, fill.central = "steelblue", fill.lower = "steelblue",
alpha = 1.2) +facet_wrap(~Antibiotico)
#--Bayesian posterior prevalence of seven antibiotic-resistence phenotype
#--figura 5
dt<-AMR %>%
select(Specieagg,identificazione, 13,14,16,17,20:22) %>%
na.omit()
dt[,3:9]<-apply(dt[,3:9], 2 , funz)
AB<-data.frame(dt[,3:9])
my_lms <- lapply(1:7, function(x) stan_glm(AB[,x] ~ Specieagg, data=dt,
family=binomial(link="logit")))
t<- lapply(1:7, function(x) emmeans(my_lms[[x]], ~Specieagg))
#t2<-lapply(1:7, function(x) contrast(t[[x]], method="eff"))
z<- lapply(1:7, function(x) gather_emmeans_draws(t[[x]]))
bigdf<-do.call(rbind, z)
ab<-c(rep("COL", 36000), rep("CFT", 36000), rep("KAN", 36000), rep("ENR",36000),
rep("GEN", 36000), rep("TET",36000), rep("AMP",36000))
bigdf<-cbind(bigdf, "ab"=ab)
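# The hard-coded 36000 above is an assumption tied to the sampler defaults: rstanarm's
# stan_glm() keeps 4 chains x 1000 post-warmup iterations = 4000 posterior draws, and
# gather_emmeans_draws() returns one row per draw for each of the 9 Specieagg levels
# (4000 x 9 = 36000); if iter/chains are changed, these rep() lengths must change too.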
bigdf <- bigdf %>%
mutate(ab = factor(ab, levels = c("AMP", "TET","CFT", "COL", "ENR", "KAN", "GEN")))
bigfg<- bigdf %>%
ggplot(aes(x = logit2prob(.value), y=Specieagg,fill = Specieagg)) +geom_density_ridges(panel_scaling=TRUE)+
theme_ridges()+facet_wrap(~ab)+
scale_fill_brewer(palette = "Blues") +
theme_ridges() + theme(legend.position = "NULL")+labs(x="Prevalenza", y="")
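# logit2prob() comes from funzioni.r (not shown); it is presumably the standard
# inverse-logit used to map the emmeans draws back to the probability scale,
# i.e. equivalent to plogis():
# logit2prob <- function(l) exp(l) / (1 + exp(l))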
#tabella 4
bigdf %>%
group_by(Specieagg,ab) %>%
summarise(prev=round(mean(logit2prob(.value)),2)) %>%
pivot_wider(names_from = ab, values_from = "prev") %>%
kable("latex", booktabs = T, caption = "Profilo di resistenza dei ceppi ai diversi antibiotici per gruppo-specie di provevienza del campione di feci") %>%
kable_styling()
#Antibiotico-resistenza by genere <-tabella 5####
tot<-AMR %>%
select(identificazione, 13,14,16,17,20:22) %>%
pivot_longer(cols=2:8, names_to = "antibiotico") %>%
group_by(identificazione,antibiotico) %>%
tally(name="tot")
res<-AMR %>%
select(identificazione, 13,14,16,17,20:22) %>%
pivot_longer(cols=2:8, names_to = "antibiotico") %>%
group_by(identificazione, antibiotico) %>%
filter(value=='R') %>%
tally(name="res")
tot %>% full_join(res)%>%
replace_na(list(res=0)) %>%
filter(identificazione!="Non identificabile") %>%
mutate("%R"=(res/tot)*100) %>%
select(-res) %>%
pivot_wider(names_from = antibiotico, values_from = `%R`) %>%
arrange(desc(tot)) %>%
select("Genere" = identificazione, "N.ceppi" = tot, AMP, TET, CFT, COL, ENR, KAN, GEN, ) %>%
kable("latex", digits = 2, booktabs= T, caption = "Profilo di antibiotico-resistenza tra i diversi generi dei ceppi isolati") %>%
kable_styling()
#Profilo di multiresistenza####
amr %>%
filter(profilo!="SUSC") %>%
group_by(profilo) %>%
dplyr::summarise(n=n()) %>%
arrange(n) %>%
#top_n(10, n) %>%
mutate(profilo = factor(profilo, unique(profilo))) %>%
#ggplot(aes(x=profilo, y=n))+geom_bar(stat = "identity")+coord_flip()
ggplot(aes(x=profilo, y=n, label=n))+
geom_segment( aes(x=profilo, xend=profilo, y=0, yend=n), color="grey")+
geom_point( aes(x=profilo, y=n), size=8.4, color="steelblue" )+
geom_text(color="white", size=4)+
coord_flip()+
theme_ipsum_rc()+
labs(y="n.ceppi",x="")
#prevalenza ceppi ABr e MAbr per gruppo-specie di provenienza del campione
####Biodiversità<---fig.6####
amr %>%
group_by(Specieagg,profilo) %>%
dplyr::summarise(n=n()) %>%
ggplot( aes(Specieagg,profilo), label=n) +
geom_tile(aes(fill = n)) +
geom_text(aes(label = n), size=4) +
scale_fill_gradient(low = "gray", high = "red")+
#scale_fill_gradient(low = "lightgrey",high = "steelblue")+
scale_x_discrete(expand = c(0, 0)) + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Gruppo Specie")+
theme(legend.position = "bottom",axis.ticks = element_blank(),axis.text.x = element_text(angle = 90, hjust = 1,size=8),axis.text.y = element_text(size=8))
profili<-amr %>%
group_by(Specieagg,profilo) %>%
dplyr::summarise(n=n()) %>%
pivot_wider(names_from = profilo, values_from = n, values_fill=list(n = 0)) %>%
data.frame()
renyis<-renyi(profili[-1], hill=TRUE)
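# renyi() (vegan) with hill = TRUE returns Hill numbers (effective numbers of resistance
# profiles) rather than raw entropies: for relative abundances p_i and scale a,
# H_a = log(sum(p_i^a)) / (1 - a) and the Hill number is exp(H_a); a = 0 gives profile
# richness, a -> 1 the exponential of Shannon entropy, a = 2 the inverse Simpson index.
# Hand-rolled check for the first species group at a = 2 (a sketch, same data as above):
# p <- unlist(profili[1, -1]); p <- p[p > 0] / sum(p)
# exp(log(sum(p^2)) / (1 - 2))   # should match renyi(profili[-1], hill = TRUE)[1, "2"]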
# tabella valori renyis <-tabella 6
specie<-levels(profili$Specieagg)
tab<-renyis
row.names(tab)<-specie
tab%>%
kable("latex", digits = 2, booktabs = T, caption = "Valori di entropia di Renyi standardizzati tra i diversi gruppi specie" ) %>%
kable_styling()
###fig.7#<-graifico di Renyis standardizzato
renyis<-renyis-renyis[,1]
renyis %>%
mutate(Specie=levels(profili$Specieagg)) %>%
pivot_longer(cols=1:11, names_to="alpha",values_to = "indici" ) %>%
mutate(alpha=factor(alpha, levels=c("0", "0.25", "0.5","1", "2", "4", "8", "16","32","64","Inf"))) %>%
ggplot(aes(x=alpha, y=indici, color=Specie, group=Specie))+
geom_line()+theme_bw()+
labs(y="Rényi entropy", x="Rényi scale (alpha)")+
theme(legend.position="bottom",legend.text=element_text(size=7),
legend.title = element_blank())#+
#scale_color_manual(values = brewer.pal(12, "Set1"))
#####META-ANALISI PREVALENZE####
amrbib <- read_excel(here("ANALYSIS", "data", "raw", "meta.xlsx"))
amrbib<-amrbib %>%
filter(articolo!="8") %>%
filter(articolo!="10")
options(digits=2)
resbinom <- binom.bayes(
x = amrbib$nresitenti, n = amrbib$nisolati,
type = "highest", conf.level = 0.95, tol = 1e-9)
metares<-cbind(amrbib, resbinom[,6:8])
metares %>%
ggplot( aes(y=mean,ymin=lower, ymax=upper, x=articolo))+
geom_point(color="blue", size=2)+geom_linerange(color="blue", size=.8)+
coord_flip()+
theme_ipsum_rc(axis_title_just = "mc")+
facet_wrap(~specie)+
labs(x="", y="Prevalenza")
######################################################
# #biodiversità
# coli<-amr %>%
# mutate(genere=ifelse(identificazione!= "E.coli", "Enterobacteriacee", "E.coli")) %>%
# filter(genere=="E.coli") %>%
# group_by(Specieagg,profilo) %>%
# dplyr::summarise(n=n()) %>%
# pivot_wider(names_from = profilo, values_from = n, values_fill=list(n = 0)) %>%
# data.frame()
#
# coli.renyis<-exp(renyi(coli[,-1]))
#
#
#
# entb<-amr %>%
# mutate(genere=ifelse(identificazione!= "E.coli", "Enterobacteriacee", "E.coli")) %>%
# filter(genere=="Enterobacteriacee") %>%
# group_by(Specieagg,profilo) %>%
# dplyr::summarise(n=n()) %>%
# pivot_wider(names_from = profilo, values_from = n, values_fill=list(n = 0)) %>%
# data.frame()
#
# entb.renyis<-exp(renyi(entb[,-1]))
#
# genere<-rbind(coli.renyis,entb.renyis)
#
#
# genere %>%
# mutate(Specie=levels(profili$Specieagg)) %>%
# mutate(genere=c(rep("E.coli",9), rep("Altre Enterobacteriacee", 9))) %>%
# pivot_longer(cols=1:11, names_to="alpha",values_to = "indici" ) %>%
# mutate(alpha=factor(alpha, levels=c("0", "0.25", "0.5","1", "2", "4", "8", "16","32","64","Inf"))) %>%
# ggplot(aes(x=alpha, y=indici, color=Specie, group=Specie))+
# geom_point()+geom_line()+facet_grid(~genere)
#
#
###################################################################
###Prevalenza ceppi multiresistenti per specie (Bayes)
# d2<-d %>%
# fastDummies::dummy_columns("MR")
#
# AB<-data.frame(d2[,24:26])
# my_lms <- lapply(1:3, function(x) stan_glm(AB[,x] ~ Specieagg, data=d2,
# family=binomial(link="logit")))
#
# t<- lapply(1:3, function(x) emmeans(my_lms[[x]], ~Specieagg))
# t2<-lapply(1:3, function(x) contrast(t[[x]], method="eff"))
# z<- lapply(1:3, function(x) gather_emmeans_draws(t2[[x]]))
#
# bigdf<-do.call(rbind, z)
#
# MR<-c(rep("Multiresistenti", 36000),rep("Resistenti", 36000), rep("Suscettibili", 36000))
#
# bigdf<-cbind(bigdf, "MR"=MR)
#
# bigfg<-bigdf %>%
# mutate(MR=factor(MR, levels=c("Suscettibili", "Resistenti", "Multiresistenti"))) %>%
# mutate("prev"=.value) %>%
# ggplot(aes(x = prev, y=contrast,fill = contrast)) +geom_density_ridges(panel_scaling=TRUE)+
# theme_ridges()+facet_wrap(~MR)+
# scale_fill_brewer(palette = 7) +
# theme_ridges() + theme(legend.position = "NULL")+labs(x="Differenze prevalenza intra-specie vs Prevalenza media e Intervalli di Credibilità Bayesiani 95%",
# y="")
#
#
#
#
#
#
### PROFILO RESISTENZE WILDLIFE
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
##tabella ceppi S/R/MR by specie fauna
# d %>%
# mutate(Specieagg=forcats::fct_explicit_na(Specieagg)) %>%
# mutate(MR=factor(MR, levels=c("S","R","MR"), ordered=TRUE)) %>%
# drop_na(MR) %>%
# group_by(Specieagg, MR) %>%
# tally()%>%
# #mutate(prop=100*prop.table(n)) %>%
# #arrange(desc(n))
# pivot_wider(names_from = MR, values_from = n, values_fill = list(n=0)) %>%
# kable("latex") %>%
# kable_styling()
# mr<-d%>%
# select(Specieagg, MR) %>%
# group_by(Specieagg, MR) %>%
# drop_na(MR) %>%
# tally() %>%
# pivot_wider(names_from = MR, values_from = n) %>%
# mutate(N=rowSums(.[2:4],na.rm = TRUE)) %>%
# data.frame()
# options(digits=2)
# Shpd <- binom.bayes(
# x = mr$S, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Shpd<- cbind("Specieagg"=mr[, 1], Shpd[,6:8], rep("S",9))
# names(Shpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# Rhpd <- binom.bayes(
# x = mr$R, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8], rep("R",9))
# names(Rhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# mRhpd <- binom.bayes(
# x = mr$MR, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# mRhpd<- cbind("Specieagg"=mr[, 1], mRhpd[,6:8], rep("MR",9))
# names(mRhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# MR<-rbind(Shpd, Rhpd, mRhpd)
# MR %>%
# pivot_longer(cols=2:4, names_to = "par")
# MR %>%
# ggplot( aes(y=m,ymin=low, ymax=hig, x=Specieagg))+
# geom_point(color="blue", size=2)+geom_linerange(color="blue", size=.8)+
# facet_wrap(~gruppo)+coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(x="", y="Prevalenza")
#Analisi delle corrispondenze
# mr<-mr %>%
# column_to_rownames(var="Specieagg") %>%
# select(-4)
#
# dt <- as.table(as.matrix(mr[-4]))
# res.ca <- CA(mr, graph = FALSE)
# fviz_ca_biplot(res.ca, repel = TRUE)
#######PROFILI MULTIRESISTENZA
# amr <- AMR %>%
# drop_na(Specieagg) %>%
# filter(identificazione!="Non identificabile") %>%
# dplyr::select(-15,-18,-19)
# amr$COL<-ifelse(amr$COL=='R', 'COL',0)
# amr$CFT<-ifelse(amr$CFT=='R', 'CFT',0)
# #amr$tilmicosina<-ifelse(amr$tilmicosina=='R', 'TIL',0)
# amr$KAN<-ifelse(amr$KAN=='R', 'KAN',0)
# amr$ENR<-ifelse(amr$ENR=='R', 'ENR',0)
# #amr$oxacillina<-ifelse(amr$oxacillina=='R', 'OXA',0)
# #amr$eritromicina<-ifelse(amr$eritromicina=='R', 'ERT',0)
# amr$GEN<-ifelse(amr$GEN=='R', 'GEN',0)
# amr$TET<-ifelse(amr$TET=='R', 'TET',0)
# amr$AMP<-ifelse(amr$AMP=='R', 'AMP',0)
#
# #write.table(amr, file="amrxx.csv")
#
# amr[,13:19]<-amr[,13:19] != 0
# nomi_abb<-toupper(abbreviate(names(amr)[13:19]))
# X<- apply(amr[, 13:19], 1, function(x) nomi_abb[x])
# XX<-lapply(X, paste, collapse="-")
#
# amr$profilo<-unlist(XX)
#png("fig3.png", height = 550, width = 600)
# amr %>%
# filter(!profilo %in% c("NA-NA-NA-NA-NA-NA-NA")) %>%
# mutate( profilo= ifelse(profilo=="", "SUSC", profilo))%>%
# amr %>%
# group_by(profilo) %>%
# dplyr::summarise(n=n()) %>%
# arrange(n) %>%
# #top_n(10, n) %>%
# mutate(profilo = factor(profilo, unique(profilo))) %>%
# #ggplot(aes(x=profilo, y=n))+geom_bar(stat = "identity")+coord_flip()
# ggplot(aes(x=profilo, y=n, label=n))+
# geom_segment( aes(x=profilo, xend=profilo, y=0, yend=n), color="grey")+
# geom_point( aes(x=profilo, y=n), size=8.4, color="lightblue" )+
# geom_text(color="black", size=4)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(y="n.ceppi",x="Profilo di resistenza/multiresistenza")
# dat <- read.csv("spcdat.csv", header=TRUE, row.names=1)
# exp(renyi(dat))
# dat2 <- t(dat[4:8,])
# out <- iNEXT(dat2, q=c(0,1,2), datatype="abundance")
#
#
# profili2<-t(data.frame(profili[,-1], row.names = profili[,1]))
# out <- iNEXT(profili2, q=c(0,1,2), datatype="abundance")
# ggiNEXT(out, type=3, facet.var = "order")
# alpha<-diversityvariables(profili[,-1], y=NULL, digits=2)
# alpha<-data.frame(do.call(rbind, alpha))
#renyiplot(renyi(profili[,-1]))
#diversityresult(prof, y=NULL, digits=2, index="richness", method="s")
# plot(renyiaccum(prof))
#
# Renyi.1 <- renyiresult(prof, method = "s")
#
# abdist <- vegdist(prof, method = "bray")
#renyiplot(renyi(profili[-1]))
# ARINDEX
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
d2<-d %>%
dplyr::select(-15,-18,-19)
d2[,13:19]<-apply(d2[,13:19], 2 , funz)
d2$res<-rowSums(d2[,13:19], na.rm = T)
d2<-d2 %>%
mutate(nab=7) %>%
mutate(ARi=res/nab)
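# ARi is the per-isolate antibiotic resistance index (Krumperman-style MAR index):
# number of resistances / number of antibiotics tested (nab = 7), so it lies in [0, 1];
# e.g. an isolate resistant to 3 of the 7 drugs has ARi = 3/7 ~ 0.43. The 0.20 cut-off
# applied below is the threshold conventionally used to flag high-risk isolates.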
###Distribuzione ARi per isolato
d2 %>%
arrange(ARi) %>%
mutate(IDceppo = factor(IDceppo, unique(IDceppo))) %>%
top_n(300, ARi) %>%
ggplot(aes(x=IDceppo, y=ARi))+
geom_segment( aes(x=IDceppo, xend=IDceppo, y=0, yend=ARi), color="grey")+
geom_point( aes(x=IDceppo, y=ARi), size=1, color="black" )+
coord_flip()
d2<-d2 %>%
mutate(AR0.20= ifelse(ARi<0.20, "BPS", "APS"))
table(d2$AR0.20)
####ARi x Genere###
png("fig5.png", height = 550, width = 600)
d2 %>%
filter(!(identificazione=="Non identificabile")) %>%
group_by(identificazione) %>%
dplyr::summarise(Somma=sum(res),
nceppi=n(),
nab=7) %>%
mutate(ARi=round(Somma/(nceppi*nab),2)) %>%
arrange(ARi) %>%
mutate(identificazione = factor(identificazione, unique(identificazione))) %>%
ggplot(aes(x=identificazione, y=ARi, label=ARi))+
geom_segment( aes(x=identificazione, xend=identificazione, y=0, yend=ARi), color="black")+
geom_point( aes(x=identificazione, y=ARi), size=8.3, color="lightblue" )+
geom_text(color="black", size=4)+
coord_flip()+
theme_ipsum_rc(axis_title_just = "mc")+
labs(y="ARindex",x="Genere ceppi isolati")+
geom_hline(yintercept =0.20, col="red")
dev.off()
####ARi x Specie###
ARispec<-d2 %>%
group_by(Specieagg) %>%
dplyr::summarise(Somma=sum(res),
nceppi=n(),
nab=7) %>%
mutate(ARi=Somma/(nceppi*nab))
plot(density(d2$ARi))
#######################
esbl <- read_excel("RELAZIONE FINALE/esbl.xlsx")
esbl %>%
drop_na() %>%
kable("latex", caption = "Caratterizzazione genotipica dei geni di resistenza dei 47 ceppi resistenti a Ceftiofur",
booktabs = TRUE, longtable = T) %>%
kable_styling(latex_options = c("repeat_header"))
###########TABELLE ALLEGATO ANALISI METAGENOMICHE################
t1 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab1.xlsx"))
t1 %>%
kable("latex", caption = "Composizione dei pool di ceppi utilizzati per l'analisi metagenomica",
booktabs = TRUE) %>%
kable_styling()
t2 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab2.xlsx"), col_types = c("numeric", "numeric", "numeric"))
t2 %>%
kable("latex", caption = "Elenco dei pool analizzati e relative quantità di DNA genomico estratto.",
booktabs = TRUE) %>%
kable_styling()
t3 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab3.xlsx"),
col_types = c("numeric", "numeric", "numeric", "numeric", "numeric", "numeric" ))
t3 %>%
kable("latex", caption = "Tabella 3: Numero di reads e coverage per ciascun pool sequenziato.",
booktabs = TRUE) %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed")) %>%
footnote(
symbol = c("N.totali reads", "N.totali reads filtrate (bp)",
"N.reads totali filtrate/isolato (bp)", "Coverage medio/isolato (X)")
)
library(hrbrthemes)
library(ggvis)
t4 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab4.xlsx"))
t4$gene_phenotype <- paste(t4$AMRgene, "-", t4$Phenotype)
t4 %>%
pivot_longer(cols = 3:12, names_to = "Pool", values_to = "n",values_drop_na = TRUE) %>%
mutate(Pool = factor(Pool, levels = c("pool1","pool2","pool3",
"pool4","pool5","pool6","pool7",
"pool8","pool9","pool10" ))) %>%
ggplot( aes(Pool, gene_phenotype), label=n) + geom_tile(aes(fill = n))+
theme_ipsum_rc()+
geom_text(aes(label = n), size=4, col = "white") +
#scale_fill_gradient(low = "gray", high = "steelblue")+
scale_x_discrete(expand = c(0, 0), position = "top") + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Pool") +
theme(legend.position = "NULL",axis.ticks = element_blank(),
axis.text.x = element_text(hjust = 1,size=8),axis.text.y = element_text(size=8))+
labs(caption = "Fig.1: Geni AMR individuati nei pool- Per ogni gene AMR individuato nell’analisi metagenomica (AMR gene) viene riportata
la classe di antimicrobici associata (Phenotype) e la presenza/assenza del gene in ogni pool")
t5 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "allfig2.xlsx"))
t5$Classe_antibiotico <- paste(t5$Classe, "-", t5$Antibiotico)
t5 %>%
pivot_longer(cols = 3:12, names_to = "Pool", values_to = "n",values_drop_na = TRUE) %>%
mutate(Pool = factor(Pool, levels = c("pool1","pool2","pool3",
"pool4","pool5","pool6","pool7",
"pool8","pool9","pool10" )),
Classe_antibiotico = fct_rev(Classe_antibiotico)) %>%
ggplot( aes(Pool, Classe_antibiotico), label=n) + geom_tile(aes(fill = n))+
theme_ipsum_rc()+
geom_text(aes(label = n), size=4, color = "white") +
# scale_fill_gradient(low = "gray3", high = "steelblue")+
scale_fill_gradient(trans = 'reverse')+
scale_x_discrete(expand = c(0, 0), position = "top") + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Pool") +
theme(legend.position = "NULL",axis.ticks = element_blank(),
axis.text.x = element_text(hjust = 1,size=8),axis.text.y = element_text(size=8))+
labs(caption = "Fig.2: Analisi quantitativa del n. di geni AMR per antibiotico nei pool analizzati")
t6 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "allfig3.xlsx"))
t6 %>%
pivot_longer(cols = 2:11, names_to = "Pool", values_to = "n",values_drop_na = TRUE) %>%
mutate(Pool = factor(Pool, levels = c("pool1","pool2","pool3",
"pool4","pool5","pool6","pool7",
"pool8","pool9","pool10" )),
Classe_fenotipica = fct_rev(Classe_fenotipica)) %>%
ggplot( aes(Pool, Classe_fenotipica), label=n) + geom_tile(aes(fill = n))+
theme_ipsum_rc()+
geom_text(aes(label = n), size=4, color = "white") +
# scale_fill_gradient(low = "gray3", high = "steelblue")+
scale_fill_gradient(trans = 'reverse')+
scale_x_discrete(expand = c(0, 0), position = "top") + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Pool") +
theme(legend.position = "NULL",axis.ticks = element_blank(),
axis.text.x = element_text(hjust = 1,size=8),axis.text.y = element_text(size=8))+
labs(caption = "Fig.3: Analisi quantitativa del n. di geni AMR per classi di antibiotici nei pool analizzati")
# t4$pool1 <- cell_spec(t4$pool1, color = ifelse( !is.na(t4$pool1), "blue", "white"))
# t4$pool2 <- cell_spec(t4$pool2, color = ifelse( !is.na(t4$pool2), "blue", "white"))
# t4$pool3 <- cell_spec(t4$pool3, color = ifelse( !is.na(t4$pool3), "blue", "white"))
# t4$pool4 <- cell_spec(t4$pool4, color = ifelse( !is.na(t4$pool4), "blue", "white"))
# t4$pool5 <- cell_spec(t4$pool5, color = ifelse( !is.na(t4$pool5), "blue", "white"))
# t4$pool6 <- cell_spec(t4$pool6, color = ifelse( !is.na(t4$pool6), "blue", "white"))
# t4$pool7 <- cell_spec(t4$pool7, color = ifelse( !is.na(t4$pool7), "blue", "white"))
# t4$pool8 <- cell_spec(t4$pool8, color = ifelse( !is.na(t4$pool8), "blue", "white"))
# t4$pool9 <- cell_spec(t4$pool9, color = ifelse( !is.na(t4$pool9), "blue", "white"))
# t4$pool10 <- cell_spec(t4$pool10, color = ifelse( !is.na(t4$pool10), "blue", "white"))
#
# t4 %>%
# kable("latex", caption = "Tabella 4: Geni AMR individuati nei pool:Per ogni gene AMR individuato nell’analisi metagenomica (AMR gene)
# viene riportata la classe di antimicrobici associata (Phenotype) e la presenza/assenza del gene in ogni pool",
# booktabs = TRUE, longtable = T, escape = FALSE) %>%
# kable_styling()
###Grafico
# AMR$x<-!duplicated(AMR$IDcamp)#<-crea una variabile che identifica i singoli campioni di feci
# AMR %>%
# filter(x=="TRUE") %>% #filtra i campioni non duplicati
# group_by(Specieagg) %>%
# tally() %>%
# arrange(desc(n)) %>%
# mutate(prop=100*prop.table(n)) %>%
# ggplot(aes(x=n, y=fct_reorder(Specieagg, n), label=paste(round(prop, 1),"%")))+
# # geom_segment( aes(x=Specieagg, xend=Specieagg, y=0, yend=n), color="grey")+
# geom_point(size=15, color="grey")+
# geom_text(color="black", size=4)+
# labs(x="N.campioni")+
# theme_ipsum_rc(axis_title_just = "mc")+
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face="bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 14),
# plot.title = element_text(color = "blue", face = "bold", size=13.2),
# plot.caption = element_text(color = "blue", face = "italic", size=10),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="",x="N.campioni",
# title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# subtitle = "Distribuzione di 729 campioni di feci per specie di provenienza",
# caption="
# Cervidi: Capriolo, Cervo;
#
# sCaprine: Camoscio,Stambecco, Muflone;
#
# U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
#
# Altri Vol: Piccione, Starna, Fagiano")
##Distribuzione dei ceppi e dei ceppi per Specie di provenienza delle feci
# AMR %>%
# group_by(Specieagg, identificazione) %>%
# filter(identificazione!="Non identificabile") %>%
# tally() %>% arrange(desc(n)) %>%
# pivot_wider(names_from = Specieagg, values_from = n) %>%
# adorn_totals(where = c("row","col")) %>%
# kable("latex" ) %>%
# kable_styling()
##########Grafico con la ditribuzione dei ceppi
#isolati per specie di provenieinza delle feci#
# AMR %>%
# group_by(Specieagg) %>%
# tally() %>%
# arrange(desc(n)) %>%
# mutate(prop=100*prop.table(n)) %>%
# ggplot(aes(x=n, y=fct_reorder(Specieagg, n), label=paste(round(prop, 1),"%")))+
# # geom_segment( aes(x=Specieagg, xend=Specieagg, y=0, yend=n), color="grey")+
# geom_point(size=15, color="grey")+
# geom_text(color="black", size=4)+
# labs(x="N.campioni")+
# theme_ipsum_rc(axis_title_just = "mc")+
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face="bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 16),
# plot.title = element_text(color = "blue", face = "bold"),
# plot.caption = element_text(color = "blue", face = "italic", size=10),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="",x="N.campioni",
# title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# subtitle = "Distribuzione dei 941 ceppi batterici isolati per specie di provenienza delle feci",
# caption="
# Cervidi: Capriolo, Cervo;
#
# sCaprine: Camoscio,Stambecco, Muflone;
#
# U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
#
# Altri Vol: Piccione, Starna, Fagiano")
######mappa della provenienza dei campioni di feci
####MD
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
# d<-d %>%
# select(-tilmicosina,-oxacillina, -eritromicina)
# amr[,13:19]<-apply(amr[,13:19], 2 , funz)
#
# amr<-amr %>%
# mutate(MDR = rowSums(.[13:19]))
#amr<-amr[-416,]
######DESCRITTIVA DEI COMUNI
# d<-d %>%
# #filter(Specieagg=="") %>%
# drop_na(identificazione) %>%
# group_by(comune)%>%
# dplyr::summarise("n"=n())#,
# # MAR=sum(MDR)/(n*7)) %>%
# as.data.frame()
#
#
# com<-subset(comuni, comuni@data$NOME_COM %in% d$comune)
#
# com@data<-merge(com@data, d, by.x = "NOME_COM", by.y = "comune")
# com@data$MAR[ which(com@data$MAR == 0)] = NA
####titolo
#
# # mytext=paste("N.campioni: ", round(com@data$n,2), sep="") %>%
# lapply(htmltools::HTML)
#
# tag.map.title <- tags$style(HTML("
# .leaflet-control.map-title {
# transform: translate(-50%,20%);
# position: fixed !important;
# left: 50%;
# text-align: center;
# padding-left: 10px;
# padding-right: 10px;
# background: rgba(255,255,255,0.75);
# font-weight: bold;
# font-size: 12px;
# }
# "))
# title <- tags$div(
# tag.map.title, HTML("Ruolo della fauna selvatica nella diffusione e mantenimento
# dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari")
# )
###############PROFILI DI RESISTENZA
# AMR %>%
# select(Specieagg,identificazione, 13,14,16,17,20:22) %>%
# pivot_longer(cols=3:9, names_to = "antibiotico") %>%
# group_by(antibiotico,value) %>%
# drop_na(value) %>%
# tally() %>%
# pivot_wider(names_from = value,values_from = n) %>%
# mutate("%Res"=round(100*(R/(R+S)),2)) %>%
# arrange(desc(`%Res`)) %>%
# kable("latex") %>%
# kable_styling()
########tabella profili di resistenza ceppo/antibiotico
# tot<-AMR %>%
# select(identificazione, 13,14,16,17,20:22) %>%
# pivot_longer(cols=2:8, names_to = "antibiotico") %>%
# group_by(identificazione,antibiotico) %>%
# tally(name="tot")
#
# res<-AMR %>%
# select(identificazione, 13,14,16,17,20:22) %>%
# pivot_longer(cols=2:8, names_to = "antibiotico") %>%
# group_by(identificazione, antibiotico) %>%
# filter(value=='R') %>%
# tally(name="res")
#
# tot %>% full_join(res)%>%
# replace_na(list(res=0)) %>%
# filter(identificazione!="Non identificabile") %>%
# mutate("%R"=(res/tot)*100) %>%
# select(-res) %>%
# pivot_wider(names_from = antibiotico, values_from = `%R`) %>%
# kable("latex", digits = 2) %>%
# kable_styling()
#
#
# x<-AMR %>%
# select(identificazione,22) %>%
# filter(identificazione!="Non identificabile") %>%
# group_by(identificazione, ampicillina) %>%
# drop_na(ampicillina) %>%
# tally() %>%
# drop_na(n) %>%
# pivot_wider(names_from = ampicillina,values_from = n,
# values_fill = list(n = 0)) %>%
# mutate("%Res"=round(100*(R/(R+S)),2)) %>%
# arrange(desc(`%Res`)) %>%
# select(-R, -S) %>%
# pivot_wider(names_from = identificazione, values_from = `%Res`,values_fill = list(`%Res` = 0)) %>%
# kable( ) %>%
# kable_styling()
#
#
#
#
# pivot_longer(cols=3:9, names_to = "antibiotico") %>%
# group_by(antibiotico,value) %>%
# drop_na(value) %>%
# tally() %>%
# pivot_wider(names_from = value,values_from = n)
######MULTIRESISTENZE SRMR
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
#
# d2<-AMR %>%
# dplyr::select(-15,-18,-19)
#
#
#
# d2[,13:19]<-apply(d2[,13:19], 2 , funz)
#
#
#
# d2<-d2 %>%
# mutate(MDR = rowSums(.[13:19]))
#
# d2$R<- ifelse(d2$MDR==0, 0, 1)
#
# d2$MR<-ifelse(d2$MDR==0, "S",
# ifelse(d2$MDR>=1 & d2$MDR<3, "R", "MR"))
#
# d2<-d2 %>%
# mutate(MR=factor(MR, levels=c("S","R","MR"), ordered=TRUE)) %>%
# mutate(Specieagg=factor(Specieagg,
# levels=c("CERVIDI","sCAPRINAE","CARNIVORI","CINGHIALE",
# "LEPRE","CORVIDI" ,"RAPACI","UCCELLI ACQUATICI",
# "ALTRI VOLATILI")))
#
#
#
# png("fig2.png", height = 550, width = 500)
# d2 %>%
# drop_na(MR) %>%
# group_by(Specieagg, MR) %>%
# tally()%>%
# #mutate(prop=100*prop.table(n)) %>%
# arrange(desc(n)) %>%
# ggplot(aes(x=MR, y=n, label=n))+
# geom_segment( aes(x=MR, xend=MR, y=0, yend=n), color="grey") +
# geom_point( aes(x=MR, y=n, color=Specieagg), size=5 ) +
# geom_text(color="black", size=3)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc") +
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face="bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 16),
# plot.title = element_text(color = "blue", face = "bold"),
# plot.caption = element_text(color = "blue", face = "italic", size=7),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="n.ceppi",x="")+
# #title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# #subtitle = "Distribuzione di 923 ceppi di Enterobatteriacee, per specie e grado di resistenza ad un pannello di antibiotici (COLISTINA, CEFTIOFUR,KANAMICINA,ENROFLOXACIN, TETRACICLINA, AMPICILLINA)",
# #caption="S= Suscettibile, R= Resistente fino a due antibiotici, MR=Multiresistente
#
# #Cervidi: Capriolo, Cervo;
# #sCaprine: Camoscio,Stambecco, Muflone;
# #U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
# # Altri Vol: Piccione, Starna, Fagiano")+
# ggforce::facet_col(~Specieagg, scales="free_y", space="free")
# dev.off()
#
#
#
# d2 %>%
# drop_na(MR) %>%
# filter(identificazione=="E.coli") %>%
# group_by(Specieagg, MR) %>%
# tally() %>%
# #mutate(prop=100*prop.table(n)) %>%
# arrange(desc(n)) %>%
# ggplot(aes(x=MR, y=n, label=n))+
# geom_segment( aes(x=MR, xend=MR, y=0, yend=n), color="grey") +
# geom_point( aes(x=MR, y=n, color=Specieagg), size=5 ) +
# geom_text(color="black", size=3)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc") +
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face = "bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 16),
# plot.title = element_text(color = "blue", face = "bold"),
# plot.caption = element_text(color = "blue", face = "italic", size=7),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="n.ceppi",x="",
# title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# subtitle = "Distribuzione di 615 ceppi di E.coli, per specie e grado di resistenza ad un pannello di antibiotici (COLISTINA, CEFTIOFUR,KANAMICINA,ENROFLOXACIN, TETRACICLINA, AMPICILLINA)",
# caption="S= Suscettibile, R= Resistente fino a due antibiotici, MR=Multiresistente
#
# Cervidi: Capriolo, Cervo;
# sCaprine: Camoscio,Stambecco, Muflone;
# U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
# Altri Vol: Piccione, Starna, Fagiano")+
# ggforce::facet_col(~Specieagg, scales="free_y", space="free")
#
# mr<-d%>%
# select(Specieagg, MR) %>%
# group_by(Specieagg, MR) %>%
# drop_na(MR) %>%
# tally() %>%
# pivot_wider(names_from = MR, values_from = n) %>%
# mutate(N=S+R+MR) %>%
# data.frame()# %>%
#pivot_longer(cols = 2:4, names_to = "gruppo")
# mod<-brm(data = mr, family = binomial,
# value | trials(N) ~ 1 + gruppo ,
# prior = c(prior(normal(0, 10), class = Intercept),
# prior(normal(0, 10), class = b)),
# iter = 2500, warmup = 500, cores = 2, chains = 2,
# seed = 10)
# s<-32
# n<-363
#
# shape1<-s+1
# shape2<-n-(s+1)
# x <- seq(0,1,length=500)
# #xp<-seq(from=0, to=1, by=0.01)
# beta=dbeta(x,shape1 = shape1, shape2 = shape2)
# df <- data.frame(x,beta)
# ggplot(df, aes(x=x, y=beta))+
# geom_line(col="blue")#+#theme_bw(16, "serif")+
# #scale_y_continuous(expand=c(0, 0))+
# # labs(x="Probability", y="Density",
# # title=paste("Beta distribution (n=",input$n2,";","s=", input$s2,")"))+
# # theme(plot.title = element_text(size = rel(1), vjust = 1.5))
# options(digits=2)
# Shpd <- binom.bayes(
# x = mr$S, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Shpd<- cbind("Specieagg"=mr[, 1], Shpd[,6:8], rep("S",9))
# names(Shpd)[2:5]<-c("m", "low", "hig", "gruppo")
# x<-binom.bayes.densityplot(Shpd,
# fill.central = "steelblue",
# fill.lower = "lightgray",
# fill.upper = "lightgray")+
# theme_ipsum_rc(axis_title_just = "mc")
# Rhpd <- binom.bayes(
# x = mr$R, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8], rep("R",9))
# names(Rhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# mRhpd <- binom.bayes(
# x = mr$MR, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# mRhpd<- cbind("Specieagg"=mr[, 1], mRhpd[,6:8], rep("MR",9))
# names(mRhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# MR<-rbind(Shpd, Rhpd, mRhpd)
#
# MR %>%
# pivot_longer(cols=2:4, names_to = "par") #%>%
# MR %>%
# ggplot(aes(x=m, y=Specieagg))+geom_point()+
# geom_segment(aes(x=low,
# xend=hig,
# y=Specieagg,
# yend=Specieagg))+
# facet_wrap(~gruppo)
# MR %>%
# #arrange(m) %>%
# ggplot( aes(y=m,ymin=low, ymax=hig, x=Specieagg))+
# geom_point(color="blue", size=2)+geom_linerange(color="blue", size=.8)+
# facet_wrap(~gruppo)+coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(x="", y="Prevalenza")
####CORRESPONDENCE ANALYSIS #
# mr<-mr %>%
# column_to_rownames(var="Specieagg") %>%
# select(-4)
#
#
# dt <- as.table(as.matrix(mr[-4]))
# res.ca <- CA(mr, graph = FALSE)
# fviz_ca_biplot(res.ca, repel = TRUE)
# balloonplot(t(dt), main =" ", xlab ="", ylab="",
# label = FALSE, show.margins = FALSE)
# fviz_screeplot(res.ca, addlabels = TRUE, ylim = c(0, 50))
# fviz_screeplot(res.ca) +
# geom_hline(yintercept=33.33, linetype=2, color="red")
#
# fviz_ca_biplot(res.ca,
# map ="rowprincipal", arrow = c(TRUE, TRUE),
# repel = TRUE)
# fviz_ca_biplot(res.ca, map ="colgreen", arrow = c(TRUE, FALSE),
# repel = TRUE)
#######PROFILI MULTIRESISTENZA
# amr <- d
# amr<-amr %>%
# dplyr::select(-c(15,18:19))
# amr$colistina<-ifelse(amr$colistina=='R', 'COL',0)
# amr$ceftiofur<-ifelse(amr$ceftiofur=='R', 'CFT',0)
# #amr$tilmicosina<-ifelse(amr$tilmicosina=='R', 'TIL',0)
# amr$kanamicina<-ifelse(amr$kanamicina=='R', 'KAN',0)
# amr$enrofloxacin<-ifelse(amr$enrofloxacin=='R', 'ENR',0)
# #amr$oxacillina<-ifelse(amr$oxacillina=='R', 'OXA',0)
# #amr$eritromicina<-ifelse(amr$eritromicina=='R', 'ERT',0)
# amr$gentamicina<-ifelse(amr$gentamicina=='R', 'GEN',0)
# amr$tetraciclina<-ifelse(amr$tetraciclina=='R', 'TET',0)
# amr$ampicillina<-ifelse(amr$ampicillina=='R', 'AMP',0)
#
# #write.table(amr, file="amrxx.csv")
#
# amr[,13:19]<-amr[,13:19] != 0
# nomi_abb<-toupper(abbreviate(names(amr)[13:19]))
# X<- apply(amr[, 13:19], 1, function(x) nomi_abb[x])
# XX<-lapply(X, paste, collapse="-")
#
# amr$profilo<-unlist(XX)
#
#
#
# png("fig3.png", height = 550, width = 600)
# amr %>%
# filter(!profilo %in% c("NA-NA-NA-NA-NA-NA-NA", "" )) %>%
# #filter(identificazione=="E.coli") %>%
# group_by(profilo) %>%
# dplyr::summarise(n=n()) %>%
# arrange(n) %>%
# top_n(10, n) %>%
# mutate(profilo = factor(profilo, unique(profilo))) %>%
# #ggplot(aes(x=profilo, y=n))+geom_bar(stat = "identity")+coord_flip()
# ggplot(aes(x=profilo, y=n, label=n))+
# geom_segment( aes(x=profilo, xend=profilo, y=0, yend=n), color="grey")+
# geom_point( aes(x=profilo, y=n), size=8.4, color="lightblue" )+
# geom_text(color="black", size=4)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(y="n.ceppi",x="Profilo di resistenza/multiresistenza")
# dev.off()
###### Bayesian regression of Isolate ARindex
##clean dataframe
#d2<-d2 %>%
#dplyr::select(1,2,3,7,12,13:19,20:39)
###ARindex is a proportion derived from counts (number of resistances / number of antibiotic tests performed)
# so it can be analysed with logistic regression models for aggregated (binomial) data...
###aggregate bayes logistic regression brms###
# mod0 <-brm(data=d2, family = binomial,
# res|trials(nab)~1)
#
# mod1 <-brm(data=d2, family = binomial,res|trials(nab)~1+Specieagg)
# mod1 <-brm(data=d2, family = binomial,res|trials(nab)~)
#
# mod2 <-brm(data=d2, family = binomial,res|trials(nab)~Specieagg+urb+hapasc)
#
# pp<-brms::pp_check(mod0)
#
# ###betaregression
#
# beta0<-brm(data=d2, family=zero_one_inflated_beta(),
# ARi~1)
#
# beta1<-brm(data=d2, family=zero_one_inflated_beta(),
# ARi~Specieagg)
#
# beta2<-brm(data=d2, family=zero_one_inflated_beta(),
# ARi~Specieagg+urb)
#
# pp<-brms::pp_check(mod0)
#
# library(GGally)
# ggpairs(d2[,13:30])
/ANALYSIS/scripts/CODE.R | no_license | TMax66/AMR | R | 53,054 bytes
####DATI E PACCHETTI####
library("here")
source(here("ANALYSIS", "scripts", 'pacchetti.r'))#<-carica le librerie
source(here("ANALYSIS", "scripts",'funzioni.r'))#<-carica funzioni in-built
source(here("ANALYSIS", "scripts",'dataset.r'))#<-carica lo script di preparazione dei dataset da usare per le analisi
options(mc.cores = parallel::detectCores())
#QUADRO GENERALE####
#Territorio####
###numero comuni da cui provengono i campioni
AMR_com %>%
filter(x=="TRUE") %>%
group_by(comune) %>%
tally()%>%
dim()
####MAPPA CAMPIONAMENTI (fig.1)
#(uso i dati con tutti i comuni da cui è
#### stato fatto il campionamento non quello con i dati dell'istat che sono di meno...###
source('mappe.r')#<-carica le mappe
AMR_com<-AMR_com %>%
drop_na(identificazione) %>%
group_by(comune)%>%
dplyr::summarise("n"=n()) %>%
as.data.frame()
com<-subset(comuni, comuni@data$NOME_COM %in% AMR_com$comune)
com@data<-merge(com@data, AMR_com, by.x = "NOME_COM", by.y = "comune")
mybins=c(1,5,10,15,20,30)
mypalette = colorBin( palette="OrRd",
domain=com@data$n, na.color="transparent", bins=mybins, reverse = F)
map1<-leaflet(data=regione) %>% addTiles() %>%
addPolygons(data=com,
fillColor = ~mypalette(n),
fillOpacity = 0.9,stroke=FALSE, weight = 10) %>%
addLegend( pal=mypalette, values= com@data$n, opacity=0.9, title = "N.Campioni", position = "bottomleft" )%>%
addPolygons(data=province, fill="F",color="") %>%
addPolygons(data=prov, fill=F, color="blue", weight=1, opacity=1) %>%
addPolygons(data=com, fill=F, color="black", weight=1, opacity=1)
# %>% x aggiungere il titolo per presentazione....
# addControl(title, position = "topleft", className="map-title")
mapshot(map1, file = "map1.png")
############CARATTERISTICHE DEL TERRITORIO
##tabella 1##
options(digits = 3)
AMR_istat %>%
select(sup, `denpop(abkmq)`,mediana,hapasc,aziende, capi ) %>% # select variables to summarise
summarise_each(funs(min = min(.,na.rm=T),
q25 = quantile(., 0.25, na.rm = T),
median = median(.,na.rm=T),
q75 = quantile(., 0.75, na.rm=T),
max = max(.,na.rm=T),
mean = mean(.,na.rm=T),
sd = sd(.,na.rm=T)))%>%
gather(stat, val) %>%
separate(stat, into = c("var", "stat"), sep = "_") %>%
spread(stat, val) %>%
select(var, min, q25, median, q75, max, mean, sd) %>%
mutate("ord"=c(5,6,2,4,3,1)) %>%
mutate("Parametri"= plyr::revalue(var, c("aziende"="Aziende con pascolo",
"capi"="Capi al pascolo", "mediana"="Altitudine mediana", "sup"="Superficie (Kmq)",
`denpop(abkmq)`="Densità di popolazione (Ab/Kmq)",
"hapasc"="Superficie al pascolo (ettari)"
))) %>%
arrange(ord) %>%
select(Parametri, 2:8) %>%
kable("latex", booktabs = TRUE, caption = "Caratterizzazione territoriale e demografica dei comuni di provenienza dei campioni") %>%
kable_styling()
AMR_istat %>%
filter(x=="TRUE") %>%
group_by(urb) %>%
summarise(n())
AMR_istat %>%
filter(x=="TRUE") %>%
group_by(montano) %>%
summarise(n()) %>%
adorn_totals()
###Fauna Selvatica#####
#numero campioni di feci
AMR_com %>%
filter(x=="TRUE") %>%
group_by(IDcamp) %>%
tally() %>% summarise(n=sum(n))
#numero di specie
length(unique(as.character(AMR$SPECIE)))##c'è un NA da togliere....
#tabella 2
AMR_com%>%
filter(x=="TRUE") %>%
group_by("Gruppo"=Specieagg, SPECIE) %>%
summarise(n=n()) %>%
mutate(prop=round(100*(n/670), 2)) %>%
adorn_totals(where = "row") %>%
kable("latex",caption = "Distribuzione del numero di campioni di feci in base alle differenti specie di fauna selvatica di origine, raggruppati per il gruppo-specie di appartenenza", booktabs = T, longtable = T) %>%
kable_styling(latex_options = c("repeat_header"))
#tabella 2b<-campioni by montanità####
AMR_istat %>%
mutate(urb=ifelse(urb==1, "densPop alta",
ifelse(urb==2,"densPop media", "densPop scarsa" ))) %>%
filter(x=="TRUE") %>%
group_by(Specieagg, montano) %>%
summarise(N=n()) %>%
pivot_wider(names_from = montano, values_from = N, values_fill = list(N=0)) %>%
adorn_totals(where = "col") %>%
adorn_totals(where = "row") %>%
kable("latex", booktabs = TRUE, caption = "Distribuzione del numero di campioni di feci in base alle caratteristiche territoriali di provenienza per i differenti gruppo-specie di appartenenza (NM= Non Montani, P= Parzialmente Montani, T= Totalmente Montani)") %>%
kable_styling()
#tabella 2c<-campioni by urbanizzazione
AMR_istat %>%
mutate(urb=ifelse(urb==1, "densPop alta",
ifelse(urb==2,"densPop media", "densPop scarsa" ))) %>%
filter(x=="TRUE") %>%
group_by(Specieagg, urb) %>%
summarise(N=n()) %>%
pivot_wider(names_from = urb, values_from = N, values_fill = list(N=0)) %>%
adorn_totals(where = "col") %>%
adorn_totals(where = "row") %>%
select("Gruppo Specie"=Specieagg, "UA"= "densPop alta", "UM" = "densPop media", "UB" = "densPop scarsa", "Totale"= Total) %>%
kable("latex", caption = "Distribuzione del numero di campioni di feci in base al grado di urbanizzazione dei comuni di provenienza per i differenti gruppo-specie di appartenenza (UA= Urbanizzazione ALta, UM= Urbanizzazione Media, UB= Urbanizzazione Bassa)",
booktabs = TRUE) %>%
kable_styling()
##Esami Microbiologici#####
options(knitr.kable.NA = 0)
#tabella 3
AMR %>%
group_by(identificazione) %>%
filter(identificazione!="Non identificabile") %>%
tally() %>% arrange(desc(n)) %>%
mutate("prop(%)"=round(100*prop.table(n),2)) %>%
adorn_totals(where = c("row")) %>%
kable("latex", caption = "Distribuzione del numero e proprorzione di ceppi della famiglia Enterobacteriacee suddivise per genere, isolati dai 670 campioni di feci analizzati",
booktabs = TRUE, longtable = T) %>%
kable_styling(latex_options = c("repeat_header"))
##Antibiogrammi####
ab<-AMR %>%
filter(identificazione!="Non identificabile") %>%
dplyr::select(-15,-18,-19)
ab[,13:19]<-apply(ab[,13:19], 2 , funz)
ab<-ab %>%
mutate(MDR = rowSums(.[13:19]))###antibiogram length###
ab$R<- ifelse(ab$MDR==0, 0, 1)
ab$MR<-ifelse(ab$MDR==0, "S",
ifelse(ab$MDR>=1 & ab$MDR<3, "R", "MR"))
#figura 2-ab length
ab %>%
drop_na(MDR) %>%
ggplot(aes(x=as.factor(MDR)))+geom_bar(aes(fill=MR))+
labs(x="numero di resistenze al panel di antibiotici",
y="numero ceppi")+
theme_ipsum_rc()+ theme(legend.title = element_blank())+
scale_fill_brewer(labels = c("Ceppi multiresistenti", "Ceppi Resistenti", "Ceppi Suscettibili"), direction = -1)
#PREVALENCE OF WILD ANIMALS CARRYING R AND MR STRAINS - ALL DATA, 671 samples####
#data are aggregated by faecal sample to estimate the prevalence of animals carrying
#at least one strain resistant to at least one antibiotic.
#steps: 1. recode the single antibiotic results as 1/0 (1 = R, 0 = S)
# 2. sum the values for each strain to obtain the number of resistances per strain
# 3. create a variable R flagging strains with at least one resistance
# 4. aggregate the dataset by IDcamp and sum the R values: values >= 1
#    mean the sample contained at least one strain with at least one resistance
# 5. from the original database, build a dataset excluding unidentified strains
# 6. join the two data frames (which must have the same number of rows) by the shared
#    IDcamp variable and create a variable RSelv equal to S when Res is 0 and R when
#    Res is >= 1. The resulting data frame, Prev, holds the data used to analyse the
#    prevalence of animals carrying resistant strains.
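# Illustrative toy example of the aggregation above (editor's sketch, not used by the analysis):
# data.frame(IDcamp = c("A", "A", "B"), R = c(1, 0, 0)) %>%
#   group_by(IDcamp) %>% summarise(Res = sum(R))
# -> sample A gets Res = 1 (it carries at least one resistant strain), sample B gets Res = 0,
#    which is exactly what RSelv ("R"/"S") encodes after step 6.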
#step1
Psel<-AMR %>%
# filter(x=="TRUE") %>%
filter(identificazione!="Non identificabile") %>%
dplyr::select(-15,-18,-19)
Psel[,13:19]<-apply(Psel[,13:19], 2 , funz)
#step2
Psel<-Psel %>%
mutate(MDR = rowSums(.[13:19]))###antibiogram length###
#step3
Psel$R<- ifelse(Psel$MDR==0, 0, 1)
#step4
Psel<-Psel %>%
drop_na(R) %>%
group_by(IDcamp)%>%
summarise(Res=sum(R))
#step5
pAMR<-AMR %>%
filter(x=="TRUE") %>%
filter(identificazione!="Non identificabile")
#step6
Prev<-pAMR %>%
left_join(Psel) %>%
mutate(RSelv=ifelse(Res==0,"S","R"))
## prevalenza resistenza in specie...####
options(digits=2)
mr<-Prev%>%
select(Specieagg, RSelv)%>%
group_by(Specieagg, RSelv) %>%
#drop_na(RSelv) %>%
tally() %>%
pivot_wider(names_from = RSelv, values_from = n, values_fill = list(n = 0)) %>%
data.frame() %>%
mutate(N=rowSums(.[2:3],na.rm = TRUE))
options(digits=2)
Rhpd <- binom.bayes(
x = mr$R, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
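# Editor's note: binom.bayes() (binom package) uses a Jeffreys Beta(0.5, 0.5) prior by default,
# so each posterior is Beta(x + 0.5, n - x + 0.5) with mean (x + 0.5)/(n + 1); e.g. 92 positive
# samples out of 670 would give a posterior mean of about 92.5/671 ~ 0.14, and the HPD limits
# reported below are taken from that Beta density (a different prior via prior.shape1/prior.shape2
# would change the estimates).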
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8]) %>%
# arrange(desc(mean))
Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd)
R <- Rhpd %>%
arrange(desc(mean)) %>%
mutate(Specieagg = unique(factor(Specieagg)))
R %>%
select("Gruppo Specie" = Specieagg, -method, "R"=x, "N"=n, "Prevalenza"=mean, "inf-HPD"=lower, "sup-HPD"=upper, -shape1, -shape2, -sig) %>%
kable("latex", booktabs = T,
caption = "Stime della prevalenza di campioni resistenti suddivisi per gruppo-specie: R= numero di campioni resistenti, N= numero campioni esaminati, Prevalenza = media della distribuzione beta, inf-HPD= valore inferiore dell'intervallo HPD, sup-HPD = valore superiore dell'intervallo HPD") %>%
kable_styling()
z <- binom.bayes.densityplot(R)
z+facet_wrap(~Specieagg)+xlab("Prevalenza")
#grafico bayesian density (figura 3)
# prev <-Prev %>%
# mutate(prev=ifelse(RSelv=="S", 0, 1))
#
# modn <- stan_glm(prev~ 1, data=prev,family=binomial(link="logit"))
# modp<-stan_glm(prev~ Specieagg, data=prev,family=binomial(link="logit"))
#
# t<-emmeans(modp, ~Specieagg)
#
# t %>% as.data.frame() %>%
# mutate(emmean=invlogit(emmean),
# lower.HPD = invlogit(lower.HPD),
# upper.HPD = invlogit(upper.HPD)) %>%
# select("Gruppo-Specie" = Specieagg, "Prevalenza"=emmean, "HPD-inf"= lower.HPD, "HPD-sup"= upper.HPD ) %>%
# arrange(desc(Prevalenza)) %>%
# kable("latex" ) %>%
# kable_styling()
# mod<-glm(prev~ Specieagg, data=prev,family=binomial(link="logit"))
# p<-gather_emmeans_draws(t)
# p %>%
# mutate("prev"=invlogit(.value))%>%
# group_by(Specieagg) %>%
# summarise(m=mean(prev),
# sd=sd(prev))
# ggplot(aes(x = prev, y=Specieagg,fill = Specieagg)) +
# geom_density_ridges(panel_scaling=FALSE)+
# theme_ridges()+
# scale_fill_brewer(palette = 7) +
# theme_ridges() + theme(legend.position = "NULL")+labs(x="Prevalenza",
# y="")
#prevalenza multi-resistenza###################
mPsel<-AMR %>%
filter(identificazione!="Non identificabile") %>%
dplyr::select(-15,-18,-19)
mPsel[,13:19]<-apply(mPsel[,13:19], 2 , funz)
#step2
mPsel<-mPsel %>%
mutate(MDR = rowSums(.[13:19]))###antibiogram length###
#step3
mPsel$MR<- ifelse(mPsel$MDR==0 |mPsel$MDR<=2, 0, 1)
mPsel$R<- ifelse(mPsel$MDR==0, 0, 1)
mPsel<-mPsel %>%
drop_na(R) %>%
drop_na(MR) %>%
group_by(IDcamp)%>%
summarise(Res=sum(R),
MRes=sum(MR))
#step5
pAMR<-AMR %>%
filter(x=="TRUE") %>%
filter(identificazione!="Non identificabile")
#step6
mPrev<-pAMR %>%
left_join(mPsel) %>%
mutate(MRSelv=ifelse(MRes==0,"Sr","MR"))
binom.bayes(
x = 92, n = 670)
options(digits=2)
Mr<-mPrev%>%
select(Specieagg, MRSelv)%>%
group_by(Specieagg, MRSelv) %>%
drop_na(MRSelv) %>%
tally() %>%
pivot_wider(names_from = MRSelv, values_from = n, values_fill = list(n = 0)) %>%
data.frame() %>%
mutate(N=rowSums(.[2:3],na.rm = TRUE))
options(digits=2)
MRhpd <- binom.bayes(
x = Mr$MR, n = Mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
MRhpd<- cbind("Specieagg"=mr[, 1], MRhpd)
MR <- MRhpd %>%
arrange(desc(mean)) %>%
mutate(Specieagg = unique(factor(Specieagg)))
MR %>%
select(-method, "MR"=x, "N"=n, "Prevalenza"=mean, "inf-HPD"=lower, "sup-HPD"=upper, -shape1, -shape2, -sig) %>%
kable("latex", caption = "Stime della prevalenza di campioni multi-resistenti suddivisi per gruppo-specie: MR= numero di campioni resistenti, N= numero campioni esaminati, Prevalenza = media della distribuzione beta, inf-HPD= valore inferiore dell'intervallo HPD, sup-HPD = valore superiore dell'intervallo HPD)",
booktabs = T) %>%
kable_styling()
z <- binom.bayes.densityplot(MR)
z+facet_wrap(~Specieagg)+xlab("Prevalenza")
#bayesian density
#grafico bayesian density (figura 3)
# mprev <-mPrev %>%
# mutate(prev=ifelse(MRSelv=="Sr", 0, 1))
#
# modp<-stan_glm(prev~ Specieagg, data=mprev,family=binomial(link="logit"))
#
# t<-emmeans(modp, ~Specieagg)
# p<-gather_emmeans_draws(t)
#
# p %>%
# mutate("prev"=logit2prob(.value))%>%
# ggplot(aes(x = prev, y=Specieagg,fill = Specieagg)) +
# geom_density_ridges(panel_scaling=TRUE)+
# theme_ridges()+
# scale_fill_brewer(palette = 7) +
# theme_ridges() + theme(legend.position = "NULL")+labs(x="Prevalenza MR Intervalli di Credibilità Bayesiani 95%",y="")
#
#PREVALENZA R E MR IN WILDLIFE BY TERRITORIO E SPECIE####
#USO DATASET ComAMRsel 640 RIGHE
#-prevalenza R per classificazione montana (il codice raggruppa per 'montano', non per 'urb')
RbyT<-ComAMRsel %>%
mutate(urb=ifelse(urb==1, "densPop alta",
ifelse(urb==2,"densPop media", "densPop scarsa" ))) %>%
drop_na(Res) %>%
select(Specieagg, montano, Res, MRes) %>%
group_by(Specieagg, montano, Res) %>%
summarise(n=n()) %>%
pivot_wider(names_from = Res, values_from = n, values_fill = list(n=0)) %>%
mutate(N=S+R) %>%
data.frame()
hpd <- binom.bayes(
x = RbyT$R, n = RbyT$N, type = "highest", conf.level = 0.95, tol = 1e-9)
Rhpd<- cbind("Specieagg"=RbyT[, 1], hpd[,6:8])
Rhpd<-cbind(RbyT, Rhpd[,-1])
#Bayesian multilevel model R~(1|comune)+ Specie+ pascolo+ urbanizzazione####
#codici nel file bayesmod.R
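# Editor's sketch of the model described above (the real code is in bayesmod.R and may differ;
# the column names here are assumptions based on how ComAMRsel is used elsewhere in this script):
# mod_terr <- brm(resR ~ Specieagg + hapasc + urb + (1 | comune),
#                 data   = ComAMRsel %>% mutate(resR = as.integer(Res == "R")),
#                 family = bernoulli(link = "logit"))
# i.e. a Bernoulli GLMM for carrying at least one resistant strain, with a random intercept per
# comune and fixed effects for species group, grazed surface and urbanisation.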
#CARATTERIZZAZIONE FENOTIPICA DELL'ANTIBIOTICO-RESISTENZA DEGLI ISOLATI BATTERICI####
#Antibiotico-resistenza byAB <- tabella 4####
z <-AMR %>%
filter(identificazione!="Non identificabile") %>%
select(Specieagg,identificazione, 13,14,16,17,20:22) %>%
pivot_longer(cols=3:9, names_to = "antibiotico") %>%
group_by(antibiotico,value) %>%
drop_na(value) %>%
tally() %>%
pivot_wider(names_from = value,values_from = n) %>%
mutate("N"=R+S,
"prop" = round(100*(R/N),2)) %>%
arrange(desc(prop))
options(digits=2)
Rhpd <- binom.bayes(
x = z$R, n = z$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8]) %>%
# arrange(desc(mean))
Rhpd<- cbind("Antibiotico"=z[, 1], Rhpd[])
R <- Rhpd[-8,] %>%
arrange(desc(mean)) %>%
mutate(Antibiotico = unique(factor(antibiotico)))
R %>%
select(Antibiotico,-method, "R"=x, "N"=n, "Prevalenza"=mean, "inf-HPD"=lower, "sup-HPD"=upper, -shape1, -shape2, -sig, -antibiotico) %>%
kable("latex", caption = "Stime bayesiane della prevalenza di ceppi resistenti ai differenti antibiotici", booktabs = TRUE) %>%
kable_styling()
pz <- binom.bayes.densityplot(R, fill.central = "steelblue", fill.lower = "steelblue",
alpha = 1.2) +facet_wrap(~Antibiotico)
#--Bayesian posterior prevalence of the seven antibiotic-resistance phenotypes
#--figura 5
dt<-AMR %>%
select(Specieagg,identificazione, 13,14,16,17,20:22) %>%
na.omit()
dt[,3:9]<-apply(dt[,3:9], 2 , funz)
AB<-data.frame(dt[,3:9])
my_lms <- lapply(1:7, function(x) stan_glm(AB[,x] ~ Specieagg, data=dt,
family=binomial(link="logit")))
t<- lapply(1:7, function(x) emmeans(my_lms[[x]], ~Specieagg))
#t2<-lapply(1:7, function(x) contrast(t[[x]], method="eff"))
z<- lapply(1:7, function(x) gather_emmeans_draws(t[[x]]))
bigdf<-do.call(rbind, z)
ab<-c(rep("COL", 36000), rep("CFT", 36000), rep("KAN", 36000), rep("ENR",36000),
rep("GEN", 36000), rep("TET",36000), rep("AMP",36000))
bigdf<-cbind(bigdf, "ab"=ab)
bigdf <- bigdf %>%
mutate(ab = factor(ab, levels = c("AMP", "TET","CFT", "COL", "ENR", "KAN", "GEN")))
bigfg<- bigdf %>%
ggplot(aes(x = logit2prob(.value), y=Specieagg,fill = Specieagg)) +geom_density_ridges(panel_scaling=TRUE)+
theme_ridges()+facet_wrap(~ab)+
scale_fill_brewer(palette = "Blues") +
theme_ridges() + theme(legend.position = "NULL")+labs(x="Prevalenza", y="")
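# Editor's note: logit2prob() (presumably defined in funzioni.r) is the usual inverse-logit
# back-transform, p = exp(x)/(1 + exp(x)); e.g. a draw of 0 on the logit scale corresponds to a
# prevalence of 0.5, and a draw of -2 to about 0.12.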
#tabella 4
bigdf %>%
group_by(Specieagg,ab) %>%
summarise(prev=round(mean(logit2prob(.value)),2)) %>%
pivot_wider(names_from = ab, values_from = "prev") %>%
kable("latex", booktabs = T, caption = "Profilo di resistenza dei ceppi ai diversi antibiotici per gruppo-specie di provevienza del campione di feci") %>%
kable_styling()
#Antibiotico-resistenza by genere <-tabella 5####
tot<-AMR %>%
select(identificazione, 13,14,16,17,20:22) %>%
pivot_longer(cols=2:8, names_to = "antibiotico") %>%
group_by(identificazione,antibiotico) %>%
tally(name="tot")
res<-AMR %>%
select(identificazione, 13,14,16,17,20:22) %>%
pivot_longer(cols=2:8, names_to = "antibiotico") %>%
group_by(identificazione, antibiotico) %>%
filter(value=='R') %>%
tally(name="res")
tot %>% full_join(res)%>%
replace_na(list(res=0)) %>%
filter(identificazione!="Non identificabile") %>%
mutate("%R"=(res/tot)*100) %>%
select(-res) %>%
pivot_wider(names_from = antibiotico, values_from = `%R`) %>%
arrange(desc(tot)) %>%
select("Genere" = identificazione, "N.ceppi" = tot, AMP, TET, CFT, COL, ENR, KAN, GEN, ) %>%
kable("latex", digits = 2, booktabs= T, caption = "Profilo di antibiotico-resistenza tra i diversi generi dei ceppi isolati") %>%
kable_styling()
#Profilo di multiresistenza####
amr %>%
filter(profilo!="SUSC") %>%
group_by(profilo) %>%
dplyr::summarise(n=n()) %>%
arrange(n) %>%
#top_n(10, n) %>%
mutate(profilo = factor(profilo, unique(profilo))) %>%
#ggplot(aes(x=profilo, y=n))+geom_bar(stat = "identity")+coord_flip()
ggplot(aes(x=profilo, y=n, label=n))+
geom_segment( aes(x=profilo, xend=profilo, y=0, yend=n), color="grey")+
geom_point( aes(x=profilo, y=n), size=8.4, color="steelblue" )+
geom_text(color="white", size=4)+
coord_flip()+
theme_ipsum_rc()+
labs(y="n.ceppi",x="")
#prevalenza ceppi ABr e MAbr per gruppo-specie di provenienza del campione
####Biodiversità<---fig.6####
amr %>%
group_by(Specieagg,profilo) %>%
dplyr::summarise(n=n()) %>%
ggplot( aes(Specieagg,profilo), label=n) +
geom_tile(aes(fill = n)) +
geom_text(aes(label = n), size=4) +
scale_fill_gradient(low = "gray", high = "red")+
#scale_fill_gradient(low = "lightgrey",high = "steelblue")+
scale_x_discrete(expand = c(0, 0)) + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Gruppo Specie")+
theme(legend.position = "bottom",axis.ticks = element_blank(),axis.text.x = element_text(angle = 90, hjust = 1,size=8),axis.text.y = element_text(size=8))
profili<-amr %>%
group_by(Specieagg,profilo) %>%
dplyr::summarise(n=n()) %>%
pivot_wider(names_from = profilo, values_from = n, values_fill=list(n = 0)) %>%
data.frame()
renyis<-renyi(profili[-1], hill=TRUE)
# tabella valori renyis <-tabella 6
specie<-levels(profili$Specieagg)
tab<-renyis
row.names(tab)<-specie
tab%>%
kable("latex", digits = 2, booktabs = T, caption = "Valori di entropia di Renyi standardizzati tra i diversi gruppi specie" ) %>%
kable_styling()
###fig.7#<-graifico di Renyis standardizzato
renyis<-renyis-renyis[,1]
renyis %>%
mutate(Specie=levels(profili$Specieagg)) %>%
pivot_longer(cols=1:11, names_to="alpha",values_to = "indici" ) %>%
mutate(alpha=factor(alpha, levels=c("0", "0.25", "0.5","1", "2", "4", "8", "16","32","64","Inf"))) %>%
ggplot(aes(x=alpha, y=indici, color=Specie, group=Specie))+
geom_line()+theme_bw()+
labs(y="Rényi entropy", x="Rényi scale (alpha)")+
theme(legend.position="bottom",legend.text=element_text(size=7),
legend.title = element_blank())#+
#scale_color_manual(values = brewer.pal(12, "Set1"))
#####META-ANALISI PREVALENZE####
amrbib <- read_excel(here("ANALYSIS", "data", "raw", "meta.xlsx"))
amrbib<-amrbib %>%
filter(articolo!="8") %>%
filter(articolo!="10")
options(digits=2)
resbinom <- binom.bayes(
x = amrbib$nresitenti, n = amrbib$nisolati,
type = "highest", conf.level = 0.95, tol = 1e-9)
metares<-cbind(amrbib, resbinom[,6:8])
metares %>%
ggplot( aes(y=mean,ymin=lower, ymax=upper, x=articolo))+
geom_point(color="blue", size=2)+geom_linerange(color="blue", size=.8)+
coord_flip()+
theme_ipsum_rc(axis_title_just = "mc")+
facet_wrap(~specie)+
labs(x="", y="Prevalenza")
######################################################
# #biodiversità
# coli<-amr %>%
# mutate(genere=ifelse(identificazione!= "E.coli", "Enterobacteriacee", "E.coli")) %>%
# filter(genere=="E.coli") %>%
# group_by(Specieagg,profilo) %>%
# dplyr::summarise(n=n()) %>%
# pivot_wider(names_from = profilo, values_from = n, values_fill=list(n = 0)) %>%
# data.frame()
#
# coli.renyis<-exp(renyi(coli[,-1]))
#
#
#
# entb<-amr %>%
# mutate(genere=ifelse(identificazione!= "E.coli", "Enterobacteriacee", "E.coli")) %>%
# filter(genere=="Enterobacteriacee") %>%
# group_by(Specieagg,profilo) %>%
# dplyr::summarise(n=n()) %>%
# pivot_wider(names_from = profilo, values_from = n, values_fill=list(n = 0)) %>%
# data.frame()
#
# entb.renyis<-exp(renyi(entb[,-1]))
#
# genere<-rbind(coli.renyis,entb.renyis)
#
#
# genere %>%
# mutate(Specie=levels(profili$Specieagg)) %>%
# mutate(genere=c(rep("E.coli",9), rep("Altre Enterobacteriacee", 9))) %>%
# pivot_longer(cols=1:11, names_to="alpha",values_to = "indici" ) %>%
# mutate(alpha=factor(alpha, levels=c("0", "0.25", "0.5","1", "2", "4", "8", "16","32","64","Inf"))) %>%
# ggplot(aes(x=alpha, y=indici, color=Specie, group=Specie))+
# geom_point()+geom_line()+facet_grid(~genere)
#
#
###################################################################
###Prevalenza ceppi multiresistenti per specie (Bayes)
# d2<-d %>%
# fastDummies::dummy_columns("MR")
#
# AB<-data.frame(d2[,24:26])
# my_lms <- lapply(1:3, function(x) stan_glm(AB[,x] ~ Specieagg, data=d2,
# family=binomial(link="logit")))
#
# t<- lapply(1:3, function(x) emmeans(my_lms[[x]], ~Specieagg))
# t2<-lapply(1:3, function(x) contrast(t[[x]], method="eff"))
# z<- lapply(1:3, function(x) gather_emmeans_draws(t2[[x]]))
#
# bigdf<-do.call(rbind, z)
#
# MR<-c(rep("Multiresistenti", 36000),rep("Resistenti", 36000), rep("Suscettibili", 36000))
#
# bigdf<-cbind(bigdf, "MR"=MR)
#
# bigfg<-bigdf %>%
# mutate(MR=factor(MR, levels=c("Suscettibili", "Resistenti", "Multiresistenti"))) %>%
# mutate("prev"=.value) %>%
# ggplot(aes(x = prev, y=contrast,fill = contrast)) +geom_density_ridges(panel_scaling=TRUE)+
# theme_ridges()+facet_wrap(~MR)+
# scale_fill_brewer(palette = 7) +
# theme_ridges() + theme(legend.position = "NULL")+labs(x="Differenze prevalenza intra-specie vs Prevalenza media e Intervalli di Credibilità Bayesiani 95%",
# y="")
#
#
#
#
#
#
### PROFILO RESISTENZE WILDLIFE
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
##tabella ceppi S/R/MR by specie fauna
# d %>%
# mutate(Specieagg=forcats::fct_explicit_na(Specieagg)) %>%
# mutate(MR=factor(MR, levels=c("S","R","MR"), ordered=TRUE)) %>%
# drop_na(MR) %>%
# group_by(Specieagg, MR) %>%
# tally()%>%
# #mutate(prop=100*prop.table(n)) %>%
# #arrange(desc(n))
# pivot_wider(names_from = MR, values_from = n, values_fill = list(n=0)) %>%
# kable("latex") %>%
# kable_styling()
# mr<-d%>%
# select(Specieagg, MR) %>%
# group_by(Specieagg, MR) %>%
# drop_na(MR) %>%
# tally() %>%
# pivot_wider(names_from = MR, values_from = n) %>%
# mutate(N=rowSums(.[2:4],na.rm = TRUE)) %>%
# data.frame()
# options(digits=2)
# Shpd <- binom.bayes(
# x = mr$S, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Shpd<- cbind("Specieagg"=mr[, 1], Shpd[,6:8], rep("S",9))
# names(Shpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# Rhpd <- binom.bayes(
# x = mr$R, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8], rep("R",9))
# names(Rhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# mRhpd <- binom.bayes(
# x = mr$MR, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# mRhpd<- cbind("Specieagg"=mr[, 1], mRhpd[,6:8], rep("MR",9))
# names(mRhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# MR<-rbind(Shpd, Rhpd, mRhpd)
# MR %>%
# pivot_longer(cols=2:4, names_to = "par")
# MR %>%
# ggplot( aes(y=m,ymin=low, ymax=hig, x=Specieagg))+
# geom_point(color="blue", size=2)+geom_linerange(color="blue", size=.8)+
# facet_wrap(~gruppo)+coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(x="", y="Prevalenza")
#Analisi delle corrispondenze
# mr<-mr %>%
# column_to_rownames(var="Specieagg") %>%
# select(-4)
#
# dt <- as.table(as.matrix(mr[-4]))
# res.ca <- CA(mr, graph = FALSE)
# fviz_ca_biplot(res.ca, repel = TRUE)
#######PROFILI MULTIRESISTENZA
# amr <- AMR %>%
# drop_na(Specieagg) %>%
# filter(identificazione!="Non identificabile") %>%
# dplyr::select(-15,-18,-19)
# amr$COL<-ifelse(amr$COL=='R', 'COL',0)
# amr$CFT<-ifelse(amr$CFT=='R', 'CFT',0)
# #amr$tilmicosina<-ifelse(amr$tilmicosina=='R', 'TIL',0)
# amr$KAN<-ifelse(amr$KAN=='R', 'KAN',0)
# amr$ENR<-ifelse(amr$ENR=='R', 'ENR',0)
# #amr$oxacillina<-ifelse(amr$oxacillina=='R', 'OXA',0)
# #amr$eritromicina<-ifelse(amr$eritromicina=='R', 'ERT',0)
# amr$GEN<-ifelse(amr$GEN=='R', 'GEN',0)
# amr$TET<-ifelse(amr$TET=='R', 'TET',0)
# amr$AMP<-ifelse(amr$AMP=='R', 'AMP',0)
#
# #write.table(amr, file="amrxx.csv")
#
# amr[,13:19]<-amr[,13:19] != 0
# nomi_abb<-toupper(abbreviate(names(amr)[13:19]))
# X<- apply(amr[, 13:19], 1, function(x) nomi_abb[x])
# XX<-lapply(X, paste, collapse="-")
#
# amr$profilo<-unlist(XX)
#png("fig3.png", height = 550, width = 600)
# amr %>%
# filter(!profilo %in% c("NA-NA-NA-NA-NA-NA-NA")) %>%
# mutate( profilo= ifelse(profilo=="", "SUSC", profilo))%>%
# amr %>%
# group_by(profilo) %>%
# dplyr::summarise(n=n()) %>%
# arrange(n) %>%
# #top_n(10, n) %>%
# mutate(profilo = factor(profilo, unique(profilo))) %>%
# #ggplot(aes(x=profilo, y=n))+geom_bar(stat = "identity")+coord_flip()
# ggplot(aes(x=profilo, y=n, label=n))+
# geom_segment( aes(x=profilo, xend=profilo, y=0, yend=n), color="grey")+
# geom_point( aes(x=profilo, y=n), size=8.4, color="lightblue" )+
# geom_text(color="black", size=4)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(y="n.ceppi",x="Profilo di resistenza/multiresistenza")
# dat <- read.csv("spcdat.csv", header=TRUE, row.names=1)
# exp(renyi(dat))
# dat2 <- t(dat[4:8,])
# out <- iNEXT(dat2, q=c(0,1,2), datatype="abundance")
#
#
# profili2<-t(data.frame(profili[,-1], row.names = profili[,1]))
# out <- iNEXT(profili2, q=c(0,1,2), datatype="abundance")
# ggiNEXT(out, type=3, facet.var = "order")
# alpha<-diversityvariables(profili[,-1], y=NULL, digits=2)
# alpha<-data.frame(do.call(rbind, alpha))
#renyiplot(renyi(profili[,-1]))
#diversityresult(prof, y=NULL, digits=2, index="richness", method="s")
# plot(renyiaccum(prof))
#
# Renyi.1 <- renyiresult(prof, method = "s")
#
# abdist <- vegdist(prof, method = "bray")
#renyiplot(renyi(profili[-1]))
# ARINDEX
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
d2<-d %>%
dplyr::select(-15,-18,-19)
d2[,13:19]<-apply(d2[,13:19], 2 , funz)
d2$res<-rowSums(d2[,13:19], na.rm = T)
d2<-d2 %>%
mutate(nab=7) %>%
mutate(ARi=res/nab)
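# Editor's note: ARi is the per-strain resistance proportion, e.g. a strain resistant to 3 of the
# 7 antibiotics tested has ARi = 3/7 ~ 0.43 and a fully susceptible strain has ARi = 0. The 0.20
# cut-off applied below (AR0.20, "APS"/"BPS") follows the common MAR-index convention in which
# values above 0.20 are read as suggestive of high antibiotic-use sources (editor's gloss).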
###Distribuzione ARi per isolato
d2 %>%
arrange(ARi) %>%
mutate(IDceppo = factor(IDceppo, unique(IDceppo))) %>%
top_n(300, ARi) %>%
ggplot(aes(x=IDceppo, y=ARi))+
geom_segment( aes(x=IDceppo, xend=IDceppo, y=0, yend=ARi), color="grey")+
geom_point( aes(x=IDceppo, y=ARi), size=1, color="black" )+
coord_flip()
d2<-d2 %>%
mutate(AR0.20= ifelse(ARi<0.20, "BPS", "APS"))
table(d2$AR0.20)
####ARi x Genere###
png("fig5.png", height = 550, width = 600)
d2 %>%
filter(!(identificazione=="Non identificabile")) %>%
group_by(identificazione) %>%
dplyr::summarise(Somma=sum(res),
nceppi=n(),
nab=7) %>%
mutate(ARi=round(Somma/(nceppi*nab),2)) %>%
arrange(ARi) %>%
mutate(identificazione = factor(identificazione, unique(identificazione))) %>%
ggplot(aes(x=identificazione, y=ARi, label=ARi))+
geom_segment( aes(x=identificazione, xend=identificazione, y=0, yend=ARi), color="black")+
geom_point( aes(x=identificazione, y=ARi), size=8.3, color="lightblue" )+
geom_text(color="black", size=4)+
coord_flip()+
theme_ipsum_rc(axis_title_just = "mc")+
labs(y="ARindex",x="Genere ceppi isolati")+
geom_hline(yintercept =0.20, col="red")
dev.off()
####ARi x Specie###
ARispec<-d2 %>%
group_by(Specieagg) %>%
dplyr::summarise(Somma=sum(res),
nceppi=n(),
nab=7) %>%
mutate(ARi=Somma/(nceppi*nab))
plot(density(d2$ARi))
#######################
esbl <- read_excel("RELAZIONE FINALE/esbl.xlsx")
esbl %>%
drop_na() %>%
kable("latex", caption = "Caratterizzazione genotipica dei geni di resistenza dei 47 ceppi resistenti a Ceftiofur",
booktabs = TRUE, longtable = T) %>%
kable_styling(latex_options = c("repeat_header"))
###########TABELLE ALLEGATO ANALISI METAGENOMICHE################
t1 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab1.xlsx"))
t1 %>%
kable("latex", caption = "Composizione dei pool di ceppi utilizzati per l'analisi metagenomica",
booktabs = TRUE) %>%
kable_styling()
t2 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab2.xlsx"), col_types = c("numeric", "numeric", "numeric"))
t2 %>%
kable("latex", caption = "Elenco dei pool analizzati e relative quantità di DNA genomico estratto.",
booktabs = TRUE) %>%
kable_styling()
t3 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab3.xlsx"),
col_types = c("numeric", "numeric", "numeric", "numeric", "numeric", "numeric" ))
t3 %>%
kable("latex", caption = "Tabella 3: Numero di reads e coverage per ciascun pool sequenziato.",
booktabs = TRUE) %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed")) %>%
footnote(
symbol = c("N.totali reads", "N.totali reads filtrate (bp)",
"N.reads totali filtrate/isolato (bp)", "Coverage medio/isolato (X)")
)
library(hrbrthemes)
library(ggvis)
t4 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "alltab4.xlsx"))
t4$gene_phenotype <- paste(t4$AMRgene, "-", t4$Phenotype)
t4 %>%
pivot_longer(cols = 3:12, names_to = "Pool", values_to = "n",values_drop_na = TRUE) %>%
mutate(Pool = factor(Pool, levels = c("pool1","pool2","pool3",
"pool4","pool5","pool6","pool7",
"pool8","pool9","pool10" ))) %>%
ggplot( aes(Pool, gene_phenotype), label=n) + geom_tile(aes(fill = n))+
theme_ipsum_rc()+
geom_text(aes(label = n), size=4, col = "white") +
#scale_fill_gradient(low = "gray", high = "steelblue")+
scale_x_discrete(expand = c(0, 0), position = "top") + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Pool") +
theme(legend.position = "NULL",axis.ticks = element_blank(),
axis.text.x = element_text(hjust = 1,size=8),axis.text.y = element_text(size=8))+
labs(caption = "Fig.1: Geni AMR individuati nei pool- Per ogni gene AMR individuato nell’analisi metagenomica (AMR gene) viene riportata
la classe di antimicrobici associata (Phenotype) e la presenza/assenza del gene in ogni pool")
t5 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "allfig2.xlsx"))
t5$Classe_antibiotico <- paste(t5$Classe, "-", t5$Antibiotico)
t5 %>%
pivot_longer(cols = 3:12, names_to = "Pool", values_to = "n",values_drop_na = TRUE) %>%
mutate(Pool = factor(Pool, levels = c("pool1","pool2","pool3",
"pool4","pool5","pool6","pool7",
"pool8","pool9","pool10" )),
Classe_antibiotico = fct_rev(Classe_antibiotico)) %>%
ggplot( aes(Pool, Classe_antibiotico), label=n) + geom_tile(aes(fill = n))+
theme_ipsum_rc()+
geom_text(aes(label = n), size=4, color = "white") +
# scale_fill_gradient(low = "gray3", high = "steelblue")+
scale_fill_gradient(trans = 'reverse')+
scale_x_discrete(expand = c(0, 0), position = "top") + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Pool") +
theme(legend.position = "NULL",axis.ticks = element_blank(),
axis.text.x = element_text(hjust = 1,size=8),axis.text.y = element_text(size=8))+
labs(caption = "Fig.2: Analisi quantitativa del n. di geni AMR per antibiotico nei pool analizzati")
t6 <- read_excel(here("ANALYSIS", "reports", "RELAZIONE FINALE", "allfig3.xlsx"))
t6 %>%
pivot_longer(cols = 2:11, names_to = "Pool", values_to = "n",values_drop_na = TRUE) %>%
mutate(Pool = factor(Pool, levels = c("pool1","pool2","pool3",
"pool4","pool5","pool6","pool7",
"pool8","pool9","pool10" )),
Classe_fenotipica = fct_rev(Classe_fenotipica)) %>%
ggplot( aes(Pool, Classe_fenotipica), label=n) + geom_tile(aes(fill = n))+
theme_ipsum_rc()+
geom_text(aes(label = n), size=4, color = "white") +
# scale_fill_gradient(low = "gray3", high = "steelblue")+
scale_fill_gradient(trans = 'reverse')+
scale_x_discrete(expand = c(0, 0), position = "top") + theme_ipsum_rc()+
scale_y_discrete(expand = c(0, 0)) + labs(x="Pool") +
theme(legend.position = "NULL",axis.ticks = element_blank(),
axis.text.x = element_text(hjust = 1,size=8),axis.text.y = element_text(size=8))+
labs(caption = "Fig.3: Analisi quantitativa del n. di geni AMR per classi di antibiotici nei pool analizzati")
# t4$pool1 <- cell_spec(t4$pool1, color = ifelse( !is.na(t4$pool1), "blue", "white"))
# t4$pool2 <- cell_spec(t4$pool2, color = ifelse( !is.na(t4$pool2), "blue", "white"))
# t4$pool3 <- cell_spec(t4$pool3, color = ifelse( !is.na(t4$pool3), "blue", "white"))
# t4$pool4 <- cell_spec(t4$pool4, color = ifelse( !is.na(t4$pool4), "blue", "white"))
# t4$pool5 <- cell_spec(t4$pool5, color = ifelse( !is.na(t4$pool5), "blue", "white"))
# t4$pool6 <- cell_spec(t4$pool6, color = ifelse( !is.na(t4$pool6), "blue", "white"))
# t4$pool7 <- cell_spec(t4$pool7, color = ifelse( !is.na(t4$pool7), "blue", "white"))
# t4$pool8 <- cell_spec(t4$pool8, color = ifelse( !is.na(t4$pool8), "blue", "white"))
# t4$pool9 <- cell_spec(t4$pool9, color = ifelse( !is.na(t4$pool9), "blue", "white"))
# t4$pool10 <- cell_spec(t4$pool10, color = ifelse( !is.na(t4$pool10), "blue", "white"))
#
# t4 %>%
# kable("latex", caption = "Tabella 4: Geni AMR individuati nei pool:Per ogni gene AMR individuato nell’analisi metagenomica (AMR gene)
# viene riportata la classe di antimicrobici associata (Phenotype) e la presenza/assenza del gene in ogni pool",
# booktabs = TRUE, longtable = T, escape = FALSE) %>%
# kable_styling()
###Grafico
# AMR$x<-!duplicated(AMR$IDcamp)#<-crea una variabile che identifica i singoli campioni di feci
# AMR %>%
# filter(x=="TRUE") %>% #filtra i campioni non duplicati
# group_by(Specieagg) %>%
# tally() %>%
# arrange(desc(n)) %>%
# mutate(prop=100*prop.table(n)) %>%
# ggplot(aes(x=n, y=fct_reorder(Specieagg, n), label=paste(round(prop, 1),"%")))+
# # geom_segment( aes(x=Specieagg, xend=Specieagg, y=0, yend=n), color="grey")+
# geom_point(size=15, color="grey")+
# geom_text(color="black", size=4)+
# labs(x="N.campioni")+
# theme_ipsum_rc(axis_title_just = "mc")+
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face="bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 14),
# plot.title = element_text(color = "blue", face = "bold", size=13.2),
# plot.caption = element_text(color = "blue", face = "italic", size=10),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="",x="N.campioni",
# title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# subtitle = "Distribuzione di 729 campioni di feci per specie di provenienza",
# caption="
# Cervidi: Capriolo, Cervo;
#
# sCaprine: Camoscio,Stambecco, Muflone;
#
# U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
#
# Altri Vol: Piccione, Starna, Fagiano")
##Distribuzione dei ceppi e dei ceppi per Specie di provenienza delle feci
# AMR %>%
# group_by(Specieagg, identificazione) %>%
# filter(identificazione!="Non identificabile") %>%
# tally() %>% arrange(desc(n)) %>%
# pivot_wider(names_from = Specieagg, values_from = n) %>%
# adorn_totals(where = c("row","col")) %>%
# kable("latex" ) %>%
# kable_styling()
##########Grafico con la ditribuzione dei ceppi
#isolati per specie di provenieinza delle feci#
# AMR %>%
# group_by(Specieagg) %>%
# tally() %>%
# arrange(desc(n)) %>%
# mutate(prop=100*prop.table(n)) %>%
# ggplot(aes(x=n, y=fct_reorder(Specieagg, n), label=paste(round(prop, 1),"%")))+
# # geom_segment( aes(x=Specieagg, xend=Specieagg, y=0, yend=n), color="grey")+
# geom_point(size=15, color="grey")+
# geom_text(color="black", size=4)+
# labs(x="N.campioni")+
# theme_ipsum_rc(axis_title_just = "mc")+
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face="bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 16),
# plot.title = element_text(color = "blue", face = "bold"),
# plot.caption = element_text(color = "blue", face = "italic", size=10),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="",x="N.campioni",
# title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# subtitle = "Distribuzione dei 941 ceppi batterici isolati per specie di provenienza delle feci",
# caption="
# Cervidi: Capriolo, Cervo;
#
# sCaprine: Camoscio,Stambecco, Muflone;
#
# U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
#
# Altri Vol: Piccione, Starna, Fagiano")
######mappa della provenienza dei campioni di feci
####MD
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
# d<-d %>%
# select(-tilmicosina,-oxacillina, -eritromicina)
# amr[,13:19]<-apply(amr[,13:19], 2 , funz)
#
# amr<-amr %>%
# mutate(MDR = rowSums(.[13:19]))
#amr<-amr[-416,]
######DESCRITTIVA DEI COMUNI
# d<-d %>%
# #filter(Specieagg=="") %>%
# drop_na(identificazione) %>%
# group_by(comune)%>%
# dplyr::summarise("n"=n())#,
# # MAR=sum(MDR)/(n*7)) %>%
# as.data.frame()
#
#
# com<-subset(comuni, comuni@data$NOME_COM %in% d$comune)
#
# com@data<-merge(com@data, d, by.x = "NOME_COM", by.y = "comune")
# com@data$MAR[ which(com@data$MAR == 0)] = NA
####titolo
#
# # mytext=paste("N.campioni: ", round(com@data$n,2), sep="") %>%
# lapply(htmltools::HTML)
#
# tag.map.title <- tags$style(HTML("
# .leaflet-control.map-title {
# transform: translate(-50%,20%);
# position: fixed !important;
# left: 50%;
# text-align: center;
# padding-left: 10px;
# padding-right: 10px;
# background: rgba(255,255,255,0.75);
# font-weight: bold;
# font-size: 12px;
# }
# "))
# title <- tags$div(
# tag.map.title, HTML("Ruolo della fauna selvatica nella diffusione e mantenimento
# dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari")
# )
###############PROFILI DI RESISTENZA
# AMR %>%
# select(Specieagg,identificazione, 13,14,16,17,20:22) %>%
# pivot_longer(cols=3:9, names_to = "antibiotico") %>%
# group_by(antibiotico,value) %>%
# drop_na(value) %>%
# tally() %>%
# pivot_wider(names_from = value,values_from = n) %>%
# mutate("%Res"=round(100*(R/(R+S)),2)) %>%
# arrange(desc(`%Res`)) %>%
# kable("latex") %>%
# kable_styling()
########tabella profili di resistenza ceppo/antibiotico
# tot<-AMR %>%
# select(identificazione, 13,14,16,17,20:22) %>%
# pivot_longer(cols=2:8, names_to = "antibiotico") %>%
# group_by(identificazione,antibiotico) %>%
# tally(name="tot")
#
# res<-AMR %>%
# select(identificazione, 13,14,16,17,20:22) %>%
# pivot_longer(cols=2:8, names_to = "antibiotico") %>%
# group_by(identificazione, antibiotico) %>%
# filter(value=='R') %>%
# tally(name="res")
#
# tot %>% full_join(res)%>%
# replace_na(list(res=0)) %>%
# filter(identificazione!="Non identificabile") %>%
# mutate("%R"=(res/tot)*100) %>%
# select(-res) %>%
# pivot_wider(names_from = antibiotico, values_from = `%R`) %>%
# kable("latex", digits = 2) %>%
# kable_styling()
#
#
# x<-AMR %>%
# select(identificazione,22) %>%
# filter(identificazione!="Non identificabile") %>%
# group_by(identificazione, ampicillina) %>%
# drop_na(ampicillina) %>%
# tally() %>%
# drop_na(n) %>%
# pivot_wider(names_from = ampicillina,values_from = n,
# values_fill = list(n = 0)) %>%
# mutate("%Res"=round(100*(R/(R+S)),2)) %>%
# arrange(desc(`%Res`)) %>%
# select(-R, -S) %>%
# pivot_wider(names_from = identificazione, values_from = `%Res`,values_fill = list(`%Res` = 0)) %>%
# kable( ) %>%
# kable_styling()
#
#
#
#
# pivot_longer(cols=3:9, names_to = "antibiotico") %>%
# group_by(antibiotico,value) %>%
# drop_na(value) %>%
# tally() %>%
# pivot_wider(names_from = value,values_from = n)
######MULTIRESISTENZE SRMR
# funz<-function(x){
#
# abs(as.numeric(as.factor(x))-2)
# }
#
# d2<-AMR %>%
# dplyr::select(-15,-18,-19)
#
#
#
# d2[,13:19]<-apply(d2[,13:19], 2 , funz)
#
#
#
# d2<-d2 %>%
# mutate(MDR = rowSums(.[13:19]))
#
# d2$R<- ifelse(d2$MDR==0, 0, 1)
#
# d2$MR<-ifelse(d2$MDR==0, "S",
# ifelse(d2$MDR>=1 & d2$MDR<3, "R", "MR"))
#
# d2<-d2 %>%
# mutate(MR=factor(MR, levels=c("S","R","MR"), ordered=TRUE)) %>%
# mutate(Specieagg=factor(Specieagg,
# levels=c("CERVIDI","sCAPRINAE","CARNIVORI","CINGHIALE",
# "LEPRE","CORVIDI" ,"RAPACI","UCCELLI ACQUATICI",
# "ALTRI VOLATILI")))
#
#
#
# png("fig2.png", height = 550, width = 500)
# d2 %>%
# drop_na(MR) %>%
# group_by(Specieagg, MR) %>%
# tally()%>%
# #mutate(prop=100*prop.table(n)) %>%
# arrange(desc(n)) %>%
# ggplot(aes(x=MR, y=n, label=n))+
# geom_segment( aes(x=MR, xend=MR, y=0, yend=n), color="grey") +
# geom_point( aes(x=MR, y=n, color=Specieagg), size=5 ) +
# geom_text(color="black", size=3)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc") +
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face="bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 16),
# plot.title = element_text(color = "blue", face = "bold"),
# plot.caption = element_text(color = "blue", face = "italic", size=7),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="n.ceppi",x="")+
# #title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# #subtitle = "Distribuzione di 923 ceppi di Enterobatteriacee, per specie e grado di resistenza ad un pannello di antibiotici (COLISTINA, CEFTIOFUR,KANAMICINA,ENROFLOXACIN, TETRACICLINA, AMPICILLINA)",
# #caption="S= Suscettibile, R= Resistente fino a due antibiotici, MR=Multiresistente
#
# #Cervidi: Capriolo, Cervo;
# #sCaprine: Camoscio,Stambecco, Muflone;
# #U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
# # Altri Vol: Piccione, Starna, Fagiano")+
# ggforce::facet_col(~Specieagg, scales="free_y", space="free")
# dev.off()
#
#
#
# d2 %>%
# drop_na(MR) %>%
# filter(identificazione=="E.coli") %>%
# group_by(Specieagg, MR) %>%
# tally() %>%
# #mutate(prop=100*prop.table(n)) %>%
# arrange(desc(n)) %>%
# ggplot(aes(x=MR, y=n, label=n))+
# geom_segment( aes(x=MR, xend=MR, y=0, yend=n), color="grey") +
# geom_point( aes(x=MR, y=n, color=Specieagg), size=5 ) +
# geom_text(color="black", size=3)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc") +
# theme(
# legend.position = "none",
# panel.border = element_blank(),
# panel.spacing = unit(0.1, "lines"),
# strip.text.x = element_text(size = 10, face = "bold"),
# axis.text.y=element_text(size=10),
# axis.title.x = element_text(size = 16),
# plot.title = element_text(color = "blue", face = "bold"),
# plot.caption = element_text(color = "blue", face = "italic", size=7),
# plot.subtitle = element_text(size=10)
#
#
# ) +
#
# labs(y="n.ceppi",x="",
# title="Ruolo della fauna selvatica nella diffusione e mantenimento dell'antibiotico-resistenza (PRC2016020-IZSLER)-risultati preliminari",
# subtitle = "Distribuzione di 615 ceppi di E.coli, per specie e grado di resistenza ad un pannello di antibiotici (COLISTINA, CEFTIOFUR,KANAMICINA,ENROFLOXACIN, TETRACICLINA, AMPICILLINA)",
# caption="S= Suscettibile, R= Resistente fino a due antibiotici, MR=Multiresistente
#
# Cervidi: Capriolo, Cervo;
# sCaprine: Camoscio,Stambecco, Muflone;
# U.acquatici: Cigno, Gabbiano, Cormorano, Germano;
# Altri Vol: Piccione, Starna, Fagiano")+
# ggforce::facet_col(~Specieagg, scales="free_y", space="free")
#
# mr<-d%>%
# select(Specieagg, MR) %>%
# group_by(Specieagg, MR) %>%
# drop_na(MR) %>%
# tally() %>%
# pivot_wider(names_from = MR, values_from = n) %>%
# mutate(N=S+R+MR) %>%
# data.frame()# %>%
#pivot_longer(cols = 2:4, names_to = "gruppo")
# mod<-brm(data = mr, family = binomial,
# value | trials(N) ~ 1 + gruppo ,
# prior = c(prior(normal(0, 10), class = Intercept),
# prior(normal(0, 10), class = b)),
# iter = 2500, warmup = 500, cores = 2, chains = 2,
# seed = 10)
# s<-32
# n<-363
#
# shape1<-s+1
# shape2<-n-(s+1)
# x <- seq(0,1,length=500)
# #xp<-seq(from=0, to=1, by=0.01)
# beta=dbeta(x,shape1 = shape1, shape2 = shape2)
# df <- data.frame(x,beta)
# ggplot(df, aes(x=x, y=beta))+
# geom_line(col="blue")#+#theme_bw(16, "serif")+
# #scale_y_continuous(expand=c(0, 0))+
# # labs(x="Probability", y="Density",
# # title=paste("Beta distribution (n=",input$n2,";","s=", input$s2,")"))+
# # theme(plot.title = element_text(size = rel(1), vjust = 1.5))
# options(digits=2)
# Shpd <- binom.bayes(
# x = mr$S, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Shpd<- cbind("Specieagg"=mr[, 1], Shpd[,6:8], rep("S",9))
# names(Shpd)[2:5]<-c("m", "low", "hig", "gruppo")
# x<-binom.bayes.densityplot(Shpd,
# fill.central = "steelblue",
# fill.lower = "lightgray",
# fill.upper = "lightgray")+
# theme_ipsum_rc(axis_title_just = "mc")
# Rhpd <- binom.bayes(
# x = mr$R, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# Rhpd<- cbind("Specieagg"=mr[, 1], Rhpd[,6:8], rep("R",9))
# names(Rhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# mRhpd <- binom.bayes(
# x = mr$MR, n = mr$N, type = "highest", conf.level = 0.95, tol = 1e-9)
# mRhpd<- cbind("Specieagg"=mr[, 1], mRhpd[,6:8], rep("MR",9))
# names(mRhpd)[2:5]<-c("m", "low", "hig", "gruppo")
#
# MR<-rbind(Shpd, Rhpd, mRhpd)
#
# MR %>%
# pivot_longer(cols=2:4, names_to = "par") #%>%
# MR %>%
# ggplot(aes(x=m, y=Specieagg))+geom_point()+
# geom_segment(aes(x=low,
# xend=hig,
# y=Specieagg,
# yend=Specieagg))+
# facet_wrap(~gruppo)
# MR %>%
# #arrange(m) %>%
# ggplot( aes(y=m,ymin=low, ymax=hig, x=Specieagg))+
# geom_point(color="blue", size=2)+geom_linerange(color="blue", size=.8)+
# facet_wrap(~gruppo)+coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(x="", y="Prevalenza")
####CORRESPONDENCE ANALYSIS #
# mr<-mr %>%
# column_to_rownames(var="Specieagg") %>%
# select(-4)
#
#
# dt <- as.table(as.matrix(mr[-4]))
# res.ca <- CA(mr, graph = FALSE)
# fviz_ca_biplot(res.ca, repel = TRUE)
# balloonplot(t(dt), main =" ", xlab ="", ylab="",
# label = FALSE, show.margins = FALSE)
# fviz_screeplot(res.ca, addlabels = TRUE, ylim = c(0, 50))
# fviz_screeplot(res.ca) +
# geom_hline(yintercept=33.33, linetype=2, color="red")
#
# fviz_ca_biplot(res.ca,
# map ="rowprincipal", arrow = c(TRUE, TRUE),
# repel = TRUE)
# fviz_ca_biplot(res.ca, map ="colgreen", arrow = c(TRUE, FALSE),
# repel = TRUE)
#######PROFILI MULTIRESISTENZA
# amr <- d
# amr<-amr %>%
# dplyr::select(-c(15,18:19))
# amr$colistina<-ifelse(amr$colistina=='R', 'COL',0)
# amr$ceftiofur<-ifelse(amr$ceftiofur=='R', 'CFT',0)
# #amr$tilmicosina<-ifelse(amr$tilmicosina=='R', 'TIL',0)
# amr$kanamicina<-ifelse(amr$kanamicina=='R', 'KAN',0)
# amr$enrofloxacin<-ifelse(amr$enrofloxacin=='R', 'ENR',0)
# #amr$oxacillina<-ifelse(amr$oxacillina=='R', 'OXA',0)
# #amr$eritromicina<-ifelse(amr$eritromicina=='R', 'ERT',0)
# amr$gentamicina<-ifelse(amr$gentamicina=='R', 'GEN',0)
# amr$tetraciclina<-ifelse(amr$tetraciclina=='R', 'TET',0)
# amr$ampicillina<-ifelse(amr$ampicillina=='R', 'AMP',0)
#
# #write.table(amr, file="amrxx.csv")
#
# amr[,13:19]<-amr[,13:19] != 0
# nomi_abb<-toupper(abbreviate(names(amr)[13:19]))
# X<- apply(amr[, 13:19], 1, function(x) nomi_abb[x])
# XX<-lapply(X, paste, collapse="-")
#
# amr$profilo<-unlist(XX)
#
#
#
# png("fig3.png", height = 550, width = 600)
# amr %>%
# filter(!profilo %in% c("NA-NA-NA-NA-NA-NA-NA", "" )) %>%
# #filter(identificazione=="E.coli") %>%
# group_by(profilo) %>%
# dplyr::summarise(n=n()) %>%
# arrange(n) %>%
# top_n(10, n) %>%
# mutate(profilo = factor(profilo, unique(profilo))) %>%
# #ggplot(aes(x=profilo, y=n))+geom_bar(stat = "identity")+coord_flip()
# ggplot(aes(x=profilo, y=n, label=n))+
# geom_segment( aes(x=profilo, xend=profilo, y=0, yend=n), color="grey")+
# geom_point( aes(x=profilo, y=n), size=8.4, color="lightblue" )+
# geom_text(color="black", size=4)+
# coord_flip()+
# theme_ipsum_rc(axis_title_just = "mc")+
# labs(y="n.ceppi",x="Profilo di resistenza/multiresistenza")
# dev.off()
###### Bayesian regression of Isolate ARindex
##clean dataframe
#d2<-d2 %>%
#dplyr::select(1,2,3,7,12,13:19,20:39)
### ARindex is a proportion derived from counts (number of resistances / number of antibiotic tests
# performed), so it can be analysed with aggregated-data (binomial) logistic regression models...
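# Editor's sketch: in aggregated form this is, e.g. (frequentist version, assuming the d2 columns
# created above),
# glm(cbind(res, nab - res) ~ Specieagg, family = binomial, data = d2)
# while the commented brms calls below express the same likelihood as res | trials(nab) ~ ... .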
###aggregate bayes logistic regression brms###
# mod0 <-brm(data=d2, family = binomial,
# res|trials(nab)~1)
#
# mod1 <-brm(data=d2, family = binomial,res|trials(nab)~1+Specieagg)
# mod1 <-brm(data=d2, family = binomial,res|trials(nab)~)
#
# mod2 <-brm(data=d2, family = binomial,res|trials(nab)~Specieagg+urb+hapasc)
#
# pp<-brms::pp_check(mod0)
#
# ###betaregression
#
# beta0<-brm(data=d2, family=zero_one_inflated_beta(),
# ARi~1)
#
# beta1<-brm(data=d2, family=zero_one_inflated_beta(),
# ARi~Specieagg)
#
# beta2<-brm(data=d2, family=zero_one_inflated_beta(),
# ARi~Specieagg+urb)
#
# pp<-brms::pp_check(mod0)
#
# library(GGally)
# ggpairs(d2[,13:30])
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ft_providers.R
\name{ft_providers}
\alias{ft_providers}
\title{Search for information on journals or publishers.}
\usage{
ft_providers(journal = NULL, publisher = NULL, limit = 10, ...)
}
\arguments{
\item{journal}{Query terms}
\item{publisher}{Source to query}
\item{limit}{Number of records to return.}
\item{...}{Further args passed on to \code{\link[httr]{GET}}}
}
\value{
An object of class ft_p
}
\description{
Search for information on journals or publishers.
}
\examples{
\dontrun{
# journal name search
ft_providers(journal="Stem Cells International")
ft_providers(publisher="hindawi")
ft_providers(publisher="journal")
}
}
|
/man/ft_providers.Rd
|
permissive
|
emhart/fulltext
|
R
| false | false | 723 |
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ft_providers.R
\name{ft_providers}
\alias{ft_providers}
\title{Search for information on journals or publishers.}
\usage{
ft_providers(journal = NULL, publisher = NULL, limit = 10, ...)
}
\arguments{
\item{journal}{Query terms}
\item{publisher}{Source to query}
\item{limit}{Number of records to return.}
\item{...}{Further args passed on to \code{\link[httr]{GET}}}
}
\value{
An object of class ft_p
}
\description{
Search for information on journals or publishers.
}
\examples{
\dontrun{
# journal name search
ft_providers(journal="Stem Cells International")
ft_providers(publisher="hindawi")
ft_providers(publisher="journal")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fit.R
\name{fitPlp}
\alias{fitPlp}
\title{fitPlp}
\usage{
fitPlp(trainData, modelSettings, search = "grid", analysisId)
}
\arguments{
\item{trainData}{An object of type \code{TrainData} created using \code{splitData}
data extracted from the CDM.}
\item{modelSettings}{An object of class \code{modelSettings} created using one of the functions:
\itemize{
\item{logisticRegressionModel()}{ A lasso logistic regression model}
\item{GBMclassifier()}{ A gradient boosting machine}
\item{RFclassifier()}{ A random forest model}
\item{GLMclassifier ()}{ A generalised linear model}
\item{KNNclassifier()}{ A KNN model}
}}
\item{search}{The search strategy for the hyper-parameter selection (currently not used)}
\item{analysisId}{The id of the analysis}
}
\value{
An object of class \code{plpModel} containing:
\item{model}{The trained prediction model}
\item{modelLoc}{The path to where the model is saved (if saved)}
\item{trainAuc}{The AUC obtained on the training set}
\item{trainCalibration}{The calibration obtained on the training set}
\item{modelSettings}{A list specifying the model, preprocessing, outcomeId and cohortId}
\item{metaData}{The model meta data}
\item{trainingTime}{The time taken to train the classifier}
}
\description{
Train various models using a default parameter grid search or user-specified parameters
}
\details{
The user can define the machine learning model to train (e.g. regularised logistic regression, random forest,
gradient boosting machine or neural network).
}
|
/man/fitPlp.Rd
|
permissive
|
anthonysena/PatientLevelPrediction
|
R
| false | true | 1,575 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fit.R
\name{fitPlp}
\alias{fitPlp}
\title{fitPlp}
\usage{
fitPlp(trainData, modelSettings, search = "grid", analysisId)
}
\arguments{
\item{trainData}{An object of type \code{TrainData} created using \code{splitData}
data extracted from the CDM.}
\item{modelSettings}{An object of class \code{modelSettings} created using one of the functions:
\itemize{
\item{logisticRegressionModel()}{ A lasso logistic regression model}
\item{GBMclassifier()}{ A gradient boosting machine}
\item{RFclassifier()}{ A random forest model}
\item{GLMclassifier ()}{ A generalised linear model}
\item{KNNclassifier()}{ A KNN model}
}}
\item{search}{The search strategy for the hyper-parameter selection (currently not used)}
\item{analysisId}{The id of the analysis}
}
\value{
An object of class \code{plpModel} containing:
\item{model}{The trained prediction model}
\item{modelLoc}{The path to where the model is saved (if saved)}
\item{trainAuc}{The AUC obtained on the training set}
\item{trainCalibration}{The calibration obtained on the training set}
\item{modelSettings}{A list specifying the model, preprocessing, outcomeId and cohortId}
\item{metaData}{The model meta data}
\item{trainingTime}{The time taken to train the classifier}
}
\description{
Train various models using a default parameter grid search or user-specified parameters
}
\details{
The user can define the machine learning model to train (e.g. regularised logistic regression, random forest,
gradient boosting machine or neural network).
}
|
# UI side of the APP
navbarPage("Les ODD dans mon territoire",
# PREMIERE PAGE
tabPanel(
"Accueil-Quiz",
tags$head(tags$style("* { font-family: Arial; line-height:1.3em};.h4 {line-height:1.3em}")),
fluidRow(
column(1),
column(6,tags$img(src="logos.png", width="100%"),class="col-xs-12")
),
fluidRow(
column(1),
column(9,
# TITRES ET PARAGRAPHE D'INTRO
includeHTML("intro.html"),
# SELECTION DEPARTEMENT COMMUNE
uiOutput("departement"),
uiOutput("commune"),
actionButton("validate_choice", "Valider"),
# FORM
conditionalPanel(condition = "input.validate_choice",
br(),
tags$head(tags$style("
#epci_text{
display:inline
}")),
h4("Votre territoire (EPCI) est : ", tags$b(textOutput("epci_text")), style="display:inline"),
br(),
h4("Par rapport à votre département..."),
lapply(1:length(QUESTION[["Question.QUIZ"]]), function(i){
radio_button <- paste("radio_button_", i, sep="")
radio_img <- paste("radio_img_", i, sep="")
fluidRow(column(3,align = "center",uiOutput(radio_img), br()),
column(5,uiOutput(radio_button)))
}),
br(),
actionButton("submitBtn", "Valider"),
actionButton("refresh", "Réessayer le quiz")
),
# WHEEL
conditionalPanel(condition = "input.submitBtn",
h4(textOutput("right_answers")),
br(),
hr()),
h3(textOutput("wheel_title")),
h4(textOutput("wheel_legend")),
conditionalPanel(condition = "input.submitBtn",
div(tags$img(src="green_tile.png", height="20px"),"Votre territoire est mieux positionné que votre département pour contribuer à l’ODD"),
div(tags$img(src="red_tile.png", height="20px"),"Votre territoire est moins bien positionné que votre département pour contribuer à l’ODD")
),
# Size of the wheel
tags$head(tags$script('
var dimension = [0, 0];
$(document).on("shiny:connected", function(e) {
dimension[0] = window.innerWidth;
dimension[1] = window.innerHeight;
Shiny.onInputChange("dimension", dimension);
});
$(window).resize(function(e) {
dimension[0] = window.innerWidth;
dimension[1] = window.innerHeight;
Shiny.onInputChange("dimension", dimension);
});
')),
divwheelnavOutput("nav_output")
)
)
),
# Styling nav bar
tags$head(
tags$style(HTML("
.navbar-default .navbar-brand {background-color: #FFFFFF; color: black}
.navbar-default {background-color: #FFFFFF ;}
"))
),
# DEUXIEME PAGE
tabPanel("Les indicateurs ODD de mon territoire",
column(8, tags$img(src="logo.svg", width="70%")),
# CHOIX DEPARTEMENT
column(
12, align="left", br(), h3("Portrait de territoire - Tous les indicateurs ODD de mon territoire"),
h4(style = "line-height:1.3em","Sélectionnez votre territoire et cliquez sur chaque bloc ODD pour visualiser le positionnement de votre territoire
par rapport aux échelles territoriales supérieures (département, région, France métropolitaine).", br(),br(), "Un indicateur peut être relié à plusieurs ODD.", br(),br()
)),
uiOutput("departement_2"),
uiOutput("commune_2"),
tags$style(type="text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"),
tags$head(tags$style("#epci_text_2{display:inline}")),
# LOGOS
h4("Votre territoire (EPCI) est : ", tags$b(textOutput("epci_text_2")), style="display:inline"),
br(),
lapply(1:17, function(i){
odd <- paste("ODD ", i, sep="")
id_button <- paste("ODD_button_graph", i, sep="")
odd_image <- paste("ODD", i,".png", sep="")
tags$button(
id = id_button,
class = "btn action-button",
img(src = odd_image,
height = "125px"),
tags$style(type = 'text/css',
                              HTML('.action-button { background-color: transparent; } .action-button.active { color: #555; background-color: green; }'))
)
}
),
column(
12, align="left", br(),h4(style = "line-height:1.3em","Pour afficher l'ensemble des indicateurs, cliquez sur la roue (le chargement peut être long)",
tags$button(
id="wheel_small_button",
class = "btn action-button",
img(src="wheel_to_text.png", height="50px")
))),
# TEXTE
useShinyjs(),
column(12, align="center", h3(textOutput("text_graph"), br(), style="display: block; margin-left: auto; margin-right: auto;")),
# ONE ODD
uiOutput("cur_odd"),
h3(textOutput("no_indicateur"), align="center"),
# ALL ODD
lapply(1:17, function(cur_odd){
odd_all_cur <- paste("odd_all_cur", cur_odd, sep="")
odd_main_text <- paste("odd_main_text", cur_odd, sep="")
fluidRow(column(12, align="center", h3(textOutput(odd_main_text), br(), style="display: block; margin-left: auto; margin-right: auto;")),
uiOutput(odd_all_cur))
})
),
# DEBUT TROISIEME PAGE
tabPanel(
div(tags$img(src="wheel_to_text.png", height="20px"),"Les ODD, qu'est-ce que c'est ?"),
column(8, tags$img(src="logo.svg", width="70%")),
column(12, style = "font-size: 130%", br(), TEXT_ODD_PRES_1,
br(), br(), TEXT_ODD_PRES_2, br(), br(),
tags$b("Rendez-vous sur le site des ODD pour la France :"),
a("https://www.agenda-2030.fr", href="https://www.agenda-2030.fr"), br(), br(),
tags$ul(
tags$li(a("Présentation : origines et principes", href="https://www.agenda-2030.fr/agenda2030/presentation-principes-specificites-origines-18",target="_blank",rel="noopener noreferrer")),
tags$li(a("Les indicateurs de suivi des Objectifs de développement durable", href="https://www.agenda-2030.fr/agenda2030/dispositif-de-suivi-les-indicateurs-19",target="_blank",rel="noopener noreferrer")),
tags$li(a("Situation et organisation de la mise en œuvre en France", href="https://www.agenda-2030.fr/agenda2030/situation-de-la-france-21",target="_blank",rel="noopener noreferrer")),
tags$li(a("Mobilisation des acteurs", href="https://www.agenda-2030.fr/agenda2030/mobilisation-des-acteurs-non-etatiques-en-france-40",target="_blank",rel="noopener noreferrer")),
tags$li(a("En Europe et à l'international", href="https://www.agenda-2030.fr/agenda2030/en-europe-et-linternational-22",target="_blank",rel="noopener noreferrer"))
),
br(), br(),
"Les ODD sur le site de l'ONU :", tags$a(href="https://www.un.org/sustainabledevelopment/fr/objectifs-de-developpement-durable/",
"https://www.un.org/sustainabledevelopment/fr/objectifs-de-developpement-durable/",target="_blank",rel="noopener noreferrer"),
br(), br()
),
# LES 17 LOGOS
lapply(1:17, function(i) {
odd <- paste("ODD ", i, sep="")
id_button <- paste("ODD_button_", i, sep="")
odd_image <- paste("ODD", i,".png", sep="")
tags$button(
id = id_button,
class = "btn action-button",
img(src = odd_image,
height = "125px"),
style="background-color: transparent")
}
),
fluidRow(
column(width = 4, align = "center", br(), imageOutput("third_image")),
column(width = 7, style = "font-size: 150%;margin-top:30px",
h4(textOutput("text_odd"),
style="font-size: 150%"),
textOutput("subtext_odd"), br(),
textOutput("third_text_with_link"),
uiOutput("third_link"),br())
)
),
tabPanel("Mentions Légales",
column(1),
column(10,htmlOutput("mentions")),
column(1))
)
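## Added note (not part of the original ui.r): the textOutput()/uiOutput() ids referenced above
## ("epci_text_2", "text_graph", "cur_odd", "mentions", ...) are expected to be filled by a matching
## server function. The snippet below is only a hypothetical sketch of that contract; the app's real
## server.R is not shown here, and the input id and file name used are assumptions.
# server <- function(input, output, session) {
#   output$epci_text_2 <- renderText({ paste("EPCI de", input$commune_2) })  # input id assumed
#   output$mentions    <- renderUI({ includeHTML("mentions.html") })         # file name assumed
# }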
|
/ui.r
|
no_license
|
manderhalt/dreal_odd
|
R
| false | false | 8,898 |
r
|
# UI side of the APP
navbarPage("Les ODD dans mon territoire",
             # FIRST PAGE
tabPanel(
"Accueil-Quiz",
tags$head(tags$style("* { font-family: Arial; line-height:1.3em};.h4 {line-height:1.3em}")),
fluidRow(
column(1),
column(6,tags$img(src="logos.png", width="100%"),class="col-xs-12")
),
fluidRow(
column(1),
column(9,
                    # TITLES AND INTRO PARAGRAPH
includeHTML("intro.html"),
                    # DEPARTMENT AND MUNICIPALITY SELECTION
uiOutput("departement"),
uiOutput("commune"),
actionButton("validate_choice", "Valider"),
# FORM
conditionalPanel(condition = "input.validate_choice",
br(),
tags$head(tags$style("
#epci_text{
display:inline
}")),
h4("Votre territoire (EPCI) est : ", tags$b(textOutput("epci_text")), style="display:inline"),
br(),
h4("Par rapport à votre département..."),
lapply(1:length(QUESTION[["Question.QUIZ"]]), function(i){
radio_button <- paste("radio_button_", i, sep="")
radio_img <- paste("radio_img_", i, sep="")
fluidRow(column(3,align = "center",uiOutput(radio_img), br()),
column(5,uiOutput(radio_button)))
}),
br(),
actionButton("submitBtn", "Valider"),
actionButton("refresh", "Réessayer le quiz")
),
# WHEEL
conditionalPanel(condition = "input.submitBtn",
h4(textOutput("right_answers")),
br(),
hr()),
h3(textOutput("wheel_title")),
h4(textOutput("wheel_legend")),
conditionalPanel(condition = "input.submitBtn",
div(tags$img(src="green_tile.png", height="20px"),"Votre territoire est mieux positionné que votre département pour contribuer à l’ODD"),
div(tags$img(src="red_tile.png", height="20px"),"Votre territoire est moins bien positionné que votre département pour contribuer à l’ODD")
),
# Size of the wheel
tags$head(tags$script('
var dimension = [0, 0];
$(document).on("shiny:connected", function(e) {
dimension[0] = window.innerWidth;
dimension[1] = window.innerHeight;
Shiny.onInputChange("dimension", dimension);
});
$(window).resize(function(e) {
dimension[0] = window.innerWidth;
dimension[1] = window.innerHeight;
Shiny.onInputChange("dimension", dimension);
});
')),
divwheelnavOutput("nav_output")
)
)
),
# Styling nav bar
tags$head(
tags$style(HTML("
.navbar-default .navbar-brand {background-color: #FFFFFF; color: black}
.navbar-default {background-color: #FFFFFF ;}
"))
),
           # SECOND PAGE
tabPanel("Les indicateurs ODD de mon territoire",
column(8, tags$img(src="logo.svg", width="70%")),
             # DEPARTMENT CHOICE
column(
12, align="left", br(), h3("Portrait de territoire - Tous les indicateurs ODD de mon territoire"),
h4(style = "line-height:1.3em","Sélectionnez votre territoire et cliquez sur chaque bloc ODD pour visualiser le positionnement de votre territoire
par rapport aux échelles territoriales supérieures (département, région, France métropolitaine).", br(),br(), "Un indicateur peut être relié à plusieurs ODD.", br(),br()
)),
uiOutput("departement_2"),
uiOutput("commune_2"),
tags$style(type="text/css",
".shiny-output-error { visibility: hidden; }",
".shiny-output-error:before { visibility: hidden; }"),
tags$head(tags$style("#epci_text_2{display:inline}")),
# LOGOS
h4("Votre territoire (EPCI) est : ", tags$b(textOutput("epci_text_2")), style="display:inline"),
br(),
lapply(1:17, function(i){
odd <- paste("ODD ", i, sep="")
id_button <- paste("ODD_button_graph", i, sep="")
odd_image <- paste("ODD", i,".png", sep="")
tags$button(
id = id_button,
class = "btn action-button",
img(src = odd_image,
height = "125px"),
tags$style(type = 'text/css',
HTML('.action-button { background-color: transparent;.active {color: #555;background-color: green;};}'))
)
}
),
column(
12, align="left", br(),h4(style = "line-height:1.3em","Pour afficher l'ensemble des indicateurs, cliquez sur la roue (le chargement peut être long)",
tags$button(
id="wheel_small_button",
class = "btn action-button",
img(src="wheel_to_text.png", height="50px")
))),
             # TEXT
useShinyjs(),
column(12, align="center", h3(textOutput("text_graph"), br(), style="display: block; margin-left: auto; margin-right: auto;")),
# ONE ODD
uiOutput("cur_odd"),
h3(textOutput("no_indicateur"), align="center"),
# ALL ODD
lapply(1:17, function(cur_odd){
odd_all_cur <- paste("odd_all_cur", cur_odd, sep="")
odd_main_text <- paste("odd_main_text", cur_odd, sep="")
fluidRow(column(12, align="center", h3(textOutput(odd_main_text), br(), style="display: block; margin-left: auto; margin-right: auto;")),
uiOutput(odd_all_cur))
})
),
           # START OF THIRD PAGE
tabPanel(
div(tags$img(src="wheel_to_text.png", height="20px"),"Les ODD, qu'est-ce que c'est ?"),
column(8, tags$img(src="logo.svg", width="70%")),
column(12, style = "font-size: 130%", br(), TEXT_ODD_PRES_1,
br(), br(), TEXT_ODD_PRES_2, br(), br(),
tags$b("Rendez-vous sur le site des ODD pour la France :"),
a("https://www.agenda-2030.fr", href="https://www.agenda-2030.fr"), br(), br(),
tags$ul(
tags$li(a("Présentation : origines et principes", href="https://www.agenda-2030.fr/agenda2030/presentation-principes-specificites-origines-18",target="_blank",rel="noopener noreferrer")),
tags$li(a("Les indicateurs de suivi des Objectifs de développement durable", href="https://www.agenda-2030.fr/agenda2030/dispositif-de-suivi-les-indicateurs-19",target="_blank",rel="noopener noreferrer")),
tags$li(a("Situation et organisation de la mise en œuvre en France", href="https://www.agenda-2030.fr/agenda2030/situation-de-la-france-21",target="_blank",rel="noopener noreferrer")),
tags$li(a("Mobilisation des acteurs", href="https://www.agenda-2030.fr/agenda2030/mobilisation-des-acteurs-non-etatiques-en-france-40",target="_blank",rel="noopener noreferrer")),
tags$li(a("En Europe et à l'international", href="https://www.agenda-2030.fr/agenda2030/en-europe-et-linternational-22",target="_blank",rel="noopener noreferrer"))
),
br(), br(),
"Les ODD sur le site de l'ONU :", tags$a(href="https://www.un.org/sustainabledevelopment/fr/objectifs-de-developpement-durable/",
"https://www.un.org/sustainabledevelopment/fr/objectifs-de-developpement-durable/",target="_blank",rel="noopener noreferrer"),
br(), br()
),
           # THE 17 LOGOS
lapply(1:17, function(i) {
odd <- paste("ODD ", i, sep="")
id_button <- paste("ODD_button_", i, sep="")
odd_image <- paste("ODD", i,".png", sep="")
tags$button(
id = id_button,
class = "btn action-button",
img(src = odd_image,
height = "125px"),
style="background-color: transparent")
}
),
fluidRow(
column(width = 4, align = "center", br(), imageOutput("third_image")),
column(width = 7, style = "font-size: 150%;margin-top:30px",
h4(textOutput("text_odd"),
style="font-size: 150%"),
textOutput("subtext_odd"), br(),
textOutput("third_text_with_link"),
uiOutput("third_link"),br())
)
),
tabPanel("Mentions Légales",
column(1),
column(10,htmlOutput("mentions")),
column(1))
)
|
## Getting full dataset
data_read <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_read$Date <- as.Date(data_read$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_read, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_read)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 3
with(data, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
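## Hedged aside (added): dev.copy() after plotting on screen works, but an equivalent approach is to
## open the png device before drawing, which avoids copying the screen device; sketch only.
# png("plot3.png", width = 480, height = 480)
# ... same plot(), lines() and legend() calls as above ...
# dev.off()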
|
/plot3.R
|
no_license
|
summermeng/Exploratory-Data-Analysis-Course-Project-1
|
R
| false | false | 960 |
r
|
## Getting full dataset
data_read <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_read$Date <- as.Date(data_read$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_read, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_read)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 3
with(data, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/error_measures.R
\name{MAPE}
\alias{MAPE}
\title{MAPE}
\usage{
MAPE(y, y_hat)
}
\arguments{
\item{y}{is a vector of the actual values.}
\item{y_hat}{is a vector of the predictions.}
}
\description{
Calculates the MAPE value for predictions.
}
\details{
NOT tested yet!
}
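% Added example (assumption): the underlying R implementation is not shown in this file;
% this only exercises the documented signature MAPE(y, y_hat) with small illustrative vectors.
\examples{
y <- c(100, 200, 300)
y_hat <- c(110, 190, 330)
MAPE(y, y_hat)
}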
|
/Filips ML package/Filips.ML.package/man/MAPE.Rd
|
no_license
|
Filco306/ML-Implementations
|
R
| false | true | 350 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/error_measures.R
\name{MAPE}
\alias{MAPE}
\title{MAPE}
\usage{
MAPE(y, y_hat)
}
\arguments{
\item{y}{is a vector of the actual values.}
\item{y_hat}{is a vector of the predictions.}
}
\description{
Calculates the MAPE value for predictions.
}
\details{
NOT tested yet!
}
|
############ This is the select function #########
# It performs the genetic algorithm
#' select Function
#'
#' This function implements a genetic algorithm for variable selection in regression problems.
#'
#' @param X A n*p matrix of predictors.
#' @param Y A n*1 matrix of responses.
#' @param ObjectiveFunction An objective criterion/fitness function. Defaults to AIC.
#' @param Probs The probability of parents being selected. Defaults to Ranking.
#' @param P Population size for generation. Must be an even integer. Defaults to 2p.
#' @param Initialized A matrix initialized the population. Defaults to Initialize(p, P).
#' @param mu The mutation rate has to be a number between 0 and 1. Defaults to 1/p.
#' @param StopFunction A stop criterion. Defaults to Stop function.
#' @param IterationsMax The maximum number of iterations.
#' @param IterationsMin The minimum number of iterations. Defaults to half of IterationsMax.
#' @param nCores Number of cores used for parallelization. Defaults to 1.
#' @return A list with the elements
#' \item{FinalGeneration}{The generation of the last iteration}
#' \item{FittestInd}{The fittest individual of the the last iteration}
#' @export
#' @examples
#' X <- matrix(rnorm(30 * 100), ncol = 30)
#' beta <- 100 * rnorm(10)
#'
#' # We randomly select 10 columns of X (out of 30) as covariates
#' select <- sample(1:ncol(X), size = 10, replace = F)
#'
#' # Y is X*beta plus some noise
#' Y <- X[,select] %*% matrix(beta, ncol = 1) + rnorm(100)
#'
#' # Apply the genetic algorithm with default coefficients
#' beta1 <- select(X, Y, IterationsMax = 200)
#'
#' # Check the fit
#' sum(lm(Y ~X [,select])$residuals^2)
#' sum(lm(Y ~X [,1:ncol(X)*beta1$FittestInd])$residuals^2)
select <- function(X, Y, ObjectiveFunction = AIC, Probs = Ranking,
P = 2 * ncol(X), Initialized = Initialize(ncol(X), P),
mu = 1 / ncol(X), StopFunction = Stop, IterationsMax,
IterationsMin = IterationsMax / 2, nCores = 1, ...){
# Adaptive, if Y is a vector instead of a matrix
Y <- matrix(Y)
# Lots of checks on the variables
Input(X, Y, ObjectiveFunction, Probs, P,
Initialize, mu, Stop, IterationsMax, nCores)
#Initialize the population
Gen <- Initialized
FitnessGen <- ObjectiveFunction(X, Y, Gen, nCores)
  # Initialize the stopping criteria
i <- 0
Stopping <- FALSE
# Run the algorithm
while(i < IterationsMax & !Stopping){
# Span the next generation
NewGen <- NextGen(LastGen = Gen, X, Y,
FitnessLastGen = FitnessGen, Probs, mu)
# Check if the algorithm stops there
FitnessNewGen <- ObjectiveFunction(X, Y, NewGen, nCores)
if(i > IterationsMin){
      Stopping <- StopFunction(FitnessLastGen = FitnessGen, FitnessNewGen)
}
i <- i + 1
Gen <- NewGen
FitnessGen <- FitnessNewGen
}
# Return the fittest individual in the population
return( list("FinalGeneration" = Gen,
"FittestInd" = Gen[which.min(FitnessGen), ] ))
}
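## Hypothetical sketch (added): Ranking, Initialize, NextGen, Input and Stop are package-internal
## helpers not shown in this file. For orientation only, a plausible Stop criterion could compare
## the best fitness of consecutive generations; this is an assumption, not the package's code.
# Stop_sketch <- function(FitnessLastGen, FitnessNewGen, tol = 1e-8) {
#   abs(min(FitnessNewGen) - min(FitnessLastGen)) < tol
# }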
|
/R/select.R
|
no_license
|
yanrongmu/GA
|
R
| false | false | 3,004 |
r
|
############ This is the select function #########
# It performs the genetic algorithm
#' select Function
#'
#' This function implements a genetic algorithm for variable selection in regression problems.
#'
#' @param X A n*p matrix of predictors.
#' @param Y A n*1 matrix of responses.
#' @param ObjectiveFunction An objective criterion/fitness function. Defaults to AIC.
#' @param Probs The probability of parents being selected. Defaults to Ranking.
#' @param P Population size for generation. Must be an even integer. Defaults to 2p.
#' @param Initialized A matrix initialized the population. Defaults to Initialize(p, P).
#' @param mu The mutation rate has to be a number between 0 and 1. Defaults to 1/p.
#' @param StopFunction A stop criterion. Defaults to Stop function.
#' @param IterationsMax The maximum number of iterations.
#' @param IterationsMin The minimum number of iterations. Defaults to half of IterationsMax.
#' @param nCores Number of cores used for parallelization. Defaults to 1.
#' @return A list with the elements
#' \item{FinalGeneration}{The generation of the last iteration}
#' \item{FittestInd}{The fittest individual of the the last iteration}
#' @export
#' @examples
#' X <- matrix(rnorm(30 * 100), ncol = 30)
#' beta <- 100 * rnorm(10)
#'
#' # We randomly select 10 columns of X (out of 30) as covariates
#' select <- sample(1:ncol(X), size = 10, replace = F)
#'
#' # Y is X*beta plus some noise
#' Y <- X[,select] %*% matrix(beta, ncol = 1) + rnorm(100)
#'
#' # Apply the genetic algorithm with default coefficients
#' beta1 <- select(X, Y, IterationsMax = 200)
#'
#' # Check the fit
#' sum(lm(Y ~X [,select])$residuals^2)
#' sum(lm(Y ~X [,1:ncol(X)*beta1$FittestInd])$residuals^2)
select <- function(X, Y, ObjectiveFunction = AIC, Probs = Ranking,
P = 2 * ncol(X), Initialized = Initialize(ncol(X), P),
mu = 1 / ncol(X), StopFunction = Stop, IterationsMax,
IterationsMin = IterationsMax / 2, nCores = 1, ...){
# Adaptive, if Y is a vector instead of a matrix
Y <- matrix(Y)
# Lots of checks on the variables
Input(X, Y, ObjectiveFunction, Probs, P,
Initialize, mu, Stop, IterationsMax, nCores)
#Initialize the population
Gen <- Initialized
FitnessGen <- ObjectiveFunction(X, Y, Gen, nCores)
  # Initialize the stopping criteria
i <- 0
Stopping <- FALSE
# Run the algorithm
while(i < IterationsMax & !Stopping){
# Span the next generation
NewGen <- NextGen(LastGen = Gen, X, Y,
FitnessLastGen = FitnessGen, Probs, mu)
# Check if the algorithm stops there
FitnessNewGen <- ObjectiveFunction(X, Y, NewGen, nCores)
if(i > IterationsMin){
      Stopping <- StopFunction(FitnessLastGen = FitnessGen, FitnessNewGen)
}
i <- i + 1
Gen <- NewGen
FitnessGen <- FitnessNewGen
}
# Return the fittest individual in the population
return( list("FinalGeneration" = Gen,
"FittestInd" = Gen[which.min(FitnessGen), ] ))
}
|
## This script contains two functions:
## makeCacheMatrix is a function to create a special "matrix" object that can cache its inverse
## cacheSolve is a function that computes the inverse of the matrix if it has not been calculated; otherwise it retrieves the cached value
##
##-----------------------------------------------------------
## makeCacheMatrix
## Creates a special "matrix" which is really a list that:
##
## 1) sets the value of the Matrix
## 2) gets the value of the Matrix
## 3) sets the value of the Inverse of the Matrix
## 4) gets the value of the Inverse of the Matrix
##
## Matrix is assumed to be always invertible
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set<-function(y){
x<<- y
i<<-NULL
}
get<-function() x
setInv<-function(solve) i <<- solve
getInv<-function() i
list(set=set, get=get, setInv=setInv, getInv=getInv)
}
##-----------------------------------------------------------
## CacheSolve
## Calculates the inverse of Matrix created with above function
##
## 1) check to see if inverse of matrix previously calculated
## 2) If yes, return previous calculated value
## 3) Otherwise, calculate inverse and return
##
## Matrix is assumed to be always invertible
##
cacheSolve <- function(x, ...) {
i<-x$getInv()
if(!is.null(i)) {
message("Getting Inverse of matrix from cache")
return(i)
}
data<-x$get()
i<-solve(data,...)
x$setInv(i)
i
}
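## Usage sketch (added): assumes an invertible matrix, as stated in the comments above.
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(m)   # computes the inverse and caches it
# cacheSolve(m)   # prints the message and returns the cached inverse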
|
/cachematrix.R
|
no_license
|
davebarron/ProgrammingAssignment2
|
R
| false | false | 1,395 |
r
|
## This script contains two functions:
## makeCacheMatrix is a function to create a special "matrix" object that can cache its inverse
## cacheSolve is a function that computes the inverse of the matrix if it has not been calculated; otherwise it retrieves the cached value
##
##-----------------------------------------------------------
## makeCacheMatrix
## Creates a special "matrix" which is really a list that:
##
## 1) sets the value of the Matrix
## 2) gets the value of the Matrix
## 3) sets the value of the Inverse of the Matrix
## 4) gets the value of the Inverse of the Matrix
##
## Matrix is assumed to be always invertible
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set<-function(y){
x<<- y
i<<-NULL
}
get<-function() x
setInv<-function(solve) i <<- solve
getInv<-function() i
list(set=set, get=get, setInv=setInv, getInv=getInv)
}
##-----------------------------------------------------------
## CacheSolve
## Calculates the inverse of Matrix created with above function
##
## 1) check to see if inverse of matrix previously calculated
## 2) If yes, return previous calculated value
## 3) Otherwise, calculate inverse and return
##
## Matrix is assumed to be always invertible
##
cacheSolve <- function(x, ...) {
i<-x$getInv()
if(!is.null(i)) {
message("Getting Inverse of matrix from cache")
return(i)
}
data<-x$get()
i<-solve(data,...)
x$setInv(i)
i
}
|
# Week 10: More ggplot2 and colors
# Goals:
# 1. Theme elements
# 2. Legends
# 3. Better colors
# 4. Manipulating datasets for ggplot
# Set working directory
#setwd("~/Dropbox/IntrotoREpi/data/")
# load/install relevant libraries
#install.packages("RColorBrewer")
#install.packages("reshape2)
library(dplyr)
library(tidyr)
library(reshape2)
library(ggplot2)
library(RColorBrewer)
library(gridExtra)
# load data
data(airquality)
##################
# Review
# Recall plot of airquality data from last week:
g1 <- ggplot(data = airquality, aes(x = Temp, y = Ozone)) + geom_point() +
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone")
g1
# Recall plot of airquality data from last week:
ggplot(data = airquality, aes(x = Temp, y = Ozone)) +
geom_point(size = 10, color = "red") +
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone")
##################
# Theme elements
# These control sizing, color etc. for things like axis labels, titles
# Syntax:
# theme(thing we want to change = theme attribute(attribute assigning))
# Changing the size of all titles
g1 + theme(title = element_text(size = 25))
# Changing the size of one title
g1 + theme(axis.title.x = element_text(size = 25))
g1 + theme(plot.title = element_text(size = 25))
# Changing the size of text and axes labels simultaneously
g1 + theme(text = element_text(size = 25))
# Changing the size of one axis
g1 + theme(axis.text.y = element_text(size = 25))
# Changing the size of lines (including tick marks)
g1
g1 + theme(line = element_line(size = 0.1))
g1 + theme(axis.ticks.y = element_line(size = 2))
# Getting rid of grey background
g1 + theme_bw()
# Removing things (e.g. text, titles, etc.) from your plot
g1 + theme(axis.text = element_blank())
# In-class exercise 1:
# Create a plot of ozone vs. temperature with no tick marks and no grid
g1 + theme(axis.ticks = element_blank(), panel.grid = element_blank())
#######
# Legends
airquality <- mutate(airquality, Month = factor(Month))
g1 <- ggplot(data = airquality, aes(x = Temp, y = Ozone, colour = Month)) +
geom_point() + ggtitle("Temperature vs. Ozone") + xlab("Temperature") +
ylab("Ozone")
g1
# Move legend
g1 + theme(legend.position = "top")
# Change title of legend
g1 + theme(legend.title = element_blank(), legend.position = "top")
# But those colors are ugly! I don't know what those numbers are!
labs1 <- c("May", "June", "July", "August", "September")
levels(factor(airquality$Month))
cols <- seq(1, 5)
g1 + scale_color_manual(labels = labs1, name = "", values = cols)
########
# Better colors using R Color Brewer
cols <- brewer.pal(5, "Dark2")
g1 + scale_color_manual(labels = labs1, values = cols)
### ggplot 2 has this built in!
g1 + scale_colour_brewer(palette = "Dark2")
# Continuous wind speed
# Specify sequential palette
cols <- brewer.pal(9, "Oranges")
# Usual plot with color for wind speed
g2 <- ggplot(data = airquality, aes(x = Temp, y = Ozone, colour = Wind)) +
geom_point() +
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone")
g2
# Change colors specifying low and high
g2 + scale_color_gradient(low = cols[1], high = cols[9]) + theme_bw()
# Using ggplot2 directly (no color bar)
g2 + scale_color_distiller(palette = "Oranges")
# Everything at once:
# Initialize plot
ggplot(data = airquality, aes(x = Temp, y = Ozone, colour = Month)) +
# Add points with shape = 2
geom_point(shape = 2) +
# Add titles
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone") +
# Change colours, labels for legend
scale_color_brewer(labels = labs1, palette = "Set1") +
# Remove title for legend, move legend to top
theme(legend.title = element_blank(), legend.position = "top") +
# Change size of all text
theme(text = element_text(size = 25))
######
# Manipulating datasets for ggplot: "tidy data"
# 1. Anything you want to "facet" by should be in one variable (multiple rows)
# 2. Anything you want to color, shape, etc by should be in multiple columns
data(Titanic)
mtitanic <- melt(Titanic)
head(mtitanic)
ggplot(data = mtitanic, aes(x = Class, y = value, fill = Survived)) +
geom_bar(stat = "identity") +
facet_grid(Sex ~ Age, scales = "free")
# Same plot if data aren't formatted nicely
wide <- spread(mtitanic, Sex, value)
head(wide)
# Now need to make separate plots for males and females
# You need to write more code!
gmale <- ggplot(data = wide, aes(x = Class, y = Male, fill = Survived)) +
geom_bar(stat = "identity") +
facet_grid(~ Age, scales = "free")
gfemale <- ggplot(data = wide, aes(x = Class, y = Female, fill = Survived)) +
geom_bar(stat = "identity") +
facet_grid(~ Age, scales = "free")
grid.arrange(gmale, gfemale)
# If variable is a column (instead of columns for each of age, chol, etc),
# can use ggplot without alteration
load("OR_df.RData")
head(OR_df)
# Set up data, x/y/ color
ggplot(OR_df, aes(x = Variable, y = OR, color = Variable)) +
#Add points for ORs
geom_point(size = 3, shape = 20) +
#Add error bars
geom_errorbar(aes(ymin = LB, ymax = UB), width = 0.3) +
#Add main and axes titles
ggtitle("Associations between covariates and diabetes") +
ylab("Odds ratio") + xlab("Covariates")
head(airquality)
g1 <- ggplot(airquality, aes(x = Ozone, fill = Month)) + geom_histogram()
g2 <- ggplot(airquality, aes(x = Solar.R, fill = Month)) + geom_histogram()
g3 <- ggplot(airquality, aes(x = Wind, fill = Month)) + geom_histogram()
g4 <- ggplot(airquality, aes(x = Temp, fill = Month)) + geom_histogram()
grid.arrange(g1, g2, g3, g4)
# In-class exercise 2:
# Set up a dataset called gaq to plot histograms of each variable
# (ozone, solar, wind, temperature)
# Hint: use gather function and specify arguments:
# 1. data, 2. name of column for variables, 3. name of column for values,
# 4. what variables do you want to gather (or not gather)
gaq <- gather(airquality, variable, value, -Month, -Day)
ggplot(gaq, aes(x = value, fill = Month)) + geom_histogram() +
facet_wrap( ~ variable, scales = "free")
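# Added illustration (optional, not part of the original exercise): the same long-format gaq
# also makes per-variable boxplots by month straightforward.
# ggplot(gaq, aes(x = Month, y = value, fill = Month)) + geom_boxplot() +
#   facet_wrap(~ variable, scales = "free_y")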
|
/IntrotoRepi/IntrotoR_week10.R
|
no_license
|
kralljr/kralljr.github.io
|
R
| false | false | 6,198 |
r
|
# Week 10: More ggplot2 and colors
# Goals:
# 1. Theme elements
# 2. Legends
# 3. Better colors
# 4. Manipulating datasets for ggplot
# Set working directory
#setwd("~/Dropbox/IntrotoREpi/data/")
# load/install relevant libraries
#install.packages("RColorBrewer")
#install.packages("reshape2)
library(dplyr)
library(tidyr)
library(reshape2)
library(ggplot2)
library(RColorBrewer)
library(gridExtra)
# load data
data(airquality)
##################
# Review
# Recall plot of airquality data from last week:
g1 <- ggplot(data = airquality, aes(x = Temp, y = Ozone)) + geom_point() +
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone")
g1
# Recall plot of airquality data from last week:
ggplot(data = airquality, aes(x = Temp, y = Ozone)) +
geom_point(size = 10, color = "red") +
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone")
##################
# Theme elements
# These control sizing, color etc. for things like axis labels, titles
# Syntax:
# theme(thing we want to change = theme attribute(attribute assigning))
# Changing the size of all titles
g1 + theme(title = element_text(size = 25))
# Changing the size of one title
g1 + theme(axis.title.x = element_text(size = 25))
g1 + theme(plot.title = element_text(size = 25))
# Changing the size of text and axes labels simultaneously
g1 + theme(text = element_text(size = 25))
# Changing the size of one axis
g1 + theme(axis.text.y = element_text(size = 25))
# Changing the size of lines (including tick marks)
g1
g1 + theme(line = element_line(size = 0.1))
g1 + theme(axis.ticks.y = element_line(size = 2))
# Getting rid of grey background
g1 + theme_bw()
# Removing things (e.g. text, titles, etc.) from your plot
g1 + theme(axis.text = element_blank())
# In-class exercise 1:
# Create a plot of ozone vs. temperature with no tick marks and no grid
g1 + theme(axis.ticks = element_blank(), panel.grid = element_blank())
#######
# Legends
airquality <- mutate(airquality, Month = factor(Month))
g1 <- ggplot(data = airquality, aes(x = Temp, y = Ozone, colour = Month)) +
geom_point() + ggtitle("Temperature vs. Ozone") + xlab("Temperature") +
ylab("Ozone")
g1
# Move legend
g1 + theme(legend.position = "top")
# Change title of legend
g1 + theme(legend.title = element_blank(), legend.position = "top")
# But those colors are ugly! I don't know what those numbers are!
labs1 <- c("May", "June", "July", "August", "September")
levels(factor(airquality$Month))
cols <- seq(1, 5)
g1 + scale_color_manual(labels = labs1, name = "", values = cols)
########
# Better colors using R Color Brewer
cols <- brewer.pal(5, "Dark2")
g1 + scale_color_manual(labels = labs1, values = cols)
### ggplot 2 has this built in!
g1 + scale_colour_brewer(palette = "Dark2")
# Continuous wind speed
# Specify sequential palette
cols <- brewer.pal(9, "Oranges")
# Usual plot with color for wind speed
g2 <- ggplot(data = airquality, aes(x = Temp, y = Ozone, colour = Wind)) +
geom_point() +
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone")
g2
# Change colors specifying low and high
g2 + scale_color_gradient(low = cols[1], high = cols[9]) + theme_bw()
# Using ggplot2 directly (no color bar)
g2 + scale_color_distiller(palette = "Oranges")
# Everything at once:
# Initialize plot
ggplot(data = airquality, aes(x = Temp, y = Ozone, colour = Month)) +
# Add points with shape = 2
geom_point(shape = 2) +
# Add titles
ggtitle("Temperature vs. Ozone") + xlab("Temperature") + ylab("Ozone") +
# Change colours, labels for legend
scale_color_brewer(labels = labs1, palette = "Set1") +
# Remove title for legend, move legend to top
theme(legend.title = element_blank(), legend.position = "top") +
# Change size of all text
theme(text = element_text(size = 25))
######
# Manipulating datasets for ggplot: "tidy data"
# 1. Anything you want to "facet" by should be in one variable (multiple rows)
# 2. Anything you want to color, shape, etc by should be in multiple columns
data(Titanic)
mtitanic <- melt(Titanic)
head(mtitanic)
ggplot(data = mtitanic, aes(x = Class, y = value, fill = Survived)) +
geom_bar(stat = "identity") +
facet_grid(Sex ~ Age, scales = "free")
# Same plot if data aren't formatted nicely
wide <- spread(mtitanic, Sex, value)
head(wide)
# Now need to make separate plots for males and females
# You need to write more code!
gmale <- ggplot(data = wide, aes(x = Class, y = Male, fill = Survived)) +
geom_bar(stat = "identity") +
facet_grid(~ Age, scales = "free")
gfemale <- ggplot(data = wide, aes(x = Class, y = Female, fill = Survived)) +
geom_bar(stat = "identity") +
facet_grid(~ Age, scales = "free")
grid.arrange(gmale, gfemale)
# If variable is a column (instead of columns for each of age, chol, etc),
# can use ggplot without alteration
load("OR_df.RData")
head(OR_df)
# Set up data, x/y/ color
ggplot(OR_df, aes(x = Variable, y = OR, color = Variable)) +
#Add points for ORs
geom_point(size = 3, shape = 20) +
#Add error bars
geom_errorbar(aes(ymin = LB, ymax = UB), width = 0.3) +
#Add main and axes titles
ggtitle("Associations between covariates and diabetes") +
ylab("Odds ratio") + xlab("Covariates")
head(airquality)
g1 <- ggplot(airquality, aes(x = Ozone, fill = Month)) + geom_histogram()
g2 <- ggplot(airquality, aes(x = Solar.R, fill = Month)) + geom_histogram()
g3 <- ggplot(airquality, aes(x = Wind, fill = Month)) + geom_histogram()
g4 <- ggplot(airquality, aes(x = Temp, fill = Month)) + geom_histogram()
grid.arrange(g1, g2, g3, g4)
# In-class exercise 2:
# Set up a dataset called gaq to plot histograms of each variable
# (ozone, solar, wind, temperature)
# Hint: use gather function and specify arguments:
# 1. data, 2. name of column for variables, 3. name of column for values,
# 4. what variables do you want to gather (or not gather)
gaq <- gather(airquality, variable, value, -Month, -Day)
ggplot(gaq, aes(x = value, fill = Month)) + geom_histogram() +
facet_wrap( ~ variable, scales = "free")
|
library(tidyverse)
# Init --------------------------------------------------------------------
exp_sin <- 3
exp_cos <- 4
valores_eje_x <- seq(0, 2*pi, by = 0.1)
# Functions ---------------------------------------------------------------
f1 <- function(x) sin(x)^exp_sin + cos(x)^exp_cos
f2 <- function(x) -sin(x)
f3 <- function(x) sin(3 * x^2)
# f4 <- function(x) sin(10 * x) ^ 2 - cos(10 * x) ^ 2
Fprueba <- function(x) sin(x^2) ^ 2 - cos(x^2) ^2
ggplot(tibble(eje_x = valores_eje_x), aes(x = eje_x, y = Fprueba(eje_x))) +
geom_line() +
coord_polar()
lista_funciones <- ls(pattern = "^f[[:digit:]]")
# Data frame --------------------------------------------------------------
df_evaluacion_funciones <- map_dfc(lista_funciones, function(funcion){
funcion_eval <- eval(as.symbol(funcion))
c(funcion = funcion_eval(valores_eje_x))
}) %>%
set_names(lista_funciones)
df_plot <- tibble(eje_x = valores_eje_x) %>%
mutate(ind = row_number()) %>%
bind_cols(df_evaluacion_funciones) %>%
pivot_longer(-c(ind, eje_x), values_to = "y")
# Drawing parameters -------------------------------------------------------
# TODO This doesn't work
alphas <- seq(1, 0.5, along.with = unique(df_plot$name))
names(alphas) <- unique(df_plot$name)
df_plot <- df_plot %>%
mutate(trans = alphas[name])
# Plot ----------------------------------------------------------------------
ggplot(df_plot) +
geom_line(aes(eje_x, y, group = name, alpha = trans),
col = "yellow", size = 1) +
coord_polar() +
theme_void() +
theme(panel.background = element_rect(fill = "purple"),
legend.position = "none")
|
/trigo-i.R
|
no_license
|
lhansa/deboissier
|
R
| false | false | 1,597 |
r
|
library(tidyverse)
# Init --------------------------------------------------------------------
exp_sin <- 3
exp_cos <- 4
valores_eje_x <- seq(0, 2*pi, by = 0.1)
# Functions ---------------------------------------------------------------
f1 <- function(x) sin(x)^exp_sin + cos(x)^exp_cos
f2 <- function(x) -sin(x)
f3 <- function(x) sin(3 * x^2)
# f4 <- function(x) sin(10 * x) ^ 2 - cos(10 * x) ^ 2
Fprueba <- function(x) sin(x^2) ^ 2 - cos(x^2) ^2
ggplot(tibble(eje_x = valores_eje_x), aes(x = eje_x, y = Fprueba(eje_x))) +
geom_line() +
coord_polar()
lista_funciones <- ls(pattern = "^f[[:digit:]]")
# Data frame --------------------------------------------------------------
df_evaluacion_funciones <- map_dfc(lista_funciones, function(funcion){
funcion_eval <- eval(as.symbol(funcion))
c(funcion = funcion_eval(valores_eje_x))
}) %>%
set_names(lista_funciones)
df_plot <- tibble(eje_x = valores_eje_x) %>%
mutate(ind = row_number()) %>%
bind_cols(df_evaluacion_funciones) %>%
pivot_longer(-c(ind, eje_x), values_to = "y")
# Drawing parameters -------------------------------------------------------
# TODO This doesn't work
alphas <- seq(1, 0.5, along.with = unique(df_plot$name))
names(alphas) <- unique(df_plot$name)
df_plot <- df_plot %>%
mutate(trans = alphas[name])
# Plot ----------------------------------------------------------------------
ggplot(df_plot) +
geom_line(aes(eje_x, y, group = name, alpha = trans),
col = "yellow", size = 1) +
coord_polar() +
theme_void() +
theme(panel.background = element_rect(fill = "purple"),
legend.position = "none")
|
### Spanish Aggregated Data ###
### ------------------------------------------------------------------ ###
### General Descriptive and aggregated measures for the Dissertation ###
### ------------------------------------------------------------------ ###
# set working directory
dir()
setwd("C:/Users/y4956294S/Documents/LONGPOP/LE Data Spain Diss/DISSLTESP/data")
set.seed(17952)
## use hmd/hfd package for load the data (Spain)
# LIBRARIES #
library(ggplot2)
library(gcookbook)
library(HMDHFDplus)
library(plyr)
library(reshape2)
library(grid)
library(gridExtra)
library(tidyr)
library(ggplot2)
library(readxl)
library(dplyr)
library(scales)
library(RColorBrewer)
library(MortalitySmooth)
# library(MortHump)
### -------------------------------------------------- ###
### South European Countries LE at Birth and at age 50 ###
### - - - - - - ###
### and later for the life span disparity ###
# Spain
LT.ESP <- readHMD("ESP_LT.txt", fixup = T)
POP.ESP <- readHMD("ESP_pop.txt", fixup = T)
DEAD.ESP <- readHMD("ESP_dea.txt", fixup = T)
# Italy
LT.ITA <- readHMD("ITA_LT.txt", fixup = T)
# Portugal
LT.PRT <- readHMD("POR_LT.txt", fixup = T)
# Greece
LT.GRC <- readHMD("GRE_LT.txt", fixup = T)
# Japan
LT.JAP <- readHMD("JAP_LT.txt", fixup = T)
summary(LT.ESP)
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
### Rectangularization plot with smooth data
## 1. Smooth grid of age-specific death rates
# 1.1 Extract deaths and exposure
# Dx
DxS_M = DEAD.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Male)
DxS_F = DEAD.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Female)
DxS_T = DEAD.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Total)
# Nx
NxS_M = POP.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Male1)
NxS_F = POP.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Female1)
NxS_T = POP.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Total1)
# Data manipulation
y <- unique(DxS_M$Year)
x <- unique(DxS_M$Age)
m <- length(x)
n <- length(y)
## Deaths and exposures in a matrix format (age x year)
# --- males
D.Mal <- do.call(cbind,tapply(X = DxS_M$Male, INDEX = DxS_M$Year, FUN = identity))
E.Mal <- do.call(cbind,tapply(X = NxS_M$Male1, INDEX = NxS_M$Year, FUN = identity))
# --- females
D.Fem <- do.call(cbind,tapply(X = DxS_F$Female, INDEX = DxS_F$Year, FUN = identity))
E.Fem <- do.call(cbind,tapply(X = NxS_F$Female1, INDEX = NxS_F$Year, FUN = identity))
# --- total
D.Tot <- do.call(cbind,tapply(X = DxS_T$Total, INDEX = DxS_T$Year, FUN = identity))
E.Tot <- do.call(cbind,tapply(X = NxS_T$Total1, INDEX = NxS_T$Year, FUN = identity))
# some E are equal to zero -> weights are necessary (weight matrix)
W <- matrix(1, m, n)
W[E.Mal==0] <- 0
W[E.Fem==0] <- 0
W[E.Tot==0] <- 0
# Smoothing deaths using Mort2Dsmooth() for both sexes and combined mortality
# ----------------------------------------------------------------------------
fitDx_M = Mort2Dsmooth(x = x, y = y, Z = D.Mal, offset = log(E.Mal),W=W)
fitDx_F = Mort2Dsmooth(x = x, y = y, Z = D.Fem, offset = log(E.Fem),W=W)
fitDx_T = Mort2Dsmooth(x = x, y = y, Z = D.Tot, offset = log(E.Tot), W=W)
# plot raw age-specific mortality rates vs. smoothed rates
# --------------------------------------------------------
plot(fitDx_M)
plot(fitDx_F)
plot(fitDx_T)
# See the fit of log mortality (should be more or less on a straight line) #### it is!
# ------------------------------------------------------------------------
plot(fitDx_M$logmortality, log(D.Mal/E.Mal))
plot(fitDx_F$logmortality, log(D.Fem/E.Fem))
plot(fitDx_T$logmortality, log(D.Tot/E.Tot))
### ------------------------------------------------------------------------------------------------ ###
#############
#### MALE ###
#############
# survival function by year (beautiful ;) )
delta <- 1
Bxs.Mal <- fitDx_M$Bx
## over years are the same
By.Mal <- fitDx_M$By
## fitted coefficients
betas.Mal <- fitDx_M$coef
## log-mortality (linear predictor) over new ages and years (output: log hazard)
ln.h.Mal <- MortSmooth_BcoefB(Bxs.Mal, By.Mal, betas.Mal)
## hazard
h.Mal <- exp(ln.h.Mal)
## cumulative hazard using cumsum
H.Mal <- matrix(0, m, n)
for(i in 1:n){
H.Mal[,i] <- cumsum(h.Mal[,i]*delta)
}
## fitted survival functions
S.Mal <- apply(H.Mal, 2, function(x){exp(-x)})
## fitted density functions
f.Mal <- h.Mal * S.Mal
image(t(f.Mal))
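## Clarifying note (added): the steps above use the standard survival identities
##   H(x) = sum over t <= x of h(t) * delta,   S(x) = exp(-H(x)),   f(x) = h(x) * S(x),
## so each column of f sums (approximately) to the year-specific radix used further below.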
#### building life tables from the fx ####
dim(f.Mal)
# radix of the table by year
colSums(f.Mal)
# rough estimate of the life expectancy at birth
plot(y, colSums(S.Mal) / 10, type = "b", las = 1, main = "e0")
######################
# Build a life table #
######################
# ------------------------------- #
N <- length(x)
Widths <- rep(delta, length(x))
# ------------------------------- #
mal.smooth <- as.data.frame(rep(x,n))
# Change names for ages
colnames(mal.smooth)[1] <- "Age"
mal.smooth <- mal.smooth %>% mutate(Year=rep(min(y):max(y), times=1, each=111)) %>%
# ax values (may be to be changed for the highest age groups)
mutate(ax = rep(Widths / 2, times=n))
# obtain the mx values from the smoothed hazard function
dim(h.Mal)
## get the hx in the right format
h.new.Mal <- as.data.frame(h.Mal)
h.new.Mal <- data.frame(mx=unlist(h.new.Mal, use.names = FALSE))
# ------------------------------------------------------------
mal.smooth <- mal.smooth %>% bind_cols(h.new.Mal)
# ------------------------------------------------------------
# ax for first year of life (Based on Andreev, Kingkade 2015)
mal.smooth$ax[ mal.smooth$Age==0] <- ifelse( mal.smooth$mx[ mal.smooth$Age==0]<=0.023, 0.14929 - 1.99545* mal.smooth$mx[ mal.smooth$Age==0],
ifelse( mal.smooth$mx[mal.smooth$Age==0]<=0.08307, 0.02832 + 3.26021* mal.smooth$mx[ mal.smooth$Age==0], 0.29915))
# ------------------------------------------------------------
# qx
mal.smooth <- mal.smooth %>% mutate(qx = (Widths * mx) / (1 + (Widths - ax) * mx))
# ------------------------------------------------------------
## make the last qx=1 with a little trick which would not work with a data frame
qx <- matrix(mal.smooth$qx)
qx[1:(0+111)==(0+111)] <- 1
# ------------------------------------------------------------
mal.smooth <- mal.smooth %>% select(-qx) %>% bind_cols(as.data.frame(qx))
colnames(mal.smooth)[5] <- "qx"
mal.smooth <- mal.smooth %>% mutate(qx = ifelse(qx>1,1,qx)) %>%
## add the px
mutate(px = 1 - qx)
# ------------------------------------------------------------
## matrix operations: sum over the columns of the estimated f-values to obtain
## the base/radix for the life table
radix.mat <- as.data.frame(matrix(data=colSums (f.Mal, na.rm = FALSE, dims = 1), nrow = 1)) %>%
## now making filling dummie values in between to make it the same length as the data frame
bind_rows(as.data.frame(matrix(data = 0,nrow = 110, ncol = 102)))
## stack them in order and delete the extra variable
radix.mat <- stack(radix.mat) %>% select(-ind)
# ------------------------------------------------------------
mal.smooth <- mal.smooth %>% bind_cols(as.data.frame(radix.mat))
colnames(mal.smooth)[7] <- "lx"
## use the dplyr group_by command to calculate the rest of the lx from the px
mal.smooth <- mal.smooth %>% group_by(Year) %>% mutate(lx = c(lx[1],lx[1] * cumprod(px))[1:N]) %>%
## dx values from the lx (alternatively from the smoothing algorithm)
group_by(Year) %>% mutate(dx = c(-diff(lx),lx[N])) %>%
## Create the Lx from the lx and the dx
group_by(Year) %>% mutate(Lx = c(Widths[1:(N - 1)] * lx[2:N] + ax[1:(N - 1)] * dx[1:(N - 1)], lx[N] * ax[N])) %>%
## account for infinite Lx and NA
mutate(Lx = ifelse(is.infinite(Lx),1,Lx)) %>% mutate(Lx = ifelse(is.na(Lx),0,Lx)) %>%
## Calculate the Tx from the Lx
group_by(Year) %>% mutate(Tx = rev(cumsum(rev(Lx)))) %>%
## Finally obtain the life expectancy from the Tx and lx
group_by(Year) %>% mutate(ex = Tx / lx)
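## Quick check (added, optional): male life expectancy at birth by year can now be inspected with
# mal.smooth %>% filter(Age == 0) %>% dplyr::select(Year, ex)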
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
###############
#### FEMALE ###
###############
# survival function by year (beautiful ;) )
delta <- 1
Bxs.Fem <- fitDx_F$Bx
## over years are the same
By.Fem <- fitDx_F$By
## fitted coefficients
betas.Fem <- fitDx_F$coef
## log-mortality (linear predictor) over new ages and years (output: log hazard)
ln.h.Fem <- MortSmooth_BcoefB(Bxs.Fem, By.Fem, betas.Fem)
## hazard
h.Fem <- exp(ln.h.Fem)
## cumulative hazard using cumsum
H.Fem <- matrix(0, m, n)
for(i in 1:n){
H.Fem[,i] <- cumsum(h.Fem[,i]*delta)
}
## fitted survival functions
S.Fem <- apply(H.Fem, 2, function(x){exp(-x)})
## fitted density functions
f.Fem <- h.Fem * S.Fem
image(t(f.Fem))
#### building life tables from the fx ####
dim(f.Fem)
# radix of the table by year
colSums(f.Fem)
cols <- grey(ncol(S.Fem):1/ncol(S.Fem))
######################
# Build a life table #
######################
# ------------------------------- #
N <- length(x)
Widths <- rep(delta, length(x))
# ------------------------------- #
fem.smooth <- as.data.frame(rep(x,n))
# Change names for ages
colnames(fem.smooth)[1] <- "Age"
fem.smooth <- fem.smooth %>% mutate(Year=rep(min(y):max(y), times=1, each=111)) %>%
# ax values (may be to be changed for the highest age groups)
mutate(ax = rep(Widths / 2, times=n))
# ------------------------------------------------------------
# ax for first year of life
# ------------------------------------------------------------
# obtain the mx values from the smoothed hazard function
dim(h.Fem)
## get the hx in the right format
h.new.Fem <- as.data.frame(h.Fem)
h.new.Fem <- data.frame(mx=unlist(h.new.Fem, use.names = FALSE))
# ------------------------------------------------------------
fem.smooth <- fem.smooth %>% bind_cols(h.new.Fem)
# ------------------------------------------------------------
# ax for first year of life (Based on Andreev, Kingkade 2015)
fem.smooth$ax[fem.smooth$Age==0] <- ifelse(fem.smooth$mx[fem.smooth$Age==0]<=0.023, 0.14903 - 2.05527 * fem.smooth$mx[fem.smooth$Age==0],
ifelse( fem.smooth$mx[fem.smooth$Age==0]<=0.08307, 0.04667 + 3.88089 * fem.smooth$mx[fem.smooth$Age==0], 0.31411))
# ------------------------------------------------------------
# qx
fem.smooth <- fem.smooth %>%mutate(qx = (Widths * mx) / (1 + (Widths - ax) * mx))
# ------------------------------------------------------------
## make the last qx=1 with a little trick which would not work with a data frame
qx <- matrix(fem.smooth$qx)
qx[1:(0+111)==(0+111)] <- 1
# ------------------------------------------------------------
fem.smooth <- fem.smooth %>% select(-qx) %>% bind_cols(as.data.frame(qx))
colnames(fem.smooth)[5] <- "qx"
fem.smooth <- fem.smooth %>% mutate(qx = ifelse(qx>1,1,qx)) %>%
## add the px
mutate(px = 1 - qx)
# ------------------------------------------------------------
## matrix operations: sum over the columns of the estimated f-values to obtain
## the base/radix for the life table
radix.mat <- as.data.frame(matrix(data=colSums (f.Fem, na.rm = FALSE, dims = 1), nrow = 1)) %>%
  ## now fill in dummy values so the column has the same length as the data frame
bind_rows(as.data.frame(matrix(data = 0,nrow = 110, ncol = 102)))
## stack them in order and delete the extra variable
radix.mat <- stack(radix.mat) %>% select(-ind)
# ------------------------------------------------------------
fem.smooth <- fem.smooth %>% bind_cols(as.data.frame(radix.mat))
colnames(fem.smooth)[7] <- "lx"
## use the dplyr group_by command to calculate the rest of the lx from the px
fem.smooth <- fem.smooth %>% group_by(Year) %>% mutate(lx = c(lx[1],lx[1] * cumprod(px))[1:N]) %>%
## dx values from the lx (alternatively from the smoothing algorithm)
group_by(Year) %>% mutate(dx = c(-diff(lx),lx[N])) %>%
## Create the Lx from the lx and the dx
group_by(Year) %>% mutate(Lx = c(Widths[1:(N - 1)] * lx[2:N] + ax[1:(N - 1)] * dx[1:(N - 1)], lx[N] * ax[N])) %>%
## account for infinite Lx and NA
mutate(Lx = ifelse(is.infinite(Lx),1,Lx)) %>% mutate(Lx = ifelse(is.na(Lx),0,Lx)) %>%
## Calculate the Tx from the Lx
group_by(Year) %>% mutate(Tx = rev(cumsum(rev(Lx)))) %>%
## Finally obtain the life expectancy from the Tx and lx
group_by(Year) %>% mutate(ex = Tx / lx)
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
##############
#### TOTAL ###
##############
# survival function by year (beautiful ;) )
delta <- 1
Bxs.Tot <- fitDx_T$Bx
## over years are the same
By.Tot <- fitDx_T$By
## fitted coefficients
betas.Tot <- fitDx_T$coef
## log-mortality (linear predictor) over new ages and years (output: log hazard)
ln.h.Tot <- MortSmooth_BcoefB(Bxs.Tot, By.Tot, betas.Tot)
## hazard
h.Tot <- exp(ln.h.Tot)
## cumulative hazard using cumsum
H.Tot <- matrix(0, m, n)
for(i in 1:n){
H.Tot[,i] <- cumsum(h.Tot[,i]*delta)
}
## fitted survival functions
S.Tot <- apply(H.Tot, 2, function(x){exp(-x)})
## fitted density functions
f.Tot <- h.Tot * S.Tot
image(t(f.Tot))
#### building life tables from the fx ####
dim(f.Tot)
# radix of the table by year
colSums(f.Tot)
tot.smooth <- as.data.frame(rep(x,n))
# Change names for ages
colnames(tot.smooth)[1] <- "Age"
# ------------------------------- #
N <- length(x)
Widths <- rep(delta, length(x))
# ------------------------------- #
tot.smooth <- tot.smooth %>% mutate(Year=rep(min(y):max(y), times=1, each=111)) %>%
# ax values (may be to be changed for the highest age groups)
mutate(ax = rep(Widths / 2, times=n))
# obtain the mx values from the smoothed hazard function
dim(h.Tot)
## get the hx in the right format
h.new.Tot <- as.data.frame(h.Tot)
h.new.Tot <- data.frame(mx=unlist(h.new.Tot, use.names = FALSE))
# ------------------------------------------------------------
tot.smooth <- tot.smooth %>% bind_cols(h.new.Tot) %>%
# ------------------------------------------------------------
# ax for first year of life ( needs values for males and females)
# ------------------------------------------------------------
# qx
mutate(qx = (Widths * mx) / (1 + (Widths - ax) * mx))
# ------------------------------------------------------------
## make the last qx=1 with a little trick which would not work with a data frame
qx <- matrix(tot.smooth$qx)
qx[1:(0+111)==(0+111)] <- 1
# ------------------------------------------------------------
tot.smooth <- tot.smooth %>% select(-qx) %>% bind_cols(as.data.frame(qx))
colnames(tot.smooth)[5] <- "qx"
tot.smooth <- tot.smooth %>% mutate(qx = ifelse(qx>1,1,qx)) %>%
## add the px
mutate(px = 1 - qx)
# ------------------------------------------------------------
## matrix operations: sum over the columns of the estimated f-values to obtain
## the base/radix for the life table
radix.mat <- as.data.frame(matrix(data=colSums (f.Tot, na.rm = FALSE, dims = 1), nrow = 1)) %>%
  ## now fill in dummy values so the column has the same length as the data frame
bind_rows(as.data.frame(matrix(data = 0,nrow = 110, ncol = 102)))
## stack them in order and delete the extra variable
radix.mat <- stack(radix.mat) %>% select(-ind)
# ------------------------------------------------------------
tot.smooth <- tot.smooth %>% bind_cols(as.data.frame(radix.mat))
colnames(tot.smooth)[7] <- "lx"
## use the dplyr group_by command to calculate the rest of the lx from the px
tot.smooth <- tot.smooth %>% group_by(Year) %>% mutate(lx = c(lx[1],lx[1] * cumprod(px))[1:N]) %>%
## dx values from the lx (alternatively from the smoothing algorithm)
group_by(Year) %>% mutate(dx = c(-diff(lx),lx[N])) %>%
## Create the Lx from the lx and the dx
group_by(Year) %>% mutate(Lx = c(Widths[1:(N - 1)] * lx[2:N] + ax[1:(N - 1)] * dx[1:(N - 1)], lx[N] * ax[N])) %>%
## account for infinite Lx and NA
mutate(Lx = ifelse(is.infinite(Lx),1,Lx)) %>% mutate(Lx = ifelse(is.na(Lx),0,Lx)) %>%
## Calculate the Tx from the Lx
group_by(Year) %>% mutate(Tx = rev(cumsum(rev(Lx)))) %>%
## Finally obtain the life expectancy from the Tx and lx
group_by(Year) %>% mutate(ex = Tx / lx)
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
par(mfrow = c(1, 2))
# survival function by year (beautiful ;) )
matplot(S.Mal, type = "l", lty = 1, col = cols, xlab="Age", ylab="Probability of Survival", main="Male")
legend("bottomleft", inset=.05, legend=c("1915", "2016"), lty=1, col=c("lightgrey", "black"), bty="n")
matplot(S.Fem, type = "l", lty = 1, col = cols, xlab = "Age", ylab=" ", main="Female")
par(mfrow = c(1,1))
# different colors: heat.colors(n = ncol(S.Fem))
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
#### Plotting with smooth survival curves for total population in ggplot
#### -------------------------------------------------------------------
#ggplot needs a dataframe
data <- as.data.frame(S.Tot)
#id variable for position in matrix
data$id <- 1:nrow(S.Tot)
#reshape to long format
plot_data <- melt(S.Tot,id.var="id")
# to change Var2 back to years and ages for easier handling
plot_data <- plot_data %>% mutate(years = Var2+1914) %>% mutate(ages = Var1)
#plot
ggplot_Surv <- plot_data %>% ggplot(aes(x=ages,y=value,group=years,colour=years)) +
geom_line() +
scale_y_continuous(name = "Probability of Survival") +
scale_x_continuous(name = "Age") +
scale_colour_gradient(name= " ",low = "white", high = "black") +
theme_bw()
# move legend
ggplot_Surv <-ggplot_Surv + theme(legend.position = c(0.1, 0.2)) + theme(axis.text=element_text(size=12),
axis.title=element_text(size=12,face="bold"))
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
############################################
##### Save life tables as data frames! #####
############################################
## Female Life Table
save(fem.smooth,file = "FLT_HMD.Rdata")
## Male Life Table
save(mal.smooth,file = "MLT_HMD.Rdata")
## Total Life Table
save(tot.smooth,file = "TLT_HMD.Rdata")
|
/ESP_SmoothLT.R
|
no_license
|
mvoigt87/DISSLTESP
|
R
| false | false | 20,519 |
r
|
### Spanish Aggregated Data ###
### ------------------------------------------------------------------ ###
### General Descriptive and aggregated measures for the Dissertation ###
### ------------------------------------------------------------------ ###
# set working directory
dir()
setwd("C:/Users/y4956294S/Documents/LONGPOP/LE Data Spain Diss/DISSLTESP/data")
set.seed(17952)
## use hmd/hfd package for load the data (Spain)
# LIBRARIES #
library(ggplot2)
library(gcookbook)
library(HMDHFDplus)
library(plyr)
library(reshape2)
library(grid)
library(gridExtra)
library(tidyr)
library(ggplot2)
library(readxl)
library(dplyr)
library(scales)
library(RColorBrewer)
library(MortalitySmooth)
# library(MortHump)
### -------------------------------------------------- ###
### South European Countries LE at Birth and at age 50 ###
### - - - - - - ###
### and later for the life span disparity ###
# Spain
LT.ESP <- readHMD("ESP_LT.txt", fixup = T)
POP.ESP <- readHMD("ESP_pop.txt", fixup = T)
DEAD.ESP <- readHMD("ESP_dea.txt", fixup = T)
# Italy
LT.ITA <- readHMD("ITA_LT.txt", fixup = T)
# Portugal
LT.PRT <- readHMD("POR_LT.txt", fixup = T)
# Greece
LT.GRC <- readHMD("GRE_LT.txt", fixup = T)
# Japan
LT.JAP <- readHMD("JAP_LT.txt", fixup = T)
summary(LT.ESP)
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
### Rectangularization plot with smooth data
## 1. Smooth grid of age-specific death rates
# 1.1 Extract deaths and exposure
# Dx
DxS_M = DEAD.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Male)
DxS_F = DEAD.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Female)
DxS_T = DEAD.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Total)
# Nx
NxS_M = POP.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Male1)
NxS_F = POP.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Female1)
NxS_T = POP.ESP %>% filter(Year>=1915 & Year<=2016) %>% select(Year, Age, Total1)
# Data manipulation
y <- unique(DxS_M$Year)
x <- unique(DxS_M$Age)
m <- length(x)
n <- length(y)
## Deaths and exposures in a matrix format (age x year)
# --- males
D.Mal <- do.call(cbind,tapply(X = DxS_M$Male, INDEX = DxS_M$Year, FUN = identity))
E.Mal <- do.call(cbind,tapply(X = NxS_M$Male1, INDEX = NxS_M$Year, FUN = identity))
# --- females
D.Fem <- do.call(cbind,tapply(X = DxS_F$Female, INDEX = DxS_F$Year, FUN = identity))
E.Fem <- do.call(cbind,tapply(X = NxS_F$Female1, INDEX = NxS_F$Year, FUN = identity))
# --- total
D.Tot <- do.call(cbind,tapply(X = DxS_T$Total, INDEX = DxS_T$Year, FUN = identity))
E.Tot <- do.call(cbind,tapply(X = NxS_T$Total1, INDEX = NxS_T$Year, FUN = identity))
# some E are equal to zero -> weights are necessary (weight matrix)
W <- matrix(1, m, n)
W[E.Mal==0] <- 0
W[E.Fem==0] <- 0
W[E.Tot==0] <- 0
# Smoothing deaths using Mort2Dsmooth() for both sexes and combined mortality
# ----------------------------------------------------------------------------
fitDx_M = Mort2Dsmooth(x = x, y = y, Z = D.Mal, offset = log(E.Mal),W=W)
fitDx_F = Mort2Dsmooth(x = x, y = y, Z = D.Fem, offset = log(E.Fem),W=W)
fitDx_T = Mort2Dsmooth(x = x, y = y, Z = D.Tot, offset = log(E.Tot), W=W)
# plot raw age-specific mortality rates vs. smoothed rates
# --------------------------------------------------------
plot(fitDx_M)
plot(fitDx_F)
plot(fitDx_T)
# See the fit of log mortality (should be more or less on a straight line) #### it is!
# ------------------------------------------------------------------------
plot(fitDx_M$logmortality, log(D.Mal/E.Mal))
plot(fitDx_F$logmortality, log(D.Fem/E.Fem))
plot(fitDx_T$logmortality, log(D.Tot/E.Tot))
### ------------------------------------------------------------------------------------------------ ###
#############
#### MALE ###
#############
# survival function by year (beautiful ;) )
delta <- 1
Bxs.Mal <- fitDx_M$Bx
## over years are the same
By.Mal <- fitDx_M$By
## fitted coefficients
betas.Mal <- fitDx_M$coef
## log-mortality (linear predictor) over new ages and years (output: log hazard)
ln.h.Mal <- MortSmooth_BcoefB(Bxs.Mal, By.Mal, betas.Mal)
## hazard
h.Mal <- exp(ln.h.Mal)
## cumulative hazard using cumsum
H.Mal <- matrix(0, m, n)
for(i in 1:n){
H.Mal[,i] <- cumsum(h.Mal[,i]*delta)
}
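## NOTE (added check, not part of the original script): the cumulative-hazard loop
## above can also be vectorized with apply(); this verifies both give the same matrix.
H.Mal.vec <- apply(h.Mal * delta, 2, cumsum)
stopifnot(isTRUE(all.equal(H.Mal, H.Mal.vec, check.attributes = FALSE)))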
## fitted survival functions
S.Mal <- apply(H.Mal, 2, function(x){exp(-x)})
## fitted density functions
f.Mal <- h.Mal * S.Mal
image(t(f.Mal))
#### building life tables from the fx ####
dim(f.Mal)
# radix of the table by year
colSums(f.Mal)
# rough estimate of life expectancy at birth: with delta = 1 the column sums of
# the survival surface approximate e0
plot(y, colSums(S.Mal), type = "b", las = 1, main = "e0")
######################
# Build a life table #
######################
# ------------------------------- #
N <- length(x)
Widths <- rep(delta, length(x))
# ------------------------------- #
mal.smooth <- as.data.frame(rep(x,n))
# Change names for ages
colnames(mal.smooth)[1] <- "Age"
mal.smooth <- mal.smooth %>% mutate(Year=rep(min(y):max(y), times=1, each=111)) %>%
# ax values (may need to be changed for the highest age groups)
mutate(ax = rep(Widths / 2, times=n))
# obtain the mx values from the smoothed hazard function
dim(h.Mal)
## get the hx in the right format
h.new.Mal <- as.data.frame(h.Mal)
h.new.Mal <- data.frame(mx=unlist(h.new.Mal, use.names = FALSE))
# ------------------------------------------------------------
mal.smooth <- mal.smooth %>% bind_cols(h.new.Mal)
# ------------------------------------------------------------
# ax for first year of life (Based on Andreev, Kingkade 2015)
mal.smooth$ax[ mal.smooth$Age==0] <- ifelse( mal.smooth$mx[ mal.smooth$Age==0]<=0.023, 0.14929 - 1.99545* mal.smooth$mx[ mal.smooth$Age==0],
ifelse( mal.smooth$mx[mal.smooth$Age==0]<=0.08307, 0.02832 + 3.26021* mal.smooth$mx[ mal.smooth$Age==0], 0.29915))
# ------------------------------------------------------------
# qx
mal.smooth <- mal.smooth %>% mutate(qx = (Widths * mx) / (1 + (Widths - ax) * mx))
# ------------------------------------------------------------
## make the last qx = 1: the logical index of length 111 is TRUE only at its last
## position and is recycled down the stacked vector, so the closing age (110) of
## every year is set to 1 (this recycling trick would not work on a data frame)
qx <- matrix(mal.smooth$qx)
qx[1:(0+111)==(0+111)] <- 1
# ------------------------------------------------------------
mal.smooth <- mal.smooth %>% select(-qx) %>% bind_cols(as.data.frame(qx))
colnames(mal.smooth)[5] <- "qx"
mal.smooth <- mal.smooth %>% mutate(qx = ifelse(qx>1,1,qx)) %>%
## add the px
mutate(px = 1 - qx)
# ------------------------------------------------------------
## matrix operations: sum over the columns of the estimated f-values to obtain
## the base/radix for the life table
radix.mat <- as.data.frame(matrix(data=colSums (f.Mal, na.rm = FALSE, dims = 1), nrow = 1)) %>%
## pad with zero rows (ages 1-110) so each year's column matches the life-table length
bind_rows(as.data.frame(matrix(data = 0,nrow = 110, ncol = 102)))
## stack them in order and delete the extra variable
radix.mat <- stack(radix.mat) %>% select(-ind)
# ------------------------------------------------------------
mal.smooth <- mal.smooth %>% bind_cols(as.data.frame(radix.mat))
colnames(mal.smooth)[7] <- "lx"
## use the dplyr group_by command to calculate the rest of the lx from the px
mal.smooth <- mal.smooth %>% group_by(Year) %>% mutate(lx = c(lx[1],lx[1] * cumprod(px))[1:N]) %>%
## dx values from the lx (alternatively from the smoothing algorithm)
group_by(Year) %>% mutate(dx = c(-diff(lx),lx[N])) %>%
## Create the Lx from the lx and the dx
group_by(Year) %>% mutate(Lx = c(Widths[1:(N - 1)] * lx[2:N] + ax[1:(N - 1)] * dx[1:(N - 1)], lx[N] * ax[N])) %>%
## account for infinite Lx and NA
mutate(Lx = ifelse(is.infinite(Lx),1,Lx)) %>% mutate(Lx = ifelse(is.na(Lx),0,Lx)) %>%
## Calculate the Tx from the Lx
group_by(Year) %>% mutate(Tx = rev(cumsum(rev(Lx)))) %>%
## Finally obtain the life expectancy from the Tx and lx
group_by(Year) %>% mutate(ex = Tx / lx)
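## NOTE (added sketch, not part of the original script): extract the smoothed male
## life expectancy at birth by year as a quick plausibility check of the table.
e0.Mal <- mal.smooth %>% filter(Age == 0) %>% select(Year, ex)
plot(e0.Mal$Year, e0.Mal$ex, type = "l", las = 1,
     xlab = "Year", ylab = "e0", main = "Smoothed male e0")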
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
###############
#### FEMALE ###
###############
# Derive hazard, survival and density surfaces from the smoothed female fit
delta <- 1
## B-spline basis over ages
Bxs.Fem <- fitDx_F$Bx
## B-spline basis over years
By.Fem <- fitDx_F$By
## fitted coefficients
betas.Fem <- fitDx_F$coef
## log-mortality (linear predictor) over new ages and years (output: log hazard)
ln.h.Fem <- MortSmooth_BcoefB(Bxs.Fem, By.Fem, betas.Fem)
## hazard
h.Fem <- exp(ln.h.Fem)
## cumulative hazard using cumsum
H.Fem <- matrix(0, m, n)
for(i in 1:n){
H.Fem[,i] <- cumsum(h.Fem[,i]*delta)
}
## fitted survival functions
S.Fem <- apply(H.Fem, 2, function(x){exp(-x)})
## fitted density functions
f.Fem <- h.Fem * S.Fem
image(t(f.Fem))
#### building life tables from the fx ####
dim(f.Fem)
# radix of the table by year
colSums(f.Fem)
# grey scale for the survival-curve plots further below (earliest year lightest, latest darkest)
cols <- grey(ncol(S.Fem):1 / ncol(S.Fem))
######################
# Build a life table #
######################
# ------------------------------- #
N <- length(x)
Widths <- rep(delta, length(x))
# ------------------------------- #
fem.smooth <- as.data.frame(rep(x,n))
# Change names for ages
colnames(fem.smooth)[1] <- "Age"
fem.smooth <- fem.smooth %>% mutate(Year=rep(min(y):max(y), times=1, each=111)) %>%
# ax values (may need to be changed for the highest age groups)
mutate(ax = rep(Widths / 2, times=n))
# ------------------------------------------------------------
# ax for the first year of life is adjusted below, once mx is available
# ------------------------------------------------------------
# obtain the mx values from the smoothed hazard function
dim(h.Fem)
## get the hx in the right format
h.new.Fem <- as.data.frame(h.Fem)
h.new.Fem <- data.frame(mx=unlist(h.new.Fem, use.names = FALSE))
# ------------------------------------------------------------
fem.smooth <- fem.smooth %>% bind_cols(h.new.Fem)
# ------------------------------------------------------------
# ax for first year of life (Based on Andreev, Kingkade 2015)
fem.smooth$ax[fem.smooth$Age==0] <- ifelse(fem.smooth$mx[fem.smooth$Age==0]<=0.023, 0.14903 - 2.05527 * fem.smooth$mx[fem.smooth$Age==0],
ifelse( fem.smooth$mx[fem.smooth$Age==0]<=0.08307, 0.04667 + 3.88089 * fem.smooth$mx[fem.smooth$Age==0], 0.31411))
# ------------------------------------------------------------
# qx
fem.smooth <- fem.smooth %>%mutate(qx = (Widths * mx) / (1 + (Widths - ax) * mx))
# ------------------------------------------------------------
## make the last qx = 1 (same recycling trick as in the male table)
qx <- matrix(fem.smooth$qx)
qx[1:(0+111)==(0+111)] <- 1
# ------------------------------------------------------------
fem.smooth <- fem.smooth %>% select(-qx) %>% bind_cols(as.data.frame(qx))
colnames(fem.smooth)[5] <- "qx"
fem.smooth <- fem.smooth %>% mutate(qx = ifelse(qx>1,1,qx)) %>%
## add the px
mutate(px = 1 - qx)
# ------------------------------------------------------------
## matrix operations: sum over the columns of the estimated f-values to obtain
## the base/radix for the life table
radix.mat <- as.data.frame(matrix(data=colSums (f.Fem, na.rm = FALSE, dims = 1), nrow = 1)) %>%
## pad with zero rows (ages 1-110) so each year's column matches the life-table length
bind_rows(as.data.frame(matrix(data = 0,nrow = 110, ncol = 102)))
## stack them in order and delete the extra variable
radix.mat <- stack(radix.mat) %>% select(-ind)
# ------------------------------------------------------------
fem.smooth <- fem.smooth %>% bind_cols(as.data.frame(radix.mat))
colnames(fem.smooth)[7] <- "lx"
## use the dplyr group_by command to calculate the rest of the lx from the px
fem.smooth <- fem.smooth %>% group_by(Year) %>% mutate(lx = c(lx[1],lx[1] * cumprod(px))[1:N]) %>%
## dx values from the lx (alternatively from the smoothing algorithm)
group_by(Year) %>% mutate(dx = c(-diff(lx),lx[N])) %>%
## Create the Lx from the lx and the dx
group_by(Year) %>% mutate(Lx = c(Widths[1:(N - 1)] * lx[2:N] + ax[1:(N - 1)] * dx[1:(N - 1)], lx[N] * ax[N])) %>%
## account for infinite Lx and NA
mutate(Lx = ifelse(is.infinite(Lx),1,Lx)) %>% mutate(Lx = ifelse(is.na(Lx),0,Lx)) %>%
## Calculate the Tx from the Lx
group_by(Year) %>% mutate(Tx = rev(cumsum(rev(Lx)))) %>%
## Finally obtain the life expectancy from the Tx and lx
group_by(Year) %>% mutate(ex = Tx / lx)
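## NOTE (added sketch, not part of the original script): with both sex-specific
## tables available, the female advantage in life expectancy at birth follows directly.
e0.gap <- fem.smooth %>% filter(Age == 0) %>% select(Year, ex.f = ex) %>%
  left_join(mal.smooth %>% filter(Age == 0) %>% select(Year, ex.m = ex), by = "Year") %>%
  mutate(gap = ex.f - ex.m)
plot(e0.gap$Year, e0.gap$gap, type = "l", las = 1,
     xlab = "Year", ylab = "Years", main = "Female - male gap in e0")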
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
##############
#### TOTAL ###
##############
# Derive hazard, survival and density surfaces from the smoothed total fit
delta <- 1
## B-spline basis over ages
Bxs.Tot <- fitDx_T$Bx
## B-spline basis over years
By.Tot <- fitDx_T$By
## fitted coefficients
betas.Tot <- fitDx_T$coef
## log-mortality (linear predictor) over new ages and years (output: log hazard)
ln.h.Tot <- MortSmooth_BcoefB(Bxs.Tot, By.Tot, betas.Tot)
## hazard
h.Tot <- exp(ln.h.Tot)
## cumulative hazard using cumsum
H.Tot <- matrix(0, m, n)
for(i in 1:n){
H.Tot[,i] <- cumsum(h.Tot[,i]*delta)
}
## fitted survival functions
S.Tot <- apply(H.Tot, 2, function(x){exp(-x)})
## fitted density functions
f.Tot <- h.Tot * S.Tot
image(t(f.Tot))
#### building life tables from the fx ####
dim(f.Tot)
# radix of the table by year
colSums(f.Tot)
tot.smooth <- as.data.frame(rep(x,n))
# Change names for ages
colnames(tot.smooth)[1] <- "Age"
# ------------------------------- #
N <- length(x)
Widths <- rep(delta, length(x))
# ------------------------------- #
tot.smooth <- tot.smooth %>% mutate(Year=rep(min(y):max(y), times=1, each=111)) %>%
# ax values (may need to be changed for the highest age groups)
mutate(ax = rep(Widths / 2, times=n))
# obtain the mx values from the smoothed hazard function
dim(h.Tot)
## get the hx in the right format
h.new.Tot <- as.data.frame(h.Tot)
h.new.Tot <- data.frame(mx=unlist(h.new.Tot, use.names = FALSE))
# ------------------------------------------------------------
tot.smooth <- tot.smooth %>% bind_cols(h.new.Tot) %>%
# ------------------------------------------------------------
# ax for the first year of life is left at 0.5 here (a combined-sex Andreev-Kingkade adjustment would need sex-specific inputs)
# ------------------------------------------------------------
# qx
mutate(qx = (Widths * mx) / (1 + (Widths - ax) * mx))
# ------------------------------------------------------------
## make the last qx = 1 (same recycling trick as in the male table)
qx <- matrix(tot.smooth$qx)
qx[1:(0+111)==(0+111)] <- 1
# ------------------------------------------------------------
tot.smooth <- tot.smooth %>% select(-qx) %>% bind_cols(as.data.frame(qx))
colnames(tot.smooth)[5] <- "qx"
tot.smooth <- tot.smooth %>% mutate(qx = ifelse(qx>1,1,qx)) %>%
## add the px
mutate(px = 1 - qx)
# ------------------------------------------------------------
## matrix operations: sum over the columns of the estimated f-values to obtain
## the base/radix for the life table
radix.mat <- as.data.frame(matrix(data=colSums (f.Tot, na.rm = FALSE, dims = 1), nrow = 1)) %>%
## pad with zero rows (ages 1-110) so each year's column matches the life-table length
bind_rows(as.data.frame(matrix(data = 0,nrow = 110, ncol = 102)))
## stack them in order and delete the extra variable
radix.mat <- stack(radix.mat) %>% select(-ind)
# ------------------------------------------------------------
tot.smooth <- tot.smooth %>% bind_cols(as.data.frame(radix.mat))
colnames(tot.smooth)[7] <- "lx"
## use the dplyr group_by command to calculate the rest of the lx from the px
tot.smooth <- tot.smooth %>% group_by(Year) %>% mutate(lx = c(lx[1],lx[1] * cumprod(px))[1:N]) %>%
## dx values from the lx (alternatively from the smoothing algorithm)
group_by(Year) %>% mutate(dx = c(-diff(lx),lx[N])) %>%
## Create the Lx from the lx and the dx
group_by(Year) %>% mutate(Lx = c(Widths[1:(N - 1)] * lx[2:N] + ax[1:(N - 1)] * dx[1:(N - 1)], lx[N] * ax[N])) %>%
## account for infinite Lx and NA
mutate(Lx = ifelse(is.infinite(Lx),1,Lx)) %>% mutate(Lx = ifelse(is.na(Lx),0,Lx)) %>%
## Calculate the Tx from the Lx
group_by(Year) %>% mutate(Tx = rev(cumsum(rev(Lx)))) %>%
## Finally obtain the life expectancy from the Tx and lx
group_by(Year) %>% mutate(ex = Tx / lx)
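## NOTE (added sketch, not part of the original pipeline): the header at the top of
## this script also mentions life span disparity; a crude discrete approximation of
## e-dagger (average life years lost through death) can be read off the total table
## as sum(dx * ex), rescaled by the radix. It skips the usual within-interval
## refinement, so treat it as an approximation only.
edagger.Tot <- tot.smooth %>% group_by(Year) %>%
  summarise(edag = sum(dx * ex) / lx[Age == 0])
plot(edagger.Tot$Year, edagger.Tot$edag, type = "l", las = 1,
     xlab = "Year", ylab = "e-dagger (years)", main = "Life span disparity (approx.)")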
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
par(mfrow = c(1, 2))
# survival function by year (beautiful ;) )
matplot(S.Mal, type = "l", lty = 1, col = cols, xlab="Age", ylab="Probability of Survival", main="Male")
legend("bottomleft", inset=.05, legend=c("1915", "2016"), lty=1, col=c("lightgrey", "black"), bty="n")
matplot(S.Fem, type = "l", lty = 1, col = cols, xlab = "Age", ylab=" ", main="Female")
par(mfrow = c(1,1))
# different colors: heat.colors(n = ncol(S.Fem))
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
#### Plotting with smooth survival curves for total population in ggplot
#### -------------------------------------------------------------------
# ggplot needs a long-format data frame; melt() on the survival matrix returns
# Var1 (row index = age + 1), Var2 (column index = year - 1914) and value
plot_data <- melt(S.Tot)
# map the matrix indices back to calendar years and ages
plot_data <- plot_data %>% mutate(years = Var2 + 1914) %>% mutate(ages = Var1 - 1)
#plot
ggplot_Surv <- plot_data %>% ggplot(aes(x=ages,y=value,group=years,colour=years)) +
geom_line() +
scale_y_continuous(name = "Probability of Survival") +
scale_x_continuous(name = "Age") +
scale_colour_gradient(name= " ",low = "white", high = "black") +
theme_bw()
# move legend
ggplot_Surv <-ggplot_Surv + theme(legend.position = c(0.1, 0.2)) + theme(axis.text=element_text(size=12),
axis.title=element_text(size=12,face="bold"))
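## NOTE (added sketch): the original script builds but never displays the plot;
## print() shows it, and ggsave() (file name illustrative) would export it.
print(ggplot_Surv)
# ggsave("surv_total_ESP.pdf", ggplot_Surv, width = 7, height = 5)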
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
#### %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ####
### ------------------------------------------------------------------------------------------------ ###
### ------------------------------------------------------------------------------------------------ ###
############################################
##### Save life tables as data frames! #####
############################################
## Female Life Table
save(fem.smooth,file = "FLT_HMD.Rdata")
## Male Life Table
save(mal.smooth,file = "MLT_HMD.Rdata")
## Total Life Table
save(tot.smooth,file = "TLT_HMD.Rdata")
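## NOTE (added sketch): the tables can later be restored with load(), which brings
## back the objects under their original names (fem.smooth, mal.smooth, tot.smooth).
# load("FLT_HMD.Rdata"); load("MLT_HMD.Rdata"); load("TLT_HMD.Rdata")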
|
# Exercise 1: calling built-in functions
# Create a variable `my_name` that contains your name
my_name <- "Unnati Goyal"
# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
name_length <- nchar(my_name)
# Print the number of letters in your name
print(name_length)
# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
now_doing <- paste(my_name, "is programming!")
# Make the `now_doing` variable upper case
now_doing <- toupper(now_doing)
### Bonus
# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`
# Divide each number by the square root of 201 and save the new value in the
# original variable
# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.
# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.
# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal place
# Create a variable `sum_round` that is the sum of the rounded values
# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
|
/chapter-06-exercises/exercise-1/exercise.R
|
permissive
|
unnatigoyal900/book-exercises
|
R
| false | false | 1,313 |
r
|
# Exercise 1: calling built-in functions
# Create a variable `my_name` that contains your name
my_name <- "Unnati Goyal"
# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
name_length <- nchar(my_name)
# Print the number of letters in your name
print(name_length)
# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
now_doing <- paste(my_name, "is programming!")
# Make the `now_doing` variable upper case
now_doing <- toupper(now_doing)
### Bonus
# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`
# Divide each number by the square root of 201 and save the new value in the
# original variable
# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.
# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.
# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal place
# Create a variable `sum_round` that is the sum of the rounded values
# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_projections.R
\name{projections_table2}
\alias{projections_table2}
\title{New, lighter, projections_table function}
\usage{
projections_table2(
data_result,
scoring_rules = NULL,
src_weights = NULL,
vor_baseline = NULL,
tier_thresholds = NULL,
avg_type = c("average", "robust", "weighted"),
return_raw_stats = FALSE
)
}
\description{
Keeping until we transition in the app
}
|
/man/projections_table2.Rd
|
no_license
|
FantasyFootballAnalytics/ffanalytics
|
R
| false | true | 472 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_projections.R
\name{projections_table2}
\alias{projections_table2}
\title{New, lighter, projections_table function}
\usage{
projections_table2(
data_result,
scoring_rules = NULL,
src_weights = NULL,
vor_baseline = NULL,
tier_thresholds = NULL,
avg_type = c("average", "robust", "weighted"),
return_raw_stats = FALSE
)
}
\description{
Keeping until we transition in the app
}
|
\name{has_tests}
\alias{has_tests}
\title{Was devtools installed with tests?}
\usage{
has_tests()
}
\description{
Was devtools installed with tests?
}
\keyword{internal}
|
/devtoolsVersion/devtools 15/man/has_tests.Rd
|
no_license
|
connectthefuture/devtools-R-Forge
|
R
| false | false | 173 |
rd
|
\name{has_tests}
\alias{has_tests}
\title{Was devtools installed with tests?}
\usage{
has_tests()
}
\description{
Was devtools installed with tests?
}
\keyword{internal}
|
# Title : 00_setup
# Objective :
# Created by: Alex
# Created on: 2021-05-25
#
# Purpose: Set up the packages needed for data wrangling and visualization
#
if (!("tidyverse" %in% installed.packages())) {
install.packages("tidyverse")
}
if (!("ggplot2" %in% installed.packages())) {
install.packages("ggplot2")
}
library(ggplot2)
library(tidyverse)
# library() loads and attaches an installed package
|
/alex_tasks/data_wrangling/scripts/00_setup.R
|
permissive
|
AlexAdrian-Hamazaki/Pavlab_Curators
|
R
| false | false | 386 |
r
|
# Title : 00_setup
# Objective :
# Created by: Alex
# Created on: 2021-05-25
#
# Purpose: Set up the packages needed for data wrangling and visualization
#
if (!("tidyverse" %in% installed.packages())) {
install.packages("tidyverse")
}
if (!("ggplot2" %in% installed.packages())) {
install.packages("ggplot2")
}
library(ggplot2)
library(tidyverse)
# library() loads and attaches an installed package
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{WHO_SR}
\alias{WHO_SR}
\title{World Health Organization situation reports data}
\format{A data frame with a row for each situation report}
\source{
\url{https://www.who.int/emergencies/diseases/novel-coronavirus-2019/situation-reports}
}
\usage{
WHO_SR
}
\description{
A dataset containing the data reported in the WHO 2019 novel
coronavirus situation reports.
}
\keyword{datasets}
|
/man/WHO_SR.Rd
|
no_license
|
seabbs/data2019nCoV
|
R
| false | true | 488 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{WHO_SR}
\alias{WHO_SR}
\title{World Health Organization situation reports data}
\format{A data frame with a row for each situation report}
\source{
\url{https://www.who.int/emergencies/diseases/novel-coronavirus-2019/situation-reports}
}
\usage{
WHO_SR
}
\description{
A dataset containing the data reported in the WHO 2019 novel
coronavirus situation reports.
}
\keyword{datasets}
|