| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0-6.46M | large_string, lengths 3-331 | large_string, 2 classes | large_string, lengths 5-125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4-6.46M | large_string, 75 classes | string, lengths 0-6.46M |
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.6187081097806e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615837310-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 2,047 |
r
|
|
library(tidyverse)
library(data.table)
library(fgsea)
library(msigdbr)
library(DT)
library(clusterProfiler)
library(grid)
library(XLConnect)   ## loadWorkbook/createSheet/writeWorksheet etc. used for the xlsx export below
library(org.Mmusculus.GRCm38p6.99.eg.db)
## RNAseq DEG functional analysis using clusterProfiler
rm(list = ls())
source(file = "E:/Chris_UM/GitHub/omics_util/04_GO_enrichment/topGO_functions.R")
source("E:/Chris_UM/GitHub/omics_util/02_RNAseq_scripts/s02_DESeq2_functions.R")
###########################################################################
degResult <- "DKO_vs_WT"
file_RNAseq_info <- here::here("data", "RNAseq_info.txt")
diffDataPath <- here::here("analysis", "02_DESeq2_diff")
outDir <- here::here("analysis", "02_DESeq2_diff", degResult)
outPrefix <- paste(outDir, "/", degResult, sep = "")
# file_msigDesc <- "E:/Chris_UM/Database/Human/GRCh38p12.gencode30/annotation_resources/msigDB_geneset_desc.tab"
orgDb <- org.Mmusculus.GRCm38p6.99.eg.db
keggOrg <- 'mmu'
keggIdCol <- "NCBI"
file_topGO <- "E:/Chris_UM/Database/Mouse/GRCm38.99/annotation_resources/geneid2go.Mmusculus.GRCm38p6.topGO.map"
cutoff_qval <- 0.05
cutoff_lfc <- 0.585
cutoff_up <- cutoff_lfc
cutoff_down <- -1 * cutoff_lfc
col_lfc <- "log2FoldChange"
###########################################################################
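## read the DEG table for the selected comparison and build a signed -log10(p-value) rank metric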
rnaseqInfo <- get_diff_info(degInfoFile = file_RNAseq_info, dataPath = diffDataPath) %>%
dplyr::filter(comparison == degResult)
degs <- suppressMessages(readr::read_tsv(file = rnaseqInfo$deg[1])) %>%
dplyr::mutate(rankMetric = (-log10(pvalue) * sign(shrinkLog2FC))) %>%
dplyr::arrange(desc(rankMetric)) %>%
dplyr::filter(!is.na(rankMetric))
if(! keggIdCol %in% colnames(degs)){
keggInfo <- suppressMessages(
AnnotationDbi::select(x = orgDb, keys = degs$geneId,
keytype = "GID", columns = keggIdCol)
) %>%
dplyr::filter(!is.na(!!sym(keggIdCol))) %>%
dplyr::rename(geneId = GID)   ## keys were queried with keytype "GID" above
degs <- dplyr::left_join(x = degs, y = keggInfo, by = "geneId")
}
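## split significant genes into down- and up-regulated sets using the padj and log2 fold-change cutoffs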
downDegs <- dplyr::filter(degs, padj <= cutoff_qval & !!sym(col_lfc) <= cutoff_down) %>%
dplyr::mutate(category = "down")
upDegs <- dplyr::filter(degs, padj <= cutoff_qval & !!sym(col_lfc) >= cutoff_up) %>%
dplyr::mutate(category = "up")
degData <- dplyr::bind_rows(upDegs, downDegs)
contrast <- unique(upDegs$contrast)
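## named vector (NCBI gene id -> rank metric) to be used by GSEA/fgsea-style analyses below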
geneList <- dplyr::filter(degs, !is.na(NCBI)) %>%
dplyr::select(NCBI, rankMetric) %>%
tibble::deframe()
## replace +Inf and -Inf values with max and min
geneList[is.infinite(geneList) & geneList > 0] <- max(geneList[is.finite(geneList)]) + 1
geneList[is.infinite(geneList) & geneList < 0] <- min(geneList[is.finite(geneList)]) - 1
# ###########################################################################
# ## clusterProfiler: GO enrichment
# ego_up <- enrichGO(gene = unique(upDegs$geneId),
# OrgDb = orgDb,
# ont = "BP", pAdjustMethod = "BH",
# pvalueCutoff = 0.05,
# qvalueCutoff = 0.05,
# keyType = "ENSEMBL",
# readable = FALSE)
#
# barplot(ego_up, showCategory=20)
# emapplot(ego_up, pie_scale=1.5,layout="kk", )
# cnetplot(ego_up, showCategory = 10, node_label="category")
#
# ego_up <- simplify(x = ego_up)
#
# ego_down <- enrichGO(gene = unique(downDegs$geneId),
# OrgDb = orgDb,
# ont = "BP", pAdjustMethod = "BH",
# pvalueCutoff = 0.05,
# qvalueCutoff = 0.05,
# keyType = "ENSEMBL",
# readable = FALSE)
#
# ego_down <- simplify(x = ego_down)
#
# ego_res <- dplyr::bind_rows(
# dplyr::mutate(.data = as.data.frame(ego_up), category = "up"),
# dplyr::mutate(.data = as.data.frame(ego_down), category = "down")
# ) %>%
# dplyr::mutate(contrast = contrast)
#
#
# readr::write_tsv(x = ego_res, path = paste(outPrefix, ".clusterProfiler.GO.tab", sep = ""))
#
#
# ego_degs <- compareCluster(geneClusters = geneId ~ category,
# fun = "enrichGO", data = degData,
# OrgDb = orgDb,
# ont = "BP", pAdjustMethod = "BH",
# pvalueCutoff = 0.05,
# qvalueCutoff = 0.05,
# keyType = "ENSEMBL",
# readable = FALSE)
#
# dotplot(ego_degs,
# showCategory = 20)
# emapplot(ego_degs)
## topGO GO enrichment
topgo_up <- topGO_enrichment(goMapFile = file_topGO,
genes = unique(upDegs$geneId),
type = "BP",
goNodeSize = 5)
topgo_down <- topGO_enrichment(goMapFile = file_topGO,
genes = unique(downDegs$geneId),
type = "BP",
goNodeSize = 5)
topgo_res <- dplyr::bind_rows(
dplyr::mutate(.data = as.data.frame(topgo_up), category = "up"),
dplyr::mutate(.data = as.data.frame(topgo_down), category = "down")
) %>%
dplyr::mutate(contrast = contrast)
readr::write_tsv(x = topgo_res, path = paste(outPrefix, ".topGO.tab", sep = ""))
## top 10 GO term bar plot
topgoPlotDf <- dplyr::group_by(topgo_res, category) %>%
dplyr::arrange(weightedFisher, .by_group = TRUE) %>%
dplyr::slice(1:10) %>%
dplyr::ungroup()
topgo_bar <- enrichment_bar(df = topgoPlotDf,
title = paste(degResult, "\ntop 10 enriched GO terms in up and down DEGs"))
png(filename = paste(outPrefix, ".topGO_bar.png", sep = ""),
width = 2500, height = 2500, res = 250)
topgo_bar
dev.off()
###########################################################################
# ## clusterProfiler: KEGG pathway enrichment
# ekegg_up <- enrichKEGG(gene = na.omit(unique(upDegs$NCBI)),
# organism = keggOrg,
# pvalueCutoff = 0.05)
#
# ekegg_down <- enrichKEGG(gene = na.omit(unique(downDegs$NCBI)),
# organism = keggOrg,
# pvalueCutoff = 0.05)
#
# ekegg_res <- dplyr::bind_rows(
# dplyr::mutate(.data = as.data.frame(ekegg_up), category = "up"),
# dplyr::mutate(.data = as.data.frame(ekegg_down), category = "down")
# ) %>%
# dplyr::mutate(contrast = contrast)
#
#
# readr::write_tsv(x = ekegg_res, path = paste(outPrefix, ".clusterProfiler.kegg.tab", sep = ""))
# ## up and down DEG
# cp_kegg <- compareCluster(
# geneClusters = list(up = na.omit(unique(upDegs$NCBI)),
# down = na.omit(unique(downDegs$NCBI))),
# fun = "enrichKEGG",
# organism = keggOrg
# )
#
# gg_cp_kegg <- dotplot(cp_kegg, showCategory = 100) +
# labs(title = paste(analysisName, "KEGG pathway enrichment")) +
# theme(
# plot.title = element_text(hjust = 1)
# )
#
#
# png(filename = paste(outPrefix, ".clusterProfiler.kegg.png", sep = ""),
# width = 1500, height = 1500, res = 200)
#
# gg_cp_kegg
#
# dev.off()
## KEGGprofile::find_enriched_pathway
keggp_up <- keggprofile_enrichment(
genes = as.character(na.omit(unique(upDegs[[keggIdCol]]))), orgdb = orgDb,
keytype = keggIdCol, keggIdCol = keggIdCol, keggOrg = keggOrg
)
keggp_down <- keggprofile_enrichment(
genes = as.character(na.omit(unique(downDegs[[keggIdCol]]))), orgdb = orgDb,
keytype = keggIdCol, keggIdCol = keggIdCol, keggOrg = keggOrg
)
keggp_res <- dplyr::bind_rows(
dplyr::mutate(.data = as.data.frame(keggp_up), category = "up"),
dplyr::mutate(.data = as.data.frame(keggp_down), category = "down")
) %>%
dplyr::mutate(contrast = contrast)
readr::write_tsv(x = keggp_res, path = paste(outPrefix, ".keggProfile.tab", sep = ""))
## top 10 KEGG pathway bar plot
keggPlotDf <- dplyr::group_by(keggp_res, category) %>%
dplyr::arrange(pvalue, .by_group = TRUE) %>%
dplyr::slice(1:10) %>%
dplyr::ungroup()
kegg_bar <- enrichment_bar(
df = keggPlotDf,
title = paste(degResult, "\ntop 10 enriched KEGG pathways in up and down DEGs"),
pvalCol = "pvalue", termCol = "Pathway_Name",
colorCol = "category", countCol = "Significant"
)
png(filename = paste(outPrefix, ".KEGG_bar.png", sep = ""),
width = 2500, height = 2500, res = 250)
kegg_bar
dev.off()
###########################################################################
# ## GSEA
# msigdbr_show_species()
# msig_df <- msigdbr(species = "Homo sapiens") %>%
# dplyr::filter(gs_cat %in% c("H", "C2", "C5")) %>%
# dplyr::filter(! gs_subcat %in% c("MF", "CC"))
#
# # , category = c("H", "C2", "C5")
# msig_list <- split(x = msig_df$entrez_gene, f = msig_df$gs_name)
#
# length(intersect(names(geneList), unique(msig_df$entrez_gene)))
#
# vn <- VennDiagram::venn.diagram(
# x = list(geneList = names(geneList), msig = unique(msig_df$entrez_gene)),
# filename = NULL,
# print.mode = c("raw", "percent"),
# scaled = FALSE
# )
# dev.off()
# grid.draw(vn)
#
# msigDescDf <- suppressMessages(readr::read_tsv(file = file_msigDesc))
# msigDesc <- split(x = msigDescDf$DESCRIPTION_BRIEF, f = msigDescDf$STANDARD_NAME)
#
# egsea <- GSEA(geneList = geneList,
# nPerm = 10000,
# pvalueCutoff = 0.1,
# minGSSize = 10, maxGSSize = Inf,
# TERM2GENE = dplyr::select(msig_df, gs_name, entrez_gene))
#
# egseaDf <- as_tibble(egsea) %>%
# dplyr::left_join(y = msigDescDf, by = c("ID" = "STANDARD_NAME")) %>%
# dplyr::mutate(contrast = contrast) %>%
# dplyr::select(ID, contrast, everything(), -Description)
#
# readr::write_tsv(x = egseaDf,
# path = paste(outPrefix, ".clusterProfiler.GSEA.tab", sep = ""))
# ## plotting specific genesets
# genesetSub <- c("GO_CELL_CYCLE",
# "GO_RESPONSE_TO_ENDOPLASMIC_RETICULUM_STRESS",
# "GO_DNA_REPLICATION")
#
# plotList <- list()
#
# pdf(file = paste(outPrefix, ".clusterProfiler.GSEA_enrichmentPlot.pdf", sep = ""),
# width = 10, height = 8, onefile = TRUE)
#
# for (setId in genesetSub) {
#
# if(setId %in% egsea$ID){
# pt <- enrichplot::gseaplot2(egsea, geneSetID = setId)
# wrap_100 <- wrap_format(120)
#
# plotSubTitle <- paste(
# "p-value = ", sprintf(fmt = "%.2E", egseaDf$pvalue[which(egseaDf$ID == setId)]),
# "; q-value = ", sprintf(fmt = "%.2E", egseaDf$qvalues[which(egseaDf$ID == setId)]),
# "\n", wrap_100(x = msigDesc[[setId]]),
# sep = "")
#
# pt <- pt +
# labs(
# title = paste(setId, ": ", contrast, sep = ""),
# subtitle = plotSubTitle
# ) +
# theme_bw() +
# theme(panel.grid = element_blank(),
# panel.border = element_blank())
#
# plotList[[setId]] <- pt
#
# plot(pt)
#
# }
#
# }
#
# dev.off()
# ###########################################################################
# ## GSEA enrichment using fgsea
# gseaRes <- fgsea(pathways = msig_list, stats = geneList, nperm = 10000)
#
# gseaRes <- dplyr::filter(gseaRes, pval < 0.05) %>%
# dplyr::left_join(y = msigDescDf, by = c("pathway" = "STANDARD_NAME")) %>%
# dplyr::mutate(contrast = contrast)
#
# topPathways <- gseaRes[head(order(pval), n=15)][order(NES), pathway]
# plotGseaTable(msig_list[topPathways], geneList,
# gseaRes, gseaParam=0.5)
#
# pt2 <- plotEnrichment(pathway = msig_list[[setId]],
# stats = geneList) +
# labs(
# title = paste(setId, ":", contrast),
# subtitle = wrap_100(x = msigDesc[[setId]]),
# x = "Rank in ordered dataset",
# y = "Enrichment Score") +
# theme_bw() +
# theme(panel.grid = element_blank(),
# axis.text = element_text(size = 12),
# axis.title = element_text(size = 14, face = "bold"))
###########################################################################
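## write the topGO and KEGGprofile enrichment tables to a multi-sheet xlsx workbook using XLConnect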
excelOut <- paste(outPrefix, ".enrichment.xlsx", sep = "")
unlink(excelOut, recursive = FALSE, force = FALSE)
exc <- loadWorkbook(excelOut, create = TRUE)
xlcFreeMemory()
wrkSheet <- "topGO"
createSheet(exc, name = wrkSheet)
createFreezePane(exc, sheet = wrkSheet, 2, 2)
writeWorksheet(object = exc, data = topgo_res, sheet = wrkSheet)
setAutoFilter(object = exc, sheet = wrkSheet,
reference = aref(topLeft = "A1", dimension = dim(topgo_res)))
wrkSheet <- "keggProfile"
createSheet(exc, name = wrkSheet)
createFreezePane(exc, sheet = wrkSheet, 2, 2)
writeWorksheet(object = exc, data = keggp_res, sheet = wrkSheet)
setAutoFilter(object = exc, sheet = wrkSheet,
reference = aref(topLeft = "A1", dimension = dim(keggp_res)))
setColumnWidth(object = exc, sheet = 1:2, column = 1, width = -1)
setColumnWidth(object = exc, sheet = 1:2, column = 2, width = c(13000))
# wrkSheet <- "GSEA"
# createSheet(exc, name = wrkSheet)
# createFreezePane(exc, sheet = wrkSheet, 2, 2)
# writeWorksheet(object = exc, data = egseaDf, sheet = wrkSheet)
# setAutoFilter(object = exc, sheet = wrkSheet,
# reference = aref(topLeft = "A1", dimension = dim(egseaDf)))
# setColumnWidth(object = exc, sheet = 3, column = 1, width = c(13000))
# wrkSheet <- "clusterProfiler_GO"
# createSheet(exc, name = wrkSheet)
# createFreezePane(exc, sheet = wrkSheet, 2, 2)
# writeWorksheet(object = exc, data = ego_res, sheet = wrkSheet)
# setAutoFilter(object = exc, sheet = wrkSheet,
# reference = aref(topLeft = "A1", dimension = dim(ego_res)))
#
# wrkSheet <- "clusterProfiler_KEGG"
# createSheet(exc, name = wrkSheet)
# createFreezePane(exc, sheet = wrkSheet, 2, 2)
# writeWorksheet(object = exc, data = ekegg_res, sheet = wrkSheet)
# setAutoFilter(object = exc, sheet = wrkSheet,
# reference = aref(topLeft = "A1", dimension = dim(ekegg_res)))
xlcFreeMemory()
saveWorkbook(exc)
|
/scripts/03_RNAseq_functional_enrichment.R
|
no_license
|
lakhanp1/38_ZhuBO_RNAseq4_DKO
|
R
| false | false | 13,584 |
r
|
|
# This script produces statistical summaries and graphical results
# from the output of ba_rand_test.R in the different regions and subregions
library(tidyverse)
library(raster)  # raster() and cellStats() are used by count_na() below
cell_km2 <- 0.215 # MODIS raster cell area in km2
# Codes (used in file names) and names (to display in plots) for regions and subregions
reg_codes <- c("northam", "eurasia", "westna", "eastna",
"scand", "eurus", "wsib", "esib")
reg_names <- c("North America", "Eurasia", "West North Am.", "East North Am.",
"Scandinavia", "Eur. Russia", "West Siberia", "East Siberia")
# Calculate summary statistics from randomization test --------------------
calc_stats <- function(reg_code) {
rand_out <- readRDS(paste0("res_rand_multi_", reg_code, ".rds"))
obs_out <- readRDS(paste0("res_obs_", reg_code, ".rds"))
# Combine the distribution of # of years burned by cell from the
# 200 randomizations (identified by "sim" ID column) into one data frame
# and add 0s (when no cell with that burn count) with complete function
burn_counts <- map_dfr(rand_out, ~ as.data.frame(.$tab), .id = "sim") %>%
mutate(burn_counts = as.integer(as.character(burn_counts))) %>%
complete(sim, burn_counts, fill = list(Freq = 0))
# Calculate the mean and 95% interval of cell frequencies
# for each value of burn_counts (# of years with fire) across simulations
burn_stats <- group_by(burn_counts, burn_counts) %>%
summarize(mean = mean(Freq), lo = quantile(Freq, 0.025),
hi = quantile(Freq, 0.975))
# Combine with distribution from original data
burn_obs <- as.data.frame(obs_out$tab) %>%
mutate(burn_counts = as.integer(as.character(burn_counts))) %>%
rename(obs = Freq)
burn_stats <- full_join(burn_stats, burn_obs)
# Replace NAs with 0s (when a burn_counts value is absent from simulated or observed data)
burn_stats <- replace_na(burn_stats, list(mean = 0, lo = 0, hi = 0, obs = 0))
# Calculate the distribution of time between fires for each randomization output
# and combine in one data frame
ret_counts <- map_dfr(rand_out, ~ as.data.frame(table(.$ret$dt)), .id = "sim") %>%
mutate(Var1 = as.integer(as.character(Var1))) %>%
complete(sim, Var1, fill = list(Freq = 0))
# Similar to above, get the mean and 95% interval for the cell frequencies for
# each value of dt (years between fires) across simulations, then
# combine with observed values in original data
ret_stats <- rename(ret_counts, dt = Var1) %>%
group_by(dt) %>%
summarize(mean = mean(Freq), lo = quantile(Freq, 0.025),
hi = quantile(Freq, 0.975))
ret_obs <- as.data.frame(table(obs_out$ret$dt)) %>%
mutate(Var1 = as.integer(as.character(Var1))) %>%
rename(dt = Var1, obs = Freq)
ret_stats <- full_join(ret_stats, ret_obs)
ret_stats <- replace_na(ret_stats, list(mean = 0, lo = 0, hi = 0, obs = 0))
# Return output as a list and save to disk
stats_out <- lst(burn_stats, ret_stats)
saveRDS(stats_out, paste0("res_stats_", reg_code, ".rds"))
stats_out
}
# Apply function above to all subregions and combine results into list
#res <- map(reg_codes, calc_stats) %>%
# setNames(reg_names)
# or get from disk
res <- map(paste0("res_stats_", reg_codes, ".rds"), readRDS) %>%
setNames(reg_names)
# Years with fire ---------------------------------------------------------
# The number of cells with 0 fires in the burn_stats table includes cells that are
# located in water or outside the boreal biome, so we need to count them from the mask
# and subtract those numbers from the table
mask_dir <- "data/cell_masks"
count_na <- function(reg_code) {
rast <- raster(file.path(mask_dir, paste0("cells_na025_", reg_code, ".tif")))
cellStats(rast == 0, sum)
}
reg_na_counts <- map_dbl(reg_codes, count_na)
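# Subtract the masked (NA) cell count from the 0-fire row (first row) of each region's burn_stats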
for (i in seq_along(res)) {
for (j in c("mean", "lo", "hi", "obs")) {
res[[i]]$burn_stats[1, j] <- res[[i]]$burn_stats[1, j] - reg_na_counts[i]
}
}
# Combine all the burn_stats tables from all regions into one data frame
burn_stats <- map_df(res, "burn_stats", .id = "region")
# Sum counts for cells with 4+ fires into same category
burn_stats2 <- burn_stats %>%
mutate(burn_counts = ifelse(burn_counts > 4, 4, burn_counts)) %>%
group_by(region, burn_counts) %>%
summarize_all(.funs = "sum")
# Manually add row of 0s for 4+ fires category in Scandinavia
burn_stats2 <- rbind(burn_stats2, data.frame(region = "Scandinavia", burn_counts = 4,
mean = 0, lo = 0, hi = 0, obs = 0))
# Convert numbers of cells to area in km2
burn_stats2 <- mutate(burn_stats2, mean = mean * cell_km2,
lo = lo * cell_km2, hi = hi * cell_km2, obs = obs * cell_km2)
# Pivot data in burn_stats2 to put observed and simulated (lo/mean/hi) statistics
# side to side (for results presentation, redundant values need to be removed in some columns)
burn_tab <- pivot_longer(burn_stats2, cols = c("hi", "mean", "lo"),
names_to = "stat", values_to = "area") %>%
ungroup() %>%
nest_by(region, burn_counts, obs) %>%
pivot_wider(names_from = "burn_counts", values_from = c("obs", "data")) %>%
unnest()
# Bias in total fire area (relative difference between mean of simulations and observation)
# This bias is negative and due to fires being "pushed out" of the study area by random translation
group_by(burn_stats, region) %>%
summarize(bias = sum(burn_counts * mean) / sum(burn_counts * obs) - 1)
# Return times ------------------------------------------------------------
# Combine all the ret_stats tables from all regions into one data frame,
# convert cell counts to areas in km2
ret_stats <- map_df(res, "ret_stats", .id = "region")
ret_stats2 <- mutate(ret_stats, mean = mean * cell_km2,
lo = lo * cell_km2, hi = hi * cell_km2, obs = obs * cell_km2)
ret_stats2$region <- factor(ret_stats2$region, levels = reg_names)
# Produce graph of simulated vs. observed distribution of years between fires by region
ggplot(ret_stats2, aes(x = dt, y = mean)) +
geom_pointrange(aes(ymin = lo, ymax = hi), fatten = 2) +
geom_point(aes(y = obs), color = "red") +
geom_line(aes(y = obs), color = "red") +
labs(x = "Time between fires", y = "Area (sq. km)") +
facet_wrap(~ region, ncol = 2, scale = "free_y") +
theme_bw() +
theme(strip.background = element_blank(), strip.text = element_text(face = "bold"))
# Map of study area -------------------------------------------------------
library(raster)
library(stars)
library(sf)
library(spData)
data(world)
na_mask <- read_stars(file.path(mask_dir, "cells_na025_northam.tif"), proxy = TRUE)
eu_mask <- read_stars(file.path(mask_dir, "cells_na025_eurasia.tif"), proxy = TRUE)
bbox = st_bbox(na_mask)
ggplot(world) +
labs(x = "", y = "") +
geom_stars(data = na_mask, downsample = 10) +
geom_stars(data = eu_mask, downsample = 10) +
geom_sf(fill = NA) +
coord_sf(crs = st_crs(na_mask), ylim = c(5000000, 8000000)) +
scale_fill_gradient(low = "white", high = "darkgreen") +
theme_minimal() +
theme(legend.position = "none")
|
/ba_process_output.R
|
no_license
|
pmarchand1/fire-recurrence-modis
|
R
| false | false | 7,248 |
r
|
|
shinyServer(
function(input, output) {
# forming data frame for radar plot
radar.data <- reactive({
rbind(r.data,
nriData.wide[nriData.wide$Country==input$country1, dims],
nriData.wide[nriData.wide$Country==input$country2, dims]
)
})
# preparing radarplot
output$radarPlot <- renderPlot({
radarchart(radar.data()[,-1], axistype=1, seg=7, centerzero=T,
vlabels=dims.lab, caxislabels=0:7, axislabcol=1, calcex=0.8,
vlcex=0.9, plty=1, pcol=c(2,4), cglcol="darkgrey")
legend("topright", lty="solid", col = c(2,4), pch=16, cex=1,
legend = radar.data()[3:4,1], bty="n")
})
# table with values
# this is because I don't know how to display values on the plot
output$table <- renderTable({radar.data()[3:4,]})
# function preparing data to download
output$download <- downloadHandler(
filename = function() { 'NRIsimpleData.csv' },
content = function(file) {
write.csv(nriData.wide[,dims.download], file, row.names = FALSE)
}
)
}
)
|
/server.R
|
no_license
|
tomowc/ddp_proj_app
|
R
| false | false | 1,282 |
r
|
|
#3
# While the total populations of Illinois, Michigan and Ohio do not differ much,
# the Asian population of Illinois is markedly higher.
#mean(midwest$percollege)== 18.27274
#mean(midwest[midwest$percasian>1,]$percollege) == 29.87688
#mean(midwest[midwest$percasian>2,]$percollege) == 35.01751
#mean(midwest[midwest$percasian>3,]$percollege) == 38.84174
#mean(midwest[midwest$percasian>4,]$percollege) == 44.04773
# An exact correlation is hard to establish, but counties with a higher Asian
# population share clearly show a higher college attendance rate.
library(ggplot2)  # qplot() is used in #8 below
midwest = as.data.frame(ggplot2::midwest)
st = aggregate(data=midwest, poptotal~state, sum)
at = aggregate(data=midwest, popasian~state, sum)
tableapps = cbind(st,at[,2])
colnames(tableapps)[3]='asian'
tableapps
hist(tableapps$asian)
#4
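# rename the poptotal and popasian columns to shorter names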
colnames(midwest)[5]='total'
colnames(midwest)[10]='asian'
#5
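# each county's share of the total Asian population, shown as a histogram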
ta = sum(midwest$asian)
midwest$asianpct = midwest$asian / ta
hist(midwest$asianpct)
#6
apps = aggregate(data=midwest, asian~state, sum)
barplot(apps$asian, names.arg=apps$state, main="Asian population by state")
Illinois = midwest[midwest$state=='IL',]
barplot(Illinois$asian, names.arg=Illinois$county, main="Asian population by county in Illinois")
#7
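# flag counties with an above-average Asian population share as 'lg', others as 'sm'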
apavg = mean(midwest$asianpct)
midwest$asianrate = ifelse(midwest$asianpct > apavg,'lg','sm')
#8
qplot(midwest$asianrate)
|
/R/midwest데이터.R
|
no_license
|
better331132/helloR
|
R
| false | false | 1,404 |
r
|
|
.Random.seed <-
c(403L, 432L, -678628990L, -398879292L, -1175936006L, -1927840090L,
71575548L, 116701410L, 973048916L, -1634865779L, 413940365L,
626374877L, 688513227L, -158508683L, 80827441L, 376549021L, -198417317L,
-601753194L, 1399586916L, 1828546730L, 864239660L, -427201396L,
-2085029066L, -1447676248L, 297026322L, 274436715L, 1570852439L,
713321715L, 1119744301L, -746630965L, 1458399651L, -946188469L,
-1158865623L, 1264151460L, 738548702L, 1626031552L, 702213062L,
1269439554L, -1213388584L, 664600934L, -893940864L, 1114981001L,
1604605673L, 1950682481L, -512296665L, -2058492583L, -646003579L,
-1597660015L, -1043364697L, 839734130L, -1312895440L, -1307176714L,
1449368072L, -1653838744L, -887775358L, -1976264692L, -121651394L,
2139912247L, 1185569955L, -1323091161L, 1125807513L, 2013584207L,
2036168343L, 139071L, -1956296395L, 831987440L, 92544410L, -1016487700L,
-1849779566L, -21018226L, -1668500556L, 1459394650L, 747166668L,
951348533L, 920377013L, -899739179L, 1501075843L, 277385965L,
-1039263191L, 1113072517L, -567437373L, 2027522350L, 1886004748L,
-774479726L, 512471156L, -503412492L, -681766434L, -1224206336L,
1821028010L, -2072261053L, -1854695505L, 727208283L, 347105941L,
-1929454701L, 1130402779L, 672193683L, -452938351L, 1911258956L,
775003990L, 9710984L, -1801397794L, 289601674L, 1638323472L,
-432850274L, 1173086200L, -478964495L, -1496725487L, -456969719L,
-1332195297L, -1558593423L, 2069917501L, 248194873L, 1421978031L,
912716106L, -1317845896L, 902349438L, 1160934516L, 1242284070L,
1725657337L, 1368664303L, 842831784L, -386904873L, -433032338L,
2114352002L, 105472177L, -1280577707L, 1750320208L, -306698936L,
192454119L, 692843292L, -882672157L, -1743281499L, 1382387670L,
-2097202952L, -354870617L, 225711393L, 1322358498L, 76310285L,
-141122240L, -1107844312L, 703727731L, -1935727969L, -397122122L,
1560418874L, 1421401353L, -1735857958L, 424580397L, 734131187L,
249024008L, -1634955718L, 146049413L, -140860989L, -481659604L,
89074323L, 1759469186L, -1162794370L, 1475037437L, -955315223L,
-1321315140L, -1804122524L, -806842957L, -1562747392L, 799140047L,
-1643377103L, 814462426L, 1092725356L, -1297108845L, -1828040203L,
913582694L, -52432135L, -1860348876L, 1366604660L, -1247156305L,
-242516157L, 763576786L, 1767801510L, 1561136389L, -364249458L,
-901295575L, 1336685823L, 1463710812L, 641802542L, -868406751L,
1118233847L, -1635437264L, -924337761L, -299095994L, 1951974138L,
-648053767L, -137244035L, -1479183880L, -1732755296L, -775287265L,
-1914750044L, 1944451259L, 372759405L, 1723378334L, -1323804368L,
1915256015L, 1204473241L, -1221254022L, 1366246581L, -1149451464L,
-105900800L, 1892985563L, -154435897L, -257054162L, 908561122L,
-486026671L, -1690688414L, 1690895077L, -1249609973L, 1331646096L,
-708390846L, 382377997L, 39964363L, -1051645420L, 980477787L,
445975290L, -293132490L, -1056638587L, -909720335L, -2139485308L,
-1230070724L, -1243231413L, -1843137336L, 1613120807L, -598167552L,
-609296512L, -1480274249L, -1808484402L, 2135344181L, 775080344L,
-557330412L, -595116833L, 435091232L, -787742264L, 1125892787L,
239611654L, -1652636241L, 807172076L, -810246070L, -1849051843L,
-799852728L, -1822061624L, 1040420411L, 889143290L, -382725479L,
869541460L, 2011101960L, 1562599423L, 390909136L, 1161581032L,
239177303L, 1319218202L, -987235097L, 626939904L, -322864966L,
1197834065L, -908030920L, 2140407224L, 889325815L, 945651798L,
-1114520171L, -373042544L, 1181368284L, -703499177L, -524689344L,
832113672L, 1182454835L, 1660662662L, 900538375L, 72518932L,
1966896322L, 110281149L, -1342923856L, 792213728L, -314844109L,
-1287859302L, -574840991L, 770307956L, 608662888L, 349791799L,
-610456752L, 1265937800L, 1700416109L, -688226935L, 151721097L,
2029903722L, -1119366995L, -1273897883L, 1335704149L, -1500444983L,
513178415L, -1784653697L, -1694558843L, -335749632L, 421812201L,
-899984659L, -1274153033L, -1781825729L, 1238029485L, -1927832735L,
1136185445L, 1670668906L, 905611273L, -773901147L, 1444205193L,
-1029084187L, 817354943L, -1623631593L, 388422909L, -1165302336L,
1047356745L, 537047057L, -1846419133L, 1966065171L, -2033240803L,
2087000985L, 983271465L, -1646984222L, -140342835L, -159009323L,
1687391061L, -1920837175L, 1182575943L, 1834243967L, -1540677747L,
1734748736L, -1071236151L, 713181165L, -1131342849L, 344330599L,
-381008811L, -2014390359L, -944201995L, -722125990L, -507420807L,
-1405385875L, -1582773687L, -1549038411L, -1135856449L, 333733103L,
1648203097L, 1485895102L, -2021911790L, -40994934L, -483275243L,
1903867747L, -867676632L, 644998820L, 1258905385L, -127843720L,
-1151059726L, 448064906L, -816933377L, -1532752979L, -160987154L,
-1154897554L, 1776799049L, -328215714L, 1098349362L, -678580130L,
-2090834127L, -264884913L, -545945504L, -294614572L, 807014733L,
-1844186688L, 539268030L, -1090604606L, -483543717L, 144771897L,
2100690134L, 1428280718L, 195447097L, -524964586L, -228524582L,
701745946L, 683013277L, -159546005L, 205630712L, -964223788L,
1812774441L, -125987168L, 1303924594L, 1257882810L, 1686273823L,
1694510285L, 1311178358L, -1127972370L, 90012081L, 886298462L,
834184050L, 1121679998L, -460872119L, 845419815L, -484934616L,
418420492L, -1543418979L, -2083489904L, -1410648402L, -831603526L,
-133287813L, 643503785L, -67902282L, 602653222L, 13156761L, 865489550L,
152993090L, -127776278L, 530094181L, 830765747L, -1106803176L,
683272036L, -1991481767L, -1055438936L, 1745008562L, 957075114L,
967219183L, 453545389L, 1186946590L, 432626894L, 6356857L, -1648509234L,
-1195374174L, -54104834L, 77884465L, -89230097L, -591712272L,
-448787420L, 963286509L, -56883120L, 51888414L, -1024170958L,
533154859L, 2075959577L, 665635046L, -1762715618L, 871984329L,
-695231706L, -1815508310L, -1252808214L, 426529965L, -1211069925L,
1549021624L, -967019004L, -804573719L, -1187570672L, 1317399042L,
675211306L, 1940022463L, 897706493L, 70692742L, -415387234L,
924396193L, -369242530L, 835053266L, 1984702350L, -1194891959L,
-1442213561L, 1191111032L, 1251454940L, 242515597L, 201797296L,
-151132994L, -869086134L, -627086949L, -2044710183L, 1318180790L,
-1307105514L, 1239598233L, 1234137566L, -1997800014L, -449526486L,
-972994347L, 248669859L, -210071128L, 1097405252L, -1957824151L,
1090040152L, -720479822L, -1714844982L, -221397345L, -530302195L,
-1207755314L, -1021582994L, -773060887L, 686423870L, -1237840686L,
-670378050L, 1219487633L, 857223599L, 1466433696L, -1935077132L,
-826237171L, 1379193984L, -1734752354L, 333728290L, -2108234789L,
1661987737L, -713629002L, -1679360466L, -1338465383L, -1781762026L,
1449067674L, -1334981958L, -1413483971L, 1927969483L, -1024618472L,
-1922546316L, -777811831L, 1140279200L, 1618307026L, -1792983654L,
1334028479L, -653892051L, 1147136118L, -480364690L, -1347321231L,
886469982L, 856743474L, 1603984062L, -410732727L, 857493415L,
503043720L, 1223948364L, -1835141123L, 868392464L, 539237742L,
-1392244870L, 369408507L, 1728478313L, 982060790L, -1232615258L,
-526812935L, 1712374446L, 128581026L, -686588566L, -79365595L,
1101727475L, -51486824L, 705124964L, 109274073L, -1915654776L,
-1053346478L, -920834934L, -1986103345L, -374215379L, 1590133438L,
-1528927250L, -30913767L, 560498606L, -1053516478L, 1232090046L,
301750321L, -203888337L, -2069774736L, 101380740L, 1331122509L,
-1442643312L, 268086238L, 1393707538L, -1516209237L, -653047847L,
1680624582L, 459832446L, -1410861207L, -1548072858L, -737907926L,
-1420347538L, 1890420475L, -10567480L, 881331219L, 1604721722L,
1776082929L, -699339963L, -1513254737L, -899122822L, -2011056899L,
-481726974L, -1635874615L, 1471273956L, -1329496411L, -213739313L,
1535947971L, -1654339982L, -1236623129L, 1353997100L, 1327616427L,
622599674L, -1329658787L, -761604883L, 1958464763L, -459034366L,
-1141222495L, 1971681710L, 158871233L, 379301460L, 597639365L,
576925095L, -20491749L, -727623746L, -2089862189L, -1315474016L,
-1884262125L, -1326506806L, -1537245087L, 1107282365L, 1774183919L,
-1303256726L, -169533331L, 508469090L, -547045999L, -1640342044L,
262754557L, 890653887L, 2011475699L, -128819822L, 1727898671L,
412415764L, -1030977565L, 2101494498L, -862733602L, -1119278516L,
1932980173L)
|
/GDAtools/R/GDAtools-internal.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 8,244 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_data.R
\name{fmt_url}
\alias{fmt_url}
\title{Format URLs to generate links}
\usage{
fmt_url(
data,
columns = everything(),
rows = everything(),
label = NULL,
as_button = FALSE,
color = "auto",
show_underline = "auto",
button_fill = "auto",
button_width = "auto",
button_outline = "auto"
)
}
\arguments{
\item{data}{\emph{The gt table data object}
\verb{obj:<gt_tbl>} // \strong{required}
This is the \strong{gt} table object that is commonly created through use of the
\code{\link[=gt]{gt()}} function.}
\item{columns}{\emph{Columns to target}
\verb{<column-targeting expression>} // \emph{default:} \code{everything()}
Can either be a series of column names provided in \code{\link[=c]{c()}}, a vector of
column indices, or a select helper function. Examples of select helper
functions include \code{\link[=starts_with]{starts_with()}}, \code{\link[=ends_with]{ends_with()}}, \code{\link[=contains]{contains()}},
\code{\link[=matches]{matches()}}, \code{\link[=one_of]{one_of()}}, \code{\link[=num_range]{num_range()}}, and \code{\link[=everything]{everything()}}.}
\item{rows}{\emph{Rows to target}
\verb{<row-targeting expression>} // \emph{default:} \code{everything()}
In conjunction with \code{columns}, we can specify which of their rows should
undergo formatting. The default \code{\link[=everything]{everything()}} results in all rows in
\code{columns} being formatted. Alternatively, we can supply a vector of row
captions within \code{\link[=c]{c()}}, a vector of row indices, or a select helper
function. Examples of select helper functions include \code{\link[=starts_with]{starts_with()}},
\code{\link[=ends_with]{ends_with()}}, \code{\link[=contains]{contains()}}, \code{\link[=matches]{matches()}}, \code{\link[=one_of]{one_of()}}, \code{\link[=num_range]{num_range()}}, and
\code{\link[=everything]{everything()}}. We can also use expressions to filter down to the rows we
need (e.g., \verb{[colname_1] > 100 & [colname_2] < 50}).}
\item{label}{\emph{Link label}
\verb{scalar<character>} // \emph{default:} \code{NULL} (\code{optional})
The visible 'label' to use for the link. If \code{NULL} (the default)
the URL will serve as the label. There are two non-\code{NULL} options: (1) a
static text can be used for the label by providing a string, and (2) a
function can be provided to fashion a label from every URL.}
\item{as_button}{\emph{Style link as a button}
\verb{scalar<logical>} // \emph{default:} \code{FALSE}
An option to style the link as a button. By default, this is
\code{FALSE}. If this option is chosen then the \code{button_fill} argument becomes
usable.}
\item{color}{\emph{Link color}
\verb{scalar<character>} // \emph{default:} \code{"auto"}
The color used for the resulting link and its underline. This is
\code{"auto"} by default; this allows \strong{gt} to choose an appropriate color
based on various factors (such as the background \code{button_fill} when
\code{as_button} is \code{TRUE}).}
\item{show_underline}{\emph{Show the link underline}
\verb{scalar<character>|scalar<logical>} // \emph{default:} \code{"auto"}
Should the link be decorated with an underline? By
default this is \code{"auto"} which means that \strong{gt} will choose \code{TRUE} when
\code{as_button = FALSE} and \code{FALSE} in the other case. The link underline will
be the same color as that set in the \code{color} option.}
\item{button_fill, button_width, button_outline}{\emph{Button options}
\verb{scalar<character>} // \emph{default:} \code{"auto"}
Options for styling a link-as-button (and only applies if
\code{as_button = TRUE}). All of these options are by default set to \code{"auto"},
allowing \strong{gt} to choose appropriate fill, width, and outline values.}
}
\value{
An object of class \code{gt_tbl}.
}
\description{
Should cells contain URLs, the \code{fmt_url()} function can be used to make them
navigable links. This should be expressly used on columns that contain \emph{only}
URL text (i.e., no URLs as part of a larger block of text). Should you have
such a column of data, there are options for how the links should be styled.
They can be of the conventional style (with underlines and text coloring that
sets it apart from other text), or, they can appear to be button-like (with
a surrounding box that can be filled with a color of your choosing).
URLs in data cells are detected in two ways. The first is using the simple
Markdown notation for URLs of the form: \verb{[label](URL)}. The second assumes
that the text is the URL. In the latter case the URL is also used as the
label but there is the option to use the \code{label} argument to modify that
text.
}
\section{Compatibility of formatting function with data values}{
The \code{fmt_url()} formatting function is compatible with body cells that are
of the \code{"character"} or \code{"factor"} types. Any other types of body cells are
ignored during formatting. This is to say that cells of incompatible data
types may be targeted, but there will be no attempt to format them.
}
\section{Targeting cells with \code{columns} and \code{rows}}{
Targeting of values is done through \code{columns} and additionally by \code{rows} (if
nothing is provided for \code{rows} then entire columns are selected). The
\code{columns} argument allows us to target a subset of cells contained in the
resolved columns. We say resolved because aside from declaring column names
in \code{c()} (with bare column names or names in quotes) we can use
\strong{tidyselect}-style expressions. This can be as basic as supplying a select
helper like \code{starts_with()}, or, providing a more complex incantation like
\code{where(~ is.numeric(.x) && max(.x, na.rm = TRUE) > 1E6)}
which targets numeric columns that have a maximum value greater than
1,000,000 (excluding any \code{NA}s from consideration).
By default all columns and rows are selected (with the \code{everything()}
defaults). Cell values that are incompatible with a given formatting function
will be skipped over, such as \code{character} values passed to numeric \verb{fmt_*()}
functions. So it's safe to select all columns with a particular formatting
function (only those values that can be formatted will be formatted), but,
you may not want that. One strategy is to format the bulk of cell values with
one formatting function and then constrain the columns for later passes with
other types of formatting (the last formatting done to a cell is what you get
in the final output).
Once the columns are targeted, we may also target the \code{rows} within those
columns. This can be done in a variety of ways. If a stub is present, then we
potentially have row identifiers. Those can be used much like column names in
the \code{columns}-targeting scenario. We can use simpler \strong{tidyselect}-style
expressions (the select helpers should work well here) and we can use quoted
row identifiers in \code{c()}. It's also possible to use row indices (e.g.,
\code{c(3, 5, 6)}) though these index values must correspond to the row numbers of
the input data (the indices won't necessarily match those of rearranged rows
if row groups are present). One more type of expression is possible, an
expression that takes column values (can involve any of the available columns
in the table) and returns a logical vector. This is nice if you want to base
formatting on values in the column or another column, or, you'd like to use a
more complex predicate expression.
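As a minimal illustrative sketch, a predicate on another column can restrict which
rows get formatted:
\if{html}{\out{<div class="sourceCode r">}}\preformatted{gt(towny) |>
  fmt_url(
    columns = website,
    rows = population_2021 > 100000
  )
}\if{html}{\out{</div>}}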
}
\section{Compatibility of arguments with the \code{from_column()} helper function}{
The \code{\link[=from_column]{from_column()}} helper function can be used with certain arguments of
\code{fmt_url()} to obtain varying parameter values from a specified column within
the table. This means that each row could be formatted a little bit
differently. These arguments provide support for \code{\link[=from_column]{from_column()}}:
\itemize{
\item \code{label}
\item \code{as_button}
\item \code{color}
\item \code{show_underline}
\item \code{button_fill}
\item \code{button_width}
\item \code{button_outline}
}
Please note that for each of the aforementioned arguments, a \code{\link[=from_column]{from_column()}}
call needs to reference a column that has data of the correct type (this is
different for each argument). Additional columns for parameter values can be
generated with the \code{\link[=cols_add]{cols_add()}} function (if not already present). Columns
that contain parameter data can also be hidden from final display with
\code{\link[=cols_hide]{cols_hide()}}. Finally, there is no limit on how many arguments the
\code{\link[=from_column]{from_column()}} helper can be applied to, so long as the arguments belong to this
closed set.
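As a minimal illustrative sketch (the \code{label_txt} column here is purely
hypothetical), per-row \code{label} values could be taken from another column:
\if{html}{\out{<div class="sourceCode r">}}\preformatted{towny |>
  dplyr::select(name, website) |>
  dplyr::slice_head(n = 4) |>
  dplyr::mutate(label_txt = toupper(name)) |>
  gt() |>
  fmt_url(
    columns = website,
    label = from_column(column = "label_txt")
  ) |>
  cols_hide(columns = label_txt)
}\if{html}{\out{</div>}}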
}
\section{Examples}{
Using a portion of the \code{\link{towny}} dataset, let's create a \strong{gt} table. We can
use the \code{fmt_url()} function on the \code{website} column to generate navigable
links to websites. By default the links are underlined and the color will be
chosen for you (it's dark cyan).
\if{html}{\out{<div class="sourceCode r">}}\preformatted{towny |>
dplyr::filter(csd_type == "city") |>
dplyr::arrange(desc(population_2021)) |>
dplyr::select(name, website, population_2021) |>
dplyr::slice_head(n = 10) |>
gt() |>
tab_header(
title = md("The 10 Largest Municipalities in `towny`"),
subtitle = "Population values taken from the 2021 census."
) |>
fmt_integer() |>
fmt_url(columns = website) |>
cols_label(
name = "Name",
website = "Site",
population_2021 = "Population"
)
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_fmt_url_1.png" alt="This image of a table was generated from the first code example in the `fmt_url()` help file." style="width:100\%;">
}}
Let's try something else. We can set a static text label for the link with
the \code{label} argument (and we'll use the word \code{"site"} for this). The link
underline is removable with \code{show_underline = FALSE}. With this change, it
seems sensible to merge the link to the \code{"name"} column and enclose the link
text in parentheses (the \code{\link[=cols_merge]{cols_merge()}} function handles all that).
\if{html}{\out{<div class="sourceCode r">}}\preformatted{towny |>
dplyr::filter(csd_type == "city") |>
dplyr::arrange(desc(population_2021)) |>
dplyr::select(name, website, population_2021) |>
dplyr::slice_head(n = 10) |>
gt() |>
tab_header(
title = md("The 10 Largest Municipalities in `towny`"),
subtitle = "Population values taken from the 2021 census."
) |>
fmt_integer() |>
fmt_url(
columns = website,
label = "site",
show_underline = FALSE
) |>
cols_merge(
columns = c(name, website),
pattern = "\{1\} (\{2\})"
) |>
cols_label(
name = "Name",
population_2021 = "Population"
)
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_fmt_url_2.png" alt="This image of a table was generated from the second code example in the `fmt_url()` help file." style="width:100\%;">
}}
The \code{fmt_url()} function allows for the styling of links as 'buttons'. This
is as easy as setting \code{as_button = TRUE}. Doing that unlocks the ability to
set a \code{button_fill} color. This color can be selected automatically by \strong{gt}
(this is the default) but here we're using \code{"steelblue"}. The \code{label}
argument also accepts a function! We can choose to adapt the label text from
the URLs by eliminating any leading \code{"https://"} or \code{"www."} parts.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{towny |>
dplyr::filter(csd_type == "city") |>
dplyr::arrange(desc(population_2021)) |>
dplyr::select(name, website, population_2021) |>
dplyr::slice_head(n = 10) |>
dplyr::mutate(ranking = dplyr::row_number()) |>
gt(rowname_col = "ranking") |>
tab_header(
title = md("The 10 Largest Municipalities in `towny`"),
subtitle = "Population values taken from the 2021 census."
) |>
fmt_integer() |>
fmt_url(
columns = website,
label = function(x) gsub("https://|www.", "", x),
as_button = TRUE,
button_fill = "steelblue",
button_width = px(150)
) |>
cols_move_to_end(columns = website) |>
cols_align(align = "center", columns = website) |>
cols_width(
ranking ~ px(40),
website ~ px(200)
) |>
tab_options(column_labels.hidden = TRUE) |>
tab_style(
style = cell_text(weight = "bold"),
locations = cells_stub()
) \%>\%
opt_vertical_padding(scale = 0.75)
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_fmt_url_3.png" alt="This image of a table was generated from the third code example in the `fmt_url()` help file." style="width:100\%;">
}}
It's perhaps inevitable that you'll come across missing values in your column
of URLs. The \code{fmt_url()} function will preserve input \code{NA} values, allowing
you to handle them with \code{\link[=sub_missing]{sub_missing()}}. Here's an example of that.
\if{html}{\out{<div class="sourceCode r">}}\preformatted{towny |>
dplyr::arrange(population_2021) |>
dplyr::select(name, website, population_2021) |>
dplyr::slice_head(n = 10) |>
gt() |>
tab_header(
title = md("The 10 Smallest Municipalities in `towny`"),
subtitle = "Population values taken from the 2021 census."
) |>
fmt_integer() |>
fmt_url(columns = website) |>
cols_label(
name = "Name",
website = "Site",
population_2021 = "Population"
) |>
sub_missing()
}\if{html}{\out{</div>}}
\if{html}{\out{
<img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_fmt_url_4.png" alt="This image of a table was generated from the fourth code example in the `fmt_url()` help file." style="width:100\%;">
}}
}
\section{Function ID}{
3-19
}
\section{Function Introduced}{
\code{v0.9.0} (Mar 31, 2023)
}
\seealso{
Other data formatting functions:
\code{\link{data_color}()},
\code{\link{fmt_auto}()},
\code{\link{fmt_bins}()},
\code{\link{fmt_bytes}()},
\code{\link{fmt_currency}()},
\code{\link{fmt_datetime}()},
\code{\link{fmt_date}()},
\code{\link{fmt_duration}()},
\code{\link{fmt_engineering}()},
\code{\link{fmt_flag}()},
\code{\link{fmt_fraction}()},
\code{\link{fmt_icon}()},
\code{\link{fmt_image}()},
\code{\link{fmt_index}()},
\code{\link{fmt_integer}()},
\code{\link{fmt_markdown}()},
\code{\link{fmt_number}()},
\code{\link{fmt_partsper}()},
\code{\link{fmt_passthrough}()},
\code{\link{fmt_percent}()},
\code{\link{fmt_roman}()},
\code{\link{fmt_scientific}()},
\code{\link{fmt_spelled_num}()},
\code{\link{fmt_time}()},
\code{\link{fmt_units}()},
\code{\link{fmt}()},
\code{\link{sub_large_vals}()},
\code{\link{sub_missing}()},
\code{\link{sub_small_vals}()},
\code{\link{sub_values}()},
\code{\link{sub_zero}()}
}
\concept{data formatting functions}
|
/man/fmt_url.Rd
|
permissive
|
rstudio/gt
|
R
| false | true | 14,997 |
rd
|
data.replace <- function(datavector, to, from) {
datavector[datavector %in% from] <- to
datavector
}
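# A minimal usage sketch (hypothetical values): recode 99 as NA
# data.replace(c(1, 2, 99, 4), to = NA, from = 99)   # -> 1 2 NA 4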
|
/scripts/data-replace.R
|
no_license
|
philsf-biostat/analise_dados_ACD_2017
|
R
| false | false | 105 |
r
|
\name{augm}
\alias{augm}
\docType{data}
\title{\eqn{2^{(7-3)}}{2^{(7-3)}} arsenic removal experiment augmented with mirror image}
\description{
Data from the \eqn{2^{(7-3)}} arsenic removal experiment augmented with mirror image in chapter 6 of Design and Analysis
of Experiments with R
}
\usage{data(augm)}
\format{
A data frame with 8 observations on the following 9 variables.
\describe{
\item{\code{A}}{a factor with levels \code{-1} \code{1} }
\item{\code{B}}{a factor with levels \code{-1} \code{1} }
\item{\code{C}}{a factor with levels \code{-1} \code{1} }
\item{\code{fold}}{a factor with levels \code{original} \code{mirror} }
\item{\code{D}}{a factor with levels \code{-1} \code{1} }
\item{\code{E}}{a factor with levels \code{-1} \code{1} }
\item{\code{F}}{a factor with levels \code{-1} \code{1} }
\item{\code{G}}{a factor with levels \code{-1} \code{1} }
\item{\code{y}}{a numeric vector}
}
}
\source{
Design and Analysis of Experiments with R, by John Lawson, CRC/Chapman Hall
}
\examples{
data(augm)
}
\keyword{datasets}
|
/man/augm.Rd
|
no_license
|
cran/daewr
|
R
| false | false | 1,098 |
rd
|
#read the file
myFile <- "household_power_consumption.txt"
#read header's names
myHeader <- read.csv(myFile, sep=";", skip=0, nrows=1)
#read data, skip unnecessary rows
myData <- read.csv(myFile, sep=";", skip=66637, nrows=2880, na.strings="?", colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
names(myData)<-names(myHeader)
#new column creation in datetime format
myData$DateTime = paste(myData$Date, myData$Time)
myData$DateTime = strptime(myData$DateTime, "%d/%m/%Y %H:%M:%S")
#prepare png file
png(file = "plot3.png")
#plot
plot(myData$DateTime,y = myData$Sub_metering_1,type='l', xlab = "", ylab = "Energy sub metering")
#add info
lines(myData$DateTime,y =myData$Sub_metering_2, col = "red")
lines(myData$DateTime,y =myData$Sub_metering_3, col = "blue")
legend("topright", pch = "_", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex=1.2,lwd = 2)
#dev.copy(png, file = "plot3.png")
dev.off()
|
/plot3.R
|
no_license
|
txevas/ExData_Plotting1
|
R
| false | false | 1,014 |
r
|
rm(list = ls())
data <- read.csv("~/ProgrammingAssignment2/ProgrammingAssignment2/assignment/ExData_Plotting1/household_power_consumption.txt", sep=";",stringsAsFactors=F,comment.char="")
neededData <- subset(data, Date %in% c("1/2/2007","2/2/2007"))
neededData$Date <- as.Date(neededData$Date, format="%d/%m/%Y")
datetime <- as.POSIXct(paste(neededData$Date, neededData$Time))
Global_active_power<- as.numeric(neededData$Global_active_power)
with(neededData, {
plot(as.numeric(Sub_metering_1)~datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
lines(as.numeric(Sub_metering_2)~datetime,col='Red')
lines(as.numeric(Sub_metering_3)~datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/Plot3.R
|
no_license
|
MohitsSingh/ExData_Plotting1
|
R
| false | false | 905 |
r
|
### Explaining the Student t-test
library(ggplot2)  # needed for the plots below
## Let's look at our data:
anastasia <- c(65, 74, 73, 83, 76, 65, 86, 70, 80, 55, 78, 78, 90, 77, 68)
bernadette <- c(72, 66, 71, 66, 76, 69, 79, 73, 62, 69, 68, 60, 73, 68, 67, 74, 56, 74)
mean(anastasia) # 74.5
sd(anastasia) # 9.0
nA<-length(anastasia) # 15
nA
mean(bernadette) # 69.1
sd(bernadette) # 5.8
nB <- length(bernadette) # 18
nB
# plot the data:
d <- data.frame(values = c(anastasia, bernadette),
group = c(rep("anastasia",15), rep("bernadette", 18))
)
d
ggplot(d, aes(x = group, y = values, fill = group)) +
geom_boxplot(alpha=.3) +
geom_jitter(width=.1, size=2) +
theme_classic() +
scale_fill_manual(values = c("firebrick", "dodgerblue"))
# what's the difference in means?
mean(anastasia) - mean(bernadette) # 5.48
# anastasia students have on average 5.48 higher scores.
## But how meaningful is this difference?
#### Student's t-test approach:
## we're going to work out how usual/unusual our one observed sample mean difference is.
# we need to construct the sampling distribution of differences in sample means.
# we hypothesise that its mean is 0 (no difference between groups)
# we have to work out the standard deviation of the sampling dist....
# If we assume equal variances between the population of group A and B, we can calculate the
# standard deviation of this sampling distribution as
# the pooled estimate of the common standard deviation * sqrt(1/n1 + 1/n2)
## step 1: calculate the pooled SD between the two samples...
# from first principles, calculating deviations from each group mean
difA2 <- (anastasia - mean(anastasia))^2
difB2 <- (bernadette - mean(bernadette))^2
sumsq <- sum(difA2) + sum(difB2)
n <- nA + nB #33
sd.pool <- sqrt(sumsq/(n-2))
sd.pool # 7.41 this is the estimated pooled s.d.
sd(anastasia) #8.999
sd(bernadette) #5.775
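# equivalent check via the sample variances (same pooled s.d.):
sqrt(((nA - 1) * var(anastasia) + (nB - 1) * var(bernadette)) / (n - 2)) # 7.41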
## step 2: use the pooled SD to calculate the S.D. of the Sampling Dist.
sedm <- sd.pool * sqrt( (1/nA) + (1/nB))
sedm # this is the Standard Deviation of the Sampling Distribution of differences in sample means
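# with the numbers above: sedm = 7.41 * sqrt(1/15 + 1/18), i.e. about 2.59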
### We can now visualize this theoretical Sampling Distribution:
## Plotting our Sampling Distribution of Differences in Sample Means
# Don't worry about the gross-looking code... just using it to make the plot:
m <- 0 # mean
v <- sedm^2 # variance, sedm squared
df <- 31
vals <- rt(n=500000, df=df)*sqrt(v * (df-2)/df) + m
df1 <- data.frame(val = vals)
ggplot(df1, aes(x=val)) +
geom_histogram(aes(y = ..density..), color='black', fill='purple', alpha=.3)+
theme_classic()+
geom_density(alpha = 0.7, fill = "white") +
geom_vline(xintercept = 5.48, lwd=1, color="red") +
geom_vline(xintercept = 0, lwd=1,lty=2, color='black')+
xlab("Difference in Sample Means") +
ylab("") +
ggtitle("Sampling Distribution of Differences in Sample Means")
## Step 3... Calculate our observed t.
# the observed value of t, is how (un)expected our observed sample difference in means is...
# essentially we say how many SDs is our one observed sample mean difference from the mean?
tobs <- (mean(anastasia) - mean(bernadette)) / sedm
tobs # t = 2.1154
## Calculate the p-value
# we are concerned with knowing how much of the t distribution is greater than our observed t.
pt(tobs, df=n-2) # 0.9787353 - this is the proportion to the left.
1 - pt(tobs, df=n-2) # 0.0213 # this is the one-tailed p-value
(1 - pt(tobs, df=n-2)) * 2 # p = 0.04253 # the two-tailed p-value
### Let's check with R's function:
t.test(anastasia, bernadette, var.equal = T) # yes! t=2.1154, df=31, p=0.04253
#### We can visualize this sampling distribution in terms of t:
### make a t-distribution
t <- seq(-3,3,by=.01)
Density <- dt(t, df=31)
df <- data.frame(t,Density)
ggplot(df, aes(x=t,y=Density))+
theme_classic()+
  geom_line(color='firebrick', lwd=1) +
geom_vline(xintercept=0, lty=2, color='black', lwd=1)+
geom_vline(xintercept=2.12, color='red',lwd=1 )
|
/statistics/two_sample_ttest_theory.R
|
no_license
|
depocen/PSY317L
|
R
| false | false | 4,064 |
r
|
#' Import Canadian Snow Data from .dly file
#' @param fileLoc File path to .dly data
#' @param progress boolean specifying if you want progress of code to be printed out
#' @return nicely organized dataframe of snow data
#' @importFrom stringr str_length
#' @export
importDLY<-function(fileLoc,progress=FALSE){
SnowDataUpdated <- read.delim(file = fileLoc, header=FALSE, stringsAsFactors=FALSE)
SnowDataUpdated<-SnowDataUpdated$V1
monthlymat<-matrix("",nrow = 31,ncol = 6)
accumulatedmat<-c()
FinalOutput<-c()
monthlymat[,6]=as.character(1:31)
len<-length(SnowDataUpdated)
for(i in 1:len){
curstr<-SnowDataUpdated[i]
if(str_length(curstr)==77){
if(i>1){
FinalOutput<-rbind(FinalOutput,cbind(id,Name,Lat,Lon,Elev,Sdate,Edate,Nobs,accumulatedmat))
accumulatedmat<-c()
if(progress){
print(paste(i,"of",len,"is complete."))
}
}
      id=substr(curstr,1L,7L)
      Name=substr(curstr,9L,38L)
      Lat=substr(curstr,40L,45L)
      Lon=substr(curstr,47L,53L)
      Elev=substr(curstr,55L,58L)
      Sdate=substr(curstr,60L,65L)
      Edate=substr(curstr,67L,72L)
      Nobs=substr(curstr,74L,77L)
} else{
      monthlymat[,1]=substr(curstr,9L,12L)
      monthlymat[,2]=substr(curstr,13L,14L)
      for(j in 1:31){
        cur<-as.integer((j-1)*10)
        monthlymat[j,3]<-substr(curstr,cur+16L,cur+18L)
        monthlymat[j,4]<-substr(curstr,cur+20L,cur+22L)
        monthlymat[j,5]<-substr(curstr,cur+24L,cur+24L)
}
accumulatedmat<-rbind(accumulatedmat,monthlymat)
}
}
data.frame(id=FinalOutput[,1],Name=FinalOutput[,2],Lat=as.numeric(FinalOutput[,3]),Lon=as.numeric(FinalOutput[,4]),Elev=as.numeric(FinalOutput[,5]),Sdate=as.numeric(FinalOutput[,6]),Edate=as.numeric(FinalOutput[,7]),Nobs=as.numeric(FinalOutput[,8]),Year=as.numeric(FinalOutput[,9]),Month=as.numeric(FinalOutput[,10]),SnowDepth=as.numeric(FinalOutput[,11]),QualityFlag=as.numeric(FinalOutput[,12]),ClimateFlag=FinalOutput[,13])
}
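# A minimal usage sketch (the file name below is hypothetical):
# snow <- importDLY("CanadianSnowData.dly", progress = TRUE)
# head(snow)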
|
/R/importDLY.R
|
no_license
|
joej1997/importDLY
|
R
| false | false | 2,151 |
r
|
################################################
# Center a matrix (genes as columns, samples as rows)
SampleCenterMean <- function(mat){
# Ensure data is matrix
if(!is.matrix(mat)){
stop("Data must be matrix")
}
# Ensure data is numeric
if(!is.numeric(mat)){
stop("Data must be numeric")
}
  # Iterate over rows (samples)
mat_out <- mat
for(n in 1:nrow(mat)){
# Center
mat_out[n,] <- (mat[n,] - mean(mat[n,], na.rm=T))
}
# Output data
mat_out
}
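# A minimal usage sketch (hypothetical values): each row is shifted to mean zero
# SampleCenterMean(matrix(c(1, 2, 3, 4, 6, 8), nrow = 2, byrow = TRUE))
# row 1 -> -1 0 1 ; row 2 -> -2 0 2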
|
/functions/SampleCenterMean.R
|
permissive
|
steepale/20200915_metabolomics-pass1a
|
R
| false | false | 586 |
r
|
# ------------------------------------------------------------------------------
# H2O GBM for Santander Product Recommendations
# Generate level one data using H2O Random Grid Search
# ------------------------------------------------------------------------------
# Core model parameters
n_seed <- 1234
n_trees_max <- 500 # with early stopping, usually <300 trees
n_rate <- 0.05 # fixed
n_folds <- 5 # CV fold
n_grid_models <- 5 # max no. of random grid search models
n_score_interval <- 5
n_stop_round <- 10
stop_metric <- "logloss"
# H2O's R Package
suppressPackageStartupMessages(library(h2o)) # h2o_3.10.2.1
suppressPackageStartupMessages(library(data.table)) # data.table_1.10.1
# Data in gz files
gz_train <- "./data/d_train.csv.gz"
gz_valid <- "./data/d_valid.csv.gz"
gz_test <- "./data/d_test.csv.gz"
csv_train <- "./data/d_train.csv"
csv_valid <- "./data/d_valid.csv"
csv_test <- "./data/d_test.csv"
# ------------------------------------------------------------------------------
# Import Data into H2O
# ------------------------------------------------------------------------------
# Start H2O clusters
h2o.init(nthreads = -1)
# h2o.no_progress() # disable progress bar
# Data created with data_prep.R
h_train <- h2o.importFile(gz_train)
h_valid <- h2o.importFile(gz_valid)
h_test <- h2o.importFile(gz_test)
# Check size
# dim(h_train) # 405809 x 158
# dim(h_valid) # 35843 x 158
# dim(h_test) # 929615 x 158
# ------------------------------------------------------------------------------
# Convert data types
# ------------------------------------------------------------------------------
# Convert some columns to categorical
h_train$indrel_1mes <- as.factor(h_train$indrel_1mes) # Customer type
h_train$cod_prov <- as.factor(h_train$cod_prov) # Province code (customer's address)
h_train$dato_month <- as.factor(h_train$dato_month)
h_train$alta_month <- as.factor(h_train$alta_month)
h_train$alta_year <- as.factor(h_train$alta_year)
# Convert some columns to categorical
h_valid$indrel_1mes <- as.factor(h_valid$indrel_1mes) # Customer type
h_valid$cod_prov <- as.factor(h_valid$cod_prov) # Province code (customer's address)
h_valid$dato_month <- as.factor(h_valid$dato_month)
h_valid$alta_month <- as.factor(h_valid$alta_month)
h_valid$alta_year <- as.factor(h_valid$alta_year)
# Convert some columns to categorical
h_test$indrel_1mes <- as.factor(h_test$indrel_1mes) # Customer type
h_test$cod_prov <- as.factor(h_test$cod_prov) # Province code (customer's address)
h_test$dato_month <- as.factor(h_test$dato_month)
h_test$alta_month <- as.factor(h_test$alta_month)
h_test$alta_year <- as.factor(h_test$alta_year)
# ------------------------------------------------------------------------------
# Define features
# ------------------------------------------------------------------------------
col_ignore <- c("fecha_dato", "ncodpers", "fecha_alta", "cod_prov",
"ult_fec_cli_1t", "added_products", "last_year", "last_month",
"alta_year_month", "dato_year_month", "cv_fold")
features <- setdiff(colnames(h_train), col_ignore) # all features
print(features)
# ------------------------------------------------------------------------------
# Using H2O random grid search to generate level one data
# ------------------------------------------------------------------------------
search_criteria <- list(strategy = "RandomDiscrete",
max_models = n_grid_models,
seed = n_seed)
params_gbm <- list(max_depth = seq(3, 5, 1),
sample_rate = seq(0.5, 0.9, 0.1),
col_sample_rate = seq(0.5, 0.9, 0.1))
# H2O GBM Grid
grid_gbm <- h2o.grid(
# Grid search parameters
algorithm = "gbm",
grid_id = "grid_gbm",
hyper_params = params_gbm,
search_criteria = search_criteria,
# Core model parameters
training_frame = h_train,
x = features,
y = "added_products",
learn_rate = n_rate,
ntrees = n_trees_max,
seed = n_seed,
nfolds = n_folds,
keep_cross_validation_predictions = TRUE,
fold_assignment = "Stratified",
# using Stratified instead of Modulo as I am not using
# h2oEnsemble::h2o.stack() for stacking
# Early stopping parameters
score_tree_interval = n_score_interval,
stopping_metric = stop_metric,
stopping_tolerance = 0.01,
stopping_rounds = n_stop_round
)
# ------------------------------------------------------------------------------
# Extract models and data
# ------------------------------------------------------------------------------
# Extract all models
gbm_models <- lapply(grid_gbm@model_ids, function(model_id) h2o.getModel(model_id))
# Extract Level One Data
for (n in 1:n_folds) {
# Display
cat("[Extracting Data] ... CV Model", n, "...\n")
# Extract predictions (L1 data)
L1_train_temp <- h2o.cross_validation_holdout_predictions(gbm_models[[n]])
L1_valid_temp <- h2o.predict(gbm_models[[n]], h_valid)
L1_test_temp <- h2o.predict(gbm_models[[n]], h_test)
# Drop the first column (predicted class label); keep only the class probabilities
L1_train_temp <- as.data.frame(L1_train_temp)[-1]
L1_valid_temp <- as.data.frame(L1_valid_temp)[-1]
L1_test_temp <- as.data.frame(L1_test_temp)[-1]
# Update colnames (to include model number)
colnames(L1_train_temp) <- paste0("L1_m", n, "_", colnames(L1_train_temp))
colnames(L1_valid_temp) <- paste0("L1_m", n, "_", colnames(L1_valid_temp))
colnames(L1_test_temp) <- paste0("L1_m", n, "_", colnames(L1_test_temp))
if (n == 1) {
L1_train <- L1_train_temp
L1_valid <- L1_valid_temp
L1_test <- L1_test_temp
} else {
L1_train <- cbind(L1_train, L1_train_temp)
L1_valid <- cbind(L1_valid, L1_valid_temp)
L1_test <- cbind(L1_test, L1_test_temp)
}
# Clean up
rm(L1_train_temp, L1_valid_temp, L1_test_temp)
gc()
}
# Adding target to L1_train and L1_valid (for stacking in next stage)
y_train <- as.data.frame(h_train$added_products)
y_valid <- as.data.frame(h_valid$added_products)
L1_train <- cbind(L1_train, y_train)
L1_valid <- cbind(L1_valid, y_valid)
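# --- Added illustrative sketch (not part of the original script) ---
# The L1_* frames are intended for a separate stacking stage. A minimal,
# hypothetical version of that next stage could look like the block below;
# it is kept inside `if (FALSE)` so this script's behaviour is unchanged.
if (FALSE) {
  h_L1_train <- as.h2o(L1_train)
  h_L1_valid <- as.h2o(L1_valid)
  meta_features <- setdiff(colnames(h_L1_train), "added_products")
  stacker <- h2o.glm(x = meta_features, y = "added_products",
                     training_frame = h_L1_train,
                     validation_frame = h_L1_valid,
                     family = "multinomial")
  h2o.logloss(stacker, valid = TRUE)
}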
# ------------------------------------------------------------------------------
# Evaluate Random Grid Search Models
# ------------------------------------------------------------------------------
d_eval <- c()
for (n in 1:n_folds) {
# Extract model
model <- gbm_models[[n]]
# Evaluate performance on validation set
perf_valid <- h2o.performance(model, newdata = h_valid)
# Create results summary data frame
d_eval_temp <- data.frame(model_id = model@model_id,
algo = model@algorithm,
learn_rate = model@parameters$learn_rate,
n_trees = model@parameters$ntrees,
max_depth = model@parameters$max_depth,
row_samp = model@parameters$sample_rate,
col_samp = model@parameters$col_sample_rate,
seed = model@parameters$seed,
n_cv_fold = n_folds,
logloss_train = model@model$training_metrics@metrics$logloss,
logloss_cv = model@model$cross_validation_metrics@metrics$logloss,
logloss_valid = perf_valid@metrics$logloss)
# Stack
d_eval <- rbind(d_eval, d_eval_temp)
rm(d_eval_temp)
}
# Print out
cat("\n\n=============== Summary of Metrics: =============== \n")
print(d_eval)
# =============== Summary of Metrics: ===============
# model_id algo learn_rate n_trees max_depth row_samp col_samp seed
# 1 grid_gbm_model_0 gbm 0.05 198 4 0.7 0.9 1234
# 2 grid_gbm_model_4 gbm 0.05 194 4 0.6 0.6 1234
# 3 grid_gbm_model_2 gbm 0.05 193 4 0.6 0.9 1234
# 4 grid_gbm_model_1 gbm 0.05 240 3 0.9 0.7 1234
# 5 grid_gbm_model_3 gbm 0.05 241 3 0.7 0.8 1234
# n_cv_fold logloss_train logloss_cv logloss_valid
# 1 5 0.9502685 0.9934115 0.9464101
# 2 5 0.9522171 0.9938626 0.9448556
# 3 5 0.9566952 0.9980570 0.9685286
# 4 5 0.9734655 0.9994102 0.9443164
# 5 5 0.9742857 1.0013029 0.9458400
# ------------------------------------------------------------------------------
# Saving files
# ------------------------------------------------------------------------------
# Save H2O models
for (n in 1:n_folds) {
h2o.saveModel(gbm_models[[n]], path = "./output/h2o_gbm_L1_run2/", force = TRUE)
}
# Write evaluation results to disk
fwrite(d_eval, file = "./output/h2o_gbm_L1_run2/L1_eval.csv")
# Round the L1 predictions to 4 decimal places to keep the csv files small
L1_train[, -ncol(L1_train)] <- round(L1_train[, -ncol(L1_train)], 4)
L1_valid[, -ncol(L1_valid)] <- round(L1_valid[, -ncol(L1_valid)], 4)
L1_test <- round(L1_test, 4)
# Write L1 data to disk
options(digits = 18)
fwrite(L1_train, file = "./output/h2o_gbm_L1_run2/L1_train.csv")
fwrite(L1_valid, file = "./output/h2o_gbm_L1_run2/L1_valid.csv")
fwrite(L1_test, file = "./output/h2o_gbm_L1_run2/L1_test.csv")
# Gzip L1 data
system("gzip -9 -v ./output/h2o_gbm_L1_run2/L1_train.csv")
system("gzip -9 -v ./output/h2o_gbm_L1_run2/L1_valid.csv")
system("gzip -9 -v ./output/h2o_gbm_L1_run2/L1_test.csv")
# ------------------------------------------------------------------------------
# Print System Info
# ------------------------------------------------------------------------------
print(sessionInfo())
print(Sys.info())
# R version 3.2.3 (2015-12-10)
# Platform: aarch64-unknown-linux-gnu (64-bit)
# Running under: Ubuntu 16.04.1 LTS
#
# locale:
# [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C
# [3] LC_TIME=en_US.UTF-8 LC_COLLATE=en_US.UTF-8
# [5] LC_MONETARY=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8
# [7] LC_PAPER=en_US.UTF-8 LC_NAME=C
# [9] LC_ADDRESS=C LC_TELEPHONE=C
# [11] LC_MEASUREMENT=en_US.UTF-8 LC_IDENTIFICATION=C
#
# attached base packages:
# [1] methods stats graphics grDevices utils datasets base
#
# other attached packages:
# [1] data.table_1.10.0 h2o_3.10.2.1
#
# loaded via a namespace (and not attached):
# [1] tools_3.2.3 RCurl_1.95-4.8 jsonlite_1.2 bitops_1.0-6
# sysname
# "Linux"
# release
# "4.4.0-38-generic"
# version
# "#57-Ubuntu SMP Wed Sep 7 10:19:14 UTC 2016"
# nodename
# "joe.local.lan"
# machine
# "aarch64"
# login
# "root"
# user
# "root"
# effective_user
# "root"
|
/B_analysts_sources_github/woobe/kaggle_santander_product/h2o_gbm_L1.R
|
no_license
|
Irbis3/crantasticScrapper
|
R
| false | false | 11,125 |
r
|
#******************************************************************************************************
# Applied generalized linear model - FS
# Viviana Amati
# Social Network Labs
# Department of Humanities, Social and Political Sciences
# ETH Zurich
# 24 March 2020
# This script provides the code for applying binary logistic regression models
# The commented output is in the lecture notes.
#******************************************************************************************************
#-----------------------------------------------------------------------------------------------------
# Setting directory and loading packages
#-----------------------------------------------------------------------------------------------------
setwd("~/Data/github/AGLM/Course_Material")
library(ggplot2)
library(tidyr)
library(car)
# Importing the data and check
admission <- read.csv("admission.csv",header=TRUE)
head(admission)
summary(admission)
# Recoding the variable rank as a factor
admission$rank <- factor(admission$rank,levels=1:4,labels=1:4)
summary(admission)
#-----------------------------------------------------------------------------------------------------
# Some descriptive statistics
#-----------------------------------------------------------------------------------------------------
# Histograms showing the distribution of the variables
histData <- gather(admission, key=key, value=value)
histData$value <- as.integer(histData$value)
plot1= ggplot(histData, aes(value)) +
geom_histogram(bins = 10, color= "black", fill="grey70") +
facet_wrap(~key, scales = "free_x", nrow = 2, ncol = 2) +
theme_bw()
plot1
# Scatter matrix
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) {
usr <- par("usr")
on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- cor(x, y, use="pairwise.complete.obs")
txt <- format(c(r, 0.123456789), digits = digits)[1]
txt <- paste0(prefix, txt)
text(0.5, 0.5, txt)
}
pairs(admission, lower.panel = panel.cor, pch = 18)
# Proportions of admitted by gpa, gre and rank
gpaCat <- cut(admission$gpa,c(seq(4,6,0.2)), labels=FALSE)
prop.admit.gpa <- tapply(admission$admit,gpaCat,mean)
greCat <- cut(admission$gre,c(seq(260,960,50)), labels=FALSE)
prop.admit.gre <- tapply(admission$admit,greCat,mean)
prop.admit.rank <-tapply(admission$admit,admission$rank,mean)
plot(prop.admit.rank,pch=19,xlab="rank")
par(mfrow=c(2,2))
plot(seq(4.1,6,0.2),prop.admit.gpa,pch=19,xlab="gpa")
plot(seq(275,935,50), prop.admit.gre,pch=19,xlab="gre")
plot(prop.admit.rank,pch=19,xlab="rank")
#-----------------------------------------------------------------------------------------------------
# Model estimation
#-----------------------------------------------------------------------------------------------------
# The code below estimates a logistic regression model using the
# glm (generalized linear model) function.
# This function is used to fit generalized linear models and requires the specification
# of the
# - dependent and explanatory variables using the usual formula
# dependent variable ~ explanatory variables separated by +
# - description of the error distribution using the "family" argument
# For the logistic model family = binomial(link = "logit")
mod1 <- glm(admit~gre+gpa+rank, family=binomial(link = "logit"), data=admission[,1:4])
# When a model includes all the other variables in the data frame
# we can avoid listing all the variables by using
mod1 <- glm(admit~.,family="binomial",data=admission)
# The default link function for the binomial family is the logit. Therefore, we can omit
# (link = "logit") from the call above and simply write family = "binomial" in quotation marks.
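# Added illustrative sketch (not part of the original script): the logit link
# maps the linear predictor eta = X beta to the probability p = exp(eta)/(1+exp(eta)).
# A quick numeric check with a made-up value of eta:
eta_example <- -1.5 + 0.8 * 0.5 # hypothetical linear predictor
plogis(eta_example) # inverse logit: the implied probability
qlogis(plogis(eta_example)) # logit of that probability recovers eta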
#-----------------------------------------------------------------------------------------------------
# Model diagnostics
#-----------------------------------------------------------------------------------------------------
# Standard way not too helpful
par(mfrow=c(2,2))
plot(mod1, pch=19,cex=0.1)
# A better way to visualize the diagnostics
# Linearity
residualPlots(mod1, type = "deviance", pch=20, smooth=list(col="red"))
# Outliers, leverage, Cook's distance
influenceIndexPlot(mod1,vars=c("Studentized", "hat", "Cook"), id=list(n=c(4)))
outlierTest(mod1) # Testing outliers
CookThreshold <- 5/400*qchisq(0.05,1,lower.tail=FALSE) # Cook's distance threshold for GLM
CookThreshold
# Are 198 and 156 really influential?
mod2 <-update(mod1,subset=-c(198))
compareCoefs(mod1,mod2)
mod3 <-update(mod1,subset=-c(156))
compareCoefs(mod1,mod3)
#-----------------------------------------------------------------------------------------------------
# Parameter interpretation
#-----------------------------------------------------------------------------------------------------
# The commented code shows how the p-values of the Wald test are computed
# Wald test for testing the association between admit and each explanatory variable:
# H_0: beta_j=0 vs. H_1: beta_j != 0
# names(summary(mod1))
# summary(mod1)$coefficients
# beta.est <- summary(mod1)$coefficients[,1]
# se.est <- summary(mod1)$coefficients[,2]
# z.values <- beta.est/se.est
# p.values <- 2*pnorm(abs(z.values),lower.tail=FALSE)
# data.frame(beta.est,se.est,z.values,p.values)
summary(mod1)
# Odds ratios and Wald CIs
results <- cbind(coefs=mod1$coefficients, OR = exp(coef(mod1)), exp(confint.default(mod1)))
exp(summary(mod1)$coefficients[,1]-qnorm(0.975)*summary(mod1)$coefficients[,2])
exp(summary(mod1)$coefficients[,1]+qnorm(0.975)*summary(mod1)$coefficients[,2])
# Odds ratios and profile-likelihood CIs
results <- cbind(coefs=mod1$coefficients, OR = exp(coef(mod1)), exp(confint(mod1)))
results
# Percentage change
100*(exp(coef(mod1))-1)
# Predicted probabilities
source("multiplot.R")
# Predicted probabilities for the variable gpa
data.gpa <- with(admission,
data.frame(gre = mean(gre), gpa = rep(seq(from = 4, to = 6, length.out=200),4),
rank = factor(rep(1:4, each = 200))))
predict.gpa <- cbind(data.gpa, predict(mod1, newdata=data.gpa, type = "response", se = TRUE))
predict.gpa <- within(predict.gpa, PredictedProb <- plogis(fit))
head(predict.gpa)
p.gpa <- ggplot(predict.gpa, aes(x = gpa, y = PredictedProb)) +
geom_line(aes(colour = rank), size = 1)+theme_bw()
# Predicted probabilities for the variable gre
data.gre <- with(admission,
data.frame(gpa = mean(gpa), gre = rep(seq(from = 260, to = 960, length.out=700),4),
rank = factor(rep(1:4, each = 700))))
predict.gre <- cbind(data.gre, predict(mod1, newdata=data.gre, type = "response", se = TRUE))
predict.gre <- within(predict.gre, PredictedProb <- plogis(fit))
head(predict.gre)
p.gre <- ggplot(predict.gre, aes(x = gre, y = PredictedProb)) +
geom_line(aes(colour = rank), size = 1)+theme_bw()
# Predicted probabilities for the variable rank
data.rank <- with(admission,
data.frame(gpa=mean(gpa), gre = mean(gre), rank = factor(1:4)))
predict.rank <- cbind(data.rank, predict(mod1, newdata = data.rank, type = "response"))
colnames(predict.rank)[4] <- "PredictedProb"
p.rank <- ggplot(predict.rank, aes(x = rank, y = PredictedProb)) +
geom_point(aes(colour = rank))+theme_bw()
multiplot(p.gpa, p.gre,p.rank, cols = 1)
#-----------------------------------------------------------------------------------------------------
# Hypothesis testing
#-----------------------------------------------------------------------------------------------------
# More than one parameter:
# Model fit (overall test): H_0: beta_1=...=beta_p=0
mod.empty <- glm(admit~1,family="binomial",data=admission)
anova(mod.empty,mod1,test="Chisq")
# Computing the test by hand
# G.value <- with(mod1, null.deviance - deviance)
# G.value
# df.G <- with(mod1, df.null - df.residual)
# df.G
# pvalue.G <- pchisq(G.value,df.G,lower.tail=FALSE)
# pvalue.G
# quantile.G <- qchisq(0.05,df.G)
# quantile.G
# Subset of parameters
# E.g. H_0 = beta_{r2}=beta_{r3}=beta_{r4}
mod.red <- glm(admit~gre+gpa, family="binomial", data=admission)
anova(mod.red,mod1,test="Chisq")
# By hand
# G.value <- mod.red$deviance - mod1$deviance
# G.value
# df.G <- with(mod1, df.null - df.residual)-with(mod.red, df.null - df.residual)
# df.G
# pvalue.G <- pchisq(G.value,df.G,lower.tail=FALSE)
# pvalue.G
# quantile.G <- qchisq(0.05,df.G)
# quantile.G
#-----------------------------------------------------------------------------------------------------
# Model selection
#-----------------------------------------------------------------------------------------------------
# Forward selection: start from the model with only the intercept:
mod.fin <- step(mod.empty, direction="forward",
scope=formula(mod1))
mod.fin
summary(mod.fin)
#-----------------------------------------------------------------------------------------------------
# Probit model
#-----------------------------------------------------------------------------------------------------
mod2 <- glm(admit~gre+gpa+rank,family=binomial(link = "probit"),data=admission[,1:4])
summary(mod2)
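# Added note: probit and logit coefficients live on different scales, so they are
# not directly comparable; logit estimates are typically about 1.6-1.8 times the
# probit estimates, while the fitted probabilities are usually very similar.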
#-----------------------------------------------------------------------------------------------------
# Grouped data
#-----------------------------------------------------------------------------------------------------
titanic <- read.csv("titanic.csv",header=TRUE)
modTitanic <- glm(cbind(Survived,Died)~.,data=titanic,family="binomial")
summary(modTitanic)
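# Added illustrative line (not part of the original script): with grouped data the
# response is a two-column matrix of (successes, failures), here cbind(Survived, Died),
# and the fitted values are the estimated survival probabilities per covariate pattern.
cbind(titanic, fitted_prob = fitted(modTitanic))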
|
/Course_Material/BinaryDataModel.R
|
no_license
|
japolak/ETH-AGLM
|
R
| false | false | 9,459 |
r
|
path_scaling <- paste0(path_sub, "scaling/")
dir.create(path_scaling, recursive = T)
# convert data and variance into 3d arrays
x <- array(NA,
dim = c(nrow(coord_df), length(date_list), length(var_list)),
dimnames = list(
as.character(1:nrow(coord_df)),
as.character(date_list),
var_list
)
)
for (j in 1:length(date_list)) { # date
for (v in 1:length(var_list)) { # covariate
if (var_list[v] %in% c("pheno", "temp", "prcp")) {
ts_date <- ts %>% filter(date == date_list[j])
if (nrow(ts_date) > 0) {
x[, j, v] <- ts_date[var_list[v]] %>%
unlist() %>%
as.numeric()
} else {
x[, j, v] <- rep(NA, nrow(coord_df))
}
} else if (var_list[v] == "doy") {
      x[, j, v] <- rep(sin(as.numeric(format(date_list[j], "%j")) / 365 * 2 * pi), nrow(coord_df)) # annual sine cycle (note: /365 added; sin(2*pi*integer) would be ~0)
}
}
print(date_list[j])
}
Sigma <- array(NA,
dim = c(nrow(coord_df), length(date_list), length(var_list)),
dimnames = list(
as.character(1:nrow(coord_df)),
as.character(date_list),
var_list
)
)
for (j in 1:length(date_list)) { # date
for (v in 1:length(var_list)) { # covariate
if (var_list[v] %in% c("pheno")) {
ts_date <- ts %>% filter(date == date_list[j])
if (nrow(ts_date) > 0) {
Sigma[, j, v] <- (ts_date[paste0(var_list[v], "_sd")] %>% unlist() %>% as.numeric())^2
} else {
Sigma[, j, v] <- rep(NA, nrow(coord_df))
}
} else {
Sigma[, j, v] <- rep(0, nrow(coord_df))
}
}
print(date_list[j])
}
# scale data to be roughly between 0 and 1
df_upper_lower <- vector(mode = "list")
for (j in 1:length(var_list)) {
if (var_list[j] %in% c("pheno")) { # scale by each site
df_upper_lower[[j]] <- data.frame(x[, , j, drop = F]) %>%
mutate(site = row_number()) %>%
gather(key = "date", value = "value", -site) %>%
drop_na() %>%
group_by(site) %>%
dplyr::summarize(
lower = quantile(value, 0.025),
upper = quantile(value, 0.975)
) %>%
mutate(range = upper - lower)
} else { # scale for all sites
all_upper_lower <- data.frame(x[, , j, drop = F]) %>%
mutate(site = row_number()) %>%
gather(key = "date", value = "value", -site) %>%
drop_na() %>%
dplyr::summarize(
lower = quantile(value, 0),
upper = quantile(value, 1)
) %>%
mutate(range = upper - lower)
df_upper_lower[[j]] <- data.frame(x[, , j, drop = F]) %>%
mutate(site = row_number()) %>%
gather(key = "date", value = "value", -site) %>%
drop_na() %>%
distinct(site) %>%
mutate(
lower = all_upper_lower$lower,
upper = all_upper_lower$upper,
range = all_upper_lower$range
)
}
lower <- matrix(df_upper_lower[[j]]$lower) %*% matrix(1, nrow = 1, ncol = ncol(x[, , j, drop = F]))
range <- matrix(df_upper_lower[[j]]$range) %*% matrix(1, nrow = 1, ncol = ncol(x[, , j, drop = F]))
x[, , j] <- (x[, , j] - lower) / range - 0.5
}
for (j in 1:length(var_list)) {
Sigma[, , j] <- Sigma[, , j, drop = F] / (df_upper_lower[[j]]$range)^2
}
for (j in 1:length(var_list)) {
write_csv(df_upper_lower[[j]], paste0(path_scaling, j, ".csv"))
print(j)
}
# linear interpolation
for (j in 1:length(var_list)) {
for (i in 1:nrow(coord_df)) {
min_id <- min(which(!is.na(x[i, , j])))
max_id <- max(which(!is.na(x[i, , j])))
x[i, min_id:max_id, j] <- zoo::na.approx(object = x[i, min_id:max_id, j], x = as.Date(names(x[i, min_id:max_id, j])), maxgap = 14)
}
}
for (j in 1:length(var_list)) {
for (i in 1:nrow(coord_df)) {
min_id <- min(which(!is.na(Sigma[i, , j])))
max_id <- max(which(!is.na(Sigma[i, , j])))
Sigma[i, min_id:max_id, j] <- zoo::na.approx(object = Sigma[i, min_id:max_id, j], x = as.Date(names(Sigma[i, min_id:max_id, j])), maxgap = 14)
}
}
x_raw <- x
Sigma_raw <- Sigma
# Whittaker smoothing, applied separately to each gap-free segment of the series
for (j in 1:length(var_list)) {
for (i in 1:nrow(coord_df)) {
max_id <- 0
done <- F
while (!done) {
min_id <- min(which(!is.na(x[i, (max_id + 1):length(date_list), j]))) + (max_id)
if (min_id == Inf) {
done <- T
} else {
max_id <- min(which(is.na(x[i, min_id:length(date_list), j]))) - 1 + (min_id - 1)
if (max_id == Inf) {
max_id <- length(date_list)
done <- T
}
x[i, min_id:max_id, j] <- ptw::whit1(x[i, min_id:max_id, j], 5)
}
}
}
}
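# --- Added illustrative sketch (not part of the original pipeline) ---
# Toy example of the gap-filling + smoothing steps used above, on a made-up series;
# kept inside `if (FALSE)` so the pipeline itself is unchanged.
if (FALSE) {
  toy <- sin(seq(0, 2 * pi, length.out = 50)) + rnorm(50, sd = 0.2)
  toy[c(10:12, 30)] <- NA # introduce short interior gaps
  toy_filled <- zoo::na.approx(toy, maxgap = 14) # linear interpolation, as above
  toy_smooth <- ptw::whit1(toy_filled, 5) # Whittaker smoother, lambda = 5
  plot(toy, type = "p")
  lines(toy_smooth, col = "red")
}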
|
/simulations/code/steps/21 preprocess data.R
|
permissive
|
zhulabgroup/song-2023-landsc-ecol
|
R
| false | false | 4,432 |
r
|
\name{mr_union}
\alias{mr_union}
\alias{mr_intersect}
\alias{mr_diff}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Set operations on multiple-response objects
}
\description{
These functions take the union, intersection, and difference of two multiple-response objects. An observation has a level in the union if it has that level in either input. It has the level in the intersection if it has the level in both inputs. It has the level in the difference if it has the level in \code{x} and not in \code{y}.
}
\usage{
mr_union(x, y)
mr_intersect(x, y)
mr_diff(x, y)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x,y}{Objects of class \code{mr}}
}
\value{
Object of class \code{mr}
}
\examples{
data(usethnicity)
race<-as.mr(strsplit(as.character(usethnicity$Q5),""))
race<-mr_drop(race,c(" ","F","G","H"))
race <- mr_recode(race, AmIndian="A",Asian="B", Black="C", Pacific="D", White="E")
mtable(race)
hispanic<-as.mr(usethnicity$Q4==1, "Hispanic")
ethnicity<-mr_union(race, hispanic)
mtable(ethnicity)
ethnicity[101:120]
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{manip}% use one of RShowDoc("KEYWORDS")
|
/man/mr_union.Rd
|
no_license
|
mabafaba/rimu
|
R
| false | false | 1,231 |
rd
|
# Getting dataset
all_data <- read.table("~/Data/household_power_consumption.txt", header = T, sep=";",
quote="\"", na.strings="?", stringsAsFactors=FALSE)
all_data$Date <- as.Date(all_data$Date, format="%d/%m/%Y")
# Subsetting the data
data <- subset(all_data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
# Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
# Plot 3
with(data, {
plot(Sub_metering_1~Datetime, type="l",
     ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Saving to file
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/Course Project 1/plot3.R
|
no_license
|
sturekm/Exploratory-Data-Analysis
|
R
| false | false | 877 |
r
|
# Web crawling: download the KRX stock code list (ticker code and name)
library(httr)
library(rvest)
library(readr)
library(dplyr)
otp_url = "http://marketdata.krx.co.kr/contents/COM/GenerateOTP.jspx?name=fileDown&filetype=csv&url=MKD/13/1302/13020401/mkd13020401&market_gubun=ALL&gubun=1&isu_cdnm=A005930%2F%EC%82%BC%EC%84%B1%EC%A0%84%EC%9E%90&isu_cd=KR7005930003&isu_nm=%EC%82%BC%EC%84%B1%EC%A0%84%EC%9E%90&isu_srt_cd=A005930&schdate=20200410&fromdate=20200403&todate=20200410&pagePath=%2Fcontents%2FMKD%2F13%2F1302%2F13020401%2FMKD13020401.jsp"
payload = list(
name = 'fileDown',
filetype = 'csv',
url = "MKD/13/1302/13020401/mkd13020401",
market_gubun = "ALL",
gubun = '1',
schdate = "20200412",
pagePath = "/contents/MKD/13/1302/13020401/MKD13020401.jsp")
otp = POST(url = otp_url, query = payload) %>%
read_html() %>%
html_text()
url = "http://file.krx.co.kr/download.jspx"
data = POST(url = url,
query = list(code = otp),
add_headers(referer = otp_url)) %>%
read_html() %>%
html_text() %>%
read_csv()
csv = data %>%
  select(종목코드, 종목명) # 종목코드 = ticker code, 종목명 = stock name
write.csv(csv, 'code.csv', row.names = F)
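# Added usage note: when reading the list back, keep the ticker codes as character
# so that leading zeros (e.g. "005930") are not dropped.
code_check <- read.csv('code.csv', colClasses = 'character')
head(code_check)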
|
/code_webcrwaling.R
|
no_license
|
ParkChanhyeock/Stock
|
R
| false | false | 1,126 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apple_mobility_data.R
\name{apple_mobility_data}
\alias{apple_mobility_data}
\title{Access Apple mobility data}
\usage{
apple_mobility_data(agree_to_terms = TRUE, max_tries = 3, message_url = FALSE)
}
\arguments{
\item{agree_to_terms}{logical, when TRUE, implies that the user
has agreed to Apple's terms of use. See references and note.}
\item{max_tries}{integer, the number of tries to attempt downloading}
\item{message_url}{logical, output a message with the URL for the day
since Apple changes it daily.}
}
\description{
From Apple's website:
"Learn about COVID‑19 mobility trends in countries/regions and cities. Reports are published daily and reflect requests for directions in Apple Maps. Privacy is one of our core values, so Maps doesn’t associate your data with your Apple ID, and Apple doesn’t keep a history of where you’ve been."
}
\details{
The CSV file and charts on this site show a relative volume of
directions requests per country/region or city compared to a
baseline volume on January 13th, 2020.
We define our day as midnight-to-midnight, Pacific time. Cities
represent usage in greater metropolitan areas and are stably
defined during this period. In many countries/regions and cities,
relative volume has increased since January 13th, consistent with
normal, seasonal usage of Apple Maps. Day of week effects are
important to normalize as you use this data.
Data that is sent from users’ devices to the Maps service is
associated with random, rotating identifiers so Apple doesn’t have
a profile of your movements and searches. Apple Maps has no
demographic information about our users, so we can’t make any
statements about the representativeness of our usage against the
overall population.
These data are available from a URL that changes daily. The parent
page is the place to check to see what is going on if there are problems.
}
\note{
Apple requires that all users agree to their terms of use.
See \url{https://www.apple.com/covid19/mobility}.
}
\examples{
res = apple_mobility_data()
colnames(res)
head(res)
table(res$transportation_type)
require(ggplot2)
pl = res \%>\%
dplyr::filter(region \%in\% c('Russia','New York City','Italy')) \%>\%
ggplot(aes(x=date)) +
geom_line(aes(y=mobility_index,color=transportation_type)) +
scale_x_date(date_breaks = '1 week', date_labels='\%b-\%d') +
facet_grid(rows=vars(region)) +
ggtitle('Changes in Apple Mobility Index over time')
pl
regs_of_interest = c('Seattle', 'New York City',
'Chicago', 'Italy',
'Russia', 'UK',
'Brazil')
res \%>\%
dplyr::filter(region \%in\% regs_of_interest) \%>\%
ggplot(aes(x=date, y=region, fill=mobility_index)) +
geom_tile() +
facet_grid(rows=vars(transportation_type)) +
ggtitle('Changes in Apple Mobility Index over time')
if(require(viridis)) {
res \%>\%
dplyr::filter(region \%in\% regs_of_interest) \%>\%
ggplot(aes(x=date, y=region, fill=mobility_index)) +
geom_tile() +
facet_grid(rows=vars(transportation_type)) +
scale_fill_viridis() +
ggtitle('Changes in Apple Mobility Index over time')
}
if(require(plotly)) {
ggplotly(pl)
}
}
\references{
\itemize{
\item \url{https://www.apple.com/covid19/mobility}
}
}
\seealso{
Other data-import:
\code{\link{acaps_government_measures_data}()},
\code{\link{beoutbreakprepared_data}()},
\code{\link{cdc_aggregated_projections}()},
\code{\link{cdc_excess_deaths}()},
\code{\link{cdc_social_vulnerability_index}()},
\code{\link{coronadatascraper_data}()},
\code{\link{coronanet_government_response_data}()},
\code{\link{cov_glue_lineage_data}()},
\code{\link{cov_glue_newick_data}()},
\code{\link{cov_glue_snp_lineage}()},
\code{\link{covidtracker_data}()},
\code{\link{descartes_mobility_data}()},
\code{\link{ecdc_data}()},
\code{\link{economist_excess_deaths}()},
\code{\link{eu_data_cache_data}()},
\code{\link{financial_times_excess_deaths}()},
\code{\link{google_mobility_data}()},
\code{\link{government_policy_timeline}()},
\code{\link{healthdata_mobility_data}()},
\code{\link{healthdata_projections_data}()},
\code{\link{healthdata_testing_data}()},
\code{\link{jhu_data}()},
\code{\link{jhu_us_data}()},
\code{\link{kff_icu_beds}()},
\code{\link{nytimes_county_data}()},
\code{\link{oecd_unemployment_data}()},
\code{\link{owid_data}()},
\code{\link{param_estimates_published}()},
\code{\link{test_and_trace_data}()},
\code{\link{us_county_geo_details}()},
\code{\link{us_county_health_rankings}()},
\code{\link{us_healthcare_capacity}()},
\code{\link{us_hospital_details}()},
\code{\link{us_state_distancing_policy}()},
\code{\link{usa_facts_data}()},
\code{\link{who_cases}()}
Other mobility:
\code{\link{descartes_mobility_data}()},
\code{\link{google_mobility_data}()},
\code{\link{healthdata_mobility_data}()}
}
\author{
Sean Davis \href{mailto:seandavi@gmail.com}{seandavi@gmail.com}
}
\concept{data-import}
\concept{mobility}
|
/man/apple_mobility_data.Rd
|
permissive
|
kartechbabu/sars2pack
|
R
| false | true | 5,084 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apple_mobility_data.R
\name{apple_mobility_data}
\alias{apple_mobility_data}
\title{Access Apple mobility data}
\usage{
apple_mobility_data(agree_to_terms = TRUE, max_tries = 3, message_url = FALSE)
}
\arguments{
\item{agree_to_terms}{logical, when TRUE, implies that the user
has agreed to Apple's terms of use. See references and note.}
\item{max_tries}{integer, the number of tries to attempt downloading}
\item{message_url}{logical, output a message with the URL for the day
since Apple changes it daily.}
}
\description{
From Apple's website:
"Learn about COVID‑19 mobility trends in countries/regions and cities. Reports are published daily and reflect requests for directions in Apple Maps. Privacy is one of our core values, so Maps doesn’t associate your data with your Apple ID, and Apple doesn’t keep a history of where you’ve been."
}
\details{
The CSV file and charts on this site show a relative volume of
directions requests per country/region or city compared to a
baseline volume on January 13th, 2020.
We define our day as midnight-to-midnight, Pacific time. Cities
represent usage in greater metropolitan areas and are stably
defined during this period. In many countries/regions and cities,
relative volume has increased since January 13th, consistent with
normal, seasonal usage of Apple Maps. Day of week effects are
important to normalize as you use this data.
Data that is sent from users’ devices to the Maps service is
associated with random, rotating identifiers so Apple doesn’t have
a profile of your movements and searches. Apple Maps has no
demographic information about our users, so we can’t make any
statements about the representativeness of our usage against the
overall population.
These data are available from a URL that changes daily. The parent
page is the place to check to see what is going on if there are problems.
}
\note{
Apple requires that all users agree to their terms of use.
See \url{https://www.apple.com/covid19/mobility}.
}
\examples{
res = apple_mobility_data()
colnames(res)
head(res)
table(res$transportation_type)
require(ggplot2)
require(dplyr)   # provides the pipe used in the examples below
pl = res \%>\%
dplyr::filter(region \%in\% c('Russia','New York City','Italy')) \%>\%
ggplot(aes(x=date)) +
geom_line(aes(y=mobility_index,color=transportation_type)) +
scale_x_date(date_breaks = '1 week', date_labels='\%b-\%d') +
facet_grid(rows=vars(region)) +
ggtitle('Changes in Apple Mobility Index over time')
pl
regs_of_interest = c('Seattle', 'New York City',
'Chicago', 'Italy',
'Russia', 'UK',
'Brazil')
res \%>\%
dplyr::filter(region \%in\% regs_of_interest) \%>\%
ggplot(aes(x=date, y=region, fill=mobility_index)) +
geom_tile() +
facet_grid(rows=vars(transportation_type)) +
ggtitle('Changes in Apple Mobility Index over time')
if(require(viridis)) {
res \%>\%
dplyr::filter(region \%in\% regs_of_interest) \%>\%
ggplot(aes(x=date, y=region, fill=mobility_index)) +
geom_tile() +
facet_grid(rows=vars(transportation_type)) +
scale_fill_viridis() +
ggtitle('Changes in Apple Mobility Index over time')
}
if(require(plotly)) {
ggplotly(pl)
}
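# The Details section notes that day-of-week effects should be normalized.
# A minimal sketch of one approach: a centered 7-day moving average, using
# only columns already used above (region, transportation_type, date,
# mobility_index); 'driving' is one of the types listed by table() above.
ny = subset(res, region == 'New York City' & transportation_type == 'driving')
ny = ny[order(ny$date), ]
ny$mobility_index_7day = as.numeric(stats::filter(ny$mobility_index, rep(1/7, 7), sides = 2))
head(ny[, c('date', 'mobility_index', 'mobility_index_7day')], 10)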
}
\references{
\itemize{
\item \url{https://www.apple.com/covid19/mobility}
}
}
\seealso{
Other data-import:
\code{\link{acaps_government_measures_data}()},
\code{\link{beoutbreakprepared_data}()},
\code{\link{cdc_aggregated_projections}()},
\code{\link{cdc_excess_deaths}()},
\code{\link{cdc_social_vulnerability_index}()},
\code{\link{coronadatascraper_data}()},
\code{\link{coronanet_government_response_data}()},
\code{\link{cov_glue_lineage_data}()},
\code{\link{cov_glue_newick_data}()},
\code{\link{cov_glue_snp_lineage}()},
\code{\link{covidtracker_data}()},
\code{\link{descartes_mobility_data}()},
\code{\link{ecdc_data}()},
\code{\link{economist_excess_deaths}()},
\code{\link{eu_data_cache_data}()},
\code{\link{financial_times_excess_deaths}()},
\code{\link{google_mobility_data}()},
\code{\link{government_policy_timeline}()},
\code{\link{healthdata_mobility_data}()},
\code{\link{healthdata_projections_data}()},
\code{\link{healthdata_testing_data}()},
\code{\link{jhu_data}()},
\code{\link{jhu_us_data}()},
\code{\link{kff_icu_beds}()},
\code{\link{nytimes_county_data}()},
\code{\link{oecd_unemployment_data}()},
\code{\link{owid_data}()},
\code{\link{param_estimates_published}()},
\code{\link{test_and_trace_data}()},
\code{\link{us_county_geo_details}()},
\code{\link{us_county_health_rankings}()},
\code{\link{us_healthcare_capacity}()},
\code{\link{us_hospital_details}()},
\code{\link{us_state_distancing_policy}()},
\code{\link{usa_facts_data}()},
\code{\link{who_cases}()}
Other mobility:
\code{\link{descartes_mobility_data}()},
\code{\link{google_mobility_data}()},
\code{\link{healthdata_mobility_data}()}
}
\author{
Sean Davis \href{mailto:seandavi@gmail.com}{seandavi@gmail.com}
}
\concept{data-import}
\concept{mobility}
|
/R-Portable/tests/utf8-regex.R
|
permissive
|
ksasso/Electron_ShinyApp_Deployment
|
R
| false | false | 8,104 |
r
| ||
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
test.rdocapply.golden <- function(H2Oserver) {
irisPath = system.file("extdata", "iris.csv", package="h2oRClient")
iris.hex = h2o.importFile(H2Oserver, path = irisPath, key = "iris.hex")
summary(apply(iris.hex, 1, sum))
testEnd()
}
doTest("R Doc Apply", test.rdocapply.golden)
|
/R/tests/testdir_docexamples/runit_Rdoc_apply.R
|
permissive
|
svaithianatha/h2o
|
R
| false | false | 387 |
r
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../findNSourceUtils.R')
test.rdocapply.golden <- function(H2Oserver) {
irisPath = system.file("extdata", "iris.csv", package="h2oRClient")
iris.hex = h2o.importFile(H2Oserver, path = irisPath, key = "iris.hex")
summary(apply(iris.hex, 1, sum))
testEnd()
}
doTest("R Doc Apply", test.rdocapply.golden)
|
## Creates a special "matrix" object that caches its inverse.
makeCacheMatrix <- function(x = matrix()){
  inv <- NULL
  set <- function(y){
    x <<- y
    inv <<- NULL
  }
  get <- function() {x}   # was `get <<-`, which assigned into the global environment
  setInverse <- function(inverse) {inv <<- inverse}
  getInverse <- function() {inv}
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Returns the inverse of the matrix wrapped by makeCacheMatrix, computing it
## only when no cached value exists yet.
cachemean <- function(x, ...) {
  inv <- x$getInverse()   # was `x$getmean()`, which is not defined on the object
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setInverse(inv)
  inv
}
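## Quick usage sketch (illustrative matrix; run interactively only):
if (interactive()) {
  m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
  print(cachemean(m))   # computes the inverse and stores it in the cache
  print(cachemean(m))   # second call reports "getting cached data" and reuses it
}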
|
/makeCacheMatrix.R
|
no_license
|
priti-27/week-3
|
R
| false | false | 541 |
r
|
## Creates a special "matrix" object that caches its inverse.
makeCacheMatrix <- function(x = matrix()){
  inv <- NULL
  set <- function(y){
    x <<- y
    inv <<- NULL
  }
  get <- function() {x}   # was `get <<-`, which assigned into the global environment
  setInverse <- function(inverse) {inv <<- inverse}
  getInverse <- function() {inv}
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Returns the inverse of the matrix wrapped by makeCacheMatrix, computing it
## only when no cached value exists yet.
cachemean <- function(x, ...) {
  inv <- x$getInverse()   # was `x$getmean()`, which is not defined on the object
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setInverse(inv)
  inv
}
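## Quick usage sketch (illustrative matrix; run interactively only):
if (interactive()) {
  m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
  print(cachemean(m))   # computes the inverse and stores it in the cache
  print(cachemean(m))   # second call reports "getting cached data" and reuses it
}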
|
# read in data
library(dplyr)   # %>%, group_by, summarize, inner_join, filter, select used below
myurl = "https://liangfgithub.github.io/MovieData/"
movies = readLines(paste0(myurl, 'movies.dat?raw=true'))
movies = strsplit(movies, split = "::", fixed = TRUE, useBytes = TRUE)
movies = matrix(unlist(movies), ncol = 3, byrow = TRUE)
movies = data.frame(movies, stringsAsFactors = FALSE)
colnames(movies) = c('MovieID', 'Title', 'Genres')
movies$MovieID = as.integer(movies$MovieID)
movies$Title = iconv(movies$Title, "latin1", "UTF-8")
small_image_url = "https://liangfgithub.github.io/MovieImages/"
movies$image_url = sapply(movies$MovieID,
function(x) paste0(small_image_url, x, '.jpg?raw=true'))
image = sapply(movies$MovieID,
function(x) paste0(small_image_url, x, '.jpg?raw=true'))
myurl = "https://liangfgithub.github.io/MovieData/"
ratings = read.csv(paste0(myurl, 'ratings.dat?raw=true'),
sep = ':',
colClasses = c('integer', 'NULL'),
header = FALSE)
colnames(ratings) = c('UserID', 'MovieID', 'Rating', 'Timestamp')
# keep popular movies (ratings_per_movie > 500) and join with movie details to get popMovie
popMovie = ratings %>%
group_by(MovieID) %>%
summarize(ratings_per_movie = n(), ave_ratings = mean(Rating)) %>%
inner_join(movies, by = 'MovieID') %>%
filter(ratings_per_movie > 500)
popID = popMovie %>% select(MovieID)
popImage = sapply(popID,
function(x) paste0(small_image_url, x, '.jpg?raw=true'))
|
/MovieRecommender/functions/sample.R
|
no_license
|
chien314/Movie_Recommender_in_R
|
R
| false | false | 1,468 |
r
|
# read in data
library(dplyr)   # %>%, group_by, summarize, inner_join, filter, select used below
myurl = "https://liangfgithub.github.io/MovieData/"
movies = readLines(paste0(myurl, 'movies.dat?raw=true'))
movies = strsplit(movies, split = "::", fixed = TRUE, useBytes = TRUE)
movies = matrix(unlist(movies), ncol = 3, byrow = TRUE)
movies = data.frame(movies, stringsAsFactors = FALSE)
colnames(movies) = c('MovieID', 'Title', 'Genres')
movies$MovieID = as.integer(movies$MovieID)
movies$Title = iconv(movies$Title, "latin1", "UTF-8")
small_image_url = "https://liangfgithub.github.io/MovieImages/"
movies$image_url = sapply(movies$MovieID,
function(x) paste0(small_image_url, x, '.jpg?raw=true'))
image = sapply(movies$MovieID,
function(x) paste0(small_image_url, x, '.jpg?raw=true'))
myurl = "https://liangfgithub.github.io/MovieData/"
ratings = read.csv(paste0(myurl, 'ratings.dat?raw=true'),
sep = ':',
colClasses = c('integer', 'NULL'),
header = FALSE)
colnames(ratings) = c('UserID', 'MovieID', 'Rating', 'Timestamp')
# keep popular movies (ratings_per_movie > 500) and join with movie details to get popMovie
popMovie = ratings %>%
group_by(MovieID) %>%
summarize(ratings_per_movie = n(), ave_ratings = mean(Rating)) %>%
inner_join(movies, by = 'MovieID') %>%
filter(ratings_per_movie > 500)
popID = popMovie %>% select(MovieID)
popImage = sapply(popID,
function(x) paste0(small_image_url, x, '.jpg?raw=true'))
|
filter_df_by_apt <- function(.df, .apt){
df <- .df %>% filter(APT_ICAO == .apt) %>%
filter(YEAR >= min_year) # ensure only 5 years of data
}
#
pick_apt_name <- function(.df, .apt){
name <- .df %>% filter(APT_ICAO == .apt)
name <- name$APT_NAME[1]
}
#
pick_state_name <- function(.df, .apt){
state <- .df %>% filter(APT_ICAO == .apt)
state <- state$STATE_NAME[1]
}
#
pick_apt_iata <- function(.df, .apt){
iata <- .df %>% filter(APT_ICAO == .apt)
iata <- iata$APT_IATA[1]
}
#
landing_page_indicators <- function(.df=db_df, .atfm=atfm_df, .apt){
inds <- .df %>% filter(APT_ICAO == .apt)
ind_tfc_2019 <- inds %>%
select(APT_ICAO, YEAR, NB_NM_TOT) %>% filter(YEAR == 2019) %>%
group_by(APT_ICAO, YEAR) %>%
summarise(NB_NM_TOT = sum(NB_NM_TOT, na.rm = TRUE)) %>% ungroup()
ind_txot_2019 <- inds %>% filter(YEAR == 2019) %>%
group_by(APT_ICAO, YEAR) %>%
summarise( ADD_TAXI_OUT_TIME_MIN = sum(ADD_TAXI_OUT_TIME_MIN, na.rm = TRUE)
,NB_TAXI_OUT_FL = sum(NB_TAXI_OUT_FL, na.rm = TRUE)
)%>% ungroup() %>%
mutate(AVG_ADD_TXOT = round(ADD_TAXI_OUT_TIME_MIN / NB_TAXI_OUT_FL,2) ) %>%
select(AVG_ADD_TXOT)
ind_asma_2019 <- inds %>% filter(YEAR == 2019) %>%
group_by(APT_ICAO, YEAR) %>%
summarise( ADD_ASMA_TIME_MIN = sum(ADD_ASMA_TIME_MIN, na.rm = TRUE)
,NB_ASMA_FL = sum(NB_ASMA_FL, na.rm = TRUE)
)%>% ungroup() %>%
mutate(AVG_ADD_ASMA = round(ADD_ASMA_TIME_MIN / NB_ASMA_FL,2) ) %>%
select(AVG_ADD_ASMA)
ind_atfm_2019 <- .atfm %>% filter(APT_ICAO == .apt, YEAR == 2019) %>%
select(FLT_ARR_1, DLY_APT_ARR_1) %>%
summarise( FLT_ARR_1 = sum(FLT_ARR_1, na.rm = TRUE)
,DLY_APT_ARR_1 = sum(DLY_APT_ARR_1, na.rm = TRUE)) %>%
mutate(AVG_ARR_ATFM = round(DLY_APT_ARR_1 / FLT_ARR_1,2) ) %>%
select(AVG_ARR_ATFM)
out <- ind_tfc_2019 %>%
bind_cols(ind_txot_2019, ind_asma_2019, ind_atfm_2019)
}
#
latest_month_indicators <- function(.df=db_df, .atfm=atfm_df, .apt){
inds <- .df %>% filter(APT_ICAO == .apt)
mth_name <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
ind_tfc_lm <- inds %>%
select(APT_ICAO, YEAR, MONTH_NUM, NB_NM_TOT) %>% na.omit() %>%
filter(YEAR == max(YEAR)) %>% filter(MONTH_NUM == max(MONTH_NUM)) %>%
mutate( MONTH = mth_name[MONTH_NUM]
, TFC = paste0(NB_NM_TOT," (", MONTH, " ", YEAR,")")
) %>%
select(APT_ICAO, TFC)
ind_txot_lm <- inds %>%
select(APT_ICAO, YEAR, MONTH_NUM, ADD_TAXI_OUT_TIME_MIN, NB_TAXI_OUT_FL) %>%
na.omit() %>% filter(YEAR == max(YEAR) ) %>%
filter(MONTH_NUM == max(MONTH_NUM)) %>%
mutate(AVG_ADD_TXOT = round(ADD_TAXI_OUT_TIME_MIN / NB_TAXI_OUT_FL,2)
,AVG_ADD_TXOT= paste0(AVG_ADD_TXOT," (", mth_name[MONTH_NUM], " ", YEAR,")")
) %>%
select(AVG_ADD_TXOT)
ind_asma_lm <- inds %>%
select(APT_ICAO, YEAR, MONTH_NUM, ADD_ASMA_TIME_MIN, NB_ASMA_FL) %>%
na.omit() %>% filter(YEAR == max(YEAR) ) %>%
filter(MONTH_NUM == max(MONTH_NUM)) %>%
mutate( AVG_ADD_ASMA = round(ADD_ASMA_TIME_MIN / NB_ASMA_FL, 2)
,AVG_ADD_ASMA = paste0(AVG_ADD_ASMA," (", mth_name[MONTH_NUM], " ", YEAR,")")
) %>%
select(AVG_ADD_ASMA)
ind_atfm_lm <- .atfm %>% filter(APT_ICAO == .apt) %>%
select(YEAR, MONTH_NUM, FLT_ARR_1, DLY_APT_ARR_1) %>%
filter(YEAR == max(YEAR)) %>% filter(MONTH_NUM == max(MONTH_NUM)) %>%
na.omit() %>%
group_by(YEAR, MONTH_NUM) %>%
summarise( FLT_ARR_1 = sum(FLT_ARR_1, na.rm = TRUE)
,DLY_APT_ARR_1 = sum(DLY_APT_ARR_1, na.rm = TRUE)) %>%
mutate( AVG_ARR_ATFM = round(DLY_APT_ARR_1 / FLT_ARR_1, 2)
,AVG_ARR_ATFM = paste0(AVG_ARR_ATFM," (", mth_name[MONTH_NUM], " ", YEAR,")")) %>%
select(AVG_ARR_ATFM)
inds_lm <- ind_tfc_lm %>%
bind_cols(ind_txot_lm, ind_asma_lm, ind_atfm_lm)
}
#
trim_covid <- function(.df, .apt){
df <- .df %>% filter(APT_ICAO == .apt) %>%
select(DAY, FLTS_2020, FLTS_2019, MOV_AVG_WK)
}
#
pack_thru <- function(.df, .apt){
df <- .df %>% dplyr::filter(APT_ICAO == .apt) %>%
dplyr::mutate(
# DATE = lubridate::dmy(DAY, tz="UTC")
DATE = DAY
,YEAR = year(DATE), MONTH_NUM = month(DATE)
, WEEKDAY = lubridate::wday(DATE, label=TRUE)) %>%
dplyr::filter(YEAR == max(YEAR)) %>% filter(MONTH_NUM == max(MONTH_NUM)) %>%
dplyr::select(APT_ICAO, YEAR, MONTH_NUM, DATE, WEEKDAY, TIME, ROLLING_HOUR_MVT, PHASE) %>%
dplyr::group_by(YEAR, MONTH_NUM, TIME, PHASE) %>% summarise(ROLLING_HOUR_MVT = mean(ROLLING_HOUR_MVT)) %>%
dplyr::ungroup()
}
prepare_params <- function(apt_icao) {
list( #------ start params -------------------------
icao = apt_icao
,iata = pick_apt_iata( db_df , .apt = apt_icao) # merge iata code with other source
,name = pick_apt_name( tfc_df, .apt = apt_icao)
,state = pick_state_name( tfc_df, .apt = apt_icao)
,config= filter_df_by_apt(config_df,.apt = apt_icao)
,ldgsum= landing_page_indicators(db_df, atfm_df, .apt = apt_icao)
,latest= latest_month_indicators(db_df, atfm_df, .apt = apt_icao)
,covid = trim_covid( covid_df, .apt = apt_icao)
,tfc = filter_df_by_apt(tfc_df, .apt = apt_icao)
,thru = pack_thru( thru_df, .apt = apt_icao)
,atfm = filter_df_by_apt(atfm_df, .apt = apt_icao)
,slot = filter_df_by_apt(slot_df, .apt = apt_icao)
,asma = filter_df_by_apt(asma_df, .apt = apt_icao)
,txot = filter_df_by_apt(txot_df, .apt = apt_icao)
,txit = filter_df_by_apt(txit_df, .apt = apt_icao)
,pddly = filter_df_by_apt(pddly_df, .apt = apt_icao)
,turn = filter_df_by_apt(turn_df, .apt = apt_icao)
# ,punc = filter_df_by_apt(punc_df, .apt = apt_icao)
) #----------------- end params ---------------------------
}
|
/R/utils.R
|
no_license
|
rainer-rq-koelle/pru-apt-dashboards
|
R
| false | false | 5,929 |
r
|
filter_df_by_apt <- function(.df, .apt){
df <- .df %>% filter(APT_ICAO == .apt) %>%
filter(YEAR >= min_year) # ensure only 5 years of data
}
#
pick_apt_name <- function(.df, .apt){
name <- .df %>% filter(APT_ICAO == .apt)
name <- name$APT_NAME[1]
}
#
pick_state_name <- function(.df, .apt){
state <- .df %>% filter(APT_ICAO == .apt)
state <- state$STATE_NAME[1]
}
#
pick_apt_iata <- function(.df, .apt){
iata <- .df %>% filter(APT_ICAO == .apt)
iata <- iata$APT_IATA[1]
}
#
landing_page_indicators <- function(.df=db_df, .atfm=atfm_df, .apt){
inds <- .df %>% filter(APT_ICAO == .apt)
ind_tfc_2019 <- inds %>%
select(APT_ICAO, YEAR, NB_NM_TOT) %>% filter(YEAR == 2019) %>%
group_by(APT_ICAO, YEAR) %>%
summarise(NB_NM_TOT = sum(NB_NM_TOT, na.rm = TRUE)) %>% ungroup()
ind_txot_2019 <- inds %>% filter(YEAR == 2019) %>%
group_by(APT_ICAO, YEAR) %>%
summarise( ADD_TAXI_OUT_TIME_MIN = sum(ADD_TAXI_OUT_TIME_MIN, na.rm = TRUE)
,NB_TAXI_OUT_FL = sum(NB_TAXI_OUT_FL, na.rm = TRUE)
)%>% ungroup() %>%
mutate(AVG_ADD_TXOT = round(ADD_TAXI_OUT_TIME_MIN / NB_TAXI_OUT_FL,2) ) %>%
select(AVG_ADD_TXOT)
ind_asma_2019 <- inds %>% filter(YEAR == 2019) %>%
group_by(APT_ICAO, YEAR) %>%
summarise( ADD_ASMA_TIME_MIN = sum(ADD_ASMA_TIME_MIN, na.rm = TRUE)
,NB_ASMA_FL = sum(NB_ASMA_FL, na.rm = TRUE)
)%>% ungroup() %>%
mutate(AVG_ADD_ASMA = round(ADD_ASMA_TIME_MIN / NB_ASMA_FL,2) ) %>%
select(AVG_ADD_ASMA)
ind_atfm_2019 <- .atfm %>% filter(APT_ICAO == .apt, YEAR == 2019) %>%
select(FLT_ARR_1, DLY_APT_ARR_1) %>%
summarise( FLT_ARR_1 = sum(FLT_ARR_1, na.rm = TRUE)
,DLY_APT_ARR_1 = sum(DLY_APT_ARR_1, na.rm = TRUE)) %>%
mutate(AVG_ARR_ATFM = round(DLY_APT_ARR_1 / FLT_ARR_1,2) ) %>%
select(AVG_ARR_ATFM)
out <- ind_tfc_2019 %>%
bind_cols(ind_txot_2019, ind_asma_2019, ind_atfm_2019)
}
#
latest_month_indicators <- function(.df=db_df, .atfm=atfm_df, .apt){
inds <- .df %>% filter(APT_ICAO == .apt)
mth_name <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
ind_tfc_lm <- inds %>%
select(APT_ICAO, YEAR, MONTH_NUM, NB_NM_TOT) %>% na.omit() %>%
filter(YEAR == max(YEAR)) %>% filter(MONTH_NUM == max(MONTH_NUM)) %>%
mutate( MONTH = mth_name[MONTH_NUM]
, TFC = paste0(NB_NM_TOT," (", MONTH, " ", YEAR,")")
) %>%
select(APT_ICAO, TFC)
ind_txot_lm <- inds %>%
select(APT_ICAO, YEAR, MONTH_NUM, ADD_TAXI_OUT_TIME_MIN, NB_TAXI_OUT_FL) %>%
na.omit() %>% filter(YEAR == max(YEAR) ) %>%
filter(MONTH_NUM == max(MONTH_NUM)) %>%
mutate(AVG_ADD_TXOT = round(ADD_TAXI_OUT_TIME_MIN / NB_TAXI_OUT_FL,2)
,AVG_ADD_TXOT= paste0(AVG_ADD_TXOT," (", mth_name[MONTH_NUM], " ", YEAR,")")
) %>%
select(AVG_ADD_TXOT)
ind_asma_lm <- inds %>%
select(APT_ICAO, YEAR, MONTH_NUM, ADD_ASMA_TIME_MIN, NB_ASMA_FL) %>%
na.omit() %>% filter(YEAR == max(YEAR) ) %>%
filter(MONTH_NUM == max(MONTH_NUM)) %>%
mutate( AVG_ADD_ASMA = round(ADD_ASMA_TIME_MIN / NB_ASMA_FL, 2)
,AVG_ADD_ASMA = paste0(AVG_ADD_ASMA," (", mth_name[MONTH_NUM], " ", YEAR,")")
) %>%
select(AVG_ADD_ASMA)
ind_atfm_lm <- .atfm %>% filter(APT_ICAO == .apt) %>%
select(YEAR, MONTH_NUM, FLT_ARR_1, DLY_APT_ARR_1) %>%
filter(YEAR == max(YEAR)) %>% filter(MONTH_NUM == max(MONTH_NUM)) %>%
na.omit() %>%
group_by(YEAR, MONTH_NUM) %>%
summarise( FLT_ARR_1 = sum(FLT_ARR_1, na.rm = TRUE)
,DLY_APT_ARR_1 = sum(DLY_APT_ARR_1, na.rm = TRUE)) %>%
mutate( AVG_ARR_ATFM = round(DLY_APT_ARR_1 / FLT_ARR_1, 2)
,AVG_ARR_ATFM = paste0(AVG_ARR_ATFM," (", mth_name[MONTH_NUM], " ", YEAR,")")) %>%
select(AVG_ARR_ATFM)
inds_lm <- ind_tfc_lm %>%
bind_cols(ind_txot_lm, ind_asma_lm, ind_atfm_lm)
}
#
trim_covid <- function(.df, .apt){
df <- .df %>% filter(APT_ICAO == .apt) %>%
select(DAY, FLTS_2020, FLTS_2019, MOV_AVG_WK)
}
#
pack_thru <- function(.df, .apt){
df <- .df %>% dplyr::filter(APT_ICAO == .apt) %>%
dplyr::mutate(
# DATE = lubridate::dmy(DAY, tz="UTC")
DATE = DAY
,YEAR = year(DATE), MONTH_NUM = month(DATE)
, WEEKDAY = lubridate::wday(DATE, label=TRUE)) %>%
dplyr::filter(YEAR == max(YEAR)) %>% filter(MONTH_NUM == max(MONTH_NUM)) %>%
dplyr::select(APT_ICAO, YEAR, MONTH_NUM, DATE, WEEKDAY, TIME, ROLLING_HOUR_MVT, PHASE) %>%
dplyr::group_by(YEAR, MONTH_NUM, TIME, PHASE) %>% summarise(ROLLING_HOUR_MVT = mean(ROLLING_HOUR_MVT)) %>%
dplyr::ungroup()
}
prepare_params <- function(apt_icao) {
list( #------ start params -------------------------
icao = apt_icao
,iata = pick_apt_iata( db_df , .apt = apt_icao) # merge iata code with other source
,name = pick_apt_name( tfc_df, .apt = apt_icao)
,state = pick_state_name( tfc_df, .apt = apt_icao)
,config= filter_df_by_apt(config_df,.apt = apt_icao)
,ldgsum= landing_page_indicators(db_df, atfm_df, .apt = apt_icao)
,latest= latest_month_indicators(db_df, atfm_df, .apt = apt_icao)
,covid = trim_covid( covid_df, .apt = apt_icao)
,tfc = filter_df_by_apt(tfc_df, .apt = apt_icao)
,thru = pack_thru( thru_df, .apt = apt_icao)
,atfm = filter_df_by_apt(atfm_df, .apt = apt_icao)
,slot = filter_df_by_apt(slot_df, .apt = apt_icao)
,asma = filter_df_by_apt(asma_df, .apt = apt_icao)
,txot = filter_df_by_apt(txot_df, .apt = apt_icao)
,txit = filter_df_by_apt(txit_df, .apt = apt_icao)
,pddly = filter_df_by_apt(pddly_df, .apt = apt_icao)
,turn = filter_df_by_apt(turn_df, .apt = apt_icao)
# ,punc = filter_df_by_apt(punc_df, .apt = apt_icao)
) #----------------- end params ---------------------------
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{cola.original}
\alias{cola.original}
\title{Cola Original}
\format{A \code{\link{data.frame}}.}
\usage{
cola.original
}
\description{
A data file from a survey of the Australian cola market in 2007.
}
\keyword{datasets}
|
/man/cola.original.Rd
|
no_license
|
gkalnytskyi/flipExampleData
|
R
| false | true | 336 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{cola.original}
\alias{cola.original}
\title{Cola Original}
\format{A \code{\link{data.frame}}.}
\usage{
cola.original
}
\description{
A data file from a survey of the Australian cola market in 2007.
}
\keyword{datasets}
|
/funciones.R
|
no_license
|
dcsolano10/cancer
|
R
| false | false | 58,574 |
r
| ||
# still need to insert factor name in error message; look for which
.BlankStop = function() {
stop("\n")
return(invisible(NULL))
}
.FactorNotFactor = function(which=NULL) {
stop("The factor is not stored as a factor.\nTry using as.factor() on a copy of the data.frame.")
return(invisible(NULL))
}
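# One way to act on the TODO above, left as a commented sketch so current
# behaviour is unchanged (the exact wording below is an assumption):
# .FactorNotFactor = function(which = NULL) {
#   what = if (is.null(which)) "The factor" else paste0("The variable '", which, "'")
#   stop(what, " is not stored as a factor.\nTry using as.factor() on a copy of the data.frame.")
#   return(invisible(NULL))
# }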
.GroupNotFactor = function() {
stop("The group variable is not a factor.\nTry using as.factor() on a copy of the data.frame.")
return(invisible(NULL))
}
.MissingFolder = function() {
stop("Specified folder does not exist.\n")
return(invisible(NULL))
}
.MissingMethod = function() {
stop("Specified method is not yet available.\n")
return(invisible(NULL))
}
.NoBrailleRFolder= function() {
stop("No permanent MyBrailleR folder was found.\n Use `SetupBrailleR()` to fix this problem.")
return(invisible(NULL))
}
.NoResponse = function() {
stop("You must specify either the Response or the ResponseName.")
return(invisible(NULL))
}
.NotADataFrame = function() {
stop("The named dataset is not a data.frame.")
return(invisible(NULL))
}
.NotAProperFileName = function() {
stop('file must be a character string or connection')
return(invisible(NULL))
}
.NotViewable = function() {
stop("The named data is not a data.frame, matrix, or vector so cannot be viewed.")
return(invisible(NULL))
}
.NoYNeeds2X = function() {
stop("If y is not supplied, x must have two numeric columns")
return(invisible(NULL))
}
.PredictorNotNumeric = function() {
stop("The predictor variable is not numeric.")
return(invisible(NULL))
}
.ResponseNotNumeric = function() {
stop("The response variable is not numeric.")
return(invisible(NULL))
}
.ResponseNotAVector = function() {
stop("Input response is not a vector.")
return(invisible(NULL))
}
.XOrYNotNumeric = function(which="y") {
stop("The x or y variable is not numeric.")
return(invisible(NULL))
}
|
/R/Stop.R
|
no_license
|
ajrgodfrey/BrailleR
|
R
| false | false | 1,968 |
r
|
# still need to insert factor name in error message; look for which
.BlankStop = function() {
stop("\n")
return(invisible(NULL))
}
.FactorNotFactor = function(which=NULL) {
stop("The factor is not stored as a factor.\nTry using as.factor() on a copy of the data.frame.")
return(invisible(NULL))
}
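# One way to act on the TODO above, left as a commented sketch so current
# behaviour is unchanged (the exact wording below is an assumption):
# .FactorNotFactor = function(which = NULL) {
#   what = if (is.null(which)) "The factor" else paste0("The variable '", which, "'")
#   stop(what, " is not stored as a factor.\nTry using as.factor() on a copy of the data.frame.")
#   return(invisible(NULL))
# }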
.GroupNotFactor = function() {
stop("The group variable is not a factor.\nTry using as.factor() on a copy of the data.frame.")
return(invisible(NULL))
}
.MissingFolder = function() {
stop("Specified folder does not exist.\n")
return(invisible(NULL))
}
.MissingMethod = function() {
stop("Specified method is not yet available.\n")
return(invisible(NULL))
}
.NoBrailleRFolder= function() {
stop("No permanent MyBrailleR folder was found.\n Use `SetupBrailleR()` to fix this problem.")
return(invisible(NULL))
}
.NoResponse = function() {
stop("You must specify either the Response or the ResponseName.")
return(invisible(NULL))
}
.NotADataFrame = function() {
stop("The named dataset is not a data.frame.")
return(invisible(NULL))
}
.NotAProperFileName = function() {
stop('file must be a character string or connection')
return(invisible(NULL))
}
.NotViewable = function() {
stop("The named data is not a data.frame, matrix, or vector so cannot be viewed.")
return(invisible(NULL))
}
.NoYNeeds2X = function() {
stop("If y is not supplied, x must have two numeric columns")
return(invisible(NULL))
}
.PredictorNotNumeric = function() {
stop("The predictor variable is not numeric.")
return(invisible(NULL))
}
.ResponseNotNumeric = function() {
stop("The response variable is not numeric.")
return(invisible(NULL))
}
.ResponseNotAVector = function() {
stop("Input response is not a vector.")
return(invisible(NULL))
}
.XOrYNotNumeric = function(which="y") {
stop("The x or y variable is not numeric.")
return(invisible(NULL))
}
|
power.consumption.all <- read.csv2(
"household_power_consumption.txt",
#"household_power_consumption_subset.txt",
colClasses = c("character", "character", "character", "character", "character", "character", "character", "character", "character")
)
#head(power.consumption.all)
power.consumption <- power.consumption.all[power.consumption.all$Date == '1/2/2007' | power.consumption.all$Date == '2/2/2007', ]
power.consumption$DateTime <- apply(power.consumption, 1, function(row) paste(row[1], row[2], sep = " "))
power.consumption$DateTime <- as.POSIXct(power.consumption$DateTime, format="%d/%m/%Y %H:%M:%S")
power.consumption$Global_active_power <- as.numeric(power.consumption$Global_active_power)
power.consumption$Global_reactive_power <- as.numeric(power.consumption$Global_reactive_power)
power.consumption$Voltage <- as.numeric(power.consumption$Voltage)
power.consumption$Global_intensity <- as.numeric(power.consumption$Global_intensity)
power.consumption$Sub_metering_1 <- as.numeric(power.consumption$Sub_metering_1)
power.consumption$Sub_metering_2 <- as.numeric(power.consumption$Sub_metering_2)
power.consumption$Sub_metering_3 <- as.numeric(power.consumption$Sub_metering_3)
#nrow(power.consumption)
#head(power.consumption)
par(mfrow = c(2, 2))
with(power.consumption, plot(DateTime, Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l"))
with(power.consumption, plot(DateTime, Voltage, xlab = "datetime", ylab = "Voltage", type = "l"))
with(power.consumption, plot(DateTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l"))
lines(power.consumption$DateTime, power.consumption$Sub_metering_2, col = "red")
lines(power.consumption$DateTime, power.consumption$Sub_metering_3, col = "blue")
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.75, bty = "n")
with(power.consumption, plot(DateTime, Global_reactive_power, xlab = "datetime", type = "l"))
dev.copy(png, "plot4.png")
dev.off()
|
/plot4.R
|
no_license
|
swapnildipankar/ExData_Plotting1
|
R
| false | false | 2,145 |
r
|
power.consumption.all <- read.csv2(
"household_power_consumption.txt",
#"household_power_consumption_subset.txt",
colClasses = c("character", "character", "character", "character", "character", "character", "character", "character", "character")
)
#head(power.consumption.all)
power.consumption <- power.consumption.all[power.consumption.all$Date == '1/2/2007' | power.consumption.all$Date == '2/2/2007', ]
power.consumption$DateTime <- apply(power.consumption, 1, function(row) paste(row[1], row[2], sep = " "))
power.consumption$DateTime <- as.POSIXct(power.consumption$DateTime, format="%d/%m/%Y %H:%M:%S")
power.consumption$Global_active_power <- as.numeric(power.consumption$Global_active_power)
power.consumption$Global_reactive_power <- as.numeric(power.consumption$Global_reactive_power)
power.consumption$Voltage <- as.numeric(power.consumption$Voltage)
power.consumption$Global_intensity <- as.numeric(power.consumption$Global_intensity)
power.consumption$Sub_metering_1 <- as.numeric(power.consumption$Sub_metering_1)
power.consumption$Sub_metering_2 <- as.numeric(power.consumption$Sub_metering_2)
power.consumption$Sub_metering_3 <- as.numeric(power.consumption$Sub_metering_3)
#nrow(power.consumption)
#head(power.consumption)
par(mfrow = c(2, 2))
with(power.consumption, plot(DateTime, Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l"))
with(power.consumption, plot(DateTime, Voltage, xlab = "datetime", ylab = "Voltage", type = "l"))
with(power.consumption, plot(DateTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l"))
lines(power.consumption$DateTime, power.consumption$Sub_metering_2, col = "red")
lines(power.consumption$DateTime, power.consumption$Sub_metering_3, col = "blue")
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.75, bty = "n")
with(power.consumption, plot(DateTime, Global_reactive_power, xlab = "datetime", type = "l"))
dev.copy(png, "plot4.png")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_geo_dist.R
\name{get.geo.dist}
\alias{get.geo.dist}
\title{Compute the geodesic distance between two coordinate locations}
\usage{
get.geo.dist(long1, lat1, long2, lat2, units = "m")
}
\arguments{
\item{long1}{Numerical argument -- the longitude of the first coordinate location}
\item{lat1}{Numerical argument -- the latitude of the first coordinate location}
\item{long2}{Numerical argument -- the longitude of the second coordinate location}
\item{lat2}{Numerical argument -- the latitude of the second coordinate location}
\item{units}{The geodesic distance will be computed in terms of these units -- defaults to \code{"m"}, as in the usage above}
}
\value{
Returns the geodesic distance between two coordinate locations
}
\description{
This "helper" function is used in crop.sample.area()
}
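\examples{
# Illustrative call only; the coordinates below are arbitrary test values.
get.geo.dist(long1 = -150.0, lat1 = 61.2, long2 = -147.7, lat2 = 64.8,
             units = "km")
}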
|
/man/get.geo.dist.Rd
|
no_license
|
jBernardADFG/cpuesim
|
R
| false | true | 852 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_geo_dist.R
\name{get.geo.dist}
\alias{get.geo.dist}
\title{Compute the geodesic distance between two coordinate locations}
\usage{
get.geo.dist(long1, lat1, long2, lat2, units = "m")
}
\arguments{
\item{long1}{Numerical argument -- the longitude of the first coordinate location}
\item{lat1}{Numerical argument -- the latitude of the first coordinate location}
\item{long2}{Numerical argument -- the longitude of the second coordinate location}
\item{lat2}{Numerical argument -- the latitude of the second coordinate location}
\item{units}{The geodesic distance will be computed in terms of these units -- defaults to \code{"m"}, as in the usage above}
}
\value{
Returns the geodesic distance between two coordinate locations
}
\description{
This "helper" function is used in crop.sample.area()
}
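\examples{
# Illustrative call only; the coordinates below are arbitrary test values.
get.geo.dist(long1 = -150.0, lat1 = 61.2, long2 = -147.7, lat2 = 64.8,
             units = "km")
}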
|
suppressPackageStartupMessages(library(float))
set.seed(1234)
tol = 1e-6
x = crossprod(matrix(stats::rnorm(30), 10))
xs = fl(x)
y = 1:3
ys = fl(y)
z = cbind(y, rev(y))
zs = fl(z)
test = dbl(backsolve(xs, ys, upper.tri=FALSE))
truth = backsolve(x, y, upper.tri=FALSE)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, ys))
truth = backsolve(x, y)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, ys, k=2))
truth = backsolve(x, y, k=2)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, zs))
truth = backsolve(x, z)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, zs, k=2))
truth = backsolve(x, z, k=2)
stopifnot(all.equal(test, truth, tol=tol))
|
/tests/backsolve.r
|
permissive
|
wrathematics/float
|
R
| false | false | 726 |
r
|
suppressPackageStartupMessages(library(float))
set.seed(1234)
tol = 1e-6
x = crossprod(matrix(stats::rnorm(30), 10))
xs = fl(x)
y = 1:3
ys = fl(y)
z = cbind(y, rev(y))
zs = fl(z)
test = dbl(backsolve(xs, ys, upper.tri=FALSE))
truth = backsolve(x, y, upper.tri=FALSE)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, ys))
truth = backsolve(x, y)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, ys, k=2))
truth = backsolve(x, y, k=2)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, zs))
truth = backsolve(x, z)
stopifnot(all.equal(test, truth, tol=tol))
test = dbl(backsolve(xs, zs, k=2))
truth = backsolve(x, z, k=2)
stopifnot(all.equal(test, truth, tol=tol))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colors.R
\name{values2colors}
\alias{values2colors}
\title{values to colors}
\usage{
values2colors(v, n = 100, zlim, col = heat.colors, na.col = "gray50",
...)
}
\arguments{
\item{v}{the values}
\item{n}{number of colors}
\item{zlim}{limits}
\item{col}{a color palette function, e.g. heat.colors, gray.colors}
}
\description{
values to colors
}
|
/man/values2colors.Rd
|
no_license
|
antiphon/sphere
|
R
| false | true | 414 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colors.R
\name{values2colors}
\alias{values2colors}
\title{values to colors}
\usage{
values2colors(v, n = 100, zlim, col = heat.colors, na.col = "gray50",
...)
}
\arguments{
\item{v}{the values}
\item{n}{number of colors}
\item{zlim}{limits}
\item{col}{a color palette function, e.g. heat.colors, gray.colors}
}
\description{
values to colors
}
|
#' Calculate the delta_delta_ct model
#'
#' Uses the \eqn{C_T} values and a reference gene and a group to calculate the delta
#' delta \eqn{C_T} model to estimate the normalized relative expression of target
#' genes.
#'
#' @param df A data.frame of \eqn{C_T} values with genes in the columns and samples
#' in rows rows
#' @param group_var A character vector of a grouping variable. The length of
#' this variable should equal the number of rows of df
#' @param reference_gene A character string of the column name of a control gene
#' @param reference_group A character string of the control group in group_var
#' @param mode A character string of; 'separate_tube' (default) or 'same_tube'.
#' This is to indicate whether the different genes were run in separate or the
#' same PCR tube
#' @param plot A logical (default is FALSE)
#' @param ... Arguments passed to customize plot
#'
#' @return A data.frame of 8 columns:
#' \itemize{
#' \item group The unique entries in group_var
#' \item gene The column names of df. reference_gene is dropped
#' \item normalized The \eqn{C_T} value (or the average \eqn{C_T} value) of target genes
#' after subtracting that of the reference_gene
#' \item calibrated The normalized average \eqn{C_T} value of target genes after
#' subtracting that of the reference_group
#' \item relative_expression The expression of target genes normalized by
#' a reference_gene and calibrated by a reference_group
#' \item error The standard deviation of the relative_expression
#' \item lower The lower interval of the relative_expression
#' \item upper The upper interval of the relative_expression
#' }
#' When \code{plot} is TRUE, returns a bar graph of the relative expression of
#' the genes in the column and the groups in the column group. Error bars are
#' drawn using the columns lower and upper. When more than one gene is plotted, the
#' default is dodge bars. When the argument facet is TRUE a separate panel is
#' drawn for each gene.
#'
#' @details The comparative \eqn{C_T} methods assume that the cDNA templates of the
#' gene/s of interest as well as the control/reference gene have similar
#' amplification efficiency. And that this amplification efficiency is near
#' perfect. Meaning, at a certain threshold during the linear portion of the
#' PCR reaction, the amount of the gene of the interest and the control double
#' each cycle. Another assumptions is that, the expression difference between
#' two genes or two samples can be captured by subtracting one (gene or
#' sample of interest) from another (reference). This final assumption
#' requires also that these references don't change with the treatment or
#' the course in question.
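#'
#' As a purely illustrative worked example (numbers invented): if the target
#' gene averages a C_T of 30 and the reference gene 20 in the treated group
#' (delta C_T = 10), while the control group gives 25 and 20 (delta C_T = 5),
#' then delta delta C_T = 10 - 5 = 5 and the normalized relative expression is
#' 2^-5, i.e. about a 32-fold reduction relative to the control group.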
#'
#' @examples
#' ## locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate all values and errors in one step
#' pcr_ddct(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain')
#'
#' # return a plot
#' pcr_ddct(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' plot = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom tidyr gather
#' @importFrom dplyr mutate full_join
#'
#' @export
pcr_ddct <- function(df, group_var, reference_gene, reference_group,
mode = 'separate_tube', plot = FALSE, ...) {
# calculate the delta_ct
if(mode == 'separate_tube') {
# calculate average ct and normalize
ave <- .pcr_average(df, group_var = group_var)
dct <- .pcr_normalize(ave, reference_gene = reference_gene)
} else if(mode == 'same_tube') {
# normalize and average normalized ct values
dct <- .pcr_normalize(df, reference_gene = reference_gene)
dct <- .pcr_average(dct, group_var = group_var)
}
# retain the normalized ct
delta_ct <- gather(dct, gene, normalized, -group)
# calculate the delta_delta_ct
ddct <- .pcr_calibrate(dct, reference_group = reference_group, tidy = TRUE)
# calculate the relative expression as 2^-(delta delta Ct); the negative
# exponent is consistent with the lower/upper interval calculations below
norm_rel <- mutate(ddct, relative_expression = 2 ^ -calibrated)
if(mode == 'separate_tube') {
# calculate the error from ct values
sds <- .pcr_sd(df, group_var = group_var)
error <- .pcr_error(sds, reference_gene = reference_gene, tidy = TRUE)
} else if(mode == 'same_tube') {
# calculate error from normalized ct values
dct <- .pcr_normalize(df, reference_gene = reference_gene)
error <- .pcr_sd(dct, group_var = group_var, tidy = TRUE)
}
# merge data.frames and calculate intervals
res <- full_join(delta_ct, ddct) %>%
full_join(norm_rel) %>%
full_join(error) %>%
mutate(lower = 2 ^ -(calibrated + error),
upper = 2 ^ -(calibrated - error))
# return
# return plot when plot == TRUE
if(plot == TRUE) {
gg <- .pcr_plot_analyze(res, method = 'delta_delta_ct', ...)
return(gg)
} else {
return(res)
}
}
#' Calculate the delta_ct model
#'
#' Uses the \eqn{C_T} values and a reference group to calculate the delta \eqn{C_T}
#' model to estimate the relative fold change of a gene between groups
#'
#' @inheritParams pcr_ddct
#'
#' @return A data.frame of 7 columns
#' \itemize{
#' \item group The unique entries in group_var
#' \item gene The column names of df
#' \item calibrated The average \eqn{C_T} value of target genes after
#' subtracting that of the reference_group
#' \item fold_change The fold change of genes relative to a reference_group
#' \item error The standard deviation of the fold_change
#' \item lower The lower interval of the fold_change
#' \item upper The upper interval of the fold_change
#' }
#' When \code{plot} is TRUE, returns a bar graph of the fold change of
#' the genes in the column and the groups in the column group. Error bars are
#' drawn using the columns lower and upper. When more than one gene is plotted, the
#' default is dodge bars. When the argument facet is TRUE a separate panel is
#' drawn for each gene.
#'
#' @details This method is a variation of the double delta \eqn{C_T} model,
#' \code{\link{pcr_ddct}}. It can be used to calculate the fold change
#' of a gene in one sample relative to the others. For example, it can be used to
#' compare and choose control/reference genes.
#'
#' @references Livak, Kenneth J, and Thomas D Schmittgen. 2001. “Analysis of
#' Relative Gene Expression Data Using Real-Time Quantitative PCR and the
#' Double Delta CT Method.” Methods 25 (4). ELSEVIER.
#' doi:10.1006/meth.2001.1262.
#'
#' @examples
#' # locate and read file
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # make a data.frame of two identical columns
#' pcr_hk <- data.frame(
#' GAPDH1 = ct1$GAPDH,
#' GAPDH2 = ct1$GAPDH
#' )
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate calibration
#' pcr_dct(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain')
#'
#' # returns a plot
#' pcr_dct(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' plot = TRUE)
#'
#' # returns a plot with facets
#' pcr_dct(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' plot = TRUE,
#' facet = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom tidyr gather
#' @importFrom dplyr mutate full_join
#'
#' @export
pcr_dct <- function(df, group_var, reference_gene, reference_group,
mode = 'separate_tube', plot = FALSE, ...) {
if(mode == 'separate_tube') {
# average ct and calibrate to a reference group
ave <- .pcr_average(df, group_var = group_var)
dct <- .pcr_calibrate(ave, reference_group = reference_group)
} else if(mode == 'same_tube') {
# calibrate ct and average
dct <- .pcr_calibrate(df, reference_group = reference_group)
dct <- .pcr_average(dct, group_var = group_var)
}
# retain calibrated values
# calculate the fold change
calib <- gather(dct, gene, calibrated, -group) %>%
mutate(fold_change = 2 ^ -calibrated)
if(mode == 'separate_tube') {
# calculate the standard deviation from ct values
sds <- .pcr_sd(df, group_var = group_var, tidy = TRUE)
} else if(mode == 'same_tube') {
# calibrate ct values to a reference group
# calculated sd from calibrated values
dct <- .pcr_calibrate(df, reference_group = reference_group)
sds <- .pcr_sd(dct, group_var = group_var, tidy = TRUE)
}
# join data frame and calculate intervals
res <- full_join(calib, sds) %>%
mutate(lower = 2 ^ -(calibrated + error),
upper = 2 ^ -(calibrated - error))
# return
# return plot when plot == TRUE
if(plot == TRUE) {
gg <- .pcr_plot_analyze(res, method = 'delta_ct', ...)
return(gg)
} else {
return(res)
}
}
#' Calculate the standard curve model
#'
#' Uses the \eqn{C_T} values and a reference gene and a group, in addition to the
#' intercept and slope of each gene form a serial dilution experiment, to calculate
#' the standard curve model and estimate the normalized relative expression of the
#' target genes.
#'
#' @inheritParams pcr_ddct
#' @param intercept A numeric vector of intercepts with length equal to the number of genes
#' @param slope A numeric vector of slopes with length equal to the number of genes
#'
#' @return A data.frame of 7 columns
#' \itemize{
#' \item group The unique entries in group_var
#' \item gene The column names of df
#' \item normalized The normalized expression of target genes relative to a reference_gene
#' \item calibrated The calibrated expression of target genes relative to a reference_group
#' \item error The standard deviation of normalized relative expression
#' \item lower The lower interval of the normalized relative expression
#' \item upper The upper interval of the normalized relative expression
#' }
#' When \code{plot} is TRUE, returns a bar graph of the calibrated expression
#' of the genes in the column and the groups in the column group. Error bars
#' are drawn using the columns lower and upper. When more than one gene is plotted,
#' the default is dodge bars. When the argument facet is TRUE a separate
#' panel is drawn for each gene.
#'
#' @details This model doesn't assume perfect amplification but rather actively
#' uses the amplification efficiency in calculating the relative expression. So when the
#' amplification efficiency of all genes is 100\%, both methods should give
#' similar results. The standard curve method is applied using two steps.
#' First, serial dilutions of the mRNAs from the samples of interest are used
#' as input to the PCR reaction. The linear trend of the log input amount and
#' the resulting \eqn{C_T} values for each gene are used to calculate an intercept
#' and a slope. Secondly, these intercepts and slopes are used to calculate the
#' amounts of mRNA of the genes of interest and the control/reference in the
#' samples of interest and the control sample/reference. These amounts are
#' finally used to calculate the relative expression.
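#'
#' Concretely, assuming the usual log-linear standard curve
#' C_T = intercept + slope * log10(amount), the input amount for each gene in
#' a sample can be recovered as amount = 10^((C_T - intercept) / slope); these
#' amounts are then normalized by the reference gene and calibrated by the
#' reference group as described above.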
#'
#' @references Livak, Kenneth J, and Thomas D Schmittgen. 2001. “Analysis of
#' Relative Gene Expression Data Using Real-Time Quantitative PCR and the
#' Double Delta CT Method.” Methods 25 (4). ELSEVIER.
#' doi:10.1006/meth.2001.1262.
#'
#' @examples
#' # locate and read file
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # make a vector of RNA amounts
#' amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate curve
#' standard_curve <- pcr_assess(ct3, amount = amount, method = 'standard_curve')
#' intercept <- standard_curve$intercept
#' slope <- standard_curve$slope
#'
#' # make grouping variable
#' group <- rep(c('brain', 'kidney'), each = 6)
#'
#' # apply the standard curve method
#' pcr_curve(ct1,
#' group_var = group,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope)
#'
#' # returns a plot
#' pcr_curve(ct1,
#' group_var = group,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope,
#' plot = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom tidyr gather
#' @importFrom dplyr full_join mutate
#'
#' @export
pcr_curve <- function(df, group_var, reference_gene, reference_group,
mode = 'separate_tube', intercept, slope,
plot = FALSE, ...) {
# calculate the amount of rna in samples
amounts <- .pcr_amount(df,
intercept = intercept,
slope = slope)
if(mode == 'separate_tube') {
# average amounts and normalize by a reference_gene
ave <- .pcr_average(amounts, group_var = group_var)
norm <- .pcr_normalize(ave,
reference_gene = reference_gene,
mode = 'divide')
} else if(mode == 'same_tube') {
# normalize amounts and average
norm <- .pcr_normalize(amounts,
reference_gene = reference_gene,
mode = 'divide')
norm <- .pcr_average(norm, group_var = group_var)
}
# retain normalized amounts
normalized <- gather(norm, gene, normalized, -group)
# calibrate to a reference_group
calib <- .pcr_calibrate(norm, reference_group = reference_group,
mode = 'divide', tidy = TRUE)
if(mode == 'separate_tube') {
# calculate cv from amounts
cv <- .pcr_cv(amounts, group_var = group_var)
error <- .pcr_error(cv, reference_gene = reference_gene, tidy = TRUE)
} else if(mode == 'same_tube') {
# calculate cv from normalized amounts
norm <- .pcr_normalize(amounts,
reference_gene = reference_gene,
mode = 'divide')
error <- .pcr_cv(norm, group_var = group_var, tidy = TRUE)
}
# join data.frames and calculate intervals
res <- full_join(normalized, calib) %>%
full_join(error) %>%
mutate(lower = calibrated - error,
upper = calibrated + error,
error = error * normalized)
# return
# return plot when plot == TRUE
if(plot == TRUE) {
gg <- .pcr_plot_analyze(res, method = 'relative_curve', ...)
return(gg)
} else {
return(res)
}
}
#' Apply qPCR analysis methods
#'
#' A unified interface to invoke different analysis methods of qPCR data.
#'
#' @inheritParams pcr_ddct
#' @inheritParams pcr_curve
#' @param method A character string; 'delta_delta_ct' default, 'delta_ct' or
#' 'relative_curve' for invoking a certain analysis model
#' @param ... Arguments passed to the methods
#'
#' @return A data.frame by default, when \code{plot} is TRUE returns a plot.
#' For details; \link{pcr_ddct}, \link{pcr_dct} and \link{pcr_curve}.
#'
#' @details The different analysis methods can be invoked using the
#' argument method with 'delta_delta_ct' default, 'delta_ct' or
#' 'relative_curve' for the double delta \eqn{C_T}, delta ct or the standard curve
#' model respectively. Alternatively, the same methods can be applied by using
#' the corresponding functions directly: \link{pcr_ddct}, \link{pcr_dct} or
#' \link{pcr_curve}
#'
#' @references Livak, Kenneth J, and Thomas D Schmittgen. 2001. “Analysis of
#' Relative Gene Expression Data Using Real-Time Quantitative PCR and the
#' Double Delta CT Method.” Methods 25 (4). ELSEVIER.
#' doi:10.1006/meth.2001.1262.
#'
#' @examples
#' # applying the delta delta ct method
#' ## locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate all values and errors in one step
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' method = 'delta_delta_ct')
#'
#' # return a plot
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' method = 'delta_delta_ct',
#' plot = TRUE)
#'
#' # applying the delta ct method
#' # make a data.frame of two identical columns
#' pcr_hk <- data.frame(
#' GAPDH1 = ct1$GAPDH,
#' GAPDH2 = ct1$GAPDH
#' )
#'
#' # calculate fold change
#' pcr_analyze(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' method = 'delta_ct')
#'
#' # return a plot
#' pcr_analyze(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' method = 'delta_ct',
#' plot = TRUE)
#'
#' # applying the standard curve method
#' # locate and read file
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' # make a vector of RNA amounts
#' amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate curve
#' standard_curve <- pcr_assess(ct3, amount = amount, method = 'standard_curve')
#' intercept <- standard_curve$intercept
#' slope <- standard_curve$slope
#'
#' # apply the standard curve method
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope,
#' method = 'relative_curve')
#'
#' # return a plot
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope,
#' method = 'relative_curve',
#' plot = TRUE)
#'
#' @export
pcr_analyze <- function(df, method = 'delta_delta_ct', ...) {
switch(method,
'delta_delta_ct' = pcr_ddct(df, ...),
'delta_ct' = pcr_dct(df, ...),
'relative_curve' = pcr_curve(df, ...))
}
|
/R/analyses_fun.R
|
no_license
|
felix28dls/ddCt_QPCR_Analysis
|
R
| false | false | 18,281 |
r
|
#' Calculate the delta_delta_ct model
#'
#' Uses the \eqn{C_T} values and a reference gene and a group to calculate the delta
#' delta \eqn{C_T} model to estimate the normalized relative expression of target
#' genes.
#'
#' @param df A data.frame of \eqn{C_T} values with genes in the columns and samples
#' in rows rows
#' @param group_var A character vector of a grouping variable. The length of
#' this variable should equal the number of rows of df
#' @param reference_gene A character string of the column name of a control gene
#' @param reference_group A character string of the control group in group_var
#' @param mode A character string of; 'separate_tube' (default) or 'same_tube'.
#' This is to indicate whether the different genes were run in separate or the
#' same PCR tube
#' @param plot A logical (default is FALSE)
#' @param ... Arguments passed to customize plot
#'
#' @return A data.frame of 8 columns:
#' \itemize{
#' \item group The unique entries in group_var
#' \item gene The column names of df. reference_gene is dropped
#' \item normalized The \eqn{C_T} value (or the average \eqn{C_T} value) of target genes
#' after subtracting that of the reference_gene
#' \item calibrated The normalized average \eqn{C_T} value of target genes after
#' subtracting that of the reference_group
#' \item relative_expression The expression of target genes normalized by
#' a reference_gene and calibrated by a reference_group
#' \item error The standard deviation of the relative_expression
#' \item lower The lower interval of the relative_expression
#' \item upper The upper interval of the relative_expression
#' }
#' When \code{plot} is TRUE, returns a bar graph of the relative expression of
#' the genes in the column and the groups in the column group. Error bars are
#' drawn using the columns lower and upper. When more than one gene is plotted, the
#' default is dodge bars. When the argument facet is TRUE a separate panel is
#' drawn for each gene.
#'
#' @details The comparative \eqn{C_T} methods assume that the cDNA templates of the
#' gene/s of interest as well as the control/reference gene have similar
#' amplification efficiency. And that this amplification efficiency is near
#' perfect. Meaning, at a certain threshold during the linear portion of the
#' PCR reaction, the amount of the gene of the interest and the control double
#' each cycle. Another assumptions is that, the expression difference between
#' two genes or two samples can be captured by subtracting one (gene or
#' sample of interest) from another (reference). This final assumption
#' requires also that these references don't change with the treatment or
#' the course in question.
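#'
#' As a purely illustrative worked example (numbers invented): if the target
#' gene averages a C_T of 30 and the reference gene 20 in the treated group
#' (delta C_T = 10), while the control group gives 25 and 20 (delta C_T = 5),
#' then delta delta C_T = 10 - 5 = 5 and the normalized relative expression is
#' 2^-5, i.e. about a 32-fold reduction relative to the control group.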
#'
#' @examples
#' ## locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate all values and errors in one step
#' pcr_ddct(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain')
#'
#' # return a plot
#' pcr_ddct(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' plot = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom tidyr gather
#' @importFrom dplyr mutate full_join
#'
#' @export
pcr_ddct <- function(df, group_var, reference_gene, reference_group,
mode = 'separate_tube', plot = FALSE, ...) {
# calculate the delta_ct
if(mode == 'separate_tube') {
# calculate average ct and normalize
ave <- .pcr_average(df, group_var = group_var)
dct <- .pcr_normalize(ave, reference_gene = reference_gene)
} else if(mode == 'same_tube') {
# normalize and average normalized ct values
dct <- .pcr_normalize(df, reference_gene = reference_gene)
dct <- .pcr_average(dct, group_var = group_var)
}
# retain the normalized ct
delta_ct <- gather(dct, gene, normalized, -group)
# calculate the delta_delta_ct
ddct <- .pcr_calibrate(dct, reference_group = reference_group, tidy = TRUE)
# calculate the relative expression as 2^-(delta delta Ct); the negative
# exponent is consistent with the lower/upper interval calculations below
norm_rel <- mutate(ddct, relative_expression = 2 ^ -calibrated)
if(mode == 'separate_tube') {
# calculate the error from ct values
sds <- .pcr_sd(df, group_var = group_var)
error <- .pcr_error(sds, reference_gene = reference_gene, tidy = TRUE)
} else if(mode == 'same_tube') {
# calculate error from normalized ct values
dct <- .pcr_normalize(df, reference_gene = reference_gene)
error <- .pcr_sd(dct, group_var = group_var, tidy = TRUE)
}
# merge data.frames and calculate intervals
res <- full_join(delta_ct, ddct) %>%
full_join(norm_rel) %>%
full_join(error) %>%
mutate(lower = 2 ^ -(calibrated + error),
upper = 2 ^ -(calibrated - error))
# return
# return plot when plot == TRUE
if(plot == TRUE) {
gg <- .pcr_plot_analyze(res, method = 'delta_delta_ct', ...)
return(gg)
} else {
return(res)
}
}
#' Calculate the delta_ct model
#'
#' Uses the \eqn{C_T} values and a reference group to calculate the delta \eqn{C_T}
#' model to estimate the relative fold change of a gene between groups
#'
#' @inheritParams pcr_ddct
#'
#' @return A data.frame of 7 columns
#' \itemize{
#' \item group The unique entries in group_var
#' \item gene The column names of df
#' \item calibrated The average \eqn{C_T} value of target genes after
#' subtracting that of the reference_group
#' \item fold_change The fold change of genes relative to a reference_group
#' \item error The standard deviation of the fold_change
#' \item lower The lower interval of the fold_change
#' \item upper The upper interval of the fold_change
#' }
#' When \code{plot} is TRUE, returns a bar graph of the fold change of
#' the genes in the column and the groups in the column group. Error bars are
#' drawn using the columns lower and upper. When more than one gene is plotted, the
#' default is dodge bars. When the argument facet is TRUE a separate panel is
#' drawn for each gene.
#'
#' @details This method is a variation of the double delta \eqn{C_T} model,
#' \code{\link{pcr_ddct}}. It can be used to calculate the fold change
#' of a gene in one sample relative to the others. For example, it can be used to
#' compare and choose control/reference genes.
#'
#' @references Livak, Kenneth J, and Thomas D Schmittgen. 2001. “Analysis of
#' Relative Gene Expression Data Using Real-Time Quantitative PCR and the
#' Double Delta CT Method.” Methods 25 (4). ELSEVIER.
#' doi:10.1006/meth.2001.1262.
#'
#' @examples
#' # locate and read file
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # make a data.frame of two identical columns
#' pcr_hk <- data.frame(
#' GAPDH1 = ct1$GAPDH,
#' GAPDH2 = ct1$GAPDH
#' )
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate calibration
#' pcr_dct(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain')
#'
#' # returns a plot
#' pcr_dct(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' plot = TRUE)
#'
#' # returns a plot with facets
#' pcr_dct(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' plot = TRUE,
#' facet = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom tidyr gather
#' @importFrom dplyr mutate full_join
#'
#' @export
pcr_dct <- function(df, group_var, reference_gene, reference_group,
mode = 'separate_tube', plot = FALSE, ...) {
if(mode == 'separate_tube') {
# average ct and calibrate to a reference group
ave <- .pcr_average(df, group_var = group_var)
dct <- .pcr_calibrate(ave, reference_group = reference_group)
} else if(mode == 'same_tube') {
# calibrate ct and average
dct <- .pcr_calibrate(df, reference_group = reference_group)
dct <- .pcr_average(dct, group_var = group_var)
}
# retain calibrated values
# calculate the fold change
calib <- gather(dct, gene, calibrated, -group) %>%
mutate(fold_change = 2 ^ -calibrated)
if(mode == 'separate_tube') {
# calculate the standard deviation from ct values
sds <- .pcr_sd(df, group_var = group_var, tidy = TRUE)
} else if(mode == 'same_tube') {
# calibrate ct values to a reference group
# calculated sd from calibrated values
dct <- .pcr_calibrate(df, reference_group = reference_group)
sds <- .pcr_sd(dct, group_var = group_var, tidy = TRUE)
}
# join data frame and calculate intervals
res <- full_join(calib, sds) %>%
mutate(lower = 2 ^ -(calibrated + error),
upper = 2 ^ -(calibrated - error))
# return
# return plot when plot == TRUE
if(plot == TRUE) {
gg <- .pcr_plot_analyze(res, method = 'delta_ct', ...)
return(gg)
} else {
return(res)
}
}
#' Calculate the standard curve model
#'
#' Uses the \eqn{C_T} values, a reference gene and a reference group, in addition
#' to the intercept and slope of each gene from a serial dilution experiment, to
#' calculate the standard curve model and estimate the normalized relative
#' expression of the target genes.
#'
#' @inheritParams pcr_ddct
#' @param intercept A numeric vector of intercepts whose length equals the number of genes
#' @param slope A numeric vector of slopes whose length equals the number of genes
#'
#' @return A data.frame of 7 columns
#' \itemize{
#' \item group The unique entries in group_var
#' \item gene The column names of df
#' \item normalized The normalized expression of target genes relative to a reference_gene
#' \item calibrated The calibrated expression of target genes relative to a reference_group
#' \item error The standard deviation of normalized relative expression
#' \item lower The lower interval of the normalized relative expression
#' \item upper The upper interval of the normalized relative expression
#' }
#' When \code{plot} is TRUE, returns a bar graph of the calibrated expression
#' of the genes in the column and the groups in the column group. Error bars
#' are drawn using the columns lower and upper. When more than one gene is
#' plotted, the default is dodged bars. When the argument facet is TRUE, a
#' separate panel is drawn for each gene.
#'
#' @details This model doesn't assume perfect amplification but rather actively
#' uses the measured amplification efficiency in calculating the relative
#' expression. So when the amplification efficiency of all genes is 100\%, both
#' methods should give similar results. The standard curve method is applied in
#' two steps.
#' First, serial dilutions of the mRNAs from the samples of interest are used
#' as input to the PCR reaction. The linear trend of the log input amount and
#' the resulting \eqn{C_T} values for each gene are used to calculate an intercept
#' and a slope. Secondly, these intercepts and slopes are used to calculate the
#' amounts of mRNA of the genes of interest and the control/reference in the
#' samples of interest and the control sample/reference. These amounts are
#' finally used to calculate the relative expression.
#'
#' @references Livak, Kenneth J, and Thomas D Schmittgen. 2001. “Analysis of
#' Relative Gene Expression Data Using Real-Time Quantitative PCR and the
#' Double Delta CT Method.” Methods 25 (4). ELSEVIER.
#' doi:10.1006/meth.2001.1262.
#'
#' @examples
#' # locate and read file
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # make a vector of RNA amounts
#' amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate curve
#' standard_curve <- pcr_assess(ct3, amount = amount, method = 'standard_curve')
#' intercept <- standard_curve$intercept
#' slope <- standard_curve$slope
#'
#' # make grouping variable
#' group <- rep(c('brain', 'kidney'), each = 6)
#'
#' # apply the standard curve method
#' pcr_curve(ct1,
#' group_var = group,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope)
#'
#' # returns a plot
#' pcr_curve(ct1,
#' group_var = group,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope,
#' plot = TRUE)
#'
#' @importFrom magrittr %>%
#' @importFrom tidyr gather
#' @importFrom dplyr full_join mutate
#'
#' @export
pcr_curve <- function(df, group_var, reference_gene, reference_group,
mode = 'separate_tube', intercept, slope,
plot = FALSE, ...) {
# calculate the amount of rna in samples
amounts <- .pcr_amount(df,
intercept = intercept,
slope = slope)
if(mode == 'separate_tube') {
# average amounts and normalize by a reference_gene
ave <- .pcr_average(amounts, group_var = group_var)
norm <- .pcr_normalize(ave,
reference_gene = reference_gene,
mode = 'divide')
} else if(mode == 'same_tube') {
# normalize amounts and average
norm <- .pcr_normalize(amounts,
reference_gene = reference_gene,
mode = 'divide')
norm <- .pcr_average(norm, group_var = group_var)
}
# retain normalized amounts
normalized <- gather(norm, gene, normalized, -group)
# calibrate to a reference_group
calib <- .pcr_calibrate(norm, reference_group = reference_group,
mode = 'divide', tidy = TRUE)
if(mode == 'separate_tube') {
# calculate cv from amounts
cv <- .pcr_cv(amounts, group_var = group_var)
error <- .pcr_error(cv, reference_gene = reference_gene, tidy = TRUE)
} else if(mode == 'same_tube') {
# calculate cv from normalized amounts
norm <- .pcr_normalize(amounts,
reference_gene = reference_gene,
mode = 'divide')
error <- .pcr_cv(norm, group_var = group_var, tidy = TRUE)
}
# join data.frames and calculate intervals
res <- full_join(normalized, calib) %>%
full_join(error) %>%
mutate(lower = calibrated - error,
upper = calibrated + error,
error = error * normalized)
# return
# return plot when plot == TRUE
if(plot == TRUE) {
gg <- .pcr_plot_analyze(res, method = 'relative_curve', ...)
return(gg)
} else {
return(res)
}
}
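# Illustrative sketch (assumed behaviour of the internal helpers, hypothetical
# numbers): the standard curve maps a C_T value back to an input amount via the
# fitted line C_T = intercept + slope * log10(amount), i.e.
#   amount <- 10 ^ ((ct - intercept) / slope)
# Normalized expression is then amount[gene] / amount[reference_gene], and the
# calibrated value divides that ratio by the same ratio in the reference_group.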
#' Apply qPCR analysis methods
#'
#' A unified interface to invoke different analysis methods of qPCR data.
#'
#' @inheritParams pcr_ddct
#' @inheritParams pcr_curve
#' @param method A character string; 'delta_delta_ct' default, 'delta_ct' or
#' 'relative_curve' for invoking a certain analysis model
#' @param ... Arguments passed to the methods
#'
#' @return A data.frame by default; when \code{plot} is TRUE, returns a plot.
#' For details, see \link{pcr_ddct}, \link{pcr_dct} and \link{pcr_curve}.
#'
#' @details The different analysis methods can be invoked using the
#' argument method with 'delta_delta_ct' default, 'delta_ct' or
#' 'relative_curve' for the double delta \eqn{C_T}, delta \eqn{C_T} or the standard
#' curve model, respectively. Alternatively, the same methods can be applied by
#' calling the corresponding functions directly: \link{pcr_ddct}, \link{pcr_dct} or
#' \link{pcr_curve}.
#'
#' @references Livak, Kenneth J, and Thomas D Schmittgen. 2001. “Analysis of
#' Relative Gene Expression Data Using Real-Time Quantitative PCR and the
#' Double Delta CT Method.” Methods 25 (4). ELSEVIER.
#' doi:10.1006/meth.2001.1262.
#'
#' @examples
#' # applying the delta delta ct method
#' ## locate and read raw ct data
#' fl <- system.file('extdata', 'ct1.csv', package = 'pcr')
#' ct1 <- readr::read_csv(fl)
#'
#' # add grouping variable
#' group_var <- rep(c('brain', 'kidney'), each = 6)
#'
#' # calculate all values and errors in one step
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' method = 'delta_delta_ct')
#'
#' # return a plot
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' method = 'delta_delta_ct',
#' plot = TRUE)
#'
#' # applying the delta ct method
#' # make a data.frame of two identical columns
#' pcr_hk <- data.frame(
#' GAPDH1 = ct1$GAPDH,
#' GAPDH2 = ct1$GAPDH
#' )
#'
#' # calculate fold change
#' pcr_analyze(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' method = 'delta_ct')
#'
#' # return a plot
#' pcr_analyze(pcr_hk,
#' group_var = group_var,
#' reference_group = 'brain',
#' method = 'delta_ct',
#' plot = TRUE)
#'
#' # applying the standard curve method
#' # locate and read file
#' fl <- system.file('extdata', 'ct3.csv', package = 'pcr')
#' ct3 <- readr::read_csv(fl)
#'
#' # make a vector of RNA amounts
#' amount <- rep(c(1, .5, .2, .1, .05, .02, .01), each = 3)
#'
#' # calculate curve
#' standard_curve <- pcr_assess(ct3, amount = amount, method = 'standard_curve')
#' intercept <- standard_curve$intercept
#' slope <- standard_curve$slope
#'
#' # apply the standard curve method
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope,
#' method = 'relative_curve')
#'
#' # return a plot
#' pcr_analyze(ct1,
#' group_var = group_var,
#' reference_gene = 'GAPDH',
#' reference_group = 'brain',
#' intercept = intercept,
#' slope = slope,
#' method = 'relative_curve',
#' plot = TRUE)
#'
#' @export
pcr_analyze <- function(df, method = 'delta_delta_ct', ...) {
switch(method,
'delta_delta_ct' = pcr_ddct(df, ...),
'delta_ct' = pcr_dct(df, ...),
'relative_curve' = pcr_curve(df, ...))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combi_3.R
\name{gmdh.combi_3}
\alias{gmdh.combi_3}
\title{GMDH COMBI auxiliary functions}
\usage{
gmdh.combi_3(X, y, G = 2)
}
\description{
Performs auxiliary tasks for predict.mia
}
\keyword{internal}
|
/man/gmdh.combi_3.Rd
|
no_license
|
perelom3/GMDHreg
|
R
| false | true | 277 |
rd
|
## MS script to process account$billing.geo.code
# add "hotspots"
# US / non-US geos
# standardize geos
# group geos by city, state
# map geos
library(stringr)
library(fields)
library(mi)
geo <- rawData$accounts
geo <- as.data.frame(geo[, c(1,3,5)]) #c(1,3)
table(str_length(geo[,2]))
# add missing zero to four-digit US geos
for (i in 1:19833){
if(str_length(geo[i, 2]) == 4){
geo[i,2] <- str_pad(geo[i,2], 5, "left", "0")
print(geo[i,2])
}
}
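# Vectorized equivalent of the loop above (a sketch; str_pad leaves values that
# already have 5 or more characters unchanged):
# geo[,2] <- str_pad(geo[,2], 5, "left", "0")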
table(str_length(geo[,2]))
# trim +4 from nine-digit US geos
for (i in 1:19833){
if(str_length(geo[i, 2]) == 10){
geo[i,2] <- str_split_fixed(geo[i,2], "-", 2)[1]
print(geo[i,2])
}
}
table(str_length(geo[,2]))
## inspect
geo$billing.zip.code[geo$billing.zip.code ==""] <- NA
geo$billing.city[geo$billing.city ==""] <- NA
mp.plot(geo, y.order = TRUE, x.order = F, clustered = FALSE, gray.scale = TRUE)
## tag 1 if originally NULL in billing.zip.code and billing.city; 0 otherwise
geo$missing <- 0
geo$missing[is.na(geo$billing.zip.code)&is.na(geo$billing.city)] <- 1
table(geo$missing)
# read in and process zip code directory
zipDir <- read.csv('data/free-zipcode-database-Primary.csv',colClasses='character')
# add missing zero to four-digit US geos
for (i in 1:dim(zipDir)[[1]]){
if(str_length(zipDir[i, 1]) < 5){
zipDir[i,1] <- str_pad(zipDir[i,1], 5, "left", "0")
}
}
table(str_length(zipDir[,1]))
zipDir <- subset(zipDir, select = c(Zipcode,City,State,Lat,Long)) # keep only relevant fields
# merge city, state info to geo
geo <- merge(geo, zipDir, by.x="billing.zip.code", by.y="Zipcode",all.x=T)
names(geo)
# merge in hotspots from QGIS
hot <- read.csv("data/hotspot.csv", as.is=T)
hot <- hot[,c(4,16)]
geo <- merge(geo, hot, by="account.id", all.x=T)
# tag accounts with null billing.zip.codes as "CA" if billing.city is a California city
geo[19751,3] <- NA #removed value with troublesome "/"
caCity <- as.data.frame(table(subset(geo, State=="CA", select=City))) #list of CA cities
noZip <- is.na(geo$billing.zip.code) # index of accounts with no billing.zip.code value
df.noZip <- subset(geo, is.na(billing.zip.code))
######## this is code I can't get to work ##################
#geo$test <- "" # create temp column to test code. If it works, then update directly to geo$State
#for (i in 1:973){
# geo$test[str_detect(as.character(geo[noZip,3]), ignore.case(as.character(caCity[i,1])))] <- "CA" # need to subset for missing
#}
#table(geo$test) # CA must be less than 2955
###########################################################
noZipIndices = which(noZip)
for(j in noZipIndices){
for (i in 1:973){
city = str_trim(tolower(as.character(geo[j,3])))
cal = str_trim(tolower(as.character(caCity[i,1])))
if(!is.na(city) && !is.na(cal) && city == cal) {
print(paste("Assigned ", j, " city ", city))
geo$State[j] <- "CA" # need to subset for missing
break
}
}
}
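# Vectorized alternative to the nested loop above (a sketch; assumes column 3 is
# billing.city, as the loop does):
# ca.cities <- str_trim(tolower(as.character(caCity[[1]])))
# cities <- str_trim(tolower(as.character(geo[[3]])))
# geo$State[noZip & !is.na(cities) & cities %in% ca.cities] <- "CA"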
table(geo$State)
#######
# dump csv with geos for use as categorical predictors
write.csv(geo, "data/geo.account.csv", row.names=F)
# add distance from account to the 3 locations
geo <- read.csv("data/geo.account.csv")
# locations
dBerkley <- c(37.867005,-122.261542)
dSF <- c(37.7763272,-122.421545)
dPeninsula <- c(37.4320436,-122.1661352)
venues <- rbind(dBerkley, dSF, dPeninsula)
venues <- venues[,c(2,1)]
colnames(venues) = c("Long","Lat")
locDist <- rdist.earth(geo[,c(8,7)], venues)
geo <- cbind(geo,locDist)
## dump csv for use in data.r
write.csv(geo, "data/geo.account.csv", row.names=F)
######## top predictors for mapping (uses main.gbm.r objects) ######
topPred = summary(gbm.orch)
write.csv(topPred, "topPred.csv", row.names=F)
# dump csv for mapping
geo <- merge(geo, data$allSetAll, by="account.id", all.y=T)
colnames(geo)[1] <- "accountID"
colnames(geo)[2] <- "billingZipCode"
write.csv(geo, "viz/topPred.csv", row.names=F)
# dump locations for mapping
venues <- venues[,c(2,1)]
write.csv(venues, "viz/venues.csv", row.names=T)
#need to add field name to row names manually
####################################
####### old code below ############
# add $is.us for US/non-us accounts ## inserted into data.r
rawData$accounts$is.us = 1 # MS: tag foreign accounts by geo
for (i in 1:dim(rawData$accounts)[1]){
if(str_detect(rawData$accounts[i, 3], "[A-Z]|[a-z]")){
rawData$accounts$is.us[i] <- 0
print(rawData$accounts[i, c(1,3,11)])
}
}
# dump csv with geos for geocoding
geo.list <- as.data.frame(table(geo[,2]))
names(geo.list) <- c("geo", "count")
write.csv(geo.list, "data/billing.geo.csv", row.names=F)
geo = read.csv('data/geo.account.csv',colClasses='character')
rawData$geo <- geo
data <- rawData
catGeo <- c("State", "City") # categorical variables
numGeo <- c("Lat", "Long") # numeric variables
data$geoFactors = data$geo[, c("account.id", catGeo)]
data$geoFactors[catGeo] = sapply(data$geoFactors[catGeo], as.factor)
data$geoNum = data$geo[, c("account.id", numGeo)]
data$accounts$geo.state = "" # MS: add state predictor
states = data$geo[, c("account.id", "State")]
data$accounts$geo.state = merge(data$accounts, states, by="account.id", all.x=T) # MS: pull in state from zip code merge
|
/geo.2.R
|
no_license
|
MatthewSchumwinger/towerProperty
|
R
| false | false | 5,153 |
r
|
## Plot3.R script
source("./load_dataset.R")
##
# open png grDevice
png(filename = "plot3.png", width = 480, height = 480, units = "px")
plot(epc$Datetime, epc$Sub_metering_1,
type = "l",
col = "black",
xlab = "",
ylab = "Energy sub metering")
lines(epc$Datetime, epc$Sub_metering_2, col = "red")
lines(epc$Datetime, epc$Sub_metering_3, col = "blue")
legend("topright",
col = c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lwd = 1)
dev.off()
|
/plot3.R
|
no_license
|
jahirul76/ExData_Plotting1
|
R
| false | false | 534 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/probSign.R
\name{probSign}
\alias{probSign}
\title{Compute probability of positive or negative sign from bootPairs output}
\usage{
probSign(out, tau = 0.476)
}
\arguments{
\item{out}{output from bootPairs with p-1 columns and n999 rows}
\item{tau}{threshold to determine what value is too close to
zero; the default tau=0.476 is equivalent to a 15 percent threshold for
the unanimity index ui}
}
\value{
sgn {When \code{mtx} has p columns, \code{sgn}
reports pairwise p-1 signs representing
(fixing the first column in each pair)
the average sign after averaging the
output of \code{bootPairs(mtx)} (an n999 by p-1 matrix),
each column containing resampled `sum' values summarizing the weighted sums
associated with all three criteria from the
function \code{silentPairs(mtx)}
applied to each bootstrap sample separately.}
}
\description{
If there are p columns of data, \code{probSign} produces a p-1 by 1 vector
of probabilities of correct signs assuming that the mean of n999 values
has the correct sign and assuming that m of the 'sum' index values inside the
range [-tau, tau] are neither positive nor negative but
indeterminate or ambiguous (being too close to zero). That is,
the denominator of P(+1) or P(-1) is (n999-m) if m signs are too close to zero.
}
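\details{
A sketch of the computation implied by the description above (not taken from the
package source): let \eqn{s_1, \dots, s_{n999}} denote the bootstrap `sum' values
and let m be the number of them with \eqn{|s_i| \le tau}. Then
\deqn{P(+1) = \frac{n_{+}}{n999 - m}, \qquad P(-1) = \frac{n_{-}}{n999 - m},}
where \eqn{n_{+}} and \eqn{n_{-}} count the values above tau and below -tau,
respectively.
}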
\examples{
\dontrun{
options(np.messages = FALSE)
set.seed(34);x=sample(1:10);y=sample(2:11)
bb=bootPairs(cbind(x,y),n999=29)
probSign(bb,tau=0.476) #gives summary stats for n999 bootstrap sum computations
bb=bootPairs(airquality,n999=999);options(np.messages=FALSE)
probSign(bb,tau=0.476)#signs for n999 bootstrap sum computations
data('EuroCrime')
attach(EuroCrime)
bb=bootPairs(cbind(crim,off),n999=29) #col.1= crim causes off
#hence positive signs are more intuitively meaningful.
#note that n999=29 is too small for real problems, chosen for quickness here.
probSign(bb,tau=0.476)#signs for n999 bootstrap sum computations
}
}
\references{
Vinod, H. D. `Generalized Correlation and Kernel Causality with
Applications in Development Economics' in Communications in
Statistics -Simulation and Computation, 2015,
\doi{10.1080/03610918.2015.1122048}
Vinod, H. D. and Lopez-de-Lacalle, J. (2009). 'Maximum entropy bootstrap
for time series: The meboot R package.' Journal of Statistical Software,
Vol. 29(5), pp. 1-19.
Vinod, H. D. Causal Paths and Exogeneity Tests
in {Generalcorr} Package for Air Pollution and Monetary Policy
(June 6, 2017). Available at SSRN: \url{https://www.ssrn.com/abstract=2982128}
}
\seealso{
See Also \code{\link{silentPairs}}.
}
\author{
Prof. H. D. Vinod, Economics Dept., Fordham University, NY
}
\concept{bootstrap}
\concept{kernel regression}
\concept{meboot}
\concept{pairwise comparisons}
|
/man/probSign.Rd
|
no_license
|
cran/generalCorr
|
R
| false | true | 2,785 |
rd
|
tema_gg_blank <- function() {
ggplot2::theme(
rect = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
text = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank()
)
}
|
/R/utils_tema.R
|
no_license
|
nupec/ods6
|
R
| false | false | 212 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/betaDist.R
\name{betaDist}
\alias{betaDist}
\title{The Beta distribution}
\usage{
betaDist(x, alpha, beta)
}
\arguments{
\item{x}{Double - A value within the interval [0,1].}
\item{alpha}{Double - A value greater than zero.}
\item{beta}{Double - A value greater than zero.}
}
\value{
Double - The corresponding probability.
}
\description{
The beta distribution is a continuous probability distribution defined on the interval [0,1].
}
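\details{
For reference (a standard fact about the distribution, not a statement about what
\code{betaDist} returns), the Beta density with parameters \eqn{\alpha} and
\eqn{\beta} is
\deqn{f(x; \alpha, \beta) = \frac{x^{\alpha - 1} (1 - x)^{\beta - 1}}{B(\alpha, \beta)}, \quad 0 \le x \le 1.}
}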
\author{
J.C. Lemm, P.v.W. Crommelin
}
|
/man/betaDist.Rd
|
no_license
|
PhilippVWC/myBayes
|
R
| false | true | 556 |
rd
|
###
# QUESTION 1
###
install.packages("tidyverse")
library(tidyverse)
nomero <- function(name) {
name = tolower(name)
name = gsub(' ', '', name)
total = 0
name = match(unlist(strsplit(name, split="")), letters)
for (i in name) {
total = total + i
}
return(total)
}
name = 'filipe zabala'
nomero(name)
###
# QUESTION 2
###
# Use set.seed(m) and generate 10*m observations from a normal distribution with
# mean m and standard deviation s. Plot the histogram and obtain the main
# position (location) measures of the simulated values.
name = 'bruna priscila suzane'
m = nomero(name)
s <- m/4
n <- 10*m
set.seed(m)
x <- rnorm(n, mean = m, sd = s)    # 10*m observations with mean m and sd s
pnorm(m + s, mean = m, sd = s)     # P(X <= m + s) for the same normal
# Normal (Gaussian) sample histogram and reference density
par(mfrow = c(1, 2))
hist(x, col = 'orange', main = 'Simulated sample', xlab = 'x')
curve(dnorm(x, mean = m, sd = s), from = m - 4*s, to = m + 4*s, col = 'orange')
summary(x)                         # main position measures of the simulated values
|
/questao_1&2.R
|
no_license
|
brunaoliveira/t1_estatistica
|
R
| false | false | 806 |
r
|
###############################################################################
################################ Standardise ##################################
###############################################################################
#' Use standard names and spellings
#'
#' @description Standardise the names of lineage groups, neuron compartments and transmitters.
#'
#' @param x a character vector to be standardised.
#' @param invert return compartment numbers rather than names.
#'
#' @return a character vector
#' @export
#' @rdname standardise
standard_transmitters <- function(x){
x[grepl("^Neurotrans|^neurotrans",x)] = "transmitter"
x[grepl("^ACh|^Ach|^ach|^acet|^ChA|^CHa|^cholin|^ACH",x)] = "acetylcholine"
x[grepl("^gaba|^GABA|^GAD",x)] = "GABA"
x[grepl("^glut|^vGlut|^Vglut|^glutamate|^Glutamate|^GLUT",x)] = "glutamate"
x[grepl("^5-HT|^5HT|^Dope|^dope|^Dopa|^dopa|^DOP",x)] = "dopamine"
x[grepl("^Sero|^sero|^TH-|^SER",x)] = "serotonin"
x[grepl("^Oct|^oct|^OCT",x)] = "octopamine"
x[grepl("^Unknow|NA|unknow|^None|^none",x)] = "unknown"
x[is.na(x)] = "unknown"
x = tolower(x)
x
}
#' @export
#' @rdname standardise
standard_statuses <- function(x, invert= FALSE){
x = tolower(x)
standard_status <-function(z, invert = FALSE){
if(invert){
z[is.na(z)] = "u"
z[z=="done"] = "d"
z[z=="unassessed"] = "u"
z[z=="incomplete"] = "i"
z[z=="complete"] = "c"
z[z=="adequate"] = "a"
z[z=="merge_error"] = "m"
z[z=="needs_extending"] = "e "
z[z=="wrong_hemilineage"] = "w"
z[z=="wrong_side"] = "s"
z[z=="not_neuron"] = "n"
z[z=="tiny"] = "t"
}else{
z[is.na(z)] = "unassessed"
z[z=="d"] = "done"
z[z=="u"] = "unassessed"
z[z=="i"] = "incomplete"
z[z=="c"] = "complete"
z[z=="a"] = "adequate"
z[z=="m"] = "merge_error"
z[z=="e"] = "needs_extending "
z[z=="w"] = "wrong_hemilineage"
z[z=="s"] = "wrong_side"
z[z=="n"] = "not_neuron"
z[z=="t"] = "tiny"
}
paste(sort(z),collapse="/",sep="/")
}
y = strsplit(x=x,split="/| / | /|/ ")
z = sapply(y,standard_status)
z
}
# u = not yet examined by a trusted human
# i = incomplete [very small fragment]
# c = complete [well fleshed out neuron, may even have most medium/small branches]
# a = adequate [there is a cell body fibre, axon and dendrite]
# m = noticeable merge error [this neuron is merged to another]
# e = needs extending [not quite adequate, but more than a tiny fragment]
# w = wrong hemilineage [based on its soma position and cell body fibre, this neuron looks like it is not in the same hemilineage as others of this tab]
# s = wrong side [soma is on the wrong hemisphere, given the name of this tab]
# n = not a neuron [this segmentation is not a neuron, i.e. glia, erroneous]
#' @export
#' @rdname standardise
standard_lineages <- function(x){
x[grepl("^ItoLee_l|^itolee_l|^ItoLee_L|^itolee_L",x)] = "ito_lee_lineage"
x[grepl("^hartenstein_l|^Hartenstein_l|^Volker_l|^volker_l|
^hartenstein_L|^Hartenstein_L|^Volker_L|^volker_L",x)] = "hartenstein_lineage"
x[grepl("^ItoLee_h|^itolee_h",x)] = "ito_lee_hemilineage"
x[grepl("^hartenstein_h|^Hartenstein_h|^Volker_h|^volker_h|
^hartenstein_H|^Hartenstein_H|^Volker_h|^volker_H",x)] = "hartenstein_hemilineage"
x[is.na(x)] = "unknown"
x
}
#' @export
#' @rdname standardise
standard_compartments <- function(x, invert = FALSE){
x = tolower(x)
if(invert){
x[x=="dendrite"] = 3
x[x=="axon"] = 2
x[x=="soma"] = 1
x[x=="primary.dendrite"] = 4
x[x=="primary.neurite"] = 7
}else{
x[x==0] = "unknown"
x[x==3] = "dendrite"
x[x==2] = "axon"
x[x==1] = "soma"
x[x==4] = "primary.dendrite"
x[x==7] = "primary.neurite"
}
x
}
#' @export
#' @rdname standardise
standardise <- standardize <- function(x){
x <- standard_transmitters(x)
x <- standard_lineages(x)
x <- standard_compartments(x)
x <- standard_statuses(x)
x
}
# hidden
standardise_quality <- function(x){
x = tolower(x)
x[x=="e"] = "good"
x[x=="o"] = "medium"
x[x=="p"] = "poor"
x[x=="t"] = "tract"
x[x=="n"] = "none"
x
}
#' @export
#' @rdname standardise
standard_workflow <- function(x, invert= FALSE){
x = tolower(x)
standard_work <-function(z, invert = FALSE){
if(invert){
z[is.na(z)] = "t"
z[z=="trace"] = "t"
z[z=="inputs"] = "i"
z[z=="outputs"] = "o"
z[z=="match"] = "m"
z[z=="find_line"] = "l"
}else{
z[is.na(z)] = "trace"
z[z=="t"] = "trace"
z[z=="i"] = "inputs"
z[z=="o"] = "outputs"
z[z=="m"] = "match"
z[z=="l"] = "find_line"
}
paste(sort(z),collapse="/",sep="/")
}
y = strsplit(x=x,split="/| / | /|/ ")
z = sapply(y,standard_work)
z
}
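# Usage sketch (hypothetical inputs, shown as comments): the standardisers above
# map free-form annotations onto controlled vocabularies, e.g.
# standard_transmitters(c("ACh", "vGlut", NA))
# #> "acetylcholine" "glutamate" "unknown"
# standard_compartments(c(2, 3, 1))
# #> "axon" "dendrite" "soma"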
|
/R/hemibrain_standardise.R
|
no_license
|
natverse/hemibrainr
|
R
| false | false | 4,816 |
r
|
#######################################################################################################################
############### Bean seed microbiome analysis for the rain out shelter experiment: OTU 97% ############################
#######################################################################################################################
# Date: August 18th 2021
# By : Ari Fina Bintarti
# INSTALL PACKAGES
install.packages(c('vegan', 'tidyverse'))
install.packages('reshape')
install.packages("ggpubr")
install.packages("car")
install.packages("agricolae")
install.packages("multcompView")
install.packages("gridExtra")
install.packages("ggplot2")
install.packages("sjmisc")
install.packages("sjPlot")
install.packages("MASS")
install.packages("FSA")
install.packages('mvtnorm', dep = TRUE)
install.packages("rcompanion")
install.packages("onewaytests")
install.packages("PerformanceAnalytics")
install.packages("gvlma")
install.packages("userfriendlyscience")
install.packages("ggpmisc")
install.packages("fitdistrplus")
install.packages('BiocManager')
#install.packages("cowplot")
install.packages("dplyr")
install.packages("lme4")
install.packages("nlme")
install.packages("car")
install.packages("multcomp")
library(multcomp)
library(car)
library(BiocManager)
library(vegan)
library(dplyr)
library(plyr)
library(tidyverse)
library(tidyr)
#library(cowplot)
library(ggplot2)
library(reshape)
library(ggpubr)
library(car)
library(agricolae)
library(multcompView)
library(grid)
library(gridExtra)
library(sjmisc)
library(sjPlot)
library(MASS)
library(FSA)
library(rcompanion)
library(onewaytests)
library(ggsignif)
library(PerformanceAnalytics)
library(gvlma)
library(userfriendlyscience)
library(ggpmisc)
library(tibble)
library(fitdistrplus)
library(lme4)
library(nlme)
# SET THE WORKING DIRECTORY
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
# READ PROPORTION OF CHLOROPLAST AND MITOCHONDRIA
#read the unfiltered otu table
otu.unfil <- read.table(file = 'OTU_table_tax.txt', sep = '\t', header = TRUE,check.names = FALSE)
otu.unfil
tax.unfil <- otu.unfil[,'taxonomy']
tax.unfil
#write.csv(tax.unfil, file = "tax.unfil.csv")
dim(otu.unfil) #[1] 325 81
colnames(otu.unfil)
otu.unfil <- otu.unfil[, colnames(otu.unfil) != "taxonomy"] # drop the taxonomy column (extracted above)
dim(otu.unfil)# otu= 325, otu table still has Mock, NC, and PC in the sample
otu.unfil <- column_to_rownames(otu.unfil,var = "OTUID")
sort(rowSums(otu.unfil, na.rm = FALSE, dims = 1), decreasing = F)
#read taxonomy
tax.unfil.ed = read.csv("tax.unfil.ed.csv", header=T)
rownames(tax.unfil.ed) <- rownames(otu.unfil)
dim(tax.unfil.ed) #[1] 325 7
otu.unfil <- rownames_to_column(otu.unfil,var = "OTUID")
tax.unfil.ed <- rownames_to_column(tax.unfil.ed,var = "OTUID")
otu.tax.unfiltered <- merge(otu.unfil, tax.unfil.ed, by="OTUID")
View(otu.tax.unfiltered)
colnames(otu.tax.unfiltered)
#write.csv(otu.tax.unfiltered, file = "otu.tax.unfiltered.csv")
#read the metadata
#############################################################################################################################################################
#READ PROPORTION OF CHLOROPLAST AND MITOCHONDRIA OF EXPERIMENTAL SAMPLES
#select only biological sample from otu table
otu.bio.unfil <- otu.unfil[,1:65] #unselect Mock, NC, and PC from the otu table
dim(otu.bio.unfil)
colnames(otu.bio.unfil)
otu.bio.unfil <- column_to_rownames(otu.bio.unfil, var = "OTUID")
sort(rowSums(otu.bio.unfil, na.rm = FALSE, dims = 1), decreasing = F)
# remove OTUs that do not present in biological sample
otu.bio1.unfil <- otu.bio.unfil[which(rowSums(otu.bio.unfil) > 0),]
dim(otu.bio1.unfil) # [1] 244 64, otu table before plant contaminant removal and normalization using metagenomeSeq package and before decontamination
sort(rowSums(otu.bio1.unfil, na.rm = FALSE, dims = 1), decreasing = F)
sum(otu.bio1.unfil)
# load the otu table
head(otu.bio1.unfil)
otu.bio1.unfil <- rownames_to_column(otu.bio1.unfil, var = "OTUID")
# merge the taxonomy with otu table
head(tax.unfil.ed)
#tax.unfil.ed <- rownames_to_column(tax.unfil.ed, var = "OTUID")
otu.tax.unfil <- merge(otu.bio1.unfil, tax.unfil.ed, by="OTUID")
dim(otu.tax.unfil)
colnames(otu.tax.unfil)
#select only the otu table and "Order" & "Family"
#otu.tax.unfil.ed <- otu.tax.unfil[,c(1:48,52,53)]
#colnames(otu.tax.unfil.ed)
#edit the taxonomy
colnames(otu.tax.unfil)
otu.tax.unfil.ed <- otu.tax.unfil %>%
mutate(Taxonomy = case_when(Order == "Chloroplast" ~ 'Chloroplast',
Phylum == "Cyanobacteria"~ 'Chloroplast',
Family == "Mitochondria" ~ 'Mitochondria',
#Family == "Magnoliophyta" ~ 'Magnoliophyta',
TRUE ~ 'Bacteria')) %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Phylum == "Cyanobacteria"~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
#Family == "Magnoliophyta" ~ 'Plant',
TRUE ~ 'Bacteria'))
tail(otu.tax.unfil.ed)
otu.tax.unfil.ed
colnames(otu.tax.unfil.ed)
otu.tax.unfil.ed1 <- otu.tax.unfil.ed[,c(1:66,75)]
View(otu.tax.unfil.ed1)
colnames(otu.tax.unfil.ed1)
tail(otu.tax.unfil.ed1)
long.dat <- gather(otu.tax.unfil.ed1, Sample, Read, 2:65, factor_key = T)
long.dat
### 1. Plant contaminant proportion
detach(package:plyr)
df.unfil <- long.dat %>%
group_by(Sample, Domain) %>%
summarise(read.number = sum(Read))
df.unfil1 <- df.unfil %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
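# The group_by/summarise/prop.table pattern above repeats for every control type
# below; a small helper (a sketch, not part of the original script) can factor it out:
read_proportion <- function(long.df, group.col) {
  long.df %>%
    group_by(Sample, .data[[group.col]]) %>%
    summarise(read.number = sum(Read), .groups = "drop_last") %>%
    mutate(percent = prop.table(read.number) * 100)
}
# e.g. read_proportion(long.dat, "Domain") reproduces df.unfil1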
#with(df.unfil1, sum(percent[Sample == "1001"]))
library(ggbeeswarm)
library(ggtext)
plot.unfil.dom <- ggplot(df.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_violin(trim = F, scale="width") +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "A. Experimental Sample")+
ylab("Read Proportion (%)")+
theme(legend.position="none",
axis.title.x = element_blank(),
axis.text= element_text(size = 12),
strip.text = element_text(size=12),
plot.title = element_text(size = 14),
axis.title.y = element_markdown(size=13),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
stat_summary(fun="median",geom="point", size=7, color="red", shape=95)
plot.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_plant_proportion.eps",
plot.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
### 2. Chloroplast and Mitochondria contaminant proportion
df.unfil.tax <- long.dat %>%
group_by(Sample, Taxonomy) %>%
summarize(read.number = sum(Read))
df.unfil.tax1 <- df.unfil.tax %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
plot.unfil.tax <- ggplot(df.unfil.tax1, aes(x=Taxonomy, y=percent, fill=Taxonomy))+
geom_violin(trim = F, scale="width") +
#geom_beeswarm(dodge.width = 1, alpha = 0.3)+
#scale_fill_manual(labels = c("A1","A2", "A3","B1","B2","B3","B4","B5","B6","C5","C6","C7"),values=c("#440154FF", "#482677FF","#3F4788FF","#238A8DFF","#1F968BFF","#20A386FF","#29AF7FF","#3CBC75F","#56C667FF","#B8DE29FF","#DCE318FF","#FDE725FF"))+
#scale_fill_viridis(discrete = T)+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
#geom_text(data=sum_rich_plant_new, aes(x=Plant,y=2+max.rich,label=Letter), vjust=0)+
labs(title = "B")+
ylab("Read Proportion (%)")+
theme(legend.position="none",
#axis.text.x=element_blank(),
#axis.ticks.x = element_blank(),
axis.title.x = element_blank(),
axis.text= element_text(size = 14),
strip.text = element_text(size=18, face = 'bold'),
plot.title = element_text(size = 14, face = 'bold'),
#axis.title.y=element_text(size=13,face="bold"),
axis.title.y = element_markdown(size=15,face="bold"),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
#plot.margin = unit(c(0, 0, 0, 0), "cm"))
stat_summary(fun="median",geom="point", size=7, color="red", shape=95)
#width=1, position=position_dodge(),show.legend = FALSE)
plot.unfil.tax
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_chloromito_proportion.eps",
plot.unfil.tax, device=cairo_ps,
width = 7, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF NEGATIVE CONTROLS
# otu table of the negative control
colnames(otu.unfil)
NC.unfiltered <- otu.unfil[,c(1,73:79)]#only negative control
colnames(NC.unfiltered)
NC.unfiltered <- column_to_rownames(NC.unfiltered,var="OTUID")
sort(rowSums(NC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
NC1.unfiltered=NC.unfiltered[which(rowSums(NC.unfiltered) > 0),]
sort(rowSums(NC1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
NC1.unfiltered <- rownames_to_column(NC1.unfiltered,var="OTUID")
NC1.tax.unfiltered <- merge(NC1.unfiltered, tax.unfil.ed, by="OTUID")
NC1.unfiltered <- column_to_rownames(NC1.unfiltered,var="OTUID")
#write.csv(NC1.tax.unfiltered, file = "NC1.tax.unfiltered.csv")
head(NC1.unfiltered)
colnames(NC1.unfiltered)
#edit the taxonomy
colnames(NC1.tax.unfiltered)
NC1.tax.unfil.ed <- NC1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(NC1.tax.unfil.ed)
NC1.tax.unfil.ed1 <- NC1.tax.unfil.ed[,c(1:9)]
colnames(NC1.tax.unfil.ed1)
tail(NC1.tax.unfil.ed1)
str(NC1.tax.unfil.ed1)
library(tidyr)
long.dat.nc.unfil <- gather(NC1.tax.unfil.ed1, Sample, Read, NC1r2:NC7r2, factor_key = T)
long.dat.nc.unfil
#detach(package:plyr)
df.nc.unfil <- long.dat.nc.unfil %>%
group_by(Sample, Domain) %>%
summarise(read.number = sum(Read))
df.nc.unfil1 <- df.nc.unfil %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#with(df.nc.unfil1, sum(percent[Sample == "NC1r2"]))
library(ggbeeswarm)
library(ggtext)
plot.nc.unfil.dom <- ggplot(df.nc.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_violin(trim = F, scale="width") +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "B. Negative Control")+
#ylab("Read Proportion (%)")+
theme(legend.position="none",
axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(size = 13),
#strip.text.x = element_text(size=18, face = 'bold'),
plot.title = element_text(size = 14),
#axis.title.y = element_markdown(size=15,face="bold"),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
stat_summary(fun="median",geom="point", size=10, color="red", shape=95)
plot.nc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_nc_plant_proportion.eps",
plot.nc.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF THE POSITIVE CONTROLS
# otu table of the positive control
colnames(otu.unfil)
PC.unfiltered <- otu.unfil[,c(1,66:72)]#only positive control
PC.unfiltered
PC.unfiltered <- column_to_rownames(PC.unfiltered,var="OTUID")
sort(rowSums(PC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
PC1.unfiltered <- PC.unfiltered[which(rowSums(PC.unfiltered) > 0),]
sort(rowSums(PC1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
PC1.unfiltered <- rownames_to_column(PC1.unfiltered,var="OTUID")
PC1.tax.unfiltered <- merge(PC1.unfiltered, tax.unfil.ed, by="OTUID")
PC1.unfiltered <- column_to_rownames(PC1.unfiltered,var="OTUID")
#write.csv(NC1.tax.unfiltered, file = "NC1.tax.unfiltered.csv")
sum(PC1.unfiltered)
dim(PC1.unfiltered)
#edit the taxonomy
colnames(PC1.tax.unfiltered)
PC1.tax.unfil.ed <- PC1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(PC1.tax.unfil.ed)
PC1.tax.unfil.ed1 <- PC1.tax.unfil.ed[,c(1:9)]
colnames(PC1.tax.unfil.ed1)
#library(tidyr)
long.dat.pc.unfil <- gather(PC1.tax.unfil.ed1, Sample, Read, Mock1r2:Mock7r2, factor_key = T)
long.dat.pc.unfil
#detach(package:plyr)
df.pc.unfil <- long.dat.pc.unfil %>%
group_by(Sample, Domain) %>%
summarise(read.number = sum(Read))
df.pc.unfil1 <- df.pc.unfil %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#library(ggbeeswarm)
#library(ggtext)
plot.pc.unfil.dom <- ggplot(df.pc.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_violin(trim = F, scale="width") +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "C. Positive Control")+
#ylab("Read Proportion (%)")+
theme(legend.position="none",
axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(size = 13),
#strip.text.x = element_text(size=18, face = 'bold'),
plot.title = element_text(size = 14),
#axis.title.y = element_markdown(size=15,face="bold"),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
stat_summary(fun="median",geom="point", size=10, color="red", shape=95)
plot.pc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_pc_plant_proportion.eps",
plot.pc.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF THE RTSF POSITIVE CONTROL
# otu table of the RTSF Zymo
colnames(otu.unfil)
otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID")
zymo.unfiltered <- otu.unfil[,"ZymoMockDNAr2", drop=F]
zymo.unfiltered
#zymo.unfiltered <- column_to_rownames(zymo.unfiltered,var="OTUID")
sort(rowSums(zymo.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
zymo.unfiltered
zymo1.unfiltered <- subset(zymo.unfiltered,rowSums(zymo.unfiltered["ZymoMockDNAr2"]) > 0)
zymo1.unfiltered
sort(rowSums(zymo1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
zymo1.unfiltered <- rownames_to_column(zymo1.unfiltered,var="OTUID")
zymo1.tax.unfiltered <- merge(zymo1.unfiltered, tax.unfil.ed, by="OTUID")
zymo1.unfiltered <- column_to_rownames(zymo1.unfiltered,var="OTUID")
#write.csv(zymo1.tax.unfiltered, file = "zymo1.tax.unfiltered.csv")
sum(zymo1.unfiltered)
dim(zymo1.unfiltered)
#edit the taxonomy
colnames(zymo1.tax.unfiltered)
zymo1.tax.unfil.ed <- zymo1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(zymo1.tax.unfil.ed)
zymo1.tax.unfil.ed1 <- zymo1.tax.unfil.ed[,c(1:3)]
colnames(zymo1.tax.unfil.ed1)
#library(tidyr)
long.dat.zymo.unfil <- zymo1.tax.unfil.ed1
long.dat.zymo.unfil$Read <- long.dat.zymo.unfil$ZymoMockDNAr2
long.dat.zymo.unfil
#detach(package:plyr)
df.zymo.unfil <- long.dat.zymo.unfil %>%
group_by(Domain) %>%
summarise(read.number = sum(Read))
df.zymo.unfil1 <- df.zymo.unfil %>%
#group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#library(ggbeeswarm)
#library(ggtext)
plot.zymo.unfil.dom <- ggplot(df.zymo.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_bar(stat='identity') +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
theme_bw()+
ylab("Read Proportion (%)")+
labs(title = "D. RTSF Positive Control")+
theme(legend.position="none",
axis.title.y = element_markdown(size=13),
axis.title.x = element_blank(),
axis.text.y = element_text(size = 13),
axis.text.x = element_text(size = 13),
plot.title = element_text(size = 14),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
plot.zymo.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_zymo_plant_proportion.eps",
plot.zymo.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF THE RTSF NEGATIVE CONTROL
# otu table of the RTSF NC
colnames(otu.unfil)
#otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID")
RTNC.unfiltered <- otu.unfil[,"RTSFNTCr2", drop=F]
RTNC.unfiltered
sort(rowSums(RTNC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.unfiltered <- subset(RTNC.unfiltered,rowSums(RTNC.unfiltered["RTSFNTCr2"]) > 0)
RTNC1.unfiltered
sort(rowSums(RTNC1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.unfiltered <- rownames_to_column(RTNC1.unfiltered,var="OTUID")
RTNC1.tax.unfiltered <- merge(RTNC1.unfiltered, tax.unfil.ed, by="OTUID")
RTNC1.unfiltered <- column_to_rownames(RTNC1.unfiltered,var="OTUID")
#write.csv(RTNC1.tax.unfiltered, file = "RTNC1.tax.unfiltered.csv")
sum(RTNC1.unfiltered)
dim(RTNC1.unfiltered)
#edit the taxonomy
colnames(RTNC1.tax.unfiltered)
RTNC1.tax.unfil.ed <- RTNC1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(RTNC1.tax.unfil.ed)
RTNC1.tax.unfil.ed1 <- RTNC1.tax.unfil.ed[,c(1:3)]
colnames(RTNC1.tax.unfil.ed1)
#library(tidyr)
long.dat.rtnc.unfil <- RTNC1.tax.unfil.ed1
long.dat.rtnc.unfil$Read <- long.dat.rtnc.unfil$RTSFNTCr2
long.dat.rtnc.unfil
#detach(package:plyr)
df.rtnc.unfil <- long.dat.rtnc.unfil %>%
group_by(Domain) %>%
summarise(read.number = sum(Read))
df.rtnc.unfil1 <- df.rtnc.unfil %>%
#group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#library(ggbeeswarm)
#library(ggtext)
plot.rtnc.unfil.dom <- ggplot(df.rtnc.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_bar(stat='identity') +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "E. RTSF Negative Control")+
theme(legend.position="none",
axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(size = 13),
plot.title = element_text(size = 14),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
plot.rtnc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_rtnc_plant_proportion.eps",
plot.rtnc.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
# COMPILE ALL READ PROPORTION OF PLANT CONTAMINANTS FIGURES
plot.unfil.dom
plot.nc.unfil.dom
plot.pc.unfil.dom
plot.zymo.unfil.dom
plot.rtnc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
library(ggpubr)
PlantContProp <- ggarrange(plot.unfil.dom,plot.nc.unfil.dom,plot.pc.unfil.dom,plot.zymo.unfil.dom,plot.rtnc.unfil.dom, ncol = 3, nrow = 2)
PlantContProp
ggsave("20210604_rPlantContProp.eps",
PlantContProp, device=cairo_ps,
width = 10, height =7,
units= "in", dpi = 600)
#############################################################################################################################################################
# ANALYSIS OF READS AFTER CHLOROPLAST AND MITOCHONDRIA REMOVAL
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
otu <- read.table('OTU_table_tax_filt.txt', sep='\t', header=T, row.names = 1, check.names = FALSE)
otu
head(otu)
colnames(otu)
tax <- otu[,'taxonomy']
str(tax)
#write.csv(tax, file = "tax.fil.csv")
dim(otu)
colnames(otu)
otu <- otu[,-81]
dim(otu) # [1] 298 79, otu table still has Mock, NC, and PC in the sample
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
otu <- rownames_to_column(otu, var = "OTUID")
#read taxonomy
tax.ed = read.csv("tax.fil.ed.csv", header=T)
head(tax.ed)
colnames(otu)
otu <- column_to_rownames(otu, var = "OTUID")
rownames(tax.ed) <- rownames(otu)
dim(tax.ed)
#read the metadata
#select only biological sample from otu table
colnames(otu)
otu.bio <- otu[,1:64] #exclude Mock, NC, and PC from the otu table
colnames(otu.bio)
dim(otu.bio)
#otu.bio <- column_to_rownames(otu.bio,var = "OTUID")
sort(rowSums(otu.bio, na.rm = FALSE, dims = 1), decreasing = F)
# remove OTUs that are not present in any sample
otu.bio1=otu.bio[which(rowSums(otu.bio) > 0),]
dim(otu.bio1) # otu= 218, otu table before normalization using metagenomeSeq package and before decontamination
sort(rowSums(otu.bio1, na.rm = FALSE, dims = 1), decreasing = F)
# merge otu.bio1 with taxonomy to have match taxonomy table
head(otu.bio1)
#otu.bio1 <- rownames_to_column(otu.bio1,var = "OTUID")
head(tax.ed)
tax.ed <- rownames_to_column(tax.ed,var = "OTUID")
otu.bio1 <- rownames_to_column(otu.bio1,var = "OTUID")
otu.bio1.tax <- merge(otu.bio1, tax.ed, by="OTUID")
dim(otu.bio1.tax)
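# merge() keeps only OTUIDs present in both tables, so assuming the taxonomy covers
# every OTU in the biological samples, no OTUs should be dropped by the merge above:
stopifnot(nrow(otu.bio1.tax) == nrow(otu.bio1))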
# separate the sample
# otu table
otu.bac.fil <- otu.bio1.tax[,c(1:65)]
head(otu.bac.fil)
otu.bac.fil <- column_to_rownames(otu.bac.fil,var="OTUID")
sum(otu.bac.fil)
dim(otu.bac.fil)
#otu table of the negative control
NC <- otu[,c(72:78)]#only negative control
NC
#NC <- column_to_rownames(NC,var="OTUID")
sort(rowSums(NC, na.rm = FALSE, dims = 1), decreasing = F)
NC1=NC[which(rowSums(NC) > 0),]
sort(rowSums(NC1, na.rm = FALSE, dims = 1), decreasing = F)
NC1
NC1 <- rownames_to_column(NC1,var="OTUID")
tax.ed
NC1.tax <- merge(NC1, tax.ed, by="OTUID")
#write.csv(NC1.tax, file = "NC1.tax.csv")
dim(NC1)
NC1 <- column_to_rownames(NC1,var="OTUID")
sum(NC1)
#otu table of the positive control
colnames(otu)
PC <- otu[,c(65:71)]#only positive control
PC
#PC <- column_to_rownames(PC,var="OTUID")
sort(rowSums(PC, na.rm = FALSE, dims = 1), decreasing = F)
PC1=PC[which(rowSums(PC) > 0),]
sort(rowSums(PC1, na.rm = FALSE, dims = 1), decreasing = F)
PC1
PC1 <- rownames_to_column(PC1,var="OTUID")
tax.ed
PC1.tax <- merge(PC1, tax.ed, by="OTUID")
#write.csv(PC1.tax, file = "PC1.tax.csv")
dim(PC1)
PC1 <- column_to_rownames(PC1,var="OTUID")
sum(PC1)
# otu table of the RTSF Zymo
colnames(otu)
zymo.fil <- otu[,"ZymoMockDNAr2", drop=F]
zymo.fil
#zymo.fil <- column_to_rownames(zymo.fil,var="OTUID") # not needed: zymo.fil already carries the OTU IDs as rownames (inherited from otu)
sort(rowSums(zymo.fil, na.rm = FALSE, dims = 1), decreasing = F)
zymo.fil
zymo1.fil <- subset(zymo.fil,rowSums(zymo.fil["ZymoMockDNAr2"]) > 0)
zymo1.fil
sort(rowSums(zymo1.fil, na.rm = FALSE, dims = 1), decreasing = F)
zymo1.fil <- rownames_to_column(zymo1.fil,var="OTUID")
zymo1.tax.fil <- merge(zymo1.fil, tax.ed, by="OTUID")
zymo1.fil <- column_to_rownames(zymo1.fil,var="OTUID")
#write.csv(zymo1.tax.fil, file = "zymo1.tax.fil.csv")
sum(zymo1.fil)
dim(zymo1.fil)
# otu table of the RTSF NC
colnames(otu)
RTNC.fil <- otu[,"RTSFNTCr2", drop=F]
RTNC.fil
sort(rowSums(RTNC.fil, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.fil <- subset(RTNC.fil,rowSums(RTNC.fil["RTSFNTCr2"]) > 0)
RTNC1.fil
sort(rowSums(RTNC1.fil, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.fil <- rownames_to_column(RTNC1.fil,var="OTUID")
RTNC1.tax.fil <- merge(RTNC1.fil, tax.ed, by="OTUID")
RTNC1.fil <- column_to_rownames(RTNC1.fil,var="OTUID")
#write.csv(RTNC1.tax.fil, file = "RTNC1.tax.fil.csv")
sum(RTNC1.fil)
dim(RTNC1.fil)
#####################################################################################################################################
######################################################################################################################################
### Rarefaction curves ######
# using GlobalPatterns
library(phyloseq)
# 1. rarefaction curve for otu table after plant contaminant removal before microbial decontamination and normalization
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
otu <- read.table('OTU_table_tax_filt.txt', sep='\t', header=T, row.names = 1, check.names = FALSE)
otu
otu #otu table after plant contaminant removal
colnames(otu)
head(otu)
otu <- otu[,-81]
dim(otu) # [1] 298 79, otu table still has Mock, NC, and PC in the sample
colnames(otu)
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
# change name of ZymoMockDNAr2 to RTSF_ZymoMockDNAr2
library(dplyr)
is.data.frame(otu)
R.utils::detachPackage("plyr")
otu <- otu %>%
dplyr::rename(RTSF_ZymoMockDNAr2=ZymoMockDNAr2)
colnames(otu)
# make phyloseq otu table and taxonomy
otu.phyl = otu_table(otu, taxa_are_rows = TRUE)
head(tax.ed)
tax.ed <- column_to_rownames(tax.ed, var = "OTUID")
tax.phyl = tax_table(as.matrix(tax.ed))
# make phyloseq map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
phyl.obj <- merge_phyloseq(otu.phyl,tax.phyl,map.phyl)
phyl.obj
otu_table(phyl.obj)
#set seed
set.seed(42)
#rarefy the data
# ggrare() is a helper function defined in the separate "generating_rarecurve.r" file and must be loaded before the call below
# data = phyloseq object of the plant-contaminant-filtered, non-normalized otu table
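# A minimal way to load the helper, assuming the file sits in the current working
# directory (adjust the path if it lives elsewhere):
# source("generating_rarecurve.r")
# Quick, unstyled cross-check of the same curves with vegan::rarecurve(), assuming
# vegan is already attached (it is used elsewhere in this script):
rarecurve(t(as(otu_table(phyl.obj), "matrix")), step = 1000, label = FALSE)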
p.rare <- ggrare(phyl.obj, step = 1, color = "sample_type", label = "sample_type", se = FALSE)
#set up your own color palette
#Palette <- c("#440154FF","#1F968BFF","#FDE725FF",)
#names(Palette) <- levels(sample_data(phyl.obj)$sample_type)
#Palette
#plot the rarecurve
#p <- ggrare(psdata, step = 1000, color = "SampleType", label = "Sample", se = FALSE)
library(ggtext)
rare <- p.rare +
scale_color_manual(labels = c("Experimental Sample", "Negative Control", "Positive Control", "RTSF Negative Control", "RTSF Positive Control"), values = c("#88CCEE", "#CC6677", "#DDCC77", "#117733", "#332288"))+
theme_bw()+
scale_size_manual(values = 60)+
ylab("Number of OTUs")+
xlab("Number of Reads")+
labs(color='Sample Type:') +
theme( strip.text.x = element_text(size=14, face='bold'),
axis.text.x=element_text(size = 14),
axis.text.y = element_text(size = 14),
strip.text.y = element_text(size=18, face = 'bold'),
plot.title = element_text(size =20 ,face='bold'),
axis.title.y = element_text(size=15,face="bold"),
axis.title.x = element_text(size=15,face="bold"),
legend.position = "right",
legend.title = element_text(size=15, face ="bold"),
legend.text = element_text(size=14),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
plot(rare)
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604rarefactioncurve.pdf",
rare, device= "pdf",
width = 9, height = 7,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### bacterial taxa composition of all samples (after plant contaminant removal)
# make phyloseq object
otu #otu table after plant contaminant removal
colnames(otu)
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
# make phyloseq otu table and taxonomy
head(otu)
colnames(otu)
colnames(otu)[80] <- "RTSF_ZymoMockDNAr2"
otu.phyl = otu_table(otu, taxa_are_rows = TRUE)
head(tax.ed)
if ("OTUID" %in% colnames(tax.ed)) tax.ed <- column_to_rownames(tax.ed, var = "OTUID") # tax.ed may already carry the OTU IDs as rownames from the rarefaction section above
tax.phyl = tax_table(as.matrix(tax.ed))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
map$batch <- as.factor(map$batch)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
phyl.obj <- merge_phyloseq(otu.phyl,tax.phyl,map.phyl)
phyl.obj
# merge taxa by class
# 1. class - Bacteria
bac.cl <- tax_glom(phyl.obj, taxrank = "Class", NArm = F)
bac.cl.ra <- transform_sample_counts(bac.cl, function(x) x/sum(x))
bac.cl.ra
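# transform_sample_counts() above rescales each sample to relative abundances, so every
# sample total should be ~1 (assuming no sample lost all of its reads at this rank):
summary(sample_sums(bac.cl.ra))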
df.cl <- psmelt(bac.cl.ra) %>%
group_by(batch,Sample, Class) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.cl$Class <- as.character(df.cl$Class)
#df.cl$Class[df.cl$Mean < 0.1] <- "Other"
# barplot of bacterial/archaeal composition across pods at Phylum level
#library(rcartocolor)
#display_carto_all(colorblind_friendly = TRUE)
#my_colors = carto_pal(12, "Safe")
#my_colors
# New facet label names for plant variable
#plant.labs <- c("Plant: A", "Plant: B", "Plant: C")
#names(plant.labs) <- c("A", "B", "C")
# Create the plot
#install.packages("pals")
library(pals)
cl <- ggplot(data=df.cl, aes(x=Sample, y=Mean, fill=Class))
plot.cl <- cl +
geom_bar(aes(), stat="identity", position="fill") +
scale_fill_manual(values=as.vector(stepped(n=24))) +
#scale_fill_manual(values=c('#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c','#f58231', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', 'lightslateblue', '#000000', 'tomato','hotpink2'))+
#scale_fill_manual(values=c("#44AA99", "#332288", "#117733","#CC6677","#DDCC77", "#88CCEE","#661100","#AA4499" ,"#888888"))+
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
#labs(y= "Mean Relative Abundance", x="Plant")+
labs(y= "Mean Relative Abundance")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=12, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=13,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=1,bycol=TRUE))
plot.cl
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_class.eps",
plot.cl, device = "eps",
width = 9.5, height =6.5,
units= "in", dpi = 600)
# merge taxa by genus
# 2. genus - Bacteria
bac.gen <- tax_glom(phyl.obj, taxrank = "Genus", NArm = F)
bac.gen.ra <- transform_sample_counts(bac.gen, function(x) x/sum(x))
bac.gen.ra #153 taxa
df.gen <- psmelt(bac.gen.ra) %>%
group_by(batch,Sample, Genus) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.gen$Genus <- as.character(df.gen$Genus)
df.gen$Genus[df.gen$Mean < 0.03] <- "Other (less than 3%)"
library(randomcoloR)
set.seed(1)
n <- 45
palette <- distinctColorPalette(n)
col=palette
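# The hard-coded n = 45 only works if it covers the number of genus-level groups left
# after lumping rare taxa into "Other"; a quick check before plotting:
length(unique(df.gen$Genus)) # should be <= n, otherwise scale_fill_manual() will fail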
gen <- ggplot(data=df.gen, aes(x=Sample, y=Mean, fill=Genus))
plot.gen <- gen +
geom_bar(aes(), stat="identity", position="fill") +
#scale_colour_viridis(discrete = T)+
#facet_grid(. ~ batch) +
scale_fill_manual(name="Genus",values=col) +
#scale_fill_manual(values=as.vector(stepped(n=24))) +
#scale_fill_manual(name="Genus",values=as.vector(polychrome(n=36))) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=10, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=13,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=2,bycol=TRUE))
plot.gen
plot.gen1 <- plot.gen +
facet_wrap(~ batch, scales="free_x", nrow = 2)+
theme(strip.background =element_rect(fill="grey"))+
theme(strip.text = element_text(colour = 'black', size = 14, face = 'bold'))
plot.gen1
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_genus_all.eps",
plot.gen1, device = "eps",
width = 15, height = 8,
units= "in", dpi = 600)
# merge taxa by family
# 2. Family - Bacteria
bac.fam <- tax_glom(phyl.obj, taxrank = "Family", NArm = F)
bac.fam.ra <- transform_sample_counts(bac.fam, function(x) x/sum(x))
bac.fam.ra #87 taxa
df.fam <- psmelt(bac.fam.ra) %>%
group_by(batch,Sample, Family) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.fam$Family <- as.character(df.fam$Family)
df.fam$Family[df.fam$Mean < 0.01] <- "Other (less than 1%)"
fam <- ggplot(data=df.fam, aes(x=Sample, y=Mean, fill=Family))
plot.fam <- fam +
geom_bar(aes(), stat="identity", position="fill") +
scale_fill_manual(name="Family",values=col) +
#scale_fill_manual(name="Family",values=as.vector(polychrome(n=36))) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance", x="Sample Type")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=12, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=13,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=2,bycol=TRUE))
plot.fam
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_genus.eps",
plot.gen, device = "eps",
width = 13, height =7.5,
units= "in", dpi = 600)
#####################################################################################################################################
#####################################################################################################################################
## 2. bacterial taxa found in the negative control
# make phyloseq object
# otu table of negative control only
dim(NC)
NC <- rownames_to_column(NC, var = "OTUID")
head(NC)
dim(RTNC.fil)
head(RTNC.fil)
RTNC.fil <- rownames_to_column(RTNC.fil, var = "OTUID")
colnames(RTNC.fil)
#colnames(RTNC.fil)[2] <- "RTSF_NC"
ncrtnc <- merge(NC, RTNC.fil)
head(ncrtnc)
colnames(ncrtnc)
ncrtnc <- column_to_rownames(ncrtnc, var = "OTUID")
sort(rowSums(ncrtnc, na.rm = FALSE, dims = 1), decreasing = F)
ncrtnc1 <- ncrtnc[which(rowSums(ncrtnc) > 0),]
sort(rowSums(ncrtnc1, na.rm = FALSE, dims = 1), decreasing = F)
# taxonomy negative control
head(ncrtnc1)
ncrtnc1 <- rownames_to_column(ncrtnc1, var = "OTUID")
head(tax.ed)
tax.ed <- rownames_to_column(tax.ed, var = "OTUID")
ncrtnc1.tax <- merge(ncrtnc1, tax.ed, by="OTUID")
colnames(ncrtnc1.tax)
tax.ncrtnc <- ncrtnc1.tax[,c(1,10:18)]
head(tax.ncrtnc)
# make phyloseq otu table and taxonomy
ncrtnc1 <- column_to_rownames(ncrtnc1, var = "OTUID")
ncrtnc.phyl = otu_table(ncrtnc1, taxa_are_rows = TRUE)
tax.ncrtnc <- column_to_rownames(tax.ncrtnc, var = "OTUID")
tax.ncrtnc.phyl = tax_table(as.matrix(tax.ncrtnc))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
ncrtnc.phyl.obj <- merge_phyloseq(ncrtnc.phyl,tax.ncrtnc.phyl,map.phyl)
ncrtnc.phyl.obj
# 1. genus - Bacteria
ncrtnc.gen <- tax_glom(ncrtnc.phyl.obj, taxrank = "Genus.ed", NArm = F)
ncrtnc.gen.ra <- transform_sample_counts(ncrtnc.gen, function(x) x/sum(x))
ncrtnc.gen.ra #61 taxa
df.ncrtnc.gen <- psmelt(ncrtnc.gen.ra) %>%
group_by(Sample,Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.ncrtnc.gen$Genus.ed <- as.character(df.ncrtnc.gen$Genus.ed)
df.ncrtnc.gen$percent.mean <- df.ncrtnc.gen$Mean*100
ncrtnc.bubble.plot <- ggplot(data=df.ncrtnc.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", x ="Negative Controls", y="Taxa")+
theme(legend.key=element_blank(),
axis.title = element_markdown(size=15,face="bold"),
axis.text.x = element_text(colour = "black", size = 12, face = "bold", vjust = 0.95, hjust = 1, angle=45),
axis.text.y = element_text(colour = "black", face = "bold", size = 11),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
ncrtnc.bubble.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_NC_RTSFNC.bubble.plot.tiff",
ncrtnc.bubble.plot, device = "tiff",
width = 13.8, height =7.5,
units= "in", dpi = 600)
## 3. bacterial taxa found in the positive control
# make phyloseq object
# otu table of positive control and RTSF_Zymo mock
dim(PC)
colnames(PC)
PC <- rownames_to_column(PC, var = "OTUID")
dim(zymo.fil)
colnames(zymo.fil)
zymo.fil <- rownames_to_column(zymo.fil, var = "OTUID")
colnames(zymo.fil)[2] <- "RTSF_ZymoMockDNAr2"
colnames(zymo.fil)
#zymo.fil <- rownames_to_column(zymo.fil, var = "OTUID")
PC.zymo <- merge(PC, zymo.fil)
PC.zymo <- column_to_rownames(PC.zymo, var = "OTUID")
sort(rowSums(PC.zymo, na.rm = FALSE, dims = 1), decreasing = F)
PC.zymo1 <- PC.zymo[which(rowSums(PC.zymo) > 0),]
sort(rowSums(PC.zymo1, na.rm = FALSE, dims = 1), decreasing = F)
colnames(PC.zymo1)
# taxonomy positive control
head(PC.zymo1)
PC.zymo1 <- rownames_to_column(PC.zymo1, var = "OTUID")
head(tax.ed)
if (!("OTUID" %in% colnames(tax.ed))) tax.ed <- rownames_to_column(tax.ed, var = "OTUID") # OTUID is already a column if the negative-control section above was run
PC.zymo1.tax <- merge(PC.zymo1, tax.ed, by="OTUID")
colnames(PC.zymo1.tax)
tax.PC.zymo <- PC.zymo1.tax[,c(1,10:18)]
head(tax.PC.zymo)
# make phyloseq otu table and taxonomy
PC.zymo1 <- column_to_rownames(PC.zymo1, var = "OTUID")
PC.zymo.phyl = otu_table(PC.zymo1, taxa_are_rows = TRUE)
tax.PC.zymo <- column_to_rownames(tax.PC.zymo, var = "OTUID")
tax.PC.zymo.phyl = tax_table(as.matrix(tax.PC.zymo))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
colnames(map)
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
PC.zymo.phyl.obj <- merge_phyloseq(PC.zymo.phyl,tax.PC.zymo.phyl,map.phyl)
PC.zymo.phyl.obj #121 taxa
# 1. genus - Bacteria
PC.zymo.gen <- tax_glom(PC.zymo.phyl.obj, taxrank = "Genus.ed", NArm = F)
PC.zymo.gen.ra <- transform_sample_counts(PC.zymo.gen, function(x) x/sum(x))
PC.zymo.gen.ra #61 taxa
df.PC.zymo.gen <- psmelt(PC.zymo.gen.ra) %>%
group_by(Sample,Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.PC.zymo.gen$Genus.ed <- as.character(df.PC.zymo.gen$Genus.ed)
df.PC.zymo.gen$percent.mean <- df.PC.zymo.gen$Mean*100
PC.zymo.bubble.plot <- ggplot(data=df.PC.zymo.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", y="Taxa")+
theme(legend.key=element_blank(),
axis.title.y = element_markdown(size=15,face="bold"),
axis.title.x = element_blank(),
axis.text.x = element_text(colour = "black", size = 12, face = "bold", vjust = 0.95, angle=45, hjust = 1),
axis.text.y = element_text(colour = "black", face = "bold", size = 11),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
PC.zymo.bubble.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_PC.zymo.bubble.plot.tiff",
PC.zymo.bubble.plot, device = "tiff",
width = 12.5, height =7,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### bacterial taxa composition of all samples (before plant contaminant removal and before microbial decontamination and normalization)
# make phyloseq object
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
# unfiltered otu table
otu.unfil
colnames(otu.unfil)
head(otu.unfil)
colnames(otu.unfil)[80] <- "RTSF_ZymoMockDNAr2"
otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID")
sort(rowSums(otu.unfil, na.rm = FALSE, dims = 1), decreasing = F)
# make phyloseq otu table and taxonomy
otu.unfil.phyl = otu_table(otu.unfil, taxa_are_rows = TRUE)
head(tax.unfil.ed)
tax.unfil.ed <- column_to_rownames(tax.unfil.ed, var = "OTUID")
tax.unfil.phyl = tax_table(as.matrix(tax.unfil.ed))
# make phyloseq map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
phyl.unfil.obj <- merge_phyloseq(otu.unfil.phyl,tax.unfil.phyl,map.phyl)
phyl.unfil.obj
otu_table(phyl.unfil.obj)
# merge taxa by class
# 1. class - Bacteria
bac.unfil.cl <- tax_glom(phyl.unfil.obj, taxrank = "Class", NArm = F)
bac.unfil.cl.ra <- transform_sample_counts(bac.unfil.cl, function(x) x/sum(x))
bac.unfil.cl.ra #23 taxa
otu_table(bac.unfil.cl.ra)
df.unfil.cl <- psmelt(bac.unfil.cl.ra) %>%
group_by(sample_type, Class) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.unfil.cl$Class <- as.character(df.unfil.cl$Class)
#df.cl$Class[df.cl$Mean < 0.1] <- "Other"
# Create the plot
#install.packages("pals")
library(pals)
unfil.cl <- ggplot(data=df.unfil.cl, aes(x=sample_type, y=Mean, fill=Class))
plot.unfil.cl <- unfil.cl +
geom_bar(aes(), stat="identity", position="fill") +
scale_fill_manual(values=as.vector(stepped(n=24))) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance", x="Sample Type")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text=element_text(size=14),
axis.title = element_markdown(size=15,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=1,bycol=TRUE))
plot.unfil.cl
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_class.unfiltered.eps",
plot.unfil.cl, device = "eps",
width = 9.5, height =6.5,
units= "in", dpi = 600)
# merge taxa by genus
# 2. genus - Bacteria
bac.unfil.gen <- tax_glom(phyl.unfil.obj, taxrank = "Genus.ed", NArm = F)
bac.unfil.gen.ra <- transform_sample_counts(bac.unfil.gen, function(x) x/sum(x))
bac.unfil.gen.ra #209 taxa
df.unfil.gen <- psmelt(bac.unfil.gen.ra) %>%
group_by(batch, Sample, Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.unfil.gen$Genus.ed <- as.character(df.unfil.gen$Genus.ed)
df.unfil.gen$Genus.ed[df.unfil.gen$Mean < 0.001] <- "Other (less than 0.1%)"
library(randomcoloR)
set.seed(1)
n <- 50
palette <- distinctColorPalette(n)
col=palette
unfil.gen <- ggplot(data=df.unfil.gen, aes(x=Sample, y=Mean, fill=Genus.ed))
plot.unfil.gen <- unfil.gen +
geom_bar(aes(), stat="identity", position="fill") +
#scale_fill_manual(name="Genus", values=as.vector(stepped(n=24))) +
scale_fill_manual(name="Genus",values=col) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance", x="Sample")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=10, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=15,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=2,bycol=TRUE))
plot.unfil.gen
plot.unfil.gen1 <- plot.unfil.gen +
facet_wrap(~ batch, scales="free_x", nrow = 2)+
theme(strip.background =element_rect(fill="grey"))+
theme(strip.text = element_text(colour = 'black', size = 14, face = 'bold'))
plot.unfil.gen1
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_genus_all_unfiltered.eps",
plot.unfil.gen1, device = "eps",
width = 15, height =8,
units= "in", dpi = 600)
## make a bubble plot for all samples
df.unfil.gen <- psmelt(bac.unfil.gen.ra) %>%
group_by(batch, Sample, Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.unfil.gen$Genus.ed <- as.character(df.unfil.gen$Genus.ed)
df.unfil.gen$Genus.ed[df.unfil.gen$Mean < 0.0001] <- "Other (less than 0.01%)"
df.unfil.gen$percent.mean <- df.unfil.gen$Mean*100
unfil.gen.bubble.plot <- ggplot(data=df.unfil.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", x ="Sample", y="Taxa")+
theme(legend.key=element_blank(),
axis.title = element_markdown(size=15,face="bold"),
axis.text.x = element_text(colour = "black", size = 8, vjust = 0.5, hjust = 1, angle=90),
axis.text.y = element_text(colour = "black", size = 10),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
unfil.gen.bubble.plot
unfil.gen.bubble.plot1 <- unfil.gen.bubble.plot +
facet_wrap(~ batch, scales="free_x", nrow = 1)+
theme(strip.background =element_rect(fill="grey"))+
theme(strip.text = element_text(colour = 'black', size = 14, face = 'bold'))
unfil.gen.bubble.plot1
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_unfil.gen.bubble.plot1.tiff",
unfil.gen.bubble.plot1, device = "tiff",
width = 23, height =10,
units= "in", dpi = 600)
#####################################################################################################################################
#####################################################################################################################################
## 2. bacterial taxa found in the negative control before plant contamination
# make phyloseq object
# otu table of negative control only
colnames(NC.unfiltered)
head(NC.unfiltered)
sort(rowSums(NC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
NC.unfiltered1 <- NC.unfiltered[which(rowSums(NC.unfiltered) > 0),]
# taxonomy negative control
head(NC.unfiltered1)
NC.unfiltered1 <- rownames_to_column(NC.unfiltered1, var = "OTUID")
head(tax.unfil.ed)
if (!("OTUID" %in% colnames(tax.unfil.ed))) tax.unfil.ed <- rownames_to_column(tax.unfil.ed, var = "OTUID") # use the unfiltered taxonomy here; tax.ed is the plant-filtered table
colnames(tax.unfil.ed)
NC.unfiltered1.tax <- merge(NC.unfiltered1, tax.unfil.ed, by="OTUID")
colnames(NC.unfiltered1.tax)
tax.NC.unfiltered1 <- NC.unfiltered1.tax[,c(1,10:18)]
head(tax.NC.unfiltered1)
# make phyloseq otu table and taxonomy
NC.unfiltered1 <- column_to_rownames(NC.unfiltered1, var = "OTUID")
NC.unfiltered1.phyl = otu_table(NC.unfiltered1, taxa_are_rows = TRUE)
tax.NC.unfiltered1 <- column_to_rownames(tax.NC.unfiltered1, var = "OTUID")
tax.NC.unfiltered1.phyl = tax_table(as.matrix(tax.NC.unfiltered1))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
NC.unfiltered1.phyl.obj <- merge_phyloseq(NC.unfiltered1.phyl,tax.NC.unfiltered1.phyl,map.phyl)
NC.unfiltered1.phyl.obj
# 1. genus - Bacteria
NC.unfiltered1.gen <- tax_glom(NC.unfiltered1.phyl.obj, taxrank = "Genus.ed", NArm = F)
NC.unfiltered1.gen.ra <- transform_sample_counts(NC.unfiltered1.gen, function(x) x/sum(x))
NC.unfiltered1.gen.ra #52 taxa
df.NC.unfiltered1.gen <- psmelt(NC.unfiltered1.gen.ra) %>%
group_by(Sample,Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.NC.unfiltered1.gen$Genus.ed <- as.character(df.NC.unfiltered1.gen$Genus.ed)
df.NC.unfiltered1.gen$percent.mean <- df.NC.unfiltered1.gen$Mean*100
NC.unfiltered1.bubble.plot <- ggplot(data=df.NC.unfiltered1.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", x ="Negative Controls", y="Taxa")+
theme(legend.key=element_blank(),
axis.title = element_markdown(size=15,face="bold"),
axis.text.x = element_text(colour = "black", size = 12, face = "bold", vjust = 0.95, hjust = 1, angle=45),
axis.text.y = element_text(colour = "black", face = "bold", size = 11),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
NC.unfiltered1.bubble.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_NC.unfiltered1.bubble.plot.tiff",
NC.unfiltered1.bubble.plot, device = "tiff",
width = 13.8, height =7.5,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### Shared taxa among all total samples (before plant contaminants removal)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
## 1.calculate the occupancy of each OTUID across all samples
# unfiltered otu
# unfiltered otu table
otu.unfil
colnames(otu.unfil)
head(otu.unfil)
if ("OTUID" %in% colnames(otu.unfil)) otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID") # already has OTU IDs as rownames if the earlier unfiltered-composition section was run
sort(rowSums(otu.unfil, na.rm = FALSE, dims = 1), decreasing = F)
# unfiltered taxonomy
head(tax.unfil.ed)
#tax.unfil.ed <- column_to_rownames(tax.unfil.ed, var = "OTUID")
if (!("OTUID" %in% colnames(tax.unfil.ed))) tax.unfil.ed <- rownames_to_column(tax.unfil.ed, var = "OTUID")
# read map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
##build a long data frame joining unfiltered otu table, map, and taxonomy
longdf.unfil <- data.frame(OTUID=as.factor(rownames(otu.unfil)), otu.unfil, check.names = F) %>%
  gather(sample_id, abun, -OTUID) %>% #keep the same column naming as in the mapping file, calling counts "abun" (abundance)
  left_join(map) %>% #adds the info from the mapping file (joined by the 'sample_id' column)
left_join(tax.unfil.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(OTUID, sample_id) %>%
summarise(n=sum(abun))
#df <- data.frame(OTUID=as.factor(rownames(otu.unfil)), otu.unfil, check.names = F)
#colnames(df)
#ldf <- gather(df,sample_id, abun, -OTUID)
##build the new table: OTUID as rownames and sample_id as colnames
widedf.unfil <- as.data.frame(spread(longdf.unfil, OTUID, n, fill=0))
rownames(widedf.unfil) <- widedf.unfil[,1]
widedf.unfil <- widedf.unfil[,-1]
widedf.unfil <- t(widedf.unfil)
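# The gather/spread round-trip above should only reshape the counts; assuming every
# sample_id matches between the OTU table and the metadata, totals are preserved:
stopifnot(sum(widedf.unfil) == sum(otu.unfil))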
## calculate the occupancy of each OTUID across all samples
widedf.unfil.PA <- 1*((widedf.unfil>0)==1)
Occ.unfil <- rowSums(widedf.unfil.PA)/ncol(widedf.unfil.PA)
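# Occupancy is the fraction of samples in which an OTU is detected (presence/absence),
# so every value should fall within [0, 1]:
stopifnot(all(Occ.unfil >= 0 & Occ.unfil <= 1))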
df.Occ.unfil <- as.data.frame(Occ.unfil)
df.Occ.unfil <- rownames_to_column(df.Occ.unfil, var = "OTUID")
df.Occ.unfil.tax <- merge(df.Occ.unfil, tax.unfil.ed, by="OTUID")
sort.df.Occ.unfil.tax <- df.Occ.unfil.tax[order(df.Occ.unfil.tax$Occ.unfil, decreasing = TRUE),]
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
#write.csv(sort.df.Occ.unfil.tax, file = "sort.df.Occ.unfil.tax_all.csv")
##calculate the mean relative abundance of each OTUID across all samples
widedf.unfil.RA <- decostand(widedf.unfil, method="total", MARGIN=2)
widedf.unfil.RA
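# decostand(..., method = "total", MARGIN = 2) rescales each sample (column) to sum to 1,
# which can be confirmed quickly:
summary(colSums(widedf.unfil.RA))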
relabund.unfil <- rowSums(widedf.unfil.RA)
df.relabund.unfil <- as.data.frame(relabund.unfil)
df.relabund.unfil$meanRelAbund <- df.relabund.unfil$relabund.unfil/ncol(widedf.unfil.RA)
df.relabund.unfil = rownames_to_column(df.relabund.unfil, var = "OTUID")
sum(df.relabund.unfil$meanRelAbund)
sort.relabund.unfil <- df.relabund.unfil[order(df.relabund.unfil$meanRelAbund, decreasing = TRUE),]
##merge OCC table and mean relative abundance table
df.Occ.ra.unfil <- merge(df.Occ.unfil, df.relabund.unfil, by.x =c("OTUID"), by.y = c("OTUID"))
df.Occ.ra.unfil.tax <- merge(df.Occ.ra.unfil, tax.unfil.ed, by="OTUID")
sort.df.Occ.ra.unfil.tax <- df.Occ.ra.unfil.tax[order(df.Occ.ra.unfil.tax$Occ.unfil, decreasing = TRUE),]
#select OTUIDs with occupancy greater than or equal to 50%
Occ50.unfil <- subset(sort.df.Occ.ra.unfil.tax , sort.df.Occ.ra.unfil.tax$Occ.unfil>= 0.5)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
#write.csv(Occ50.unfil, file = "Occ50.unfil.csv")
Occ50.unfil.ed <- read.csv("Occ50.unfil.ed.csv")
### Occupancy-mean relative abundance across all total samples before plant contaminants removal
Occ50.unfil.plot <- ggplot(Occ50.unfil.ed,aes(x=fct_reorder(OTUID.genus, Occ.unfil, .desc=T), y=Occ.unfil))+
geom_bar(aes(), stat="identity")+
#coord_flip()+
#scale_fill_manual(values = palette)+
labs(y= "Occupancy", x="OTU.ID")+
theme_bw()+
coord_flip()+
theme(plot.title = element_text(size=16, face="bold"),
axis.text=element_text(size=12, hjust = 0.5),
axis.title=element_text(size=14,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.position = "right",
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
Occ50.unfil.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_Occ50.unfil.eps",
Occ50.unfil.plot, device = "eps",
width = 9, height =6.5,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### Shared taxa among samples (after plant contaminants removal)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
## 1.calculate the occupancy of each OTUID across all samples
# plant filtered otu
otu
colnames(otu)
#otu <- column_to_rownames(otu, var = "OTUID")
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
# filtered taxonomy
head(tax.ed)
#tax.ed <- column_to_rownames(tax.ed, var = "OTUID")
if (!("OTUID" %in% colnames(tax.ed))) tax.ed <- rownames_to_column(tax.ed, var = "OTUID")
# read map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
##build a long data frame joining unfiltered otu table, map, and taxonomy
longdf.fil <- data.frame(OTUID=as.factor(rownames(otu)), otu, check.names = F) %>%
  gather(sample_id, abun, -OTUID) %>% #keep the same column naming as in the mapping file, calling counts "abun" (abundance)
  left_join(map) %>% #adds the info from the mapping file (joined by the 'sample_id' column)
left_join(tax.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(OTUID, sample_id) %>%
summarise(n=sum(abun))
##build the new table: OTUID as rownames and sample_id as colnames
widedf.fil <- as.data.frame(spread(longdf.fil, OTUID, n, fill=0))
rownames(widedf.fil) <- widedf.fil[,1]
widedf.fil <- widedf.fil[,-1]
widedf.fil <- t(widedf.fil)
## calculate the occupancy of each OTUID across all samples
widedf.fil.PA <- 1*((widedf.fil>0)==1)
Occ.fil <- rowSums(widedf.fil.PA)/ncol(widedf.fil.PA)
df.Occ.fil <- as.data.frame(Occ.fil)
df.Occ.fil <- rownames_to_column(df.Occ.fil, var = "OTUID")
df.Occ.fil.tax <- merge(df.Occ.fil, tax.ed, by="OTUID")
sort.df.Occ.fil.tax <- df.Occ.fil.tax[order(df.Occ.fil.tax$Occ.fil, decreasing = TRUE),]
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
write.csv(sort.df.Occ.fil.tax, file = "sort.df.Occ.fil.tax_all.csv")
#####################################################################################################################################
######################################################################################################################################
## 2.calculate the occupancy of each OTUID across all biological samples and all negative controls before plant contaminants removal
# subset otu only biological samples and negative controls
colnames(otu.unfil)
otu.bio.nc.unfil <- data.frame(otu.unfil[,c(1:64,72:78)], check.names = F)
colnames(otu.bio.nc.unfil)
##build a long data frame joining unfiltered otu table, map, and taxonomy
longdf.bio.nc.unfil <- data.frame(OTUID=as.factor(rownames(otu.bio.nc.unfil)), otu.bio.nc.unfil, check.names = F) %>%
  gather(sample_id, abun, -OTUID) %>% #keep the same column naming as in the mapping file, calling counts "abun" (abundance)
  left_join(map) %>% #adds the info from the mapping file (joined by the 'sample_id' column)
left_join(tax.unfil.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(OTUID, sample_id) %>%
summarise(n=sum(abun))
##build the new table: OTUID as rownames and sample_id as colnames
widedf.bio.nc.unfil <- as.data.frame(spread(longdf.bio.nc.unfil, OTUID, n, fill=0))
rownames(widedf.bio.nc.unfil) <- widedf.bio.nc.unfil[,1]
widedf.bio.nc.unfil <- widedf.bio.nc.unfil[,-1]
widedf.bio.nc.unfil <- t(widedf.bio.nc.unfil)
colnames(widedf.bio.nc.unfil)
## calculate the occupancy of each OTUID across all biological samples and all negative controls
widedf.bio.nc.unfil.PA <- 1*((widedf.bio.nc.unfil>0)==1)
Occ.bio.nc.unfil <- rowSums(widedf.bio.nc.unfil.PA)/ncol(widedf.bio.nc.unfil.PA)
df.Occ.bio.nc.unfil <- as.data.frame(Occ.bio.nc.unfil)
df.Occ.bio.nc.unfil <- rownames_to_column(df.Occ.bio.nc.unfil, var = "OTUID")
df.Occ.bio.nc.unfil.tax <- merge(df.Occ.bio.nc.unfil, tax.unfil.ed, by="OTUID")
sort.df.Occ.bio.nc.unfil.tax <- df.Occ.bio.nc.unfil.tax[order(df.Occ.bio.nc.unfil.tax$Occ.bio.nc.unfil, decreasing = TRUE),]
View(sort.df.Occ.bio.nc.unfil.tax)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
write.csv(sort.df.Occ.bio.nc.unfil.tax, file = "sort.df.Occ.unfil.tax_BioNc.csv")
#####################################################################################################################################
######################################################################################################################################
## calculate the occupancy of each OTUID across all biological samples and all negative controls after plant contaminants removal
## what taxa are shared among experimental samples and the negative controls
# subset otu only biological samples and negative controls
colnames(otu)
otu.bio.nc.fil <- data.frame(otu[,c(1:64,72:78)], check.names = F)
colnames(otu.bio.nc.fil)
##build a long data frame joining filtered otu table, map, and taxonomy
longdf.bio.nc.fil2 <- data.frame(OTUID=as.factor(rownames(otu.bio.nc.fil)), otu.bio.nc.fil, check.names = F) %>%
  gather(sample_id, abun, -OTUID) %>% #keep the same column naming as in the mapping file, calling counts "abun" (abundance)
  left_join(map) %>% #adds the info from the mapping file (joined by the 'sample_id' column)
left_join(tax.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(Genus.ed,sample_id) %>%
summarise(n=sum(abun))
##build the new table: Genus as rownames and sample_id as colnames
widedf.bio.nc.fil2 <- as.data.frame(spread(longdf.bio.nc.fil2, Genus.ed, n, fill=0))
rownames(widedf.bio.nc.fil2) <- widedf.bio.nc.fil2[,1]
widedf.bio.nc.fil2 <- widedf.bio.nc.fil2[,-1]
widedf.bio.nc.fil2 <- t(widedf.bio.nc.fil2)
colnames(widedf.bio.nc.fil2)
## calculate the occupancy of each Genus across all biological samples and all negative controls
widedf.bio.nc.fil.PA2 <- 1*((widedf.bio.nc.fil2>0)==1)
Occ.bio.nc.fil2 <- rowSums(widedf.bio.nc.fil.PA2)/ncol(widedf.bio.nc.fil.PA2)
df.Occ.bio.nc.fil2 <- as.data.frame(Occ.bio.nc.fil2)
df.Occ.bio.nc.fil2 <- rownames_to_column(df.Occ.bio.nc.fil2, var = "Genus")
sort.df.Occ.bio.nc.fil2 <- df.Occ.bio.nc.fil2[order(df.Occ.bio.nc.fil2$Occ.bio.nc.fil2, decreasing = TRUE),]
##calculate the mean relative abundance of each Genus across experimental samples and the negative controls
widedf.bio.nc.fil2.RA <- decostand(widedf.bio.nc.fil2, method="total", MARGIN=2)
widedf.bio.nc.fil2.RA
relabund <- rowSums(widedf.bio.nc.fil2.RA)
df.relabund <- as.data.frame(relabund)
df.relabund$meanRelAbund <- df.relabund$relabund/ncol(widedf.bio.nc.fil2.RA)
df.relabund = rownames_to_column(df.relabund, var = "Genus")
sum(df.relabund$meanRelAbund)
sort.relabund <- df.relabund[order(df.relabund$meanRelAbund, decreasing = TRUE),]
##merge OCC table and mean relative abundance table
df.Occ.ra <- merge(df.Occ.bio.nc.fil2, df.relabund, by.x =c("Genus"), by.y = c("Genus"))
sort.df.Occ.ra <- df.Occ.ra[order(df.Occ.ra$Occ.bio.nc.fil2, decreasing = TRUE),]
#select genera with occupancy greater than or equal to 2%
Occ0.02 <- subset(sort.df.Occ.ra, sort.df.Occ.ra$Occ.bio.nc.fil2 >= 0.02)
#Occ1.pf
##sort the mean relative abundance
#sort_Occ1.pf <- Occ1.pf[order(Occ1.pf$meanRelAbund, decreasing = TRUE),]
### Occupancy-mean relative abundance across calculate the occupancy of each OTUID across all biological samples and all negative controls after plant contaminants removal
Occ.bio.nc.fil.plot <- ggplot(Occ0.02,aes(x=fct_reorder(Genus, Occ.bio.nc.fil2, .desc=T), y=Occ.bio.nc.fil2))+
geom_bar(aes(), stat="identity")+
#coord_flip()+
#scale_fill_manual(values = palette)+
labs(y= "Occupancy", x="Genus")+
theme_bw()+
coord_flip()+
theme(plot.title = element_text(size=16, face="bold"),
axis.text.x=element_text(size=10,vjust = 0.5, hjust = 1),
axis.title=element_text(size=12,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.position = "right",
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
Occ.bio.nc.fil.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_expe_nc_0.02.eps",
Occ.bio.nc.fil.plot, device = "eps",
width = 5.5, height =6,
units= "in", dpi = 600)
##################################################################################################################
# Subset OTUs that are present only in the negative controls (not present in the biological samples)
colnames(widedf.bio.nc.unfil.PA)
unique.nc.unfil <- as.data.frame(subset(widedf.bio.nc.unfil.PA, rowSums(widedf.bio.nc.unfil.PA[,1:64]) == 0))
colnames(unique.nc.unfil)
unique.nc.unfil2 <- as.data.frame(subset(unique.nc.unfil, rowSums(unique.nc.unfil[,65:71]) > 0))
unique.nc.unfil2 <- rownames_to_column(unique.nc.unfil2, var = "OTUID")
dim(unique.nc.unfil2) # 22 OTU present only in the negative control
unique.nc.unfil.tax <- merge(unique.nc.unfil2, tax.unfil.ed, by="OTUID")
dim(unique.nc.unfil.tax)
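# The taxonomy merge should keep every control-only OTU (assuming tax.unfil.ed covers
# all OTUIDs in the unfiltered table):
stopifnot(nrow(unique.nc.unfil.tax) == nrow(unique.nc.unfil2))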
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
write.csv(unique.nc.unfil.tax, file = "unique.nc.unfil.tax.csv")
##### chloroplast sequences distribution ######
# 20210604_16SV4_OTU97
# load unfiltered otu and tax table
otu.tax.unfiltered
colnames(otu.tax.unfiltered)
# select chloroplast OTUs
otu.tax.chlo <- otu.tax.unfiltered %>%
filter(Order == "Chloroplast")
dim(otu.tax.chlo)
head(otu.tax.chlo)
tail(otu.tax.chlo)
colnames(otu.tax.chlo)
# otu table chloroplast
otu.chlo <- otu.tax.chlo[1:81]
head(otu.chlo)
dim(otu.chlo)
# taxonomy table chloroplast
tax.chlo <- otu.tax.chlo[,c(1,85:90)]
head(tax.chlo)
# occupancy
otu.chlo <- column_to_rownames(otu.chlo, var = "OTUID")
otu.chlo.PA <- 1*((otu.chlo>0)==1)
sum(otu.chlo.PA)
otu.chlo.PA <- otu.chlo.PA[rowSums(otu.chlo.PA)>0,]
occ.chlo <- rowSums(otu.chlo.PA)/ncol(otu.chlo.PA)
df.occ.chlo <- as.data.frame(occ.chlo)
df.occ.chlo <- rownames_to_column(df.occ.chlo, var = "OTUID")
dim(df.occ.chlo)
# rel. abund.
otu.rel.chlo <- decostand(otu.chlo, method="total", MARGIN=2)
com_abund.chlo <- rowSums(otu.rel.chlo)
df.com_abund.chlo <- as.data.frame(com_abund.chlo)
head(df.com_abund.chlo)
df.com_abund.chlo$relabund <- df.com_abund.chlo$com_abund.chlo/ncol(otu.rel.chlo) # divide by the number of samples (80)
sum(df.com_abund.chlo$com_abund.chlo)
sum(df.com_abund.chlo$relabund)
df.com_abund.chlo$percentrelabund=df.com_abund.chlo$relabund*100
sum(df.com_abund.chlo$percentrelabund)
df.com_abund.chlo <- rownames_to_column(df.com_abund.chlo, var = "OTUID")
head(df.com_abund.chlo)
dim(df.com_abund.chlo) ### all OTU with CumulativeRelAbund, percent CumulativeRelAbund!!!!!!!!!!!
# merge occupancy table and mean relative abundance table
df.occ.ra.chlo <- merge(df.occ.chlo, df.com_abund.chlo, by.x =c("OTUID"), by.y = c("OTUID"))
# merge the occupancy and relabund tabel with the taxonomy
df.occ.ra.chlo.tax <- merge(df.occ.ra.chlo, tax.chlo, by="OTUID")
# re-order
sort.occ.ra.chlo.tax <- df.occ.ra.chlo.tax[order(df.occ.ra.chlo.tax$relabund, decreasing = TRUE),]
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
#write.csv(sort.occ.ra.chlo.tax, file = "sort.occ.ra.chlo.tax.csv")
sort.occ.ra.chlo.tax.ed <- read.csv("sort.occ.ra.chlo.tax.ed.csv")
# plot ra
library(forcats)
library(dplyr)
plot.ra.chlo <- ggplot(sort.occ.ra.chlo.tax.ed,aes(x=fct_reorder(OTUID.ed, percentrelabund, .desc=T), y=percentrelabund, fill=OTUID))+
geom_bar(aes(), stat="identity")+
coord_flip()+
scale_fill_manual(values=as.vector(stepped(n=24))) +
labs(y= "Relative Abundance (%)", x="OTU ID")+
theme_bw()+
scale_y_continuous(expand = expansion(mult = c(0.01, .1)))+
theme(axis.text=element_text(size=12),
axis.title.y = element_blank(),
axis.title.x=element_text(size=14,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
plot.ra.chlo
# plot occ
plot.occ.chlo <- ggplot(sort.occ.ra.chlo.tax.ed,aes(x=fct_reorder(OTUID.ed, occ.chlo, .desc=T), y=occ.chlo, fill=OTUID))+
geom_bar(aes(), stat="identity")+
#coord_flip()+
scale_fill_manual(values=as.vector(stepped(n=24))) +
labs(y= "Occupancy", x="OTU ID")+
theme_bw()+
scale_y_continuous(expand = expansion(mult = c(0.01, .1)))+
coord_flip()+
theme(axis.text=element_text(size=12, hjust = 0.5),
axis.title=element_text(size=14,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.position = "right",
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
plot.occ.chlo
library(patchwork)
plot.occ.ra.chlo <- plot.occ.chlo | plot.ra.chlo
plot.occ.ra.chlo
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("plot.occ.ra.chlo.png",
plot.occ.ra.chlo, device = "png",
width = 13, height =7,
units= "in", dpi = 600)
##################################################################################################################
## Making plots of the DNA concentrations
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
dna.con = read.csv("dnaconc.csv", header=T)
library(viridis)
library(grid)
dna.con$SampleID <- as.factor(dna.con$SampleID)
dna.con$batch <- as.factor(dna.con$batch)
#create list of dna. conc. plots
dna.conc.plot <- lapply(split(dna.con,dna.con$batch), function(x){
  #relevel the SampleID factor by DNA concentration within this subset
x$SampleID <- factor(x$SampleID, levels=x$SampleID[order(x$DNA_conc_ng_per_ul,decreasing=F)])
#make the plot
p <- ggplot(x, aes(x = SampleID, y = DNA_conc_ng_per_ul, fill = batch, width=0.75)) +
geom_bar(stat = "identity") +
scale_fill_discrete(drop=F)+ #to force all levels to be considered, and thus different colors
theme(panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
theme(legend.position="none")+
labs(y="DNA concentration (ng/ul)", x="", title=unique(x$batch))+
coord_flip()
})
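# split()/lapply() returns a named list with one ggplot per batch level; an individual
# panel can be pulled out by name, e.g. dna.conc.plot[["1"]] (the level "1" is an
# assumption about how batches are coded in dnaconc.csv).
length(dna.conc.plot) # one plot per batch level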
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
setEPS()
postscript("dna_conct.eps", height = 7, width = 8)
do.call(grid.arrange,(c(dna.conc.plot, ncol=3)))
dev.off()
graphics.off()
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210601_16SV4')
ggsave("20210601_barplot_genus.unfiltered.eps",
plot.unfil.gen, device = "eps",
width = 12, height =7.5,
units= "in", dpi = 600)
#######################################################################################################################
############### Bean seed microbiome analysis for the rain out shelter experiment: OTU 97% ############################
#######################################################################################################################
# Date: August 18th 2021
# By : Ari Fina Bintarti
# INSTALL PACKAGES
install.packages(c('vegan', 'tidyverse'))
install.packages('reshape')
install.packages("ggpubr")
install.packages("car")
install.packages("agricolae")
install.packages("multcompView")
install.packages("gridExtra")
install.packages("ggplot2")
install.packages("sjmisc")
install.packages("sjPlot")
install.packages("MASS")
install.packages("FSA")
install.packages('mvtnorm', dep = TRUE)
install.packages("rcompanion")
install.packages("onewaytests")
install.packages("PerformanceAnalytics")
install.packages("gvlma")
install.packages("userfriendlyscience")
install.packages("ggpmisc")
install.packages("fitdistrplus")
install.packages('BiocManager')
#install.packages("cowplot")
install.packages("dplyr")
install.packages("lme4")
install.packages("nlme")
install.packages("car")
install.packages("multcomp")
library(multcomp)
library(car)
library(BiocManager)
library(vegan)
library(dplyr)
library(plyr)
library(tidyverse)
library(tidyr)
#library(cowplot)
library(ggplot2)
library(reshape)
library(ggpubr)
library(car)
library(agricolae)
library(multcompView)
library(grid)
library(gridExtra)
library(sjmisc)
library(sjPlot)
library(MASS)
library(FSA)
library(rcompanion)
library(onewaytests)
library(ggsignif)
library(PerformanceAnalytics)
library(gvlma)
library(userfriendlyscience)
library(ggpmisc)
library(tibble)
library(fitdistrplus)
library(lme4)
library(nlme)
# SET THE WORKING DIRECTORY
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
# READ PROPORTION OF CHLOROPLAST AND MITOCHONDRIA
#read the unfiltered otu table
otu.unfil <- read.table(file = 'OTU_table_tax.txt', sep = '\t', header = TRUE,check.names = FALSE)
otu.unfil
tax.unfil <- otu.unfil[,'taxonomy']
tax.unfil
#write.csv(tax.unfil, file = "tax.unfil.csv")
dim(otu.unfil) #[1] 325 81
colnames(otu.unfil)
otu.unfil <- otu.unfil[,-82]
dim(otu.unfil)# otu= 325, otu table still has Mock, NC, and PC in the sample
otu.unfil <- column_to_rownames(otu.unfil,var = "OTUID")
sort(rowSums(otu.unfil, na.rm = FALSE, dims = 1), decreasing = F)
#read taxonomy
tax.unfil.ed = read.csv("tax.unfil.ed.csv", header=T)
rownames(tax.unfil.ed) <- rownames(otu.unfil)
dim(tax.unfil.ed) #[1] 325 7
otu.unfil <- rownames_to_column(otu.unfil,var = "OTUID")
tax.unfil.ed <- rownames_to_column(tax.unfil.ed,var = "OTUID")
otu.tax.unfiltered <- merge(otu.unfil, tax.unfil.ed, by="OTUID")
View(otu.tax.unfiltered)
colnames(otu.tax.unfiltered)
#write.csv(otu.tax.unfiltered, file = "otu.tax.unfiltered.csv")
#read the metadata
#############################################################################################################################################################
#READ PROPORTION OF CHLOROPLAST AND MITOCHONDRIA OF EXPERIMENTAL SAMPLES
#select only biological sample from otu table
otu.bio.unfil <- otu.unfil[,1:65] #unselect Mock, NC, and PC from the otu table
dim(otu.bio.unfil)
colnames(otu.bio.unfil)
otu.bio.unfil <- column_to_rownames(otu.bio.unfil, var = "OTUID")
sort(rowSums(otu.bio.unfil, na.rm = FALSE, dims = 1), decreasing = F)
# remove OTUs that do not present in biological sample
otu.bio1.unfil <- otu.bio.unfil[which(rowSums(otu.bio.unfil) > 0),]
dim(otu.bio1.unfil) # [1] 244 64, otu table before plant contaminant removal and normalization using metagenomeSeq package and before decontamination
sort(rowSums(otu.bio1.unfil, na.rm = FALSE, dims = 1), decreasing = F)
sum(otu.bio1.unfil)
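# The comment above refers to normalization with the metagenomeSeq package, which is done
# elsewhere in the workflow. A minimal sketch of cumulative sum scaling (CSS), assuming
# metagenomeSeq is installed; the object names mr.obj and css.counts are illustrative only.
library(metagenomeSeq)
mr.obj <- newMRexperiment(otu.bio1.unfil) # counts with OTUs as rows, samples as columns
mr.obj <- cumNorm(mr.obj, p = cumNormStatFast(mr.obj)) # estimate percentile and apply CSS
css.counts <- MRcounts(mr.obj, norm = TRUE, log = TRUE) # export CSS-normalized (log2) counts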
# load the otu table
head(otu.bio1.unfil)
otu.bio1.unfil <- rownames_to_column(otu.bio1.unfil, var = "OTUID")
# merge the taxonomy with otu table
head(tax.unfil.ed)
#tax.unfil.ed <- rownames_to_column(tax.unfil.ed, var = "OTUID")
otu.tax.unfil <- merge(otu.bio1.unfil, tax.unfil.ed, by="OTUID")
dim(otu.tax.unfil)
colnames(otu.tax.unfil)
#select only the otu table and "Order" & "Family"
#otu.tax.unfil.ed <- otu.tax.unfil[,c(1:48,52,53)]
#colnames(otu.tax.unfil.ed)
#edit the taxonomy
colnames(otu.tax.unfil)
otu.tax.unfil.ed <- otu.tax.unfil %>%
mutate(Taxonomy = case_when(Order == "Chloroplast" ~ 'Chloroplast',
Phylum == "Cyanobacteria"~ 'Chloroplast',
Family == "Mitochondria" ~ 'Mitochondria',
#Family == "Magnoliophyta" ~ 'Magnoliophyta',
TRUE ~ 'Bacteria')) %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Phylum == "Cyanobacteria"~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
#Family == "Magnoliophyta" ~ 'Plant',
TRUE ~ 'Bacteria'))
tail(otu.tax.unfil.ed)
otu.tax.unfil.ed
colnames(otu.tax.unfil.ed)
otu.tax.unfil.ed1 <- otu.tax.unfil.ed[,c(1:66,75)]
View(otu.tax.unfil.ed1)
colnames(otu.tax.unfil.ed1)
tail(otu.tax.unfil.ed1)
long.dat <- gather(otu.tax.unfil.ed1, Sample, Read, 2:65, factor_key = T)
long.dat
### 1. Plant contaminant proportion
detach(package:plyr)
df.unfil <- long.dat %>%
group_by(Sample, Domain) %>%
summarise(read.number = sum(Read))
df.unfil1 <- df.unfil %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#with(df.unfil1, sum(percent[Sample == "1001"]))
library(ggbeeswarm)
library(ggtext)
plot.unfil.dom <- ggplot(df.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_violin(trim = F, scale="width") +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "A. Experimental Sample")+
ylab("Read Proportion (%)")+
theme(legend.position="none",
axis.title.x = element_blank(),
axis.text= element_text(size = 12),
strip.text = element_text(size=12),
plot.title = element_text(size = 14),
axis.title.y = element_markdown(size=13),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
stat_summary(fun="median",geom="point", size=7, color="red", shape=95)
plot.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_plant_proportion.eps",
plot.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
### 2. Chloroplast and Mitochondria contaminant proportion
df.unfil.tax <- long.dat %>%
group_by(Sample, Taxonomy) %>%
summarize(read.number = sum(Read))
df.unfil.tax1 <- df.unfil.tax %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
plot.unfil.tax <- ggplot(df.unfil.tax1, aes(x=Taxonomy, y=percent, fill=Taxonomy))+
geom_violin(trim = F, scale="width") +
#geom_beeswarm(dodge.width = 1, alpha = 0.3)+
#scale_fill_manual(labels = c("A1","A2", "A3","B1","B2","B3","B4","B5","B6","C5","C6","C7"),values=c("#440154FF", "#482677FF","#3F4788FF","#238A8DFF","#1F968BFF","#20A386FF","#29AF7FF","#3CBC75F","#56C667FF","#B8DE29FF","#DCE318FF","#FDE725FF"))+
#scale_fill_viridis(discrete = T)+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
#geom_text(data=sum_rich_plant_new, aes(x=Plant,y=2+max.rich,label=Letter), vjust=0)+
labs(title = "B")+
ylab("Read Proportion (%)")+
theme(legend.position="none",
#axis.text.x=element_blank(),
#axis.ticks.x = element_blank(),
axis.title.x = element_blank(),
axis.text= element_text(size = 14),
strip.text = element_text(size=18, face = 'bold'),
plot.title = element_text(size = 14, face = 'bold'),
#axis.title.y=element_text(size=13,face="bold"),
axis.title.y = element_markdown(size=15,face="bold"),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
#plot.margin = unit(c(0, 0, 0, 0), "cm"))
stat_summary(fun="median",geom="point", size=7, color="red", shape=95)
#width=1, position=position_dodge(),show.legend = FALSE)
plot.unfil.tax
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_chloromito_proportion.eps",
plot.unfil.tax, device=cairo_ps,
width = 7, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF NEGATIVE CONTROLS
# otu table of the negative control
colnames(otu.unfil)
NC.unfiltered <- otu.unfil[,c(1,73:79)]#only negative control
colnames(NC.unfiltered)
NC.unfiltered <- column_to_rownames(NC.unfiltered,var="OTUID")
sort(rowSums(NC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
NC1.unfiltered=NC.unfiltered[which(rowSums(NC.unfiltered) > 0),]
sort(rowSums(NC1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
NC1.unfiltered <- rownames_to_column(NC1.unfiltered,var="OTUID")
NC1.tax.unfiltered <- merge(NC1.unfiltered, tax.unfil.ed, by="OTUID")
NC1.unfiltered <- column_to_rownames(NC1.unfiltered,var="OTUID")
#write.csv(NC1.tax.unfiltered, file = "NC1.tax.unfiltered.csv")
head(NC1.unfiltered)
colnames(NC1.unfiltered)
#edit the taxonomy
colnames(NC1.tax.unfiltered)
NC1.tax.unfil.ed <- NC1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(NC1.tax.unfil.ed)
NC1.tax.unfil.ed1 <- NC1.tax.unfil.ed[,c(1:9)]
colnames(NC1.tax.unfil.ed1)
tail(NC1.tax.unfil.ed1)
str(NC1.tax.unfil.ed1)
library(tidyr)
long.dat.nc.unfil <- gather(NC1.tax.unfil.ed1, Sample, Read, NC1r2:NC7r2, factor_key = T)
long.dat.nc.unfil
#detach(package:plyr)
df.nc.unfil <- long.dat.nc.unfil %>%
group_by(Sample, Domain) %>%
summarise(read.number = sum(Read))
df.nc.unfil1 <- df.nc.unfil %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#with(df.nc.unfil1, sum(percent[Sample == "NC1r2"]))
library(ggbeeswarm)
library(ggtext)
plot.nc.unfil.dom <- ggplot(df.nc.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_violin(trim = F, scale="width") +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "B. Negative Control")+
#ylab("Read Proportion (%)")+
theme(legend.position="none",
axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(size = 13),
#strip.text.x = element_text(size=18, face = 'bold'),
plot.title = element_text(size = 14),
#axis.title.y = element_markdown(size=15,face="bold"),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
stat_summary(fun="median",geom="point", size=10, color="red", shape=95)
plot.nc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_nc_plant_proportion.eps",
plot.nc.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF THE POSITIVE CONTROLS
# otu table of the positive control
colnames(otu.unfil)
PC.unfiltered <- otu.unfil[,c(1,66:72)]#only positive control
PC.unfiltered
PC.unfiltered <- column_to_rownames(PC.unfiltered,var="OTUID")
sort(rowSums(PC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
PC1.unfiltered <- PC.unfiltered[which(rowSums(PC.unfiltered) > 0),]
sort(rowSums(PC1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
PC1.unfiltered <- rownames_to_column(PC1.unfiltered,var="OTUID")
PC1.tax.unfiltered <- merge(PC1.unfiltered, tax.unfil.ed, by="OTUID")
PC1.unfiltered <- column_to_rownames(PC1.unfiltered,var="OTUID")
#write.csv(NC1.tax.unfiltered, file = "NC1.tax.unfiltered.csv")
sum(PC1.unfiltered)
dim(PC1.unfiltered)
#edit the taxonomy
colnames(PC1.tax.unfiltered)
PC1.tax.unfil.ed <- PC1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(PC1.tax.unfil.ed)
PC1.tax.unfil.ed1 <- PC1.tax.unfil.ed[,c(1:9)]
colnames(PC1.tax.unfil.ed1)
#library(tidyr)
long.dat.pc.unfil <- gather(PC1.tax.unfil.ed1, Sample, Read, Mock1r2:Mock7r2, factor_key = T)
long.dat.pc.unfil
#detach(package:plyr)
df.pc.unfil <- long.dat.pc.unfil %>%
group_by(Sample, Domain) %>%
summarise(read.number = sum(Read))
df.pc.unfil1 <- df.pc.unfil %>%
group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#library(ggbeeswarm)
#library(ggtext)
plot.pc.unfil.dom <- ggplot(df.pc.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_violin(trim = F, scale="width") +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
geom_jitter(position = position_jitter(width = 0.1, height = 0, seed=13), alpha=0.3)+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "C. Positive Control")+
#ylab("Read Proportion (%)")+
theme(legend.position="none",
axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(size = 13),
#strip.text.x = element_text(size=18, face = 'bold'),
plot.title = element_text(size = 14),
#axis.title.y = element_markdown(size=15,face="bold"),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())+
stat_summary(fun="median",geom="point", size=10, color="red", shape=95)
plot.pc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_pc_plant_proportion.eps",
plot.pc.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF THE RTSF POSITIVE CONTROL
# otu table of the RTSF Zymo
colnames(otu.unfil)
otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID")
zymo.unfiltered <- otu.unfil[,"ZymoMockDNAr2", drop=F]
zymo.unfiltered
#zymo.unfiltered <- column_to_rownames(zymo.unfiltered,var="OTUID")
sort(rowSums(zymo.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
zymo.unfiltered
zymo1.unfiltered <- subset(zymo.unfiltered,rowSums(zymo.unfiltered["ZymoMockDNAr2"]) > 0)
zymo1.unfiltered
sort(rowSums(zymo1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
zymo1.unfiltered <- rownames_to_column(zymo1.unfiltered,var="OTUID")
zymo1.tax.unfiltered <- merge(zymo1.unfiltered, tax.unfil.ed, by="OTUID")
zymo1.unfiltered <- column_to_rownames(zymo1.unfiltered,var="OTUID")
#write.csv(zymo1.tax.unfiltered, file = "zymo1.tax.unfiltered.csv")
sum(zymo1.unfiltered)
dim(zymo1.unfiltered)
#edit the taxonomy
colnames(zymo1.tax.unfiltered)
zymo1.tax.unfil.ed <- zymo1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(zymo1.tax.unfil.ed)
zymo1.tax.unfil.ed1 <- zymo1.tax.unfil.ed[,c(1:3)]
colnames(zymo1.tax.unfil.ed1)
#library(tidyr)
long.dat.zymo.unfil <- zymo1.tax.unfil.ed1
long.dat.zymo.unfil$Read <- long.dat.zymo.unfil$ZymoMockDNAr2
long.dat.zymo.unfil
#detach(package:plyr)
df.zymo.unfil <- long.dat.zymo.unfil %>%
group_by(Domain) %>%
summarise(read.number = sum(Read))
df.zymo.unfil1 <- df.zymo.unfil %>%
#group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#library(ggbeeswarm)
#library(ggtext)
plot.zymo.unfil.dom <- ggplot(df.zymo.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_bar(stat='identity') +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
theme_bw()+
ylab("Read Proportion (%)")+
labs(title = "D. RTSF Positive Control")+
theme(legend.position="none",
axis.title.y = element_markdown(size=13),
axis.title.x = element_blank(),
axis.text.y = element_text(size = 13),
axis.text.x = element_text(size = 13),
plot.title = element_text(size = 14),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
plot.zymo.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_zymo_plant_proportion.eps",
plot.zymo.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
#############################################################################################################################################################
#READ PROPORTION OF PLANT CONTAMINANTS OF THE RTSF NEGATIVE CONTROL
# otu table of the RTSF NC
colnames(otu.unfil)
#otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID")
RTNC.unfiltered <- otu.unfil[,"RTSFNTCr2", drop=F]
RTNC.unfiltered
sort(rowSums(RTNC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.unfiltered <- subset(RTNC.unfiltered,rowSums(RTNC.unfiltered["RTSFNTCr2"]) > 0)
RTNC1.unfiltered
sort(rowSums(RTNC1.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.unfiltered <- rownames_to_column(RTNC1.unfiltered,var="OTUID")
RTNC1.tax.unfiltered <- merge(RTNC1.unfiltered, tax.unfil.ed, by="OTUID")
RTNC1.unfiltered <- column_to_rownames(RTNC1.unfiltered,var="OTUID")
#write.csv(RTNC1.tax.unfiltered, file = "RTNC1.tax.unfiltered.csv")
sum(RTNC1.unfiltered)
dim(RTNC1.unfiltered)
#edit the taxonomy
colnames(RTNC1.tax.unfiltered)
RTNC1.tax.unfil.ed <- RTNC1.tax.unfiltered %>%
mutate(Domain = case_when(Order == "Chloroplast" ~ 'Plant',
Family == "Mitochondria" ~ 'Plant',
TRUE ~ 'Bacteria'))
colnames(RTNC1.tax.unfil.ed)
RTNC1.tax.unfil.ed1 <- RTNC1.tax.unfil.ed[,c(1:3)]
colnames(RTNC1.tax.unfil.ed1)
#library(tidyr)
long.dat.rtnc.unfil <- RTNC1.tax.unfil.ed1
long.dat.rtnc.unfil$Read <- long.dat.rtnc.unfil$RTSFNTCr2
long.dat.rtnc.unfil
#detach(package:plyr)
df.rtnc.unfil <- long.dat.rtnc.unfil %>%
group_by(Domain) %>%
summarise(read.number = sum(Read))
df.rtnc.unfil1 <- df.rtnc.unfil %>%
#group_by(Sample) %>%
mutate(percent= prop.table(read.number) * 100)
#library(ggbeeswarm)
#library(ggtext)
plot.rtnc.unfil.dom <- ggplot(df.rtnc.unfil1, aes(x=Domain, y=percent, fill=Domain))+
geom_bar(stat='identity') +
scale_fill_manual(labels = c("Bacteria","Plant"),values=c("#CC79A7", "#009E73"))+
theme_bw()+
#expand_limits(x = 0, y = 0)+
labs(title = "E. RTSF Negative Control")+
theme(legend.position="none",
axis.title = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_text(size = 13),
plot.title = element_text(size = 14),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
plot.rtnc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_rtnc_plant_proportion.eps",
plot.rtnc.unfil.dom, device=cairo_ps,
width = 5, height =5,
units= "in", dpi = 600)
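# The group_by/summarise/prop.table pattern above is repeated for the experimental samples and
# for each multi-sample control set; a small helper like this (hypothetical, not part of the
# original workflow) computes the same Bacteria-vs-Plant percentages from any long-format table
# with Sample, Domain and Read columns.
prop_by_domain <- function(long.df) {
  long.df %>%
    group_by(Sample, Domain) %>%
    summarise(read.number = sum(Read)) %>%
    group_by(Sample) %>%
    mutate(percent = prop.table(read.number) * 100)
}
# e.g. prop_by_domain(long.dat.pc.unfil) reproduces df.pc.unfil1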
#############################################################################################################################################################
# COMPILE ALL READ PROPORTION OF PLANT CONTAMINANTS FIGURES
plot.unfil.dom
plot.nc.unfil.dom
plot.pc.unfil.dom
plot.zymo.unfil.dom
plot.rtnc.unfil.dom
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
library(ggpubr)
PlantContProp <- ggarrange(plot.unfil.dom,plot.nc.unfil.dom,plot.pc.unfil.dom,plot.zymo.unfil.dom,plot.rtnc.unfil.dom, ncol = 3, nrow = 2)
PlantContProp
ggsave("20210604_rPlantContProp.eps",
PlantContProp, device=cairo_ps,
width = 10, height =7,
units= "in", dpi = 600)
#############################################################################################################################################################
# ANALYSIS OF READS AFTER CHLOROPLAST AND MITOCHONDRIA REMOVAL
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
otu <- read.table('OTU_table_tax_filt.txt', sep='\t', header=T, row.names = 1, check.names = FALSE)
otu
head(otu)
colnames(otu)
tax <- otu[,'taxonomy']
str(tax)
#write.csv(tax, file = "tax.fil.csv")
dim(otu)
colnames(otu)
otu <- otu[,-81]
dim(otu) # [1] 298 79, otu table still has Mock, NC, and PC in the sample
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
otu <- rownames_to_column(otu, var = "OTUID")
#read taxonomy
tax.ed = read.csv("tax.fil.ed.csv", header=T)
head(tax.ed)
colnames(otu)
otu <- column_to_rownames(otu, var = "OTUID")
rownames(tax.ed) <- rownames(otu)
dim(tax.ed)
#read the metadata
#select only biological sample from otu table
colnames(otu)
otu.bio <- otu[,1:64] #unselect Mock, NC, and PC from the otu table
colnames(otu.bio)
dim(otu.bio)
#otu.bio <- column_to_rownames(otu.bio,var = "OTUID")
sort(rowSums(otu.bio, na.rm = FALSE, dims = 1), decreasing = F)
# remove OTUs that do not present in sample
otu.bio1=otu.bio[which(rowSums(otu.bio) > 0),]
dim(otu.bio1) # otu= 218, otu table before normalization using metagenomeSeq package and before decontamination
sort(rowSums(otu.bio1, na.rm = FALSE, dims = 1), decreasing = F)
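# A minimal sketch of the prevalence-based decontamination step mentioned in the comment above,
# assuming the 'decontam' package; is.neg, contam.prev and otu.decontam are illustrative names,
# and in practice the mock/positive-control libraries would be set aside before this test.
library(decontam)
is.neg <- colnames(otu) %in% c(colnames(otu)[72:78], "RTSFNTCr2") # flag the negative-control libraries
contam.prev <- isContaminant(t(as.matrix(otu)), method = "prevalence", neg = is.neg)
table(contam.prev$contaminant) # number of OTUs flagged as contaminants
otu.decontam <- otu[!contam.prev$contaminant, ] # drop flagged OTUs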
# merge otu.bio1 with taxonomy to have match taxonomy table
head(otu.bio1)
#otu.bio1 <- rownames_to_column(otu.bio1,var = "OTUID")
head(tax.ed)
tax.ed <- rownames_to_column(tax.ed,var = "OTUID")
otu.bio1 <- rownames_to_column(otu.bio1,var = "OTUID")
otu.bio1.tax <- merge(otu.bio1, tax.ed, by="OTUID")
dim(otu.bio1.tax)
# separate the sample
# otu table
otu.bac.fil <- otu.bio1.tax[,c(1:65)]
head(otu.bac.fil)
otu.bac.fil <- column_to_rownames(otu.bac.fil,var="OTUID")
sum(otu.bac.fil)
dim(otu.bac.fil)
#otu table of the negative control
NC <- otu[,c(72:78)]#only negative control
NC
#NC <- column_to_rownames(NC,var="OTUID")
sort(rowSums(NC, na.rm = FALSE, dims = 1), decreasing = F)
NC1=NC[which(rowSums(NC) > 0),]
sort(rowSums(NC1, na.rm = FALSE, dims = 1), decreasing = F)
NC1
NC1 <- rownames_to_column(NC1,var="OTUID")
tax.ed
NC1.tax <- merge(NC1, tax.ed, by="OTUID")
#write.csv(NC1.tax, file = "NC1.tax.csv")
dim(NC1)
NC1 <- column_to_rownames(NC1,var="OTUID")
sum(NC1)
#otu table of the positive control
colnames(otu)
PC <- otu[,c(65:71)]#only positive control
PC
#PC <- column_to_rownames(PC,var="OTUID")
sort(rowSums(PC, na.rm = FALSE, dims = 1), decreasing = F)
PC1=PC[which(rowSums(PC) > 0),]
sort(rowSums(PC1, na.rm = FALSE, dims = 1), decreasing = F)
PC1
PC1 <- rownames_to_column(PC1,var="OTUID")
tax.ed
PC1.tax <- merge(PC1, tax.ed, by="OTUID")
#write.csv(PC1.tax, file = "PC1.tax.csv")
dim(PC1)
PC1 <- column_to_rownames(PC1,var="OTUID")
sum(PC1)
# otu table of the RTSF Zymo
colnames(otu)
zymo.fil <- otu[,"ZymoMockDNAr2", drop=F]
zymo.fil
# zymo.fil already has the OTU IDs as rownames (inherited from otu), so no column_to_rownames is needed here
sort(rowSums(zymo.fil, na.rm = FALSE, dims = 1), decreasing = F)
zymo.fil
zymo1.fil <- subset(zymo.fil,rowSums(zymo.fil["ZymoMockDNAr2"]) > 0)
zymo1.fil
sort(rowSums(zymo1.fil, na.rm = FALSE, dims = 1), decreasing = F)
zymo1.fil <- rownames_to_column(zymo1.fil,var="OTUID")
zymo1.tax.fil <- merge(zymo1.fil, tax.ed, by="OTUID")
zymo1.fil <- column_to_rownames(zymo1.fil,var="OTUID")
#write.csv(zymo1.tax.fil, file = "zymo1.tax.fil.csv")
sum(zymo1.fil)
dim(zymo1.fil)
# otu table of the RTSF NC
colnames(otu)
RTNC.fil <- otu[,"RTSFNTCr2", drop=F]
RTNC.fil
sort(rowSums(RTNC.fil, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.fil <- subset(RTNC.fil,rowSums(RTNC.fil["RTSFNTCr2"]) > 0)
RTNC1.fil
sort(rowSums(RTNC1.fil, na.rm = FALSE, dims = 1), decreasing = F)
RTNC1.fil <- rownames_to_column(RTNC1.fil,var="OTUID")
RTNC1.tax.fil <- merge(RTNC1.fil, tax.ed, by="OTUID")
RTNC1.fil <- column_to_rownames(RTNC1.fil,var="OTUID")
#write.csv(RTNC1.tax.fil, file = "RTNC1.tax.fil.csv")
sum(RTNC1.fil)
dim(RTNC1.fil)
#####################################################################################################################################
######################################################################################################################################
### Rarefaction curves ######
# using GlobalPatterns
library(phyloseq)
# 1. rarefaction curve for otu table after plant contaminant removal before microbial decontamination and normalization
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
otu <- read.table('OTU_table_tax_filt.txt', sep='\t', header=T, row.names = 1, check.names = FALSE)
otu
otu #otu table after plant contaminant removal
colnames(otu)
head(otu)
otu <- otu[,-81]
dim(otu) # [1] 298 79, otu table still has Mock, NC, and PC in the sample
colnames(otu)
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
# change name of ZymoMockDNAr2 to RTSF_ZymoMockDNAr2
library(dplyr)
is.data.frame(otu)
R.utils::detachPackage("plyr")
otu <- otu %>%
dplyr::rename(RTSF_ZymoMockDNAr2=ZymoMockDNAr2)
colnames(otu)
# make phyloseq otu table and taxonomy
otu.phyl = otu_table(otu, taxa_are_rows = TRUE)
head(tax.ed)
tax.ed <- column_to_rownames(tax.ed, var = "OTUID")
tax.phyl = tax_table(as.matrix(tax.ed))
# make phyloseq map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
phyl.obj <- merge_phyloseq(otu.phyl,tax.phyl,map.phyl)
phyl.obj
otu_table(phyl.obj)
#set seed
set.seed(42)
#rarefy the data
# make sure to run ggrare function in the "generating_rarecurfe.r" file
# data = phyloseq object of decontaminated non normalized otu table
# run the ggrare function attached in the file "generating_rarecurve.r"
p.rare <- ggrare(phyl.obj, step = 1, color = "sample_type", label = "sample_type", se = FALSE)
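# If the ggrare() helper from "generating_rarecurve.r" is not at hand, vegan::rarecurve() gives a
# quick base-graphics check of the same saturation pattern (illustrative sketch; the step size is
# an arbitrary choice). rarecurve() expects samples as rows, hence the transpose.
rc.mat <- t(as(otu_table(phyl.obj), "matrix"))
rarecurve(rc.mat, step = 100, label = FALSE, xlab = "Number of Reads", ylab = "Number of OTUs")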
#set up your own color palette
#Palette <- c("#440154FF","#1F968BFF","#FDE725FF",)
#names(Palette) <- levels(sample_data(phyl.obj)$sample_type)
#Palette
#plot the rarecurve
#p <- ggrare(psdata, step = 1000, color = "SampleType", label = "Sample", se = FALSE)
library(ggtext)
rare <- p.rare +
scale_color_manual(labels = c("Experimental Sample", "Negative Control", "Positive Control", "RTSF Negative Control", "RTSF Positive Control"), values = c("#88CCEE", "#CC6677", "#DDCC77", "#117733", "#332288"))+
theme_bw()+
scale_size_manual(values = 60)+
ylab("Number of OTUs")+
xlab("Number of Reads")+
labs(color='Sample Type:') +
theme( strip.text.x = element_text(size=14, face='bold'),
axis.text.x=element_text(size = 14),
axis.text.y = element_text(size = 14),
strip.text.y = element_text(size=18, face = 'bold'),
plot.title = element_text(size =20 ,face='bold'),
axis.title.y = element_text(size=15,face="bold"),
axis.title.x = element_text(size=15,face="bold"),
legend.position = "right",
legend.title = element_text(size=15, face ="bold"),
legend.text = element_text(size=14),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
plot(rare)
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604rarefactioncurve.pdf",
rare, device= "pdf",
width = 9, height = 7,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### bacterial taxa composition of all samples (after plant contaminant removal)
# make phyloseq object
otu #otu table after plant contaminant removal
colnames(otu)
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
# make phyloseq otu table and taxonomy
head(otu)
colnames(otu)
colnames(otu)[80] <- "RTSF_ZymoMockDNAr2"
otu.phyl = otu_table(otu, taxa_are_rows = TRUE)
head(tax.ed)
tax.ed <- column_to_rownames(tax.ed, var = "OTUID")
tax.phyl = tax_table(as.matrix(tax.ed))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
map$batch <- as.factor(map$batch)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
phyl.obj <- merge_phyloseq(otu.phyl,tax.phyl,map.phyl)
phyl.obj
# merge taxa by class
# 1. class - Bacteria
bac.cl <- tax_glom(phyl.obj, taxrank = "Class", NArm = F)
bac.cl.ra <- transform_sample_counts(bac.cl, function(x) x/sum(x))
bac.cl.ra
df.cl <- psmelt(bac.cl.ra) %>%
group_by(batch,Sample, Class) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.cl$Class <- as.character(df.cl$Class)
#df.cl$Class[df.cl$Mean < 0.1] <- "Other"
# barplot of bacterial/archaeal composition across pods at Phylum level
#library(rcartocolor)
#display_carto_all(colorblind_friendly = TRUE)
#my_colors = carto_pal(12, "Safe")
#my_colors
# New facet label names for plant variable
#plant.labs <- c("Plant: A", "Plant: B", "Plant: C")
#names(plant.labs) <- c("A", "B", "C")
# Create the plot
#install.packages("pals")
library(pals)
cl <- ggplot(data=df.cl, aes(x=Sample, y=Mean, fill=Class))
plot.cl <- cl +
geom_bar(aes(), stat="identity", position="fill") +
scale_fill_manual(values=as.vector(stepped(n=24))) +
#scale_fill_manual(values=c('#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c','#f58231', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', 'lightslateblue', '#000000', 'tomato','hotpink2'))+
#scale_fill_manual(values=c("#44AA99", "#332288", "#117733","#CC6677","#DDCC77", "#88CCEE","#661100","#AA4499" ,"#888888"))+
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
#labs(y= "Mean Relative Abundance", x="Plant")+
labs(y= "Mean Relative Abundance")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=12, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=13,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=1,bycol=TRUE))
plot.cl
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_class.eps",
plot.cl, device = "eps",
width = 9.5, height =6.5,
units= "in", dpi = 600)
# merge taxa by genus
# 2. genus - Bacteria
bac.gen <- tax_glom(phyl.obj, taxrank = "Genus", NArm = F)
bac.gen.ra <- transform_sample_counts(bac.gen, function(x) x/sum(x))
bac.gen.ra #153 taxa
df.gen <- psmelt(bac.gen.ra) %>%
group_by(batch,Sample, Genus) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.gen$Genus <- as.character(df.gen$Genus)
df.gen$Genus[df.gen$Mean < 0.03] <- "Other (less than 3%)"
library(randomcoloR)
set.seed(1)
n <- 45
palette <- distinctColorPalette(n)
col=palette
gen <- ggplot(data=df.gen, aes(x=Sample, y=Mean, fill=Genus))
plot.gen <- gen +
geom_bar(aes(), stat="identity", position="fill") +
#scale_colour_viridis(discrete = T)+
#facet_grid(. ~ batch) +
scale_fill_manual(name="Genus",values=col) +
#scale_fill_manual(values=as.vector(stepped(n=24))) +
#scale_fill_manual(name="Genus",values=as.vector(polychrome(n=36))) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=10, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=13,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=2,bycol=TRUE))
plot.gen
plot.gen1 <- plot.gen +
facet_wrap(~ batch, scales="free_x", nrow = 2)+
theme(strip.background =element_rect(fill="grey"))+
theme(strip.text = element_text(colour = 'black', size = 14, face = 'bold'))
plot.gen1
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_genus_all.eps",
plot.gen1, device = "eps",
width = 15, height = 8,
units= "in", dpi = 600)
# merge taxa by family
# 2. Family - Bacteria
bac.fam <- tax_glom(phyl.obj, taxrank = "Family", NArm = F)
bac.fam.ra <- transform_sample_counts(bac.fam, function(x) x/sum(x))
bac.fam.ra #87 taxa
df.fam <- psmelt(bac.fam.ra) %>%
group_by(batch,Sample, Family) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.fam$Family <- as.character(df.fam$Family)
df.fam$Family[df.fam$Mean < 0.01] <- "Other (less than 1%)"
fam <- ggplot(data=df.fam, aes(x=Sample, y=Mean, fill=Family))
plot.fam <- fam +
geom_bar(aes(), stat="identity", position="fill") +
scale_fill_manual(name="Family",values=col) +
#scale_fill_manual(name="Family",values=as.vector(polychrome(n=36))) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance", x="Sample Type")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=12, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=13,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=2,bycol=TRUE))
plot.fam
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_genus.eps",
plot.gen, device = "eps",
width = 13, height =7.5,
units= "in", dpi = 600)
#####################################################################################################################################
#####################################################################################################################################
## 2. bacterial taxa found in the negative control
# make phyloseq object
# otu table of negative control only
dim(NC)
NC <- rownames_to_column(NC, var = "OTUID")
head(NC)
dim(RTNC.fil)
head(RTNC.fil)
RTNC.fil <- rownames_to_column(RTNC.fil, var = "OTUID")
colnames(RTNC.fil)
#colnames(RTNC.fil)[2] <- "RTSF_NC"
ncrtnc <- merge(NC, RTNC.fil)
head(ncrtnc)
colnames(ncrtnc)
ncrtnc <- column_to_rownames(ncrtnc, var = "OTUID")
sort(rowSums(ncrtnc, na.rm = FALSE, dims = 1), decreasing = F)
ncrtnc1 <- ncrtnc[which(rowSums(ncrtnc) > 0),]
sort(rowSums(ncrtnc1, na.rm = FALSE, dims = 1), decreasing = F)
# taxonomy negative control
head(ncrtnc1)
ncrtnc1 <- rownames_to_column(ncrtnc1, var = "OTUID")
head(tax.ed)
tax.ed <- rownames_to_column(tax.ed, var = "OTUID")
ncrtnc1.tax <- merge(ncrtnc1, tax.ed, by="OTUID")
colnames(ncrtnc1.tax)
tax.ncrtnc <- ncrtnc1.tax[,c(1,10:18)]
head(tax.ncrtnc)
# make phyloseq otu table and taxonomy
ncrtnc1 <- column_to_rownames(ncrtnc1, var = "OTUID")
ncrtnc.phyl = otu_table(ncrtnc1, taxa_are_rows = TRUE)
tax.ncrtnc <- column_to_rownames(tax.ncrtnc, var = "OTUID")
tax.ncrtnc.phyl = tax_table(as.matrix(tax.ncrtnc))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
ncrtnc.phyl.obj <- merge_phyloseq(ncrtnc.phyl,tax.ncrtnc.phyl,map.phyl)
ncrtnc.phyl.obj
# 1. genus - Bacteria
ncrtnc.gen <- tax_glom(ncrtnc.phyl.obj, taxrank = "Genus.ed", NArm = F)
ncrtnc.gen.ra <- transform_sample_counts(ncrtnc.gen, function(x) x/sum(x))
ncrtnc.gen.ra #61 taxa
df.ncrtnc.gen <- psmelt(ncrtnc.gen.ra) %>%
group_by(Sample,Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.ncrtnc.gen$Genus.ed <- as.character(df.ncrtnc.gen$Genus.ed)
df.ncrtnc.gen$percent.mean <- df.ncrtnc.gen$Mean*100
ncrtnc.bubble.plot <- ggplot(data=df.ncrtnc.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", x ="Negative Controls", y="Taxa")+
theme(legend.key=element_blank(),
axis.title = element_markdown(size=15,face="bold"),
axis.text.x = element_text(colour = "black", size = 12, face = "bold", vjust = 0.95, hjust = 1, angle=45),
axis.text.y = element_text(colour = "black", face = "bold", size = 11),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
ncrtnc.bubble.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_NC_RTSFNC.bubble.plot.tiff",
ncrtnc.bubble.plot, device = "tiff",
width = 13.8, height =7.5,
units= "in", dpi = 600)
## 3. bacterial taxa found in the positive control
# make phyloseq object
# otu table of positive control and RTSF_Zymo mock
dim(PC)
colnames(PC)
PC <- rownames_to_column(PC, var = "OTUID")
dim(zymo.fil)
colnames(zymo.fil)
zymo.fil <- rownames_to_column(zymo.fil, var = "OTUID")
colnames(zymo.fil)[2] <- "RTSF_ZymoMockDNAr2"
colnames(zymo.fil)
#zymo.fil <- rownames_to_column(zymo.fil, var = "OTUID")
PC.zymo <- merge(PC, zymo.fil)
PC.zymo <- column_to_rownames(PC.zymo, var = "OTUID")
sort(rowSums(PC.zymo, na.rm = FALSE, dims = 1), decreasing = F)
PC.zymo1 <- PC.zymo[which(rowSums(PC.zymo) > 0),]
sort(rowSums(PC.zymo1, na.rm = FALSE, dims = 1), decreasing = F)
colnames(PC.zymo1)
# taxonomy positive control
head(PC.zymo1)
PC.zymo1 <- rownames_to_column(PC.zymo1, var = "OTUID")
head(tax.ed)
tax.ed <- rownames_to_column(tax.ed, var = "OTUID")
PC.zymo1.tax <- merge(PC.zymo1, tax.ed, by="OTUID")
colnames(PC.zymo1.tax)
tax.PC.zymo <- PC.zymo1.tax[,c(1,10:18)]
head(tax.PC.zymo)
# make phyloseq otu table and taxonomy
PC.zymo1 <- column_to_rownames(PC.zymo1, var = "OTUID")
PC.zymo.phyl = otu_table(PC.zymo1, taxa_are_rows = TRUE)
tax.PC.zymo <- column_to_rownames(tax.PC.zymo, var = "OTUID")
tax.PC.zymo.phyl = tax_table(as.matrix(tax.PC.zymo))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
colnames(map)
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
PC.zymo.phyl.obj <- merge_phyloseq(PC.zymo.phyl,tax.PC.zymo.phyl,map.phyl)
PC.zymo.phyl.obj #121 taxa
# 1. genus - Bacteria
PC.zymo.gen <- tax_glom(PC.zymo.phyl.obj, taxrank = "Genus.ed", NArm = F)
PC.zymo.gen.ra <- transform_sample_counts(PC.zymo.gen, function(x) x/sum(x))
PC.zymo.gen.ra #61 taxa
df.PC.zymo.gen <- psmelt(PC.zymo.gen.ra) %>%
group_by(Sample,Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.PC.zymo.gen$Genus.ed <- as.character(df.PC.zymo.gen$Genus.ed)
df.PC.zymo.gen$percent.mean <- df.PC.zymo.gen$Mean*100
PC.zymo.bubble.plot <- ggplot(data=df.PC.zymo.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", y="Taxa")+
theme(legend.key=element_blank(),
axis.title.y = element_markdown(size=15,face="bold"),
axis.title.x = element_blank(),
axis.text.x = element_text(colour = "black", size = 12, face = "bold", vjust = 0.95, angle=45, hjust = 1),
axis.text.y = element_text(colour = "black", face = "bold", size = 11),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
PC.zymo.bubble.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_PC.zymo.bubble.plot.tiff",
PC.zymo.bubble.plot, device = "tiff",
width = 12.5, height =7,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### bacterial taxa composition of all samples (before plant contaminant removal and before microbial decontamination and normalization)
# make phyloseq object
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
wd <- print(getwd())
# unfiltered otu table
otu.unfil
colnames(otu.unfil)
head(otu.unfil)
colnames(otu.unfil)[80] <- "RTSF_ZymoMockDNAr2"
otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID")
sort(rowSums(otu.unfil, na.rm = FALSE, dims = 1), decreasing = F)
# make phyloseq otu table and taxonomy
otu.unfil.phyl = otu_table(otu.unfil, taxa_are_rows = TRUE)
head(tax.unfil.ed)
tax.unfil.ed <- column_to_rownames(tax.unfil.ed, var = "OTUID")
tax.unfil.phyl = tax_table(as.matrix(tax.unfil.ed))
# make phyloseq map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
phyl.unfil.obj <- merge_phyloseq(otu.unfil.phyl,tax.unfil.phyl,map.phyl)
phyl.unfil.obj
otu_table(phyl.unfil.obj)
# merge taxa by class
# 1. class - Bacteria
bac.unfil.cl <- tax_glom(phyl.unfil.obj, taxrank = "Class", NArm = F)
bac.unfil.cl.ra <- transform_sample_counts(bac.unfil.cl, function(x) x/sum(x))
bac.unfil.cl.ra #23 taxa
otu_table(bac.unfil.cl.ra)
df.unfil.cl <- psmelt(bac.unfil.cl.ra) %>%
group_by(sample_type, Class) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.unfil.cl$Class <- as.character(df.unfil.cl$Class)
#df.cl$Class[df.cl$Mean < 0.1] <- "Other"
# Create the plot
#install.packages("pals")
library(pals)
unfil.cl <- ggplot(data=df.unfil.cl, aes(x=sample_type, y=Mean, fill=Class))
plot.unfil.cl <- unfil.cl +
geom_bar(aes(), stat="identity", position="fill") +
scale_fill_manual(values=as.vector(stepped(n=24))) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance", x="Sample Type")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text=element_text(size=14),
axis.title = element_markdown(size=15,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=1,bycol=TRUE))
plot.unfil.cl
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_class.unfiltered.eps",
plot.unfil.cl, device = "eps",
width = 9.5, height =6.5,
units= "in", dpi = 600)
# merge taxa by genus
# 2. genus - Bacteria
bac.unfil.gen <- tax_glom(phyl.unfil.obj, taxrank = "Genus.ed", NArm = F)
bac.unfil.gen.ra <- transform_sample_counts(bac.unfil.gen, function(x) x/sum(x))
bac.unfil.gen.ra #209 taxa
df.unfil.gen <- psmelt(bac.unfil.gen.ra) %>%
group_by(batch, Sample, Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.unfil.gen$Genus.ed <- as.character(df.unfil.gen$Genus.ed)
df.unfil.gen$Genus.ed[df.unfil.gen$Mean < 0.001] <- "Other (less than 0.1%)"
library(randomcoloR)
set.seed(1)
n <- 50
palette <- distinctColorPalette(n)
col=palette
unfil.gen <- ggplot(data=df.unfil.gen, aes(x=Sample, y=Mean, fill=Genus.ed))
plot.unfil.gen <- unfil.gen +
geom_bar(aes(), stat="identity", position="fill") +
#scale_fill_manual(name="Genus", values=as.vector(stepped(n=24))) +
scale_fill_manual(name="Genus",values=col) +
theme(legend.position="right") +
guides(fill=guide_legend(nrow=5))+
labs(y= "Mean Relative Abundance", x="Sample")+
theme(plot.title = element_text(size = 20, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
axis.text.y=element_text(size=12),
axis.text.x = element_text(size=10, vjust = 0.5, hjust = 1, angle=90),
axis.title = element_markdown(size=15,face="bold"),
legend.text=element_text(size = 10),
legend.title = element_text(size=11, face = "bold"),
panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
guides(fill=guide_legend(ncol=2,bycol=TRUE))
plot.unfil.gen
plot.unfil.gen1 <- plot.unfil.gen +
facet_wrap(~ batch, scales="free_x", nrow = 2)+
theme(strip.background =element_rect(fill="grey"))+
theme(strip.text = element_text(colour = 'black', size = 14, face = 'bold'))
plot.unfil.gen1
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_barplot_genus_all_unfiltered.eps",
plot.unfil.gen1, device = "eps",
width = 15, height =8,
units= "in", dpi = 600)
## make a bubble plot for all samples
df.unfil.gen <- psmelt(bac.unfil.gen.ra) %>%
group_by(batch, Sample, Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.unfil.gen$Genus.ed <- as.character(df.unfil.gen$Genus.ed)
df.unfil.gen$Genus.ed[df.unfil.gen$Mean < 0.0001] <- "Other (less than 0.01%)"
df.unfil.gen$percent.mean <- df.unfil.gen$Mean*100
unfil.gen.bubble.plot <- ggplot(data=df.unfil.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", x ="Sample", y="Taxa")+
theme(legend.key=element_blank(),
axis.title = element_markdown(size=15,face="bold"),
axis.text.x = element_text(colour = "black", size = 8, vjust = 0.5, hjust = 1, angle=90),
axis.text.y = element_text(colour = "black", size = 10),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
unfil.gen.bubble.plot
unfil.gen.bubble.plot1 <- unfil.gen.bubble.plot +
facet_wrap(~ batch, scales="free_x", nrow = 1)+
theme(strip.background =element_rect(fill="grey"))+
theme(strip.text = element_text(colour = 'black', size = 14, face = 'bold'))
unfil.gen.bubble.plot1
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_unfil.gen.bubble.plot1.tiff",
unfil.gen.bubble.plot1, device = "tiff",
width = 23, height =10,
units= "in", dpi = 600)
#####################################################################################################################################
#####################################################################################################################################
## 2. bacterial taxa found in the negative control before plant contamination
# make phyloseq object
# otu table of negative control only
colnames(NC.unfiltered)
head(NC.unfiltered)
sort(rowSums(NC.unfiltered, na.rm = FALSE, dims = 1), decreasing = F)
NC.unfiltered1 <- NC.unfiltered[which(rowSums(NC.unfiltered) > 0),]
# taxonomy negative control
head(NC.unfiltered1)
NC.unfiltered1 <- rownames_to_column(NC.unfiltered1, var = "OTUID")
head(tax.unfil.ed)
tax.unfil.ed <- rownames_to_column(tax.unfil.ed, var = "OTUID")
colnames(tax.unfil.ed)
NC.unfiltered1.tax <- merge(NC.unfiltered1, tax.unfil.ed, by="OTUID")
colnames(NC.unfiltered1.tax)
tax.NC.unfiltered1 <- NC.unfiltered1.tax[,c(1,10:18)]
head(tax.NC.unfiltered1)
# make phyloseq otu table and taxonomy
NC.unfiltered1 <- column_to_rownames(NC.unfiltered1, var = "OTUID")
NC.unfiltered1.phyl = otu_table(NC.unfiltered1, taxa_are_rows = TRUE)
tax.NC.unfiltered1 <- column_to_rownames(tax.NC.unfiltered1, var = "OTUID")
tax.NC.unfiltered1.phyl = tax_table(as.matrix(tax.NC.unfiltered1))
# make phyloseq map
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
map <- read.csv("metadata_part.csv")
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
map.phyl <- sample_data(map)
# make phyloseq object
NC.unfiltered1.phyl.obj <- merge_phyloseq(NC.unfiltered1.phyl,tax.NC.unfiltered1.phyl,map.phyl)
NC.unfiltered1.phyl.obj
# 1. genus - Bacteria
NC.unfiltered1.gen <- tax_glom(NC.unfiltered1.phyl.obj, taxrank = "Genus.ed", NArm = F)
NC.unfiltered1.gen.ra <- transform_sample_counts(NC.unfiltered1.gen, function(x) x/sum(x))
NC.unfiltered1.gen.ra #52 taxa
df.NC.unfiltered1.gen <- psmelt(NC.unfiltered1.gen.ra) %>%
group_by(Sample,Genus.ed) %>%
summarize(Mean = mean(Abundance)) %>%
arrange(-Mean)
df.NC.unfiltered1.gen$Genus.ed <- as.character(df.NC.unfiltered1.gen$Genus.ed)
df.NC.unfiltered1.gen$percent.mean <- df.NC.unfiltered1.gen$Mean*100
NC.unfiltered1.bubble.plot <- ggplot(data=df.NC.unfiltered1.gen, aes(x=Sample, y=Genus.ed)) +
geom_point(aes(size=percent.mean), alpha = 0.75, shape = 21) +
scale_size_continuous(limits = c(0.0000000000000000000001, 100), range = c(1,10), breaks = c(0.1,1,10,50)) +
labs(size = "Mean Relative Abundance (%)", x ="Negative Controls", y="Taxa")+
theme(legend.key=element_blank(),
axis.title = element_markdown(size=15,face="bold"),
axis.text.x = element_text(colour = "black", size = 12, face = "bold", vjust = 0.95, hjust = 1, angle=45),
axis.text.y = element_text(colour = "black", face = "bold", size = 11),
legend.text = element_text(size = 10, face ="bold", colour ="black"),
legend.title = element_text(size = 12, face = "bold"),
panel.border = element_rect(colour = "black", fill = NA, size = 1.2),
legend.position = "right") +
scale_fill_manual(values = colours, guide = "none")
NC.unfiltered1.bubble.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_NC.unfiltered1.bubble.plot.tiff",
NC.unfiltered1.bubble.plot, device = "tiff",
width = 13.8, height =7.5,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### Shared taxa among all total samples (before plant contaminants removal)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
## 1.calculate the occupancy of each OTUID across all samples
# unfiltered otu
# unfiltered otu table
otu.unfil
colnames(otu.unfil)
head(otu.unfil)
otu.unfil <- column_to_rownames(otu.unfil, var = "OTUID")
sort(rowSums(otu.unfil, na.rm = FALSE, dims = 1), decreasing = F)
# unfiltered taxonomy
head(tax.unfil.ed)
#tax.unfil.ed <- column_to_rownames(tax.unfil.ed, var = "OTUID")
tax.unfil.ed <- rownames_to_column(tax.unfil.ed, var = "OTUID")
# read map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
##build a long data frame joining unfiltered otu table, map, and taxonomy
longdf.unfil <- data.frame(OTUID=as.factor(rownames(otu.unfil)), otu.unfil, check.names = F) %>%
gather(sample_id, abun, -OTUID) %>% #keep same column naming as in the mapping file, calling counts "abun" (abundance)
left_join(map) %>% #will add the info from the mapping file (grouped by the 'sample_id' column)
left_join(tax.unfil.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(OTUID, sample_id) %>%
summarise(n=sum(abun))
#df <- data.frame(OTUID=as.factor(rownames(otu.unfil)), otu.unfil, check.names = F)
#colnames(df)
#ldf <- gather(df,sample_id, abun, -OTUID)
##build the new table: OTUID as rownames and sample_id as colnames
widedf.unfil <- as.data.frame(spread(longdf.unfil, OTUID, n, fill=0))
rownames(widedf.unfil) <- widedf.unfil[,1]
widedf.unfil <- widedf.unfil[,-1]
widedf.unfil <- t(widedf.unfil)
## calculate the occupancy of each OTUID across all samples
widedf.unfil.PA <- 1*((widedf.unfil>0)==1)
Occ.unfil <- rowSums(widedf.unfil.PA)/ncol(widedf.unfil.PA)
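# Toy example (illustrative only) of how the occupancy values above are computed:
# an OTU detected in every sample has occupancy 1, one detected in half of them has 0.5.
toy <- matrix(c(5, 3, 2, 9, # OTU_a: detected in all 4 samples -> occupancy 1.00
                0, 0, 7, 1), # OTU_b: detected in 2 of 4 samples -> occupancy 0.50
              nrow = 2, byrow = TRUE,
              dimnames = list(c("OTU_a", "OTU_b"), paste0("S", 1:4)))
rowSums(1 * (toy > 0)) / ncol(toy)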
df.Occ.unfil <- as.data.frame(Occ.unfil)
df.Occ.unfil <- rownames_to_column(df.Occ.unfil, var = "OTUID")
df.Occ.unfil.tax <- merge(df.Occ.unfil, tax.unfil.ed, by="OTUID")
sort.df.Occ.unfil.tax <- df.Occ.unfil.tax[order(df.Occ.unfil.tax$Occ.unfil, decreasing = TRUE),]
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
#write.csv(sort.df.Occ.unfil.tax, file = "sort.df.Occ.unfil.tax_all.csv")
##calculate the mean relative abundance of each OTUID across all samples
widedf.unfil.RA <- decostand(widedf.unfil, method="total", MARGIN=2)
widedf.unfil.RA
relabund.unfil <- rowSums(widedf.unfil.RA)
df.relabund.unfil <- as.data.frame(relabund.unfil)
df.relabund.unfil$meanRelAbund <- df.relabund.unfil$relabund.unfil/ncol(widedf.unfil.RA)
df.relabund.unfil = rownames_to_column(df.relabund.unfil, var = "OTUID")
sum(df.relabund.unfil$meanRelAbund)
sort.relabund.unfil <- df.relabund.unfil[order(df.relabund.unfil$meanRelAbund, decreasing = TRUE),]
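# Quick check (illustrative): with decostand(..., method = "total", MARGIN = 2) every sample column
# sums to 1, which is why sum(df.relabund.unfil$meanRelAbund) above returns 1.
summary(colSums(widedf.unfil.RA))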
##merge OCC table and mean relative abundance table
df.Occ.ra.unfil <- merge(df.Occ.unfil, df.relabund.unfil, by.x =c("OTUID"), by.y = c("OTUID"))
df.Occ.ra.unfil.tax <- merge(df.Occ.ra.unfil, tax.unfil.ed, by="OTUID")
sort.df.Occ.ra.unfil.tax <- df.Occ.ra.unfil.tax[order(df.Occ.ra.unfil.tax$Occ.unfil, decreasing = TRUE),]
#select OTUID with occ more than and equal to 50 %
Occ50.unfil <- subset(sort.df.Occ.ra.unfil.tax , sort.df.Occ.ra.unfil.tax$Occ.unfil>= 0.5)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
#write.csv(Occ50.unfil, file = "Occ50.unfil.csv")
Occ50.unfil.ed <- read.csv("Occ50.unfil.ed.csv")
### Occupancy-mean relative abundance across all total samples before plant contaminants removal
Occ50.unfil.plot <- ggplot(Occ50.unfil.ed,aes(x=fct_reorder(OTUID.genus, Occ.unfil, .desc=T), y=Occ.unfil))+
geom_bar(aes(), stat="identity")+
#coord_flip()+
#scale_fill_manual(values = palette)+
labs(y= "Occupancy", x="OTU.ID")+
theme_bw()+
coord_flip()+
theme(plot.title = element_text(size=16, face="bold"),
axis.text=element_text(size=12, hjust = 0.5),
axis.title=element_text(size=14,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.position = "right",
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
Occ50.unfil.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_Occ50.unfil.eps",
Occ50.unfil.plot, device = "eps",
width = 9, height =6.5,
units= "in", dpi = 600)
#####################################################################################################################################
######################################################################################################################################
### Shared taxa among samples (after plant contaminants removal)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
## 1.calculate the occupancy of each OTUID across all samples
# plant filtered otu
otu
colnames(otu)
#otu <- column_to_rownames(otu, var = "OTUID")
sort(rowSums(otu, na.rm = FALSE, dims = 1), decreasing = F)
# filtered taxonomy
head(tax.ed)
#tax.ed <- column_to_rownames(tax.ed, var = "OTUID")
tax.ed <- rownames_to_column(tax.ed, var = "OTUID")
# read map
map <- read.csv("metadata_part.csv")
head(map)
map$sample_id <- as.factor(map$sample_id)
rownames(map) <- map$sample_id
##build a long data frame joining unfiltered otu table, map, and taxonomy
longdf.fil <- data.frame(OTUID=as.factor(rownames(otu)), otu, check.names = F) %>%
gather(sample_id, abun, -OTUID) %>% #keep same column naming as in mapping file, calling counts as "abun" (abundance)
left_join(map) %>% #will add the info from the mapping file (grouped by the 'sample_id' column)
left_join(tax.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(OTUID, sample_id) %>%
summarise(n=sum(abun))
##build the new table: OTUID as rownames and sample_id as colnames
widedf.fil <- as.data.frame(spread(longdf.fil, OTUID, n, fill=0))
rownames(widedf.fil) <- widedf.fil[,1]
widedf.fil <- widedf.fil[,-1]
widedf.fil <- t(widedf.fil)
## calculate the occupancy of each OTUID across all samples
widedf.fil.PA <- 1*((widedf.fil>0)==1)
Occ.fil <- rowSums(widedf.fil.PA)/ncol(widedf.fil.PA)
df.Occ.fil <- as.data.frame(Occ.fil)
df.Occ.fil <- rownames_to_column(df.Occ.fil, var = "OTUID")
df.Occ.fil.tax <- merge(df.Occ.fil, tax.ed, by="OTUID")
sort.df.Occ.fil.tax <- df.Occ.fil.tax[order(df.Occ.fil.tax$Occ.fil, decreasing = TRUE),]
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
write.csv(sort.df.Occ.fil.tax, file = "sort.df.Occ.fil.tax_all.csv")
#####################################################################################################################################
######################################################################################################################################
## 2.calculate the occupancy of each OTUID across all biological samples and all negative controls before plant contaminants removal
# subset otu only biological samples and negative controls
colnames(otu.unfil)
otu.bio.nc.unfil <- data.frame(otu.unfil[,c(1:64,72:78)], check.names = F)
colnames(otu.bio.nc.unfil)
##build a long data frame joining unfiltered otu table, map, and taxonomy
longdf.bio.nc.unfil <- data.frame(OTUID=as.factor(rownames(otu.bio.nc.unfil)), otu.bio.nc.unfil, check.names = F) %>%
gather(sample_id, abun, -OTUID) %>% #keep same column naming as in mapping file, calling counts as "abun" (abundance)
left_join(map) %>% #will add the info from the mapping file (grouped by the 'sample_id' column)
left_join(tax.unfil.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(OTUID, sample_id) %>%
summarise(n=sum(abun))
##build the new table: OTUID as rownames and sample_id as colnames
widedf.bio.nc.unfil <- as.data.frame(spread(longdf.bio.nc.unfil, OTUID, n, fill=0))
rownames(widedf.bio.nc.unfil) <- widedf.bio.nc.unfil[,1]
widedf.bio.nc.unfil <- widedf.bio.nc.unfil[,-1]
widedf.bio.nc.unfil <- t(widedf.bio.nc.unfil)
colnames(widedf.bio.nc.unfil)
## calculate the occupancy of each OTUID across all biological samples and all negative controls
widedf.bio.nc.unfil.PA <- 1*((widedf.bio.nc.unfil>0)==1)
Occ.bio.nc.unfil <- rowSums(widedf.bio.nc.unfil.PA)/ncol(widedf.bio.nc.unfil.PA)
df.Occ.bio.nc.unfil <- as.data.frame(Occ.bio.nc.unfil)
df.Occ.bio.nc.unfil <- rownames_to_column(df.Occ.bio.nc.unfil, var = "OTUID")
df.Occ.bio.nc.unfil.tax <- merge(df.Occ.bio.nc.unfil, tax.unfil.ed, by="OTUID")
sort.df.Occ.bio.nc.unfil.tax <- df.Occ.bio.nc.unfil.tax[order(df.Occ.bio.nc.unfil.tax$Occ.bio.nc.unfil, decreasing = TRUE),]
View(sort.df.Occ.bio.nc.unfil.tax)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
write.csv(sort.df.Occ.bio.nc.unfil.tax, file = "sort.df.Occ.unfil.tax_BioNc.csv")
#####################################################################################################################################
######################################################################################################################################
## calculate the occupancy of each genus across all biological samples and all negative controls after plant contaminants removal
## what taxa are shared among experimental samples and the negative controls
# subset otu only biological samples and negative controls
colnames(otu)
otu.bio.nc.fil <- data.frame(otu[,c(1:64,72:78)], check.names = F)
colnames(otu.bio.nc.fil)
##build a long data frame joining filtered otu table, map, and taxonomy
longdf.bio.nc.fil2 <- data.frame(OTUID=as.factor(rownames(otu.bio.nc.fil)), otu.bio.nc.fil, check.names = F) %>%
gather(sample_id, abun, -OTUID) %>% #keep same column naming as in mapping file, calling counts as "abun" (abundance)
left_join(map) %>% #will add the info from the mapping file (grouped by the 'sample_id' column)
left_join(tax.ed) %>% #adding the taxonomy info (grouped by the 'OTUID' column)
group_by(Genus.ed,sample_id) %>%
summarise(n=sum(abun))
##build the new table: Genus as rownames and sample_id as colnames
widedf.bio.nc.fil2 <- as.data.frame(spread(longdf.bio.nc.fil2, Genus.ed, n, fill=0))
rownames(widedf.bio.nc.fil2) <- widedf.bio.nc.fil2[,1]
widedf.bio.nc.fil2 <- widedf.bio.nc.fil2[,-1]
widedf.bio.nc.fil2 <- t(widedf.bio.nc.fil2)
colnames(widedf.bio.nc.fil2)
## calculate the occupancy of each Genus across all biological samples and all negative controls
widedf.bio.nc.fil.PA2 <- 1*((widedf.bio.nc.fil2>0)==1)
Occ.bio.nc.fil2 <- rowSums(widedf.bio.nc.fil.PA2)/ncol(widedf.bio.nc.fil.PA2)
df.Occ.bio.nc.fil2 <- as.data.frame(Occ.bio.nc.fil2)
df.Occ.bio.nc.fil2 <- rownames_to_column(df.Occ.bio.nc.fil2, var = "Genus")
sort.df.Occ.bio.nc.fil2 <- df.Occ.bio.nc.fil2[order(df.Occ.bio.nc.fil2$Occ.bio.nc.fil2, decreasing = TRUE),]
##calculate the mean relative abundance of each Genus across experimental samples and the negative controls
widedf.bio.nc.fil2.RA <- decostand(widedf.bio.nc.fil2, method="total", MARGIN=2)
widedf.bio.nc.fil2.RA
relabund <- rowSums(widedf.bio.nc.fil2.RA)
df.relabund <- as.data.frame(relabund)
df.relabund$meanRelAbund <- df.relabund$relabund/ncol(widedf.bio.nc.fil2.RA)
df.relabund = rownames_to_column(df.relabund, var = "Genus")
sum(df.relabund$meanRelAbund)
sort.relabund <- df.relabund[order(df.relabund$meanRelAbund, decreasing = TRUE),]
##merge OCC table and mean relative abundance table
df.Occ.ra <- merge(df.Occ.bio.nc.fil2, df.relabund, by.x =c("Genus"), by.y = c("Genus"))
sort.df.Occ.ra <- df.Occ.ra[order(df.Occ.ra$Occ.bio.nc.fil2, decreasing = TRUE),]
#select Genus with occupancy greater than or equal to 2%
Occ0.02 <- subset(sort.df.Occ.ra, sort.df.Occ.ra$Occ.bio.nc.fil2 >= 0.02)
#Occ1.pf
##sort the mean relative abundance
#sort_Occ1.pf <- Occ1.pf[order(Occ1.pf$meanRelAbund, decreasing = TRUE),]
### Occupancy-mean relative abundance of each genus across all biological samples and negative controls after plant contaminants removal
Occ.bio.nc.fil.plot <- ggplot(Occ0.02,aes(x=fct_reorder(Genus, Occ.bio.nc.fil2, .desc=T), y=Occ.bio.nc.fil2))+
geom_bar(aes(), stat="identity")+
#coord_flip()+
#scale_fill_manual(values = palette)+
labs(y= "Occupancy", x="Genus")+
theme_bw()+
coord_flip()+
theme(plot.title = element_text(size=16, face="bold"),
axis.text.x=element_text(size=10,vjust = 0.5, hjust = 1),
axis.title=element_text(size=12,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.position = "right",
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
Occ.bio.nc.fil.plot
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("20210604_expe_nc_0.02.eps",
Occ.bio.nc.fil.plot, device = "eps",
width = 5.5, height =6,
units= "in", dpi = 600)
##################################################################################################################
# Subset OTUs that are present only in the negative controls (not present in the biological samples)
colnames(widedf.bio.nc.unfil.PA)
unique.nc.unfil <- as.data.frame(subset(widedf.bio.nc.unfil.PA, rowSums(widedf.bio.nc.unfil.PA[,1:64]) == 0))
colnames(unique.nc.unfil)
unique.nc.unfil2 <- as.data.frame(subset(unique.nc.unfil, rowSums(unique.nc.unfil[,65:71]) > 0))
unique.nc.unfil2 <- rownames_to_column(unique.nc.unfil2, var = "OTUID")
dim(unique.nc.unfil2) # 22 OTU present only in the negative control
unique.nc.unfil.tax <- merge(unique.nc.unfil2, tax.unfil.ed, by="OTUID")
dim(unique.nc.unfil.tax)
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
write.csv(unique.nc.unfil.tax, file = "unique.nc.unfil.tax.csv")
##### chloroplast sequences distribution ######
# 20210604_16SV4_OTU97
# load unfiltered otu and tax table
otu.tax.unfiltered
colnames(otu.tax.unfiltered)
# select chloroplast OTUs (Order == "Chloroplast")
otu.tax.chlo <- otu.tax.unfiltered %>%
filter(Order == "Chloroplast")
dim(otu.tax.chlo)
head(otu.tax.chlo)
tail(otu.tax.chlo)
colnames(otu.tax.chlo)
# otu table chloroplast
otu.chlo <- otu.tax.chlo[1:81]
head(otu.chlo)
dim(otu.chlo)
# taxonomy table chloroplast
tax.chlo <- otu.tax.chlo[,c(1,85:90)]
head(tax.chlo)
# occupancy
otu.chlo <- column_to_rownames(otu.chlo, var = "OTUID")
otu.chlo.PA <- 1*((otu.chlo>0)==1)
sum(otu.chlo.PA)
otu.chlo.PA <- otu.chlo.PA[rowSums(otu.chlo.PA)>0,]
occ.chlo <- rowSums(otu.chlo.PA)/ncol(otu.chlo.PA)
df.occ.chlo <- as.data.frame(occ.chlo)
df.occ.chlo <- rownames_to_column(df.occ.chlo, var = "OTUID")
dim(df.occ.chlo)
# rel. abund.
otu.rel.chlo <- decostand(otu.chlo, method="total", MARGIN=2)
com_abund.chlo <- rowSums(otu.rel.chlo)
df.com_abund.chlo <- as.data.frame(com_abund.chlo)
head(df.com_abund.chlo)
df.com_abund.chlo$relabund <- df.com_abund.chlo$com_abund.chlo/80 # 80 = number of sample columns in otu.chlo
sum(df.com_abund.chlo$com_abund.chlo)
sum(df.com_abund.chlo$relabund)
df.com_abund.chlo$percentrelabund=df.com_abund.chlo$relabund*100
sum(df.com_abund.chlo$percentrelabund)
df.com_abund.chlo <- rownames_to_column(df.com_abund.chlo, var = "OTUID")
head(df.com_abund.chlo)
dim(df.com_abund.chlo) ### all chloroplast OTUs with cumulative and percent relative abundance
# merge occupancy table and mean relative abundance table
df.occ.ra.chlo <- merge(df.occ.chlo, df.com_abund.chlo, by.x =c("OTUID"), by.y = c("OTUID"))
# merge the occupancy and relabund table with the taxonomy
df.occ.ra.chlo.tax <- merge(df.occ.ra.chlo, tax.chlo, by="OTUID")
# re-order
sort.occ.ra.chlo.tax <- df.occ.ra.chlo.tax[order(df.occ.ra.chlo.tax$relabund, decreasing = TRUE),]
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
#write.csv(sort.occ.ra.chlo.tax, file = "sort.occ.ra.chlo.tax.csv")
sort.occ.ra.chlo.tax.ed <- read.csv("sort.occ.ra.chlo.tax.ed.csv")
# plot ra
library(forcats)
library(dplyr)
plot.ra.chlo <- ggplot(sort.occ.ra.chlo.tax.ed,aes(x=fct_reorder(OTUID.ed, percentrelabund, .desc=T), y=percentrelabund, fill=OTUID))+
geom_bar(aes(), stat="identity")+
coord_flip()+
scale_fill_manual(values=as.vector(stepped(n=24))) +
labs(y= "Relative Abundance (%)", x="OTU ID")+
theme_bw()+
scale_y_continuous(expand = expansion(mult = c(0.01, .1)))+
theme(axis.text=element_text(size=12),
axis.title.y = element_blank(),
axis.title.x=element_text(size=14,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
plot.ra.chlo
# plot occ
plot.occ.chlo <- ggplot(sort.occ.ra.chlo.tax.ed,aes(x=fct_reorder(OTUID.ed, occ.chlo, .desc=T), y=occ.chlo, fill=OTUID))+
geom_bar(aes(), stat="identity")+
#coord_flip()+
scale_fill_manual(values=as.vector(stepped(n=24))) +
labs(y= "Occupancy", x="OTU ID")+
theme_bw()+
scale_y_continuous(expand = expansion(mult = c(0.01, .1)))+
coord_flip()+
theme(axis.text=element_text(size=12, hjust = 0.5),
axis.title=element_text(size=14,face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#legend.position = "right",
legend.position = "none",
panel.background = element_blank(),
panel.grid = element_blank(),
panel.border = element_blank(),
axis.line.x = element_line(colour = "black"),
axis.line.y = element_line(colour = "black"),
plot.margin = unit(c(0.2,0.2,0.2,0.2), "lines"))
plot.occ.chlo
library(patchwork)
plot.occ.ra.chlo <- plot.occ.chlo | plot.ra.chlo
plot.occ.ra.chlo
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
ggsave("plot.occ.ra.chlo.png",
plot.occ.ra.chlo, device = "png",
width = 13, height =7,
units= "in", dpi = 600)
##################################################################################################################
## Make plots of the DNA concentration
setwd('/Users/arifinabintarti/Documents/PAPER/PAPER_Bintarti_2021_Bean_Rainoutshelter/16SV4_OTU97/20210604_16SV4')
dna.con = read.csv("dnaconc.csv", header=T)
library(viridis)
library(grid)
library(gridExtra) # provides grid.arrange(), used below
dna.con$SampleID <- as.factor(dna.con$SampleID)
dna.con$batch <- as.factor(dna.con$batch)
#create a list of DNA concentration plots (one per batch)
dna.conc.plot <- lapply(split(dna.con,dna.con$batch), function(x){
#relevel factor partei by wert inside this subset
x$SampleID <- factor(x$SampleID, levels=x$SampleID[order(x$DNA_conc_ng_per_ul,decreasing=F)])
#make the plot
p <- ggplot(x, aes(x = SampleID, y = DNA_conc_ng_per_ul, fill = batch, width=0.75)) +
geom_bar(stat = "identity") +
scale_fill_discrete(drop=F)+ #to force all levels to be considered, and thus different colors
theme(panel.grid = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA,size = 0.2))+
theme(legend.position="none")+
labs(y="DNA concentration (ng/ul)", x="", title=unique(x$batch))+
coord_flip()
})
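# The list is indexed by batch; e.g. dna.conc.plot[[1]] displays the first batch's
# plot on its own (optional interactive check).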
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210604_16SV4')
setEPS()
postscript("dna_conct.eps", height = 7, width = 8)
do.call(grid.arrange,(c(dna.conc.plot, ncol=3)))
dev.off()
graphics.off()
setwd('/Users/arifinabintarti/Documents/Research/Seeds_microbiome/Rainoutshelter/16SV4_OTU97/20210601_16SV4')
ggsave("20210601_barplot_genus.unfiltered.eps",
plot.unfil.gen, device = "eps",
width = 12, height =7.5,
units= "in", dpi = 600)
|
# Analysis
# Load libraries -----
library(openair)
library(ggplot2)
library(reshape2)
library(readr)
# Load the data -------------
filepath <- '~/data/ODIN_SD/2017-traffic-AK'
load(paste0(filepath,'/odin_traffic_data.RData'))
# Merge the datasets
raw.odin.data <- merge(ODIN.100,ODIN.101,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.102,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.103,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.106,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.107,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.108,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.109,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.110,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.114,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.115,by='date',all=TRUE)
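# Equivalent, more compact way to build the same merged table using Reduce();
# kept under a different name (raw.odin.data.alt) so the explicit merges above
# remain the canonical version. Sketch for illustration only.
odin.list <- list(ODIN.100, ODIN.101, ODIN.102, ODIN.103, ODIN.106, ODIN.107,
                  ODIN.108, ODIN.109, ODIN.110, ODIN.114, ODIN.115)
raw.odin.data.alt <- Reduce(function(x, y) merge(x, y, by = 'date', all = TRUE), odin.list)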
# Homogenise the time bases
all.odin.data <- timeAverage(raw.odin.data,avg.time = '1 min')
# Separate the data into chunks per metric
pm10.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"PM10.")])]
write_csv(pm10.odin,paste0(filepath,'/pm10.csv'))
pm2.5.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"PM2.5")])]
write_csv(pm2.5.odin,paste0(filepath,'/pm2.5.csv'))
Temperature.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"Tempera")])]
write_csv(Temperature.odin,paste0(filepath,'/Temperature.csv'))
RH.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"RH")])]
write_csv(RH.odin,paste0(filepath,'/RH.csv'))
odin.data.1hr <- timeAverage(all.odin.data,avg.time = '1 hour')
long.odin.data <- melt(all.odin.data,id.vars = 'date')
long.odin.data.1hr <- melt(odin.data.1hr,id.vars = 'date')
# 1 minute data ----
# PM1 data
long.pm1 <- long.odin.data[startsWith(as.character(long.odin.data$variable),"PM1."),]
# PM2.5 data
long.pm2.5 <- long.odin.data[startsWith(as.character(long.odin.data$variable),"PM2.5"),]
# PM10 data
long.pm10 <- long.odin.data[startsWith(as.character(long.odin.data$variable),"PM10"),]
long.pm1$log_value <- log(long.pm1$value)
long.pm2.5$log_value <- log(long.pm2.5$value)
long.pm10$log_value <- log(long.pm10$value)
ggplot(long.pm10, aes(x=variable, value)) +
geom_boxplot(position=position_dodge(1)) +
ylab("Daily PM10") +
xlab("")
ggplot(long.pm10, aes(x=date,y=value,colour=variable)) +
geom_line()
# 1 hour data ----
# PM1 data
long.pm1.1hr <- long.odin.data.1hr[startsWith(as.character(long.odin.data.1hr$variable),"PM1."),]
# PM2.5 data
long.pm2.5.1hr <- long.odin.data.1hr[startsWith(as.character(long.odin.data.1hr$variable),"PM2.5"),]
# PM10 data
long.pm10.1hr <- long.odin.data.1hr[startsWith(as.character(long.odin.data.1hr$variable),"PM10"),]
long.pm1.1hr$log_value <- log(long.pm1.1hr$value)
long.pm2.5.1hr$log_value <- log(long.pm2.5.1hr$value)
long.pm10.1hr$log_value <- log(long.pm10.1hr$value)
ggplot(long.pm2.5.1hr, aes(x=variable, value)) +
geom_boxplot(position=position_dodge(1))
ggplot(long.pm10.1hr, aes(x=date,y=value,colour=variable)) +
geom_line()
# Time Variation plots ----------------
timeVariation(all.odin.data,pollutant = c('PM2.5.101','PM2.5.100','PM2.5.110','PM2.5.103','PM2.5.108'))
timeVariation(all.odin.data,pollutant = c('PM2.5.101','PM2.5.110','PM2.5.114'))
|
/analysis.R
|
permissive
|
guolivar/poet-auckland
|
R
| false | false | 3,394 |
r
|
# Analysis
# Load libraries -----
library(openair)
library(ggplot2)
library(reshape2)
library(readr)
# Load the data -------------
filepath <- '~/data/ODIN_SD/2017-traffic-AK'
load(paste0(filepath,'/odin_traffic_data.RData'))
# Merge the datasets
raw.odin.data <- merge(ODIN.100,ODIN.101,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.102,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.103,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.106,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.107,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.108,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.109,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.110,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.114,by='date',all=TRUE)
raw.odin.data <- merge(raw.odin.data,ODIN.115,by='date',all=TRUE)
# Homogenise the time bases
all.odin.data <- timeAverage(raw.odin.data,avg.time = '1 min')
# Separate the data into chunks per metric
pm10.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"PM10.")])]
write_csv(pm10.odin,paste0(filepath,'/pm10.csv'))
pm2.5.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"PM2.5")])]
write_csv(pm2.5.odin,paste0(filepath,'/pm2.5.csv'))
Temperature.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"Tempera")])]
write_csv(Temperature.odin,paste0(filepath,'/Temperature.csv'))
RH.odin <- all.odin.data[,c('date',names(all.odin.data)[startsWith(names(all.odin.data),"RH")])]
write_csv(RH.odin,paste0(filepath,'/RH.csv'))
odin.data.1hr <- timeAverage(all.odin.data,avg.time = '1 hour')
long.odin.data <- melt(all.odin.data,id.vars = 'date')
long.odin.data.1hr <- melt(odin.data.1hr,id.vars = 'date')
# 1 minute data ----
# PM1 data
long.pm1 <- long.odin.data[startsWith(as.character(long.odin.data$variable),"PM1."),]
# PM2.5 data
long.pm2.5 <- long.odin.data[startsWith(as.character(long.odin.data$variable),"PM2.5"),]
# PM10 data
long.pm10 <- long.odin.data[startsWith(as.character(long.odin.data$variable),"PM10"),]
long.pm1$log_value <- log(long.pm1$value)
long.pm2.5$log_value <- log(long.pm2.5$value)
long.pm10$log_value <- log(long.pm10$value)
ggplot(long.pm10, aes(x=variable, value)) +
geom_boxplot(position=position_dodge(1)) +
ylab("Daily PM10") +
xlab("")
ggplot(long.pm10, aes(x=date,y=value,colour=variable)) +
geom_line()
# 1 hour data ----
# PM1 data
long.pm1.1hr <- long.odin.data.1hr[startsWith(as.character(long.odin.data.1hr$variable),"PM1."),]
# PM2.5 data
long.pm2.5.1hr <- long.odin.data.1hr[startsWith(as.character(long.odin.data.1hr$variable),"PM2.5"),]
# PM10 data
long.pm10.1hr <- long.odin.data.1hr[startsWith(as.character(long.odin.data.1hr$variable),"PM10"),]
long.pm1.1hr$log_value <- log(long.pm1.1hr$value)
long.pm2.5.1hr$log_value <- log(long.pm2.5.1hr$value)
long.pm10.1hr$log_value <- log(long.pm10.1hr$value)
ggplot(long.pm2.5.1hr, aes(x=variable, value)) +
geom_boxplot(position=position_dodge(1))
ggplot(long.pm10.1hr, aes(x=date,y=value,colour=variable)) +
geom_line()
# Time Variation plots ----------------
timeVariation(all.odin.data,pollutant = c('PM2.5.101','PM2.5.100','PM2.5.110','PM2.5.103','PM2.5.108'))
timeVariation(all.odin.data,pollutant = c('PM2.5.101','PM2.5.110','PM2.5.114'))
|
#' Check the new time series
#'
#' \code{check_time_series} examines the first value in the Time column
#' for each event. If they are equal, it will return a single value. The returned
#' value should be equal to 0 minus the offset.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{create_time_series}}.
#' @return The value(s) of Time (in milliseconds) at which events begin relative
#' to the onset of the auditory stimulus.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Check the starting Time column...
#' check_time_series(dat)
#' }
check_time_series = function(data = data) {
event_start_table = data %>%
group_by(Event) %>%
summarise(ftime = min(Time))
print(unique(event_start_table$ftime))
}
#' Check the number of samples in each bin
#'
#' \code{check_samples_per_bin} determines the number of samples in each
#' bin produced by \code{\link{bin_prop}}.
#' This function is helpful for determining the obligatory parameter input to
#' \code{\link{transform_to_elogit}}.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{bin_prop}}.
#' @return A printed summary of the number of samples in each bin.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Determine the number of samples per bin...
#' check_samples_per_bin(dat)
#' }
check_samples_per_bin <- function (data = data) {
samples <- max(data$IA_0_C)
rate <- abs(data$Time[2] - data$Time[1])
print(paste("There are", samples, "samples in each bin."))
print(paste("One data point every", rate, "millisecond(s)"))
}
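# Worked example of the relationship reported above (assumed numbers): data sampled
# at 250 Hz has one data point every 4 ms, so 20 ms bins hold 5 samples per bin.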
#' Determine the sampling rate present in the data
#'
#' \code{check_samplingrate} determines the sampling rate in the data.
#' This function is helpful for determining the obligatory parameter input to
#' \code{\link{bin_prop}}. If different sampling rates were used, the
#' function adds a sampling rate column, which can be used to subset the
#' data for further processing.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{select_recorded_eye}}.
#' @param ReturnData A logical indicating whether to return a data table containing
#' a new column called SamplingRate
#' @return A printed summary and/or a data table object
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Determine the sampling rate...
#' check_samplingrate(dat)
#' }
check_samplingrate <- function(data = data, ReturnData = FALSE) {
ReturnData <- ReturnData
tmp <- data %>%
group_by(Event) %>%
mutate(., SamplingRate = 1000 / (Time[2] - Time[1]))
print(paste("Sampling rate(s) present in the data are:", unique(tmp$SamplingRate), "Hz."))
if (length(unique(tmp$SamplingRate)) > 1) {
warning("There are multiple sampling rates present in the data. Please use the ReturnData parameter to include a sampling rate column in the dataset. This can be used to subset by sampling rate before proceeding with the remaining preprocessing operations.")
}
if (ReturnData == TRUE) {
return(tmp)
}
}
#' Determine downsampling options based on current sampling rate
#'
#' \code{ds_options} determines the possible rates to which
#' the current sampling rate can be downsampled. It then prints the
#' options in both bin size (milliseconds) and corresponding
#' sampling rate (Hertz).
#'
#' @export
#' @import dplyr
#' @import lazyeval
#'
#' @param SamplingRate A positive integer indicating the sampling rate (in Hertz)
#' used to record the gaze data, which can be determined with the function
#' \code{\link{check_samplingrate}}.
#' @return A printed summary of options (bin size and rate) for downsampling.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Determine downsampling options...
#' ds_options(SamplingRate = 1000)
#' }
ds_options <- function(SamplingRate=SamplingRate) {
SamplingRate = SamplingRate
for (x in 1:100) {
if (x %% (1000/SamplingRate) == 0) {
if ((1000/x) %% 1 == 0) {
print(paste("Bin size:", x, "ms;", "Downsampled rate:", 1000/x, "Hz"))
}
}
}
}
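# Worked example (assumed input): for 250 Hz data, ds_options(250) lists bin sizes of
# 4, 8, 20, 40 and 100 ms, i.e. downsampled rates of 250, 125, 50, 25 and 10 Hz.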
#' Check which eyes were recorded during the experiment
#'
#' \code{check_eye_recording} quickly checks if the dataset contains gaze data
#' in both the Right and Left interest area columns. It prints a summary and
#' suggests which setting to use for the \code{Recording} parameter in the
#' function \code{\link{select_recorded_eye}}.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{create_time_series}}.
#' @return Text feedback and instruction.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Create a unified columns for the gaze data...
#' check_eye_recording(dat)
#' }
check_eye_recording <- function(data = data) {
if (sum(data$LEFT_INTEREST_AREA_ID) > 0 & sum(data$RIGHT_INTEREST_AREA_ID) > 0) {
print("The dataset contains recordings for both eyes. If any participants had both eyes tracked, set the Recording parameter in select_recorded_eye() to 'LandR'. If participants had either the left OR the right eye tracked, set the Recording parameter in select_recorded_eye() to 'LorR'.")
} else if (sum(data$LEFT_INTEREST_AREA_ID) > 0 & sum(data$RIGHT_INTEREST_AREA_ID) == 0) {
print("The dataset contains recordings for ONLY the left eye. Set the Recording parameter in select_recorded_eye() to 'L'.")
} else if (sum(data$LEFT_INTEREST_AREA_ID) == 0 & sum(data$RIGHT_INTEREST_AREA_ID) > 0) {
print("The dataset contains recordings for ONLY the right eye. Set the Recording parameter in select_recorded_eye() to 'R'.")
}
}
#' Rename default column names for interest areas.
#'
#' \code{rename_columns} will replace the default numerical coding of the
#' interest area columns with more meaningful user-specified names. For example,
#' IA_1_C and IA_1_P could be converted to IA_Target_C and IA_Target_P. Again,
#' this will work for up to 8 interest areas.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by either \code{\link{bin_prop}},
#' \code{\link{transform_to_elogit}}, or \code{\link{create_binomial}}.
#' @param Labels A named character vector specifying the interest areas and the
#' desired names to be inserted in place of the numerical labelling.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # For renaming default interest area columns
#' dat2 <- rename_columns(dat, Labels = c(IA1="Target", IA2="Rhyme",
#' IA3="OnsetComp", IA4="Distractor"))
#' }
rename_columns <- function(data = data, Labels = Labels) {
Labels <- Labels
tmp <- data
if (length(names(Labels))>8) {
stop("You have more than 8 interest areas.")
} else {
print(paste("Renaming", length(names(Labels)), "interest areas.", sep = " "))
}
Labels <- c("0" = "outside", Labels)
NoIA <- length(names(Labels))
for (x in 1:NoIA) {
Labels[[x]] <- paste("_",Labels[[x]],"_", sep = "")
names(Labels)[x] <- paste("_",x-1,"_", sep = "")
tmp<-setNames(tmp, gsub(names(Labels)[x],Labels[[x]],names(tmp)))
}
return(tmp)
}
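# Illustration of the renaming step above on made-up column names (comment only,
# not executed by the package):
# gsub("_1_", "_Target_", c("IA_1_C", "IA_1_P")) # -> "IA_Target_C" "IA_Target_P"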
|
/VWPre/R/utilities.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 7,426 |
r
|
#' Check the new time series
#'
#' \code{check_time_series} examines the first value in the Time column
#' for each event. If they are equal, it will return a single value. The returned
#' value should be equal to 0 minus the offset.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{create_time_series}}.
#' @return The value(s) of Time (in milliseconds) at which events begin relative
#' to the onset of the auditory stimulus.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Check the starting Time column...
#' check_time_series(dat)
#' }
check_time_series = function(data = data) {
event_start_table = data %>%
group_by(Event) %>%
summarise(ftime = min(Time))
print(unique(event_start_table$ftime))
}
#' Check the number of samples in each bin
#'
#' \code{check_samples_per_bin} determines the number of samples in each
#' bin produced by \code{\link{bin_prop}}.
#' This function is helpful for determining the obligatory parameter input to
#' \code{\link{transform_to_elogit}}.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{bin_prop}}.
#' @return A printed summary of the number of samples in each bin.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Determine the number of samples per bin...
#' check_samples_per_bin(dat)
#' }
check_samples_per_bin <- function (data = data) {
samples <- max(data$IA_0_C)
rate <- abs(data$Time[2] - data$Time[1])
print(paste("There are", samples, "samples in each bin."))
print(paste("One data point every", rate, "millisecond(s)"))
}
#' Determine the sampling rate present in the data
#'
#' \code{check_samplingrate} determines the sampling rate in the data.
#' This function is helpful for determining the obligatory parameter input to
#' \code{\link{bin_prop}}. If different sampling rates were used, the
#' function adds a sampling rate column, which can be used to subset the
#' data for further processing.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{select_recorded_eye}}.
#' @param ReturnData A logical indicating whether to return a data table containing
#' a new column called SamplingRate
#' @return A printed summary and/or a data table object
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Determine the sampling rate...
#' check_samplingrate(dat)
#' }
check_samplingrate <- function(data = data, ReturnData = FALSE) {
ReturnData <- ReturnData
tmp <- data %>%
group_by(Event) %>%
mutate(., SamplingRate = 1000 / (Time[2] - Time[1]))
print(paste("Sampling rate(s) present in the data are:", unique(tmp$SamplingRate), "Hz."))
if (length(unique(tmp$SamplingRate)) > 1) {
warning("There are multiple sampling rates present in the data. Please use the ReturnData parameter to include a sampling rate column in the dataset. This can be used to subset by sampling rate before proceeding with the remaining preprocessing operations.")
}
if (ReturnData == TRUE) {
return(tmp)
}
}
#' Determine downsampling options based on current sampling rate
#'
#' \code{ds_options} determines the possible rates to which
#' the current sampling rate can be downsampled. It then prints the
#' options in both bin size (milliseconds) and corresponding
#' sampling rate (Hertz).
#'
#' @export
#' @import dplyr
#' @import lazyeval
#'
#' @param SamplingRate A positive integer indicating the sampling rate (in Hertz)
#' used to record the gaze data, which can be determined with the function
#' \code{\link{check_samplingrate}}.
#' @return A printed summary of options (bin size and rate) for downsampling.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Determine downsampling options...
#' ds_options(SamplingRate = 1000)
#' }
ds_options <- function(SamplingRate=SamplingRate) {
SamplingRate = SamplingRate
for (x in 1:100) {
if (x %% (1000/SamplingRate) == 0) {
if ((1000/x) %% 1 == 0) {
print(paste("Bin size:", x, "ms;", "Downsampled rate:", 1000/x, "Hz"))
}
}
}
}
#' Check which eyes were recorded during the experiment
#'
#' \code{check_eye_recording} quickly checks if the dataset contains gaze data
#' in both the Right and Left interest area columns. It prints a summary and
#' suggests which setting to use for the \code{Recording} parameter in the
#' function \code{\link{select_recorded_eye}}.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by \code{\link{create_time_series}}.
#' @return Text feedback and instruction.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # Create a unified columns for the gaze data...
#' check_eye_recording(dat)
#' }
check_eye_recording <- function(data = data) {
if (sum(data$LEFT_INTEREST_AREA_ID) > 0 & sum(data$RIGHT_INTEREST_AREA_ID) > 0) {
print("The dataset contains recordings for both eyes. If any participants had both eyes tracked, set the Recording parameter in select_recorded_eye() to 'LandR'. If participants had either the left OR the right eye tracked, set the Recording parameter in select_recorded_eye() to 'LorR'.")
} else if (sum(data$LEFT_INTEREST_AREA_ID) > 0 & sum(data$RIGHT_INTEREST_AREA_ID) == 0) {
print("The dataset contains recordings for ONLY the left eye. Set the Recording parameter in select_recorded_eye() to 'L'.")
} else if (sum(data$LEFT_INTEREST_AREA_ID) == 0 & sum(data$RIGHT_INTEREST_AREA_ID) > 0) {
print("The dataset contains recordings for ONLY the right eye. Set the Recording parameter in select_recorded_eye() to 'R'.")
}
}
#' Rename default column names for interest areas.
#'
#' \code{rename_columns} will replace the default numerical coding of the
#' interest area columns with more meaningful user-specified names. For example,
#' IA_1_C and IA_1_P could be converted to IA_Target_C and IA_Target_P. Again,
#' this will work for upto 8 interest areas.
#'
#' @export
#' @import dplyr
#' @import tidyr
#' @import lazyeval
#'
#' @param data A data table object output by either \code{\link{bin_prop}},
#' \code{\link{transform_to_elogit}}, or \code{\link{create_binomial}}.
#' @param Labels A named character vector specifying the interest areas and the
#' desired names to be inserted in place of the numerical labelling.
#' @examples
#' \dontrun{
#' library(VWPre)
#' # For renaming default interest area columns
#' dat2 <- rename_columns(dat, Labels = c(IA1="Target", IA2="Rhyme",
#' IA3="OnsetComp", IA4="Distractor"))
#' }
rename_columns <- function(data = data, Labels = Labels) {
Labels <- Labels
tmp <- data
if (length(names(Labels))>8) {
stop("You have more than 8 interest areas.")
} else {
print(paste("Renaming", length(names(Labels)), "interest areas.", sep = " "))
}
Labels <- c("0" = "outside", Labels)
NoIA <- length(names(Labels))
for (x in 1:NoIA) {
Labels[[x]] <- paste("_",Labels[[x]],"_", sep = "")
names(Labels)[x] <- paste("_",x-1,"_", sep = "")
tmp<-setNames(tmp, gsub(names(Labels)[x],Labels[[x]],names(tmp)))
}
return(tmp)
}
|
#!/usr/bin/env Rscript
source("functions.R")
context("Dual Regression")
# This script will compare the dual regression output
# from the complete CPAC run
# to the partial quick pack run
base.0 <- "/home2/data/Projects/ABIDE_Initiative/CPAC/test_qp/All_Output/pipeline_MerrittIsland/0051466_session_1"
base.1 <- "/home2/data/Projects/ABIDE_Initiative/CPAC/test_qp/DR_Output/pipeline_nofilt_global/0051466_session_1"
###
# DR Z Stack 2 Standard
###
# So first I want to know the full CPAC dual regression output
dr.0 <- file.path(base.0, "dr_tempreg_maps_z_stack_to_standard/_scan_rest_1_rest/_csf_threshold_0.96/_gm_threshold_0.7/_wm_threshold_0.96/_compcor_ncomponents_5_selector_pc10.linear1.wm0.global1.motion1.quadratic1.gm0.compcor1.csf0/_spatial_map_PNAS_Smith09_rsn10/temp_reg_map_z_wimt.nii.gz")
# Then I want to know the QP dual regression output
dr.1 <- file.path(base.1, "dr_tempreg_maps_z_stack_to_standard/_scan_rest_1_rest/_scan_rest_1_rest/_spatial_map_PNAS_Smith09_rsn10/_scan_rest_1_rest/_scan_rest_1_rest/_scan_rest_1_rest/temp_reg_map_z_wimt.nii.gz")
# Finally, I should read them in and compare them
compare_3D_brains("DR Z Stack 2 Standard", dr.0, dr.1)
|
/scripts/tests/quickpack/compare_50_dr.R
|
no_license
|
fitrialif/abide-1
|
R
| false | false | 1,140 |
r
|
#!/usr/bin/env Rscript
source("functions.R")
context("Dual Regression")
# This script will compare the dual regression output
# from the complete CPAC run
# to the partial quick pack run
base.0 <- "/home2/data/Projects/ABIDE_Initiative/CPAC/test_qp/All_Output/pipeline_MerrittIsland/0051466_session_1"
base.1 <- "/home2/data/Projects/ABIDE_Initiative/CPAC/test_qp/DR_Output/pipeline_nofilt_global/0051466_session_1"
###
# DR Z Stack 2 Standard
###
# So first I want to know the full CPAC dual regression output
dr.0 <- file.path(base.0, "dr_tempreg_maps_z_stack_to_standard/_scan_rest_1_rest/_csf_threshold_0.96/_gm_threshold_0.7/_wm_threshold_0.96/_compcor_ncomponents_5_selector_pc10.linear1.wm0.global1.motion1.quadratic1.gm0.compcor1.csf0/_spatial_map_PNAS_Smith09_rsn10/temp_reg_map_z_wimt.nii.gz")
# Then I want to know the QP dual regression output
dr.1 <- file.path(base.1, "dr_tempreg_maps_z_stack_to_standard/_scan_rest_1_rest/_scan_rest_1_rest/_spatial_map_PNAS_Smith09_rsn10/_scan_rest_1_rest/_scan_rest_1_rest/_scan_rest_1_rest/temp_reg_map_z_wimt.nii.gz")
# Finally, I should read them in and compare them
compare_3D_brains("DR Z Stack 2 Standard", dr.0, dr.1)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pred_funs.R
\name{LPDS}
\alias{LPDS}
\title{Calculate the Log Predictive Density Score for a fitted TVP model}
\usage{
LPDS(mod, data_test)
}
\arguments{
\item{mod}{an object of class \code{shrinkTVP}, containing the fitted model for which the LPDS should be calculated.}
\item{data_test}{a data frame with one row, containing the one-step ahead covariates and response. The names of the covariates
and the response have to match the names used during model estimation in the call to \code{shrinkTVP}.}
}
\value{
A real number equaling the calculated LPDS.
}
\description{
\code{LPDS} calculates the one-step ahead Log Predictive Density Score (LPDS) of a fitted TVP model resulting from a call to
\code{shrinkTVP}. For details on the approximation of the one-step ahead predictive density used, see the vignette.
}
\examples{
\donttest{
# Simulate data
set.seed(123)
sim <- simTVP(theta = c(0.2, 0, 0), beta_mean = c(1.5, -0.3, 0))
data <- sim$data
# Estimate model
res <- shrinkTVP(y ~ x1 + x2, data = data[1:199, ])
# Calculate LPDS
LPDS(res, data[200,])
}
}
\seealso{
Other prediction functions:
\code{\link{eval_pred_dens}()},
\code{\link{fitted.shrinkTVP}()},
\code{\link{forecast_shrinkTVP}()},
\code{\link{predict.shrinkTVP}()},
\code{\link{residuals.shrinkTVP}()}
}
\author{
Peter Knaus \email{peter.knaus@wu.ac.at}
}
\concept{prediction functions}
|
/man/LPDS.Rd
|
no_license
|
cran/shrinkTVP
|
R
| false | true | 1,440 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pred_funs.R
\name{LPDS}
\alias{LPDS}
\title{Calculate the Log Predictive Density Score for a fitted TVP model}
\usage{
LPDS(mod, data_test)
}
\arguments{
\item{mod}{an object of class \code{shrinkTVP}, containing the fitted model for which the LPDS should be calculated.}
\item{data_test}{a data frame with one row, containing the one-step ahead covariates and response. The names of the covariates
and the response have to match the names used during model estimation in the call to \code{shrinkTVP}.}
}
\value{
A real number equaling the calculated LPDS.
}
\description{
\code{LPDS} calculates the one-step ahead Log Predictive Density Score (LPDS) of a fitted TVP model resulting from a call to
\code{shrinkTVP}. For details on the approximation of the one-step ahead predictive density used, see the vignette.
}
\examples{
\donttest{
# Simulate data
set.seed(123)
sim <- simTVP(theta = c(0.2, 0, 0), beta_mean = c(1.5, -0.3, 0))
data <- sim$data
# Estimate model
res <- shrinkTVP(y ~ x1 + x2, data = data[1:199, ])
# Calculate LPDS
LPDS(res, data[200,])
}
}
\seealso{
Other prediction functions:
\code{\link{eval_pred_dens}()},
\code{\link{fitted.shrinkTVP}()},
\code{\link{forecast_shrinkTVP}()},
\code{\link{predict.shrinkTVP}()},
\code{\link{residuals.shrinkTVP}()}
}
\author{
Peter Knaus \email{peter.knaus@wu.ac.at}
}
\concept{prediction functions}
|
#' Ratio of maximum to minimum
#'
#' @export
#' @param x numeric vector.
#' @return \code{max(x) / min(x)}
#'
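#' @examples
#' # Minimal illustration:
#' max_over_min(c(2, 10)) # 5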
max_over_min <- function(x) {
stopifnot(is.numeric(x))
max(x) / min(x)
}
|
/R/max_over_min.R
|
no_license
|
jgabry/RHhelpers
|
R
| false | false | 187 |
r
|
#' Ratio of maximum to minimum
#'
#' @export
#' @param x numeric vector.
#' @return \code{max(x) / min(x)}
#'
max_over_min <- function(x) {
stopifnot(is.numeric(x))
max(x) / min(x)
}
|
library("stringr")
library ("RCurl")
library ("XML")
new_results <- '/government/announcements?keywords=&announcement_filter_option=press-releases&topics[]=all&departments[]=all&world_locations[]=all& from_date=&to_date=01%2F07%2F2018'
signatures = system.file("CurlSSL", cainfo = "cacert.pem",
package = "RCurl")
all_links <- character()
while(length(new_results) > 0){
new_results <- str_c("https://www.gov.uk/", new_results)
results <- getURL(new_results, cainfo = signatures)
results_tree <- htmlParse(results)
all_links <- c(all_links, xpathSApply(results_tree,
"//li[@id]//a", xmlGetAttr, "href"))
new_results <- xpathSApply(results_tree,
"//nav[@id='show-more-documents']//li[@class='next']//a",
xmlGetAttr, "href")
}
for(i in 1:length(all_links)){
url <- str_c("https://www.gov.uk", all_links[i])
tmp <- getURL(url, cainfo = signatures)
write(tmp, str_c("Press_Releases/", i, ".html"))
}
tmp <- readLines("Press_Releases/1.html")
tmp <- str_c(tmp, collapse = "")
tmp <- htmlParse(tmp)
release <- xpathSApply(tmp, "//div[@class='block-4']", xmlValue)
organisation <- xpathSApply(tmp, "//a[@class='organisation-link']", xmlValue)
publication <- xpathSApply(tmp, "//div[@class='block-5']//time[@class='date']", xmlValue)
library(tm)
release_corpus <- Corpus(VectorSource(release))
meta(release_corpus[[1]], "organisation") <- organisation[1]
meta(release_corpus[[1]], "publication") <- publication
meta(release_corpus[[1]])
n <- 1
for(i in 2:length(list.files("Press_Releases/"))){
tmp <- readLines(str_c("Press_Releases/", i, ".html"))
tmp <- str_c(tmp, collapse = "")
tmp <- htmlParse(tmp)
release <- xpathSApply(tmp,"//div[@class='block-4']", xmlValue)
organisation <- xpathSApply(tmp, "//a[@class='organisation-link']", xmlValue)
publication <- xpathSApply(tmp, "//div[@class='block-5']//time[@class='date']", xmlValue)
if (length(release)!=0 &
(organisation == 'Department for Business, Innovation & Skills' |
organisation == 'Ministry of Defence' |
organisation == 'Foreign & Commonwealth Office')) {
n <- n + 1
tmp_corpus <- Corpus(VectorSource(release))
release_corpus <- c(release_corpus, tmp_corpus)
meta(release_corpus[[n]], "organisation") <- organisation[1]
cat("n=",n)
}
}
meta_data <- data.frame()
for (i in 1:NROW(release_corpus))
{
meta_data [i, "organisation"] <- meta(release_corpus[[i]], "organisation")
meta_data [i, "num"] <- i
}
table(as.character(meta_data[, "organisation"]))
release_corpus <- tm_map(release_corpus, content_transformer(removeNumbers))
release_corpus <- tm_map(release_corpus,
content_transformer(str_replace_all),
pattern = "[[:punct:]]", replacement = " ")
release_corpus[[1]]$content
release_corpus <- tm_map(release_corpus, content_transformer(removeWords), words = stopwords("en"))
release_corpus <- tm_map(release_corpus, content_transformer(tolower))
release_corpus <- tm_map(release_corpus, stemDocument, language = "english")
tdm <- TermDocumentMatrix(release_corpus)
dtm <- DocumentTermMatrix(release_corpus)
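# The sparsity threshold 1 - (10/N) below keeps only terms that occur in at least
# roughly 10 of the N press releases; rarer terms are dropped from the DTM.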
dtm <- removeSparseTerms(dtm, 1-(10/length(release_corpus)))
library(RTextTools)
org_labels<-meta_data[, "organisation"]
N <- length(org_labels)
container <- create_container(
dtm,
labels = org_labels,
trainSize = 1:350,
testSize = 351:N,
virgin = FALSE
)
svm_model <- train_model(container, "SVM")
tree_model <- train_model(container, "TREE")
maxent_model <- train_model(container, "MAXENT")
svm_out <- classify_model(container, svm_model)
tree_out <- classify_model(container, tree_model)
maxent_out <- classify_model(container, maxent_model)
labels_out <- data.frame(
correct_label = org_labels[351:N],
svm = as.character(svm_out[,1]),
tree = as.character(tree_out[,1]),
maxent = as.character(maxent_out[,1]),
stringsAsFactors = F)
table(labels_out[,1] == labels_out[,2])
table(labels_out[,1] == labels_out[,3])
table(labels_out[,1] == labels_out[,4])
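# Optional: the same comparisons expressed as overall accuracy (proportion of
# matching labels) per classifier, using only the objects defined above.
sapply(c("svm", "tree", "maxent"),
       function(m) mean(labels_out$correct_label == labels_out[[m]]))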
|
/lab5/text_mining.R
|
no_license
|
max-kalganov/internet_data_analysis
|
R
| false | false | 4,135 |
r
|
library("stringr")
library ("RCurl")
library ("XML")
new_results <- '/government/announcements?keywords=&announcement_filter_option=press-releases&topics[]=all&departments[]=all&world_locations[]=all& from_date=&to_date=01%2F07%2F2018'
signatures = system.file("CurlSSL", cainfo = "cacert.pem",
package = "RCurl")
all_links <- character()
while(length(new_results) > 0){
new_results <- str_c("https://www.gov.uk/", new_results)
results <- getURL(new_results, cainfo = signatures)
results_tree <- htmlParse(results)
all_links <- c(all_links, xpathSApply(results_tree,
"//li[@id]//a", xmlGetAttr, "href"))
new_results <- xpathSApply(results_tree,
"//nav[@id='show-more-documents']//li[@class='next']//a",
xmlGetAttr, "href")
}
for(i in 1:length(all_links)){
url <- str_c("https://www.gov.uk", all_links[i])
tmp <- getURL(url, cainfo = signatures)
write(tmp, str_c("Press_Releases/", i, ".html"))
}
tmp <- readLines("Press_Releases/1.html")
tmp <- str_c(tmp, collapse = "")
tmp <- htmlParse(tmp)
release <- xpathSApply(tmp, "//div[@class='block-4']", xmlValue)
organisation <- xpathSApply(tmp, "//a[@class='organisation-link']", xmlValue)
publication <- xpathSApply(tmp, "//div[@class='block-5']//time[@class='date']", xmlValue)
library(tm)
release_corpus <- Corpus(VectorSource(release))
meta(release_corpus[[1]], "organisation") <- organisation[1]
meta(release_corpus[[1]], "publication") <- publication
meta(release_corpus[[1]])
n <- 1
for(i in 2:length(list.files("Press_Releases/"))){
tmp <- readLines(str_c("Press_Releases/", i, ".html"))
tmp <- str_c(tmp, collapse = "")
tmp <- htmlParse(tmp)
release <- xpathSApply(tmp,"//div[@class='block-4']", xmlValue)
organisation <- xpathSApply(tmp, "//a[@class='organisation-link']", xmlValue)
publication <- xpathSApply(tmp, "//div[@class='block-5']//time[@class='date']", xmlValue)
if (length(release)!=0 &
(organisation == 'Department for Business, Innovation & Skills' |
organisation == 'Ministry of Defence' |
organisation == 'Foreign & Commonwealth Office')) {
n <- n + 1
tmp_corpus <- Corpus(VectorSource(release))
release_corpus <- c(release_corpus, tmp_corpus)
meta(release_corpus[[n]], "organisation") <- organisation[1]
cat("n=",n)
}
}
meta_data <- data.frame()
for (i in 1:NROW(release_corpus))
{
meta_data [i, "organisation"] <- meta(release_corpus[[i]], "organisation")
meta_data [i, "num"] <- i
}
table(as.character(meta_data[, "organisation"]))
release_corpus <- tm_map(release_corpus, content_transformer(removeNumbers))
release_corpus <- tm_map(release_corpus,
content_transformer(str_replace_all),
pattern = "[[:punct:]]", replacement = " ")
release_corpus[[1]]$content
release_corpus <- tm_map(release_corpus, content_transformer(removeWords), words = stopwords("en"))
release_corpus <- tm_map(release_corpus, content_transformer(tolower))
release_corpus <- tm_map(release_corpus, stemDocument, language = "english")
tdm <- TermDocumentMatrix(release_corpus)
dtm <- DocumentTermMatrix(release_corpus)
dtm <- removeSparseTerms(dtm, 1-(10/length(release_corpus)))
library(RTextTools)
org_labels<-meta_data[, "organisation"]
N <- length(org_labels)
container <- create_container(
dtm,
labels = org_labels,
trainSize = 1:350,
testSize = 351:N,
virgin = FALSE
)
svm_model <- train_model(container, "SVM")
tree_model <- train_model(container, "TREE")
maxent_model <- train_model(container, "MAXENT")
svm_out <- classify_model(container, svm_model)
tree_out <- classify_model(container, tree_model)
maxent_out <- classify_model(container, maxent_model)
labels_out <- data.frame(
correct_label = org_labels[351:N],
svm = as.character(svm_out[,1]),
tree = as.character(tree_out[,1]),
maxent = as.character(maxent_out[,1]),
stringsAsFactors = F)
table(labels_out[,1] == labels_out[,2])
table(labels_out[,1] == labels_out[,3])
table(labels_out[,1] == labels_out[,4])
|
## code to prepare `garden_spending` dataset
library(googlesheets4)
library(tidyverse)
gs4_deauth()
garden_spending <- read_sheet("https://docs.google.com/spreadsheets/d/1dPVHwZgR9BxpigbHLnA0U99TtVHHQtUzNB9UR0wvb7o/edit?usp=sharing",
col_types = "ccccnn") # four character columns followed by two numeric columns
usethis::use_data(garden_spending, overwrite = TRUE)
|
/data-raw/clean_garden_spending.R
|
no_license
|
mariorollojr/gardenR
|
R
| false | false | 342 |
r
|
## code to prepare `garden_spending` dataset
library(googlesheets4)
library(tidyverse)
gs4_deauth()
garden_spending <- read_sheet("https://docs.google.com/spreadsheets/d/1dPVHwZgR9BxpigbHLnA0U99TtVHHQtUzNB9UR0wvb7o/edit?usp=sharing",
col_types = "ccccnn") # four character columns followed by two numeric columns
usethis::use_data(garden_spending, overwrite = TRUE)
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
hello <- function() {
print("Hello, fruitcakes!")
}
|
/R/hello.R
|
no_license
|
wcrump/crumpTest
|
R
| false | false | 451 |
r
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
hello <- function() {
print("Hello, fruitcakes!")
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/dirs-files.R
\name{copy_dirs}
\alias{copy_dirs}
\title{Copy directories recursively, creating a new directory if not already there}
\usage{
copy_dirs(from, to)
}
\arguments{
\item{from}{character vector of source directory path(s) to copy}
\item{to}{destination directory path}
}
\value{
string
}
\description{
Copy directories recursively, creating the destination directory if it does not already exist.
}
\examples{
\dontrun{
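# Hypothetical usage (paths are placeholders):
copy_dirs("path/to/source_dir", "path/to/destination_dir")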
}
}
|
/man/copy_dirs.Rd
|
no_license
|
jpmarindiaz/utter
|
R
| false | false | 347 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/dirs-files.R
\name{copy_dirs}
\alias{copy_dirs}
\title{Copy directories recursively, creating a new directory if not already there}
\usage{
copy_dirs(from, to)
}
\arguments{
\item{from}{character vector of source directory path(s) to copy}
\item{to}{destination directory path}
}
\value{
string
}
\description{
Copy directories recursively, creating the destination directory if it does not already exist.
}
\examples{
\dontrun{
}
}
|
/distrib_energy_price/src/regression.R
|
no_license
|
juananguita10/adi-energy-cost-analysis
|
R
| false | false | 3,918 |
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/burst_tj.R
\name{group_labels}
\alias{group_labels}
\alias{group_labels.sftrack}
\alias{group_labels.sftraj}
\alias{group_labels.c_grouping}
\title{Shows grouping labels created from the s_group and the c_grouping}
\usage{
group_labels(x)
\method{group_labels}{sftrack}(x)
\method{group_labels}{sftraj}(x)
\method{group_labels}{c_grouping}(x)
}
\arguments{
\item{x}{a sftrack or grouping object}
}
\description{
Shows grouping labels created from the s_group and the c_grouping
}
|
/man/group_labels.Rd
|
permissive
|
jmsigner/sftrack
|
R
| false | true | 561 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/burst_tj.R
\name{group_labels}
\alias{group_labels}
\alias{group_labels.sftrack}
\alias{group_labels.sftraj}
\alias{group_labels.c_grouping}
\title{Shows grouping labels created from the s_group and the c_grouping}
\usage{
group_labels(x)
\method{group_labels}{sftrack}(x)
\method{group_labels}{sftraj}(x)
\method{group_labels}{c_grouping}(x)
}
\arguments{
\item{x}{a sftrack or grouping object}
}
\description{
Shows grouping labels created from the s_group and the c_grouping
}
|
predict.knnTree <- function(object, test, train, verbose = FALSE, ...)
{
#
# predict.knnTree: get prediction from "test" on model in "object"
#
# Arguments: object: object of class "knnTree"
# test: Data on which to make predictions
# train: Data from which model was built (required)
# verbose: Level of verbosity.
#
# Extract the tree (which is the first entry in "object") and deduce its size.
#
my.tree <- object[[1]]
size <- sum(my.tree$frame[, 1] == "<leaf>") #
#
# If the tree has size one, call predict on the second object, re-classify the
# resulting classifications, and return them.
#
if(size == 1) {
thing <- object[[2]]
class <- predict(thing, test, train, theyre.the.same = FALSE,
return.classifications = TRUE)$class
if(is.factor(train[, 1]))
class <- factor(class, levels = levels(train[, 1]),
labels = levels(train[, 1]))
return(class)
}
#
#
# Create a vector of classifications. Then go through the leaves, calling
# predict on each one.
#
if(is.factor(train[, 1]))
class <- factor(rep("", nrow(test)), levels = levels(
train[, 1]), labels = levels(train[, 1]))
else class <- character(nrow(test))
leaf.locations <- my.tree$frame[, 1] == "<leaf>"
where <- (1:nrow(my.tree$frame))[leaf.locations]
leaf.number <- dimnames(my.tree$frame)[[1]][leaf.locations]
new.leaves <- predict(my.tree, test, type = "where")
for(i in 1:length(where)) {
new.ind <- new.leaves == where[i]
if(sum(new.ind) == 0)
next
old.ind <- my.tree$where == where[i]
thing <- object[[leaf.number[i]]]
predict.out <- predict(thing, test[new.ind, ], train[
old.ind, ], theyre.the.same = FALSE,
return.classifications = TRUE)
class[new.ind] <- predict.out$classifications
if(verbose)
cat(i, ": Leaf", leaf.number[i], "(where =", where[i],
") has size", sum(new.ind), ", rate", signif(
predict.out$rate, 4), "\n") #
}
if(is.factor(train[, 1]))
class <- factor(class, levels = levels(train[, 1]), labels
= levels(train[, 1]))
return(class)
}
|
/R/predict.knnTree.R
|
no_license
|
cran/knnTree
|
R
| false | false | 2,054 |
r
|
|
# tepCCA -----
#' @title A \code{TExPosition}-type version of Canonical Correlation
#' Analysis (CCA).\emph{Temporary Version (11-04-2019)}.
#'
#' @description \code{tepCCA}:
#' A \code{TExPosition}-type version of Canonical Correlation
#' Analysis (CCA). \emph{Temporary Version.
#' This version will soon be revised to take into account
#' the new \code{GSVD}-package from Derek Beaton}.
#' \emph{Note: This is a temporary version}.
#'
#' @param DATA1 an \eqn{N*I} matrix of quantitative data.
#' @param DATA2 an \eqn{N*J} matrix of quantitative data.
#' @param center1 when \code{TRUE} (default) \code{DATA1}
#' will be centered.
#' @param center2 when \code{TRUE} (default) \code{DATA2}
#' will be centered.
#' @param scale1 when \code{TRUE} (default) \code{DATA1}
#' will be normalized. Depends upon \code{ExPosition}
#' function \code{expo.scale} whose description is:
#' boolean, text, or (numeric) vector.
#'If boolean or vector,
#'it works just like \code{scale}.
#'The following text options are available:
#' \code{'z'}: z-score normalization,
#' \code{'sd'}: standard deviation normalization,
#' \code{'rms'}: root mean square normalization,
#' \code{'ss1'}: sum of squares
#' (of columns) equals 1
#' (i.e., column vector of length of 1).
#' @param scale2 when \code{TRUE} (default) \code{DATA2}
#' will be normalized
#' (same options as for \code{scale1}).
#' @param DESIGN a design matrix
#' to indicate if the rows comprise several groups.
#' @param make_design_nominal
#' a boolean. If \code{TRUE} (default),
#' DESIGN is a vector that indicates groups
#' (and will be dummy-coded).
#' If \code{FALSE}, \code{DESIGN} is a dummy-coded matrix.
#' @param graphs
#' a boolean. If \code{TRUE},
#' graphs and plots are provided
#' (via \code{TExPosition::tepGraphs}).
#' @param k number of components to return.
#' @author Vincent Guillemot, Derek Beaton, Hervé Abdi
#' @return
#' See \code{ExPosition::epGPCA} (and also \code{ExPosition::corePCA})
#' for details on what is returned.
#' In addition to the values returned:
#' \code{tepCCA} returns
#'
#' \code{lx}:
#' the latent variables for \code{DATA1}, and
#' \code{ly}:
#' the latent variables for \code{DATA2}.
#'
#' \code{data1.norm}: the
#' center and scale information for \code{DATA1}, and
#' \code{data2.norm}: the
#' center and scale information for \code{DATA2}.
#' @references
#' Abdi H., Eslami, A., Guillemot, V., & Beaton D. (2018).
#' Canonical correlation analysis (CCA).
#' In R. Alhajj and J. Rokne (Eds.),
#' \emph{Encyclopedia of Social Networks and Mining (2nd Edition)}.
#' New York: Springer Verlag.
#' @importFrom ExPosition epGPCA
#' @import TExPosition
# #' @importFrom TExPosition tepGraphs
#' @export
#' @examples
#' \dontrun{
#' # *** Some example here at some point ***}
tepCCA <- function (DATA1, DATA2,
center1 = TRUE, scale1 = "SS1",
center2 = TRUE, scale2 = "SS1",
DESIGN = NULL, make_design_nominal = TRUE,
graphs = TRUE, k = 0) {
if (nrow(DATA1) != nrow(DATA2)) {
stop("DATA1 and DATA2 must have the same number of rows.")
}
# Internal function ----
tepOutputHandler <- function (res = NULL, tepPlotInfo = NULL) {
if (!is.null(res) && !is.null(tepPlotInfo)) {
final.output <- list(TExPosition.Data = res,
Plotting.Data = tepPlotInfo)
class(final.output) <- c("texpoOutput", "list")
return(final.output)
}
else if (!is.null(res) && is.null(tepPlotInfo)) {
return(res)
}
else {
print("Unknown inputs. tepOutputHandler must exit.")
return(0)
}
print("It is unknown how this was executed. tepOutputHandler must exit.")
return(0)
}
#___________________________________________________________________
main <- paste("CCA: ", deparse(substitute(DATA1)), " & ",
deparse(substitute(DATA2)), sep = "")
DESIGN <- texpoDesignCheck(DATA1, DESIGN,
make_design_nominal = make_design_nominal)
DESIGN <- texpoDesignCheck(DATA2, DESIGN,
make_design_nominal = FALSE)
DATA1 <- as.matrix(DATA1)
DATA2 <- as.matrix(DATA2)
DATA1 <- expo.scale(DATA1, scale = scale1, center = center1)
DATA2 <- expo.scale(DATA2, scale = scale2, center = center2)
R <- t(DATA1) %*% DATA2
#
M <- t(DATA1) %*% DATA1
# M <- cor(DATA1)
W <- t(DATA2) %*% DATA2
# W <- cor(DATA2)
Mm1 <- matrix.exponent(M, power = -1)
Wm1 <- matrix.exponent(W, power = -1)
res <- epGPCA2(DATA = R,
k = k,
graphs = FALSE,
masses = Mm1,
weights = Wm1,
scale = FALSE,
center = FALSE)
res <- res$ExPosition.Data
res$center <- NULL
res$scale <- NULL
res$W1 <- res$M
res$W2 <- res$W
res$M <- res$W <- NULL
res$data1.norm <- list(center = attributes(DATA1)$`scaled:center`,
scale = attributes(DATA1)$`scaled:scale`)
res$data2.norm <- list(center = attributes(DATA2)$`scaled:center`,
scale = attributes(DATA2)$`scaled:scale`)
res$lx <- ExPosition::supplementalProjection(DATA1, res$fi, Dv = res$pdq$Dv)$f.out
res$ly <- ExPosition::supplementalProjection(DATA2, res$fj, Dv = res$pdq$Dv)$f.out
class(res) <- c("tepPLS", "list")
#
tepPlotInfo <- TExPosition::tepGraphs(res = res,
DESIGN = DESIGN, main = main,
graphs = graphs)
#
return(tepOutputHandler(res = res, tepPlotInfo = tepPlotInfo))
}
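# Minimal illustrative call (a sketch, not from the package sources): the data are
# simulated, the argument names follow the definition above, and the packages this
# file imports (ExPosition, TExPosition) are assumed to be installed.
# set.seed(42)
# X1 <- matrix(rnorm(100 * 5), nrow = 100)
# X2 <- matrix(rnorm(100 * 4), nrow = 100)
# cca <- tepCCA(X1, X2, graphs = FALSE)
# cca$TExPosition.Data$lx   # latent variables for X1
# cca$TExPosition.Data$ly   # latent variables for X2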
|
/R/tepCCA.R
|
no_license
|
weiwei-wch/data4PCCAR
|
R
| false | false | 5,620 |
r
|
|
library(GeneSurvey)
#################################################################
#################################################################
baseDir <- getBaseDir()
zipFile <- getZipDir()
if ((!is.null(baseDir))&&(!is.null(zipFile)))
{
initGeneReport("-Xmx4800m")
foo <- getMirs_List_Mir(theZipFile=zipFile)
(4446==length(foo))&&
("hsa-let-7a-1"==foo[1])
} else {
message("No test data. Skip test.")
TRUE
}
|
/tests/getMirs_List_Mir.R
|
no_license
|
minghao2016/GeneSurvey
|
R
| false | false | 433 |
r
|
|
data = read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors=FALSE)
subset_data = data[data$Date %in% c("1/2/2007","2/2/2007") ,]
day = strptime(paste(subset_data$Date, subset_data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
gap = as.numeric(subset_data$Global_active_power)
png(filename = "plot2.png", width = 480, height = 480)
plot(day, gap, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
/plot2.R
|
no_license
|
spoorthyparne/ExData_Plotting1
|
R
| false | false | 446 |
r
|
|
########
# 1D dynamic densities
########
# Simulate the data: 100 time points with 10 obsv each
nx <- 10
total <- nx*100
x <- c()
times <- c()
sd <- 13
xx <- seq(-120,120,length=100)
dd <- c()
for(i in 1:10)
{ r <- rbinom(nx, 1, 0.5)
x <- c(x, rnorm(nx, 80, sd)*r + rnorm(nx, -80, sd)*(1-r) )
times <- c(times, rep(i-1,nx))
dd <- rbind(dd, dnorm(xx, 80, sd)/2 + dnorm(xx, -80, sd)/2) }
for(i in 1:40)
{ r <- rbinom(nx, 1, 0.5)
x <- c(x, rnorm(nx, 80-2*i, sd+i/4)*r + rnorm(nx, -80+2*i, sd+i/4)*(1-r) )
times <- c(times, rep(10+i-1,nx))
dd <- rbind(dd, dnorm(xx, 80-2*i, sd+i/4)/2 + dnorm(xx, -80+2*i, sd+i/4)/2) }
for(i in 1:40)
{ r <- rbinom(nx, 1, 0.5)
x <- c(x, rnorm(nx, 2*i, sd+(40-i)/4)*r + rnorm(nx, -2*i, sd+(40-i)/4)*(1-r) )
times <- c(times, rep(50+i-1,nx))
dd <- rbind(dd, dnorm(xx, 2*i, sd+(40-i)/4)/2 + dnorm(xx, -2*i, sd+(40-i)/4)/2) }
for(i in 1:10)
{ r <- rbinom(nx, 1, 0.5)
x <- c(x, rnorm(nx, 80, sd)*r + rnorm(nx, -80, sd)*(1-r) )
times <- c(times, rep(90+i-1,nx))
dd <- rbind(dd, dnorm(xx, 80, sd)/2 + dnorm(xx, -80, sd)/2) }
alpha <- 4
params <- c(0, #gamma
.2, #kappa
3, #nu
3, #gam0
50 #psi0
)
N <- 50 # very small number of particles! You'll notice Markov error in repeated runs
# independent DP for each time
l0 <- mix(x, alpha=alpha, g0params=params,
times=times, rho=0, cat=0,
N=N, niter=0, read=0, print=1)
# BAR stick-breaking with rho=1/2
l1 <- mix(x, alpha=alpha, g0params=params,
times=times, rho=0.5, cat=0,
N=N, niter=0, read=0, print=1)
# Plot the Bayes factor for rho=.5 vs independence
bf <- l1$logprob-l0$logprob
par(mai=c(.7,.7,0.4,0.4), mfrow=c(1,1))
plot(c(-100:(total+100)), rep(0,total+201), type="l", col=grey(.5), xlim=c(10,total+10), ylim=range(bf),
xlab="", ylab="", main="", cex.axis=.8)
mtext("Log Bayes Factor", side=2, font=3, cex=1.1, line=2.3)
lines(bf, col=6)
text(x=total+20, y=bf[total], label="0.5", cex=.8, font=3)
mtext("Observation", side=1, font=3, cex=1.1, line=-1.25, outer=TRUE)
# Extract mean pdfs and compare the filtered densities
dens <- function(prt)
{ pdf <- rep(0,100)
for(j in 1:nrow(prt))
{ pdf <- pdf + prt$p[j]*dt( (xx-prt[j,]$a.1)/sqrt(prt[j,]$B.1), df = prt$c[j] )/sqrt( prt[j,]$B.1 ) }
return(pdf) }
prts1 <- vector(mode="list", length=0)
prts0 <- vector(mode="list", length=0)
for(t in 1:99){
prt <- vector(mode="list", length=N)
for(i in 1:N) prt[[i]] <- particle(i, l0, t, 0)
prts0 <- cbind(prts0, prt)
for(i in 1:N) prt[[i]] <- particle(i, l1, t, 0.5)
prts1 <- cbind(prts1, prt) }
post0 <- lapply(prts0,dens)
post1 <- lapply(prts1,dens)
pdfs0 <- array( unlist(post0), dim=c(100,N,99) )
pdfs1 <- array( unlist(post1), dim=c(100,N,99) )
mf0 <- apply(pdfs0, c(1,3), mean)
mf1 <- apply(pdfs1, c(1,3), mean)
rl <- readline("press RETURN to continue: ")
# plot
cols <- rainbow(99)
par(mfrow=c(1,3))
pmat <- persp(x=xx, y=1:100, z=t(dd), theta=20, phi=40, expand=.6, ticktype="detailed", r=100, tcl=.1,
xlab="x", ylab="time", zlab="", border=0, col=0, zlim=range(dd))
text(trans3d(x=-115, y=0, z=.025, pmat=pmat), label="f(x)", cex=1, font=3)
mtext("Filtered AR Fit", side=3, font=3)
for(i in 99:1){ lines(trans3d(x=xx, y=i, z=mf1[,i], pmat=pmat), col=cols[i]) }
pmat <- persp(x=xx, y=1:100, z=t(dd), theta=20, phi=40, expand=.6, ticktype="detailed", r=100,
xlab="x", ylab="time", zlab="", border=NA, col=matrix(rep(cols,99), ncol=99, byrow=TRUE), zlim=range(dd) )
text(trans3d(x=-115, y=0, z=.025, pmat=pmat), label="f(x)", cex=1, font=3)
mtext("The Truth", side=3, font=3)
pmat <- persp(x=xx, y=1:100, z=t(dd), theta=20, phi=40, expand=.6, ticktype="detailed", r=100,
xlab="x", ylab="time", zlab="", border=0, col=0, zlim=range(dd))
text(trans3d(x=-115, y=0, z=.025, pmat=pmat), label="f(x)", font=3)
for(i in 99:1){ lines(trans3d(x=xx, y=i, z=mf0[,i], pmat=pmat), col=cols[i]) }
mtext("Independent Fit", side=3, font=3)
|
/Bmix/demo/bar1D.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 4,039 |
r
|
|
#Script R
args <- commandArgs(trailingOnly = TRUE)
col_to_use_min=as.numeric(args[1])
col_to_use_max=as.numeric(args[2])
# Threshold of detection:
thres=0.05
#Expression
data <- read.delim("Expression.txt")
rownames(data)=data[,1]
data=data[,-1]
colnames(data)=gsub("DC", "TT06DC." , colnames(data))
rownames(data)=gsub("\\|T.*","",rownames(data) )
#Phenotype matrix:
pheno=read.table("phenotypage_all_fusa.csv" , header=T , sep=";" )
# Keep only the selected phenotypes:
pheno=pheno[ , c(1,c(col_to_use_min:col_to_use_max)[c(col_to_use_min:col_to_use_max) < ncol(pheno)] )]
#numeric
pheno[,-1]=apply(pheno[,-1],2,as.numeric)
# put geno name as rowname
rownames(pheno)=pheno$geno
pheno=pheno[,-1]
# delete columns with only NA
which(apply( pheno , 2 , function(x) all(is.na(x)) )==TRUE)
pheno=pheno[ , ! apply( pheno , 2 , function(x) all(is.na(x)) ) ]
# Library
library(DESeq2)
# A function that compute the DEgenes related to a phenotypic trait
get_DE_genes_from_pheno=function( trait ){
	# TMP: only keep the first n rows of data (temporary, for testing)
don=data
don<-head(data,n=100)
# sum_expe contains the trait of interest
sum_expe=data.frame(geno=rownames(pheno),trait=pheno[,trait] )
sum_expe=na.omit(sum_expe)
# in the expression matrix, I keep only individuals genotyped for the marker
don=don[ , which(colnames(don)%in%sum_expe[,1]) ]
# reorder sum_expe
sum_expe=sum_expe[match(colnames(don),sum_expe[,1] ), ]
rownames(sum_expe)=sum_expe[,1]
# Call DeSeq2
dds <- DESeqDataSetFromMatrix(don, sum_expe, formula( ~ trait) )
dds <- DESeq(dds, test = c("Wald") )
res <- results(dds)
return(res)
# close function
}
# Apply the function to all columns
bilan=data.frame(matrix(0,0,7))
colnames(bilan)=c("baseMean","log2FoldChange","lfcSE","stat","pvalue","padj","carac")
for(i in 1:ncol(pheno)){
print(colnames(pheno)[i])
print(i)
DE_genes=get_DE_genes_from_pheno(colnames(pheno)[i])
DE_genes$carac=colnames(pheno)[i]
res_sig=as.data.frame( DE_genes[ which(DE_genes$padj<thres) , ] )
bilan=rbind(bilan, res_sig)
}
bilan=data.frame(gene=rownames(bilan), bilan)
# Write the result
name=paste("resultat_DE_pheno_",col_to_use_min,"_to_",col_to_use_max, sep="")
write.table(bilan, file=name, quote=F, row.names=F, col.names=T)
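# Example invocation (illustrative): the two trailing arguments give the first and
# last phenotype columns to analyse, e.g. columns 2 to 5 of the phenotype file.
# Rscript Find_related_genes_DESeq2.R 2 5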
|
/6_Expression/Find_related_genes_DESeq2.R
|
no_license
|
holtzy/Resistance-to-fusarium
|
R
| false | false | 2,288 |
r
|
|
# devtools::use_data(defaults, noiseThresholdsDict, BaNaRatios, notesDict, internal = TRUE, overwrite = TRUE)
#' Manual counts of syllables in 260 sounds
#'
#' A vector of the number of syllables in the corpus of 260 human non-linguistic emotional vocalizations from Anikin & Persson (2017). The corpus can be downloaded from http://cogsci.se/personal/results/01_anikin-persson_2016_naturalistics-non-linguistic-vocalizations/01_anikin-persson_2016_naturalistic-non-linguistic-vocalizations.html
"segmentManual"
#' Manual pitch estimation in 260 sounds
#'
#' A vector of manually verified pitch values per sound in the corpus of 590 human non-linguistic emotional vocalizations from Anikin & Persson (2017). The corpus can be downloaded from http://cogsci.se/personal/results/01_anikin-persson_2016_naturalistics-non-linguistic-vocalizations/01_anikin-persson_2016_naturalistic-non-linguistic-vocalizations.html
"pitchManual"
#' Conversion table from Hz to semitones above C0 to musical notation
#'
#' A dataframe of 132 rows and 2 columns: "note" and "freq" (Hz)
#'
#' @examples
#' # To recompile:
#' notes = c('C', 'C\U266F', 'D', 'D\U266F', 'E', 'F', 'F\U266F', 'G', 'G\U266F', 'A', 'B\U266D', 'B')
#' nOct = 11
#' notes_all = paste0(notes, rep(0:(nOct - 1), each = 12))
#' # 440 / 32 = 13.75 # A-1, and C0 is 3 semitones higher: 16.3516 Hz exactly.
#' c0 = 13.75 * 2 ^ (3 / 12)
#' notes_freq = round (c0 * 2^(0:(12 * nOct - 1) / 12), 1)
"notesDict"
|
/R/data.R
|
no_license
|
danstowell/soundgen
|
R
| false | false | 1,460 |
r
|
|
#!/usr/bin/env Rscript
### libraries
library(gdsfmt)
library(SNPRelate)
library(data.table)
library(ggplot2)
library(foreach)
library(lattice)
library(tidyr)
library(SeqArray)
library(tidyverse)
### Load genofile
genofile <- seqOpen("/scratch/kbb7sh/Daphnia/MappingDecember2019/MapDec19PulexOnlyB_filtsnps10bpindels_snps_filter_pass_lowGQmiss.seq.gds")
### Load superclone file
sc <- fread("Superclones201617182019pulexonlyD82016problematic_20200122")
### Add in pond and year info
temp <- unlist(strsplit(sc$clone, split="_"))
mat <- matrix(temp, ncol=4, byrow=TRUE)
matdat <- as.data.table(mat)
sc$population <- matdat$V3
sc$year <- matdat$V2
### Pull out D82018 individuals
D82018clones <- sc[population=="D8" & year=="2018"]
D82018clonesids <- D82018clones$clone
#Add in 2 Bs
Bs <- sc[clone=="May_2017_D8_515" | clone=="April_2017_D8_125"]
D82018clonesandBs <- rbind(D82018clones, Bs)
D82018clonesandBsids <- D82018clonesandBs$clone
seqSetFilter(genofile, sample.id=D82018clonesandBsids)
### Load filtered but not LD pruned snpset
load("snpsvarpulexpresentinhalf_20200121.Rdata")
seqSetFilter(genofile, variant.id=snpsvarpulexpresentinhalf)
### Pull out SNPs on scaffold Scaffold_7757_HRSCAF_8726 between 8660157 - 8710157, results in 1530 SNPs.
snpsvarPulex <- data.table(variant.ids = seqGetData(genofile, "variant.id"),
chr = seqGetData(genofile, "chromosome"),
pos = seqGetData(genofile, "position"),
dp = seqGetData(genofile, "annotation/info/DP"))
snpsvarPulex7757 <- snpsvarPulex[chr=="Scaffold_7757_HRSCAF_8726" & pos > 8660156 &
pos < 8710158]
snpsvarPulex7757ids <- snpsvarPulex7757$variant.ids
seqSetFilter(genofile, variant.id=snpsvarPulex7757ids)
### Pull out genotypes
het <- t(seqGetData(genofile, "$dosage"))
het <- as.data.table(het)
colnames(het) <- c(seqGetData(genofile, "sample.id"))
het$variant.ids <- seqGetData(genofile, "variant.id")
setkey(het, variant.ids)
setkey(snpsvarPulex7757, variant.ids)
mhetABfixed <- merge(snpsvarPulex7757, het)
mhetABfixedlong <- melt(mhetABfixed, measure.vars=D82018clonesandBsids, variable.name="clone", value.name="dosage")
#Remove NAs
mhetABfixedlong <- mhetABfixedlong[dosage!="NA"]
dosagecountsABfixed <- mhetABfixedlong[, .N, by=list(clone, dosage)]
#Transform to wide format
dosagecountsABfixedwide <- dcast(dosagecountsABfixed, clone ~ dosage, value.var="N")
colnames(dosagecountsABfixedwide) <- c("clone", "dos0", "dos1", "dos2")
dosagecountsABfixedwide[is.na(dos0),dos0:=0]
dosagecountsABfixedwide[is.na(dos1),dos1:=0]
dosagecountsABfixedwide[is.na(dos2),dos2:=0]
setkey(dosagecountsABfixedwide, clone)
setkey(D82018clonesandBs, clone)
mdosagecountsABfixedwide <- merge(D82018clonesandBs, dosagecountsABfixedwide)
mdosagecountsABfixedwide$total <- mdosagecountsABfixedwide$dos0+mdosagecountsABfixedwide$dos1+
mdosagecountsABfixedwide$dos2
mdosagecountsABfixedwide$prophet <- mdosagecountsABfixedwide$dos1/mdosagecountsABfixedwide$total
setkey(mdosagecountsABfixedwide, SC, prophet)
mdosagecountsABfixedwide$sex <- ifelse(mdosagecountsABfixedwide$clone=="April17_2018_D8_Male1" |
mdosagecountsABfixedwide$clone=="March20_2018_D8_Male1" | mdosagecountsABfixedwide$clone=="March20_2018_D8_Male2" |
mdosagecountsABfixedwide$clone=="March20_2018_D8_Male3" | mdosagecountsABfixedwide$clone=="April17_2018_D8_Male2" |
mdosagecountsABfixedwide$clone=="April17_2018_D8_Male3" | mdosagecountsABfixedwide$clone=="April17_2018_D8_Male4" |
mdosagecountsABfixedwide$clone=="April17_2018_D8_Male5", "male", ifelse(mdosagecountsABfixedwide$SC=="A", "A", ifelse(
mdosagecountsABfixedwide$SC=="B", "B", "female"
)))
ggplot(data=mdosagecountsABfixedwide, aes(x=prophet, fill=sex)) + geom_histogram()
dp <- t((seqGetData(genofile, "annotation/format/DP"))$data)
dp <- as.data.table(dp)
colnames(dp) <- c(seqGetData(genofile, "sample.id"))
dp$variant.ids <- seqGetData(genofile, "variant.id")
dplong <- melt(dp, measure.vars=D82018clonesandBsids, variable.name="clone", value.name="dp")
dplong.ag <- dplong[,list(medrd = median(dp, na.rm=TRUE)), list(clone) ]
setkey(mdosagecountsABfixedwide, clone)
setkey(dplong.ag, clone)
m <- merge(mdosagecountsABfixedwide, dplong.ag)
mhighRD <- m[medrd > 3]
setkey(mhighRD, sex, SC)
ggplot(data=mhighRD, aes(x=prophet, fill=sex)) + geom_histogram()
### So this didn't really work... Bs are more heterozygous than As overall in this region... Need to focus more on SNPs of interest.
### What if we pull out SNPs that are heterozygous in A but homozygous in B.
setkey(mhetABfixedlong, clone)
setkey(sc, clone)
m <- merge(sc, mhetABfixedlong)
dosagecountsABfixed <- m[, .N, by=list(SC, variant.ids, dosage)]
dosagecountsABfixedA<- dosagecountsABfixed[SC=="A"]
dosagecountsABfixedAsub <- data.table(variant.ids=dosagecountsABfixedA$variant.ids, dosage=dosagecountsABfixedA$dosage,
N=dosagecountsABfixedA$N)
dosagecountsABfixedAsubwide <- dcast(dosagecountsABfixedAsub, variant.ids ~ dosage, value.var="N")
colnames(dosagecountsABfixedAsubwide) <- c("variant.ids", "dos0A", "dos1A", "dos2A")
dosagecountsABfixedAsubwide[is.na(dos0A),dos0A:=0]
dosagecountsABfixedAsubwide[is.na(dos1A),dos1A:=0]
dosagecountsABfixedAsubwide[is.na(dos2A),dos2A:=0]
dosagecountsABfixedAsubwide$totalA <- dosagecountsABfixedAsubwide$dos0A + dosagecountsABfixedAsubwide$dos1A +
dosagecountsABfixedAsubwide$dos2A
dosagecountsABfixedB<- dosagecountsABfixed[SC=="B"]
dosagecountsABfixedBsub <- data.table(variant.ids=dosagecountsABfixedB$variant.ids, dosage=dosagecountsABfixedB$dosage,
N=dosagecountsABfixedB$N)
dosagecountsABfixedBsubwide <- dcast(dosagecountsABfixedBsub, variant.ids ~ dosage, value.var="N")
colnames(dosagecountsABfixedBsubwide) <- c("variant.ids", "dos1B", "dos2B")
dosagecountsABfixedBsubwide$dos0B <- c(0)
dosagecountsABfixedBsubwide[is.na(dos0B),dos0B:=0]
dosagecountsABfixedBsubwide[is.na(dos1B),dos1B:=0]
dosagecountsABfixedBsubwide[is.na(dos2B),dos2B:=0]
dosagecountsABfixedBsubwide$totalB <- dosagecountsABfixedBsubwide$dos0B + dosagecountsABfixedBsubwide$dos1B +
dosagecountsABfixedBsubwide$dos2B
setkey(dosagecountsABfixedAsubwide, variant.ids)
setkey(dosagecountsABfixedBsubwide, variant.ids)
mAB <- merge(dosagecountsABfixedAsubwide, dosagecountsABfixedBsubwide)
AhetBhom <- mAB[dos1A==totalA & dos2B==totalB]
AhetBhomids <- AhetBhom$variant.ids
seqSetFilter(genofile, variant.id=AhetBhomids)
### Pull out genotypes
het <- t(seqGetData(genofile, "$dosage"))
het <- as.data.table(het)
colnames(het) <- c(seqGetData(genofile, "sample.id"))
het$variant.ids <- seqGetData(genofile, "variant.id")
setkey(het, variant.ids)
setkey(snpsvarPulex7757, variant.ids)
mhetABfixed <- merge(snpsvarPulex7757, het)
mhetABfixedlong <- melt(mhetABfixed, measure.vars=D82018clonesandBsids, variable.name="clone", value.name="dosage")
#Remove NAs
mhetABfixedlong <- mhetABfixedlong[dosage!="NA"]
dosagecountsABfixed <- mhetABfixedlong[, .N, by=list(clone, dosage)]
#Transform to wide format
dosagecountsABfixedwide <- dcast(dosagecountsABfixed, clone ~ dosage, value.var="N")
colnames(dosagecountsABfixedwide) <- c("clone", "dos0", "dos1", "dos2")
dosagecountsABfixedwide[is.na(dos0),dos0:=0]
dosagecountsABfixedwide[is.na(dos1),dos1:=0]
dosagecountsABfixedwide[is.na(dos2),dos2:=0]
setkey(dosagecountsABfixedwide, clone)
setkey(D82018clonesandBs, clone)
mdosagecountsABfixedwide <- merge(D82018clonesandBs, dosagecountsABfixedwide)
mdosagecountsABfixedwide$total <- mdosagecountsABfixedwide$dos0+mdosagecountsABfixedwide$dos1+
mdosagecountsABfixedwide$dos2
mdosagecountsABfixedwide$prophet <- mdosagecountsABfixedwide$dos1/mdosagecountsABfixedwide$total
setkey(mdosagecountsABfixedwide, SC, prophet)
mdosagecountsABfixedwide$sex <- ifelse(mdosagecountsABfixedwide$clone=="April17_2018_D8_Male1" |
mdosagecountsABfixedwide$clone=="March20_2018_D8_Male1" | mdosagecountsABfixedwide$clone=="March20_2018_D8_Male2" |
mdosagecountsABfixedwide$clone=="March20_2018_D8_Male3" | mdosagecountsABfixedwide$clone=="April17_2018_D8_Male2" |
mdosagecountsABfixedwide$clone=="April17_2018_D8_Male3" | mdosagecountsABfixedwide$clone=="April17_2018_D8_Male4" |
mdosagecountsABfixedwide$clone=="April17_2018_D8_Male5", "male", ifelse(mdosagecountsABfixedwide$SC=="A", "A", ifelse(
mdosagecountsABfixedwide$SC=="B", "B", "female"
)))
ggplot(data=mdosagecountsABfixedwide, aes(x=prophet, fill=sex)) + geom_histogram()
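### Added note (illustrative): the last histogram can be written to disk with
### ggplot2, e.g. ggsave("prophet_by_sex_AhetBhom.pdf", width = 6, height = 4)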
|
/December2019/Checkmales7757region
|
no_license
|
kbkubow/DaphniaPulex20162017Sequencing
|
R
| false | false | 9,716 |
|
|
library(embed)
library(dplyr)
library(testthat)
library(modeldata)
|
/tests/testthat/test_helpers.R
|
no_license
|
konradsemsch/embed-1
|
R
| false | false | 67 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MetaAnalysisForFamiliesOfExperimentsSR.R
\name{transformZrtoHgapprox}
\alias{transformZrtoHgapprox}
\title{transformZrtoHgapprox}
\usage{
transformZrtoHgapprox(Zr)
}
\arguments{
\item{Zr}{A vector of normalised point bi-serial values}
}
\value{
approx. value of Hedges' g
}
\description{
This function provides an approximate transformation from Zr to Hedges' g when the numbers of observations in the treatment and control groups are unknown. It is also used to allow the forest plots to display Hedges' g when they are based on r. It is necessary because the transformation function in the forest plot function does not allow any parameters other than the effect size to be used. The function assumes that Nc=Nt and gives the same results as transformZrtoHg when Nc=Nt.
}
\examples{
transformZrtoHgapprox(c(0.4, 0.2))
# [1] 0.8215047 0.4026720
}
\author{
Barbara Kitchenham and Lech Madeyski
}
|
/man/transformZrtoHgapprox.Rd
|
no_license
|
cran/reproducer
|
R
| false | true | 961 |
rd
|
|
## Code is for ProgrammingAssignment2 of the Coursera R programming course.
## makeCacheMatrix: This function creates a special "matrix" object that can
## cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setSolve <- function(solve) m <<- solve
getSolve <- function() m
list(
set = set,
get = get,
setSolve = setSolve,
getSolve = getSolve
)
}
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then cacheSolve should retrieve
## the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getSolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setSolve(m)
m
}
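## Usage sketch (illustrative, not part of the assignment code):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)   # computes the inverse and caches it
## cacheSolve(m)   # prints "getting cached data" and returns the cached inverse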
|
/cachematrix.R
|
no_license
|
shortd/ProgrammingAssignment2
|
R
| false | false | 1,108 |
r
|
|
source("~/r-workspace/dec-functions.r")
heights20<-read.table(mfn22(pvalues[9],TRUE),header=T)
heights5<-read.table(mfn22(pvalues[3],TRUE),header=T)
prc20<-pca(heights20)
prc5<-pca(heights5)
opt20s<-"PC1+1:PC1-1:PC4+1:PC4-1:PC2+1:PC2-1"
opt5s<-"PC1+1:PC1-1:PC3+1:PC3-1:PC5+1:PC5-1:PC7+1:PC7-1"
opt20<-strsplit(opt20s,":")[[1]]
opt5<-strsplit(opt5s,":")[[1]]
reg20<-qPeakP(prc20$eigenVectors,opt20s)
reg5<-qPeakP(prc5$eigenVectors,opt5s)
HSC5<-addColnames(cbind(reg5[,"PC3+1"]&reg5[,"PC7+1"],
    reg5[,"PC3-1"]&reg5[,"PC7-1"])|(reg5[,"PC3-1"]&reg5[,"PC7+1"])|(reg5[,"PC3+1"]&reg5[,"PC7-1"]),c("HSC","NotHSC"))
Other5<-addColnames(cbind(reg5[,"PC3+1"]&reg5[,"PC5+1"],
    reg5[,"PC3-1"]&reg5[,"PC5-1"])|(reg5[,"PC3-1"]&reg5[,"PC5+1"])|(reg5[,"PC3+1"]&reg5[,"PC5-1"]),c("Other","NotOther"))
bed20<-ifZeroShift(read.table("~/thesis-november/22x22-pvalue=20.matrix",header=T)[,1:3])
bed5<-ifZeroShift(read.table("~/thesis-november/22x22-pvalue=5.matrix",header=T)[,1:3])
#fasta20<-getSeq(BSgenome.Hsapiens.UCSC.hg19,bed20$chro,start=bed20$start+150,width=300)
#fasta5<-getSeq(BSgenome.Hsapiens.UCSC.hg19,bed5$chro,start=bed5$start+150,width=300)
#fasta5<-read
motifFiles<-c(motifFileNames(opt20,pairSwitch(opt20),rep(20,6),rep(8,6)),
motifFileNames(opt20,pairSwitch(opt20),rep(20,6),rep(6,6)),
motifFileNames(opt5,pairSwitch(opt5),rep(5,8),rep(8,8)),
motifFileNames(opt5,pairSwitch(opt5),rep(5,8),rep(6,8)),
motifFileNames(c("HSC","Other"),c("NotHSC","NotOther"),rep(5,2),rep(6,2)),
motifFileNames(c("HSC","Other"),c("NotHSC","NotOther"),rep(5,2),rep(8,2))
)
|
/variables/dec-variables.r
|
no_license
|
alexjgriffith/r-workspace
|
R
| false | false | 1,608 |
r
|
source("~/r-workspace/dec-functions.r")
heights20<-read.table(mfn22(pvalues[9],TRUE),header=T)
heights5<-read.table(mfn22(pvalues[3],TRUE),header=T)
prc20<-pca(heights20)
prc5<-pca(heights5)
opt20s<-"PC1+1:PC1-1:PC4+1:PC4-1:PC2+1:PC2-1"
opt5s<-"PC1+1:PC1-1:PC3+1:PC3-1:PC5+1:PC5-1:PC7+1:PC7-1"
opt20<-strsplit(opt20s,":")[[1]]
opt5<-strsplit(ot5s,":")[[1]]
reg20<-qPeakP(prc20$eigenVectors,opt20s)
reg5<-qPeakP(prc5$eigenVectors,opt5s)
HSC5<-addColnames(cbind(reg5[,"PC3+1"]®5[,"PC7+1"],
reg5[,"PC3-1"]®5[,"PC7-1"])|(reg5[,"PC3-1"]®5[,"PC7+1"])|(reg5[,"PC3+1"]®5[,"PC7-1"]),c("HSC","NotHSC"))
Other5<-addColnames(cbind(reg5[,"PC3+1"]®5[,"PC5+1"],
reg5[,"PC3-1"]®5[,"PC5-1"])|(reg5[,"PC3-1"]®5[,"PC5+1"])|(reg5[,"PC3+1"]®5[,"PC5-1"]),c("Other","NotOther"))
bed20<-ifZeroShift(read.table("~/thesis-november/22x22-pvalue=20.matrix",header=T)[,1:3])
bed5<-ifZeroShift(read.table("~/thesis-november/22x22-pvalue=5.matrix",header=T)[,1:3])
#fasta20<-getSeq(BSgenome.Hsapiens.UCSC.hg19,bed20$chro,start=bed20$start+150,width=300)
#fasta5<-getSeq(BSgenome.Hsapiens.UCSC.hg19,bed5$chro,start=bed5$start+150,width=300)
#fasta5<-read
motifFiles<-c(motifFileNames(opt20,pairSwitch(opt20),rep(20,6),rep(8,6)),
motifFileNames(opt20,pairSwitch(opt20),rep(20,6),rep(6,6)),
motifFileNames(opt5,pairSwitch(opt5),rep(5,8),rep(8,8)),
motifFileNames(opt5,pairSwitch(opt5),rep(5,8),rep(6,8)),
motifFileNames(c("HSC","Other"),c("NotHSC","NotOther"),rep(5,2),rep(6,2)),
motifFileNames(c("HSC","Other"),c("NotHSC","NotOther"),rep(5,2),rep(8,2))
)
|
testlist <- list(A = structure(c(2.32784507011897e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613107463-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 344 |
r
|
|
library(R2Cuba)
library(MASS)
n <- 100
p <- 1
ht <- n^(-1/3)
hx <- rep(ht, p)
m = 10
theta <- c(0.5, 0.5, 0.5, 2, 2, 2, -0.5, -0.6, -0.7)
q <- length(theta) - 3
mA <- matrix(NA, m, m)
mb <- matrix(NA, q, m)
vg <- seq(0.5, 1.5, length.out = m)
vq <- dunif(vg, 0.5, 1.5)
n <- 100
cn <- 50
p <- 1
ij <- as.matrix(expand.grid(1 : m, 1 : m))
hzd1 <- function(kappa1, alpha1, beta1, t, x, g){
exp((trans1(t, alpha1)-t(beta1) %*% x-log(g))/kappa1^2) * 1/t * 1/kappa1^2
}
hzd2 <- function(kappa2, alpha2, beta2, t, x, g){
exp((trans2(t, alpha2)-t(beta2) %*% x-log(g))/kappa2^2) * 1/t * 1/kappa2^2
}
hzd3 <- function(kappa3, alpha3, beta3, t, x, g){
exp((trans3(t, alpha3)-t(beta3) %*% x-log(g))/kappa3^2) * 1/t * 1/kappa3^2
}
surv1 <- function(kappa1, alpha1, beta1, t, x, g){
exp(- exp( (trans1(t, alpha1) - log(g) - t(beta1) %*% x) / kappa1 ^2))
}
surv2 <- function(kappa2, alpha2, beta2, t, x, g){
exp(- exp( (trans2(t, alpha2) - log(g) - t(beta2) %*% x) / kappa2 ^2))
}
surv3 <- function(kappa3, alpha3, beta3, t, x, g){
exp(- exp( (trans3(t, alpha3) - log(g) - t(beta3) %*% x) / kappa3 ^2))
}
trans1 <- function(t, alpha1= 1){
log(t)
}
trans2 <- function(t, alpha2= 1){
log(t)
}
trans3 <- function(t, alpha3=1){
log(t)
}
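# Added note: with trans1/trans2/trans3 = log(t), each hzd* function is the time
# derivative of exp((log(t) - beta'x - log(g))/kappa^2), so the matching surv*
# function equals exp(-cumulative hazard). A quick single-point check, with
# illustrative parameter values:
# hzd1(0.8, 1, 0.3, 1.5, 1, 1)    # cause-1 hazard at t = 1.5
# surv1(0.8, 1, 0.3, 1.5, 1, 1)   # corresponding survival at t = 1.5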
lkhd.exp <- expression((( ((log(y1)-b1x -log(g))/kappa1^2) + log( 1/y1) + log( 1/kappa1^2)) * d1 + ( ((log(y1)-b2x-log(g))/kappa2^2) + log(1/y1) +log( 1/kappa2^2)) * (1-d1) + (((log(y2)-b3x-log(g))/kappa3^2) + log( 1/y2) + log( 1/kappa3^2)) * d1 + (- exp( (log(y1) - log(g) - b1x) / kappa1^2)) + (- exp( (log(y1) - log(g) - b2x) / kappa2^2)) + ((- exp( (log(y2) - log(g) - b3x) / kappa3^2)) -(- exp( (log(y1) - log(g) - b3x) / kappa3^2))) * d1 - ((- exp( (log(v) - log(g) -b1x) / kappa1^2)) + (- exp( (log(v) - log(g) - b2x) / kappa2^2))) + log(1 / (1 - vt11)) + log(1/ (1 - vt12)) ))
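# NOTE (added): the assignment below overwrites the expression above; it drops the
# left-truncation terms involving v and is the version that deriv() uses.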
lkhd.exp <- expression((( ((log(y1)-b1x -log(g))/kappa1^2) + log( 1/y1) + log( 1/kappa1^2)) * d1 + ( ((log(y1)-b2x-log(g))/kappa2^2) + log(1/y1) +log( 1/kappa2^2)) * (1-d1) + (((log(y2)-b3x-log(g))/kappa3^2) + log( 1/y2) + log( 1/kappa3^2)) * d1 + (- exp( (log(y1) - log(g) - b1x) / kappa1^2)) + (- exp( (log(y1) - log(g) - b2x) / kappa2^2)) + ((- exp( (log(y2) - log(g) - b3x) / kappa3^2)) -(- exp( (log(y1) - log(g) - b3x) / kappa3^2))) * d1 + log(1 / (1 - vt11)) + log(1/ (1 - vt12)) ))
#eval(deriv(lkhd.exp, c("b1x", "b2x", "b3x", "alpha1", "alpha2", "alpha3")))
dlike <- deriv(lkhd.exp, c("b1x", "b2x", "b3x", "kappa1", "kappa2", "kappa3"))
score <- function(vt, theta, x, g, v= 1e-5){
vt1 <- vt
vt11 <- vt1[, 1]
vt12 <- vt1[, 2]
vt <- -log(1 - vt)
kappa1 <- abs(theta[1])
kappa2 <- abs(theta[2])
kappa3 <- abs(theta[3])
beta1 <- theta[7 : (6 + p)]
beta2 <- theta[(7 + p) : (6 + 2 * p)]
beta3 <- theta[(7 + 2* p) : (6 + 3 * p)]
b1x <- t(beta1)%*%x
b2x <- t(beta2)%*%x
b3x <- t(beta3)%*%x
d1 <- as.numeric(vt[, 1] < vt[, 2])
y1 <- pmin(vt[, 1], vt[, 2])
y2 <- vt[, 2]
derivlike <- attributes(eval(dlike))$gradient
derivlike[is.nan(derivlike)] <- 0
#browser()
score <- cbind( derivlike[, 4: ncol(derivlike)], derivlike[, 1] %*% diag(x, p, p), derivlike[, 2] %*% diag(x, p, p), derivlike[, 3] %*% diag(x, p, p))
}
singlescore <- function(vt, theta, x, g, v= 1e-5){
vt1 <- vt
vt11 <- vt1[1]
vt12 <- vt1[2]
vt <- -log(1 - vt)
kappa1 <- abs(theta[1])
kappa2 <- abs(theta[2])
kappa3 <- abs(theta[3])
alpha1 <- theta[4]
alpha2 <- theta[5]
alpha3 <- theta[6]
beta1 <- theta[7 : (6 + p)]
beta2 <- theta[(7 + p) : (6 + 2 * p)]
beta3 <- theta[(7 + 2* p) : (6 + 3 * p)]
b1x <- t(beta1)%*%x
b2x <- t(beta2)%*%x
b3x <- t(beta3)%*%x
d1 <- as.numeric(vt[1] < vt[2])
y1 <- pmin(vt[1], vt[2])
y2 <- vt[2]
derivlike <- attributes(eval(dlike))$gradient
derivlike[is.nan(derivlike)] <- 0
#browser()
score <- c(derivlike[4: length(derivlike)], derivlike[1] * (x), derivlike[2] * x, derivlike[3] * x )
}
likelihood <- function(vt, x, g, theta, v=1e-5){
vt1 <- vt
vt <- -log(1 - vt)
kappa1 <- abs(theta[1])
kappa2 <- abs(theta[2])
kappa3 <- abs(theta[3])
alpha1 <- theta[4]
alpha2 <- theta[5]
alpha3 <- theta[6]
beta1 <- theta[7 : (6 + p)]
beta2 <- theta[(7 + p) : (6 + 2 * p)]
beta3 <- theta[(7 + 2* p) : (6 + 3 * p)]
t1 <- vt[, 1]
t2 <- vt[, 2]
d1 <- as.numeric(t1 < t2)
y2 <- t2
y1 <- pmin(t1, t2)
# likelihood <- hzd1(kappa1, alpha1, beta1, y1, x, g)^(d1) * hzd2(kappa2, alpha2, beta2, y1, x, g)^(1-d1) * hzd3(kappa3, alpha3, beta3, y2, x, g)^d1 * surv1(kappa1, alpha1, beta1, y1, x, g) * surv2(kappa2, alpha2, beta2, y1, x, g) * (surv3(kappa3, alpha3, beta3, y2, x, g) / surv3(kappa3, alpha3, beta3, y1, x, g) )^(d1)/(surv1(kappa1, alpha1, beta1, v, x, g) *surv2(kappa2, alpha2, beta2, v, x, g)) * apply(1 / (1 - vt1), 1, prod)
likelihood <- hzd1(kappa1, alpha1, beta1, y1, x, g)^(d1) * hzd2(kappa2, alpha2, beta2, y1, x, g)^(1-d1) * hzd3(kappa3, alpha3, beta3, y2, x, g)^d1 * surv1(kappa1, alpha1, beta1, y1, x, g) * surv2(kappa2, alpha2, beta2, y1, x, g) * (surv3(kappa3, alpha3, beta3, y2, x, g) / (surv3(kappa3, alpha3, beta3, y1, x, g) + 1e-200) )^(d1) * apply(1 / (1 - vt1), 1, prod)
# if(sum(is.nan(likelihood)) > 0)
# browser()
likelihood[is.nan(likelihood)] <- 0
return(likelihood)
}
singlelikelihood <- function(vt, x, g, theta, v=1e-5){
vt1 <- vt
vt <- -log(1 - vt)
kappa1 <- theta[1]
kappa2 <- theta[2]
kappa3 <- theta[3]
alpha1 <- theta[4]
alpha2 <- theta[5]
alpha3 <- theta[6]
beta1 <- theta[7 : (6 + p)]
beta2 <- theta[(7 + p) : (6 + 2 * p)]
beta3 <- theta[(7 + 2* p) : (6 + 3 * p)]
t1 <- vt[1]
t2 <- vt[2]
d1 <- as.numeric(t1 < t2)
y2 <- t2
y1 <- pmin(t1, t2)
#likelihood <- hzd1(kappa1, alpha1, beta1, y1, x, g)^(d1) * hzd2(kappa2, alpha2, beta2, y1, x, g)^(1-d1) * hzd3(kappa3, alpha3, beta3, y2, x, g)^d1 * surv1(kappa1, alpha1, beta1, y1, x, g) * surv2(kappa2, alpha2, beta2, y1, x, g) * (surv3(kappa3, alpha3, beta3, y2, x, g) / surv3(kappa3, alpha3, beta3, y1, x, g)+ 1e-200 )^(d1)/(surv1(kappa1, alpha1, beta1, v, x, g) *surv2(kappa2, alpha2, beta2, v, x, g)) * prod(1 / (1 - vt1))
likelihood <- hzd1(kappa1, alpha1, beta1, y1, x, g)^(d1) * hzd2(kappa2, alpha2, beta2, y1, x, g)^(1-d1) * hzd3(kappa3, alpha3, beta3, y2, x, g)^d1 * surv1(kappa1, alpha1, beta1, y1, x, g) * surv2(kappa2, alpha2, beta2, y1, x, g) * (surv3(kappa3, alpha3, beta3, y2, x, g) / (surv3(kappa3, alpha3, beta3, y1, x, g)+ 1e-200) )^(d1) * prod(1 / (1 - vt1))
likelihood[is.nan(likelihood)] <- 0
return(likelihood)
}
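# Amatx()/bmatx() fill mA (m x m) and mb (q x m) by 2-D quadrature (my2d below);
# creata() then forms a = t(ginv(mA) %*% t(mb)), the projection coefficients used in projscore().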
Amatx <- function(ij, vg, vq, theta, x, v = 0){
#print("A")
i <- ij[1]
j <- ij[2]
dm <- function(k, vg, vt, x, theta, v){
likelihood(vt, x, vg[k], theta, v)* vq[k]
}
A <- function(vt){
likelihood(vt, x, vg[j], theta, v)* vq[j] * likelihood(vt, x, vg[i], theta, v)/ (apply((sapply(1 : m, dm, vg, vt, x, theta, v)), 1, sum) + 1e-200)
}
#Aij <- mean(A(vtg))#mean(apply(vt, 1, A), na.rm = T)
#mA[i, j] <<- Aij
Aij <- my2d(A, mgrid, 1)
mA[i, j] <<- Aij
return(NULL)
}
bmatx <- function(i, vg, vq, theta, x, v=1e-5){
# print("b")
num <- function(k, vg, vt, x, theta, v= v){
score( vt, theta, x, vg[k], v) * vq[k]
}
dm <- function(k, vg, vt, x, theta, v= v){
likelihood(vt, x, vg[k], theta, v)* vq[k]
}
b <- function(vt){
Reduce('+', lapply(1 :m, num, vg, vt, x, theta, v))/(matrix(rep(apply(sapply(1 : m, dm, vg, vt, x, theta, v), 1, sum), q), ncol = q) + 1e-200) * matrix(rep(likelihood(vt, x, vg[i], theta, v), q), ncol = q)
}
#bi <- apply(b(vtg), 2, mean)
bi <- my2d(b, mgrid, q)#vegas (2, length(theta) -3, b, lower = c(0.01, 0.01), upper = c(0.99, 0.99), abs.tol = 0.01)$value
if(sum(is.nan(bi)) > 0){
browser()
}
bi[is.nan(bi)] <- 0
mb[, i] <<- bi
return(NULL)
}
singleAmatx <- function(ij, vg, vq, theta, x, v = 0){
#print("A")
i <- ij[1]
j <- ij[2]
dm <- function(k, vg, vt, x, theta, v){
singlelikelihood(vt, x, vg[k], theta, v)* vq[k]
}
A <- function(vt){
singlelikelihood(vt, x, vg[j], theta, v)* vq[j] * singlelikelihood(vt, x, vg[i], theta, v)/ sum(sapply(1 : m, dm, vg, vt, x, theta, v))
}
#Aij <- area* mean(A(vtg))#mean(apply(vt, 1, A), na.rm = T)
#mA[i, j] <<- Aij
Aij <- vegas (2, 1, A, lower = c(0.01, 0.01), upper = c(0.99, 0.99), abs.tol = 0.01)$value
mA[i, j] <<- Aij
return(NULL)
}
singlebmatx <- function(i, vg, vq, theta, x, v=1e-5){
# print("b")
num <- function(k, vg, vt, x, theta, v= v){
singlescore( vt, theta, x, vg[k], v) * vq[k]
}
dm <- function(k, vg, vt, x, theta, v= v){
singlelikelihood(vt, x, vg[k], theta, v)* vq[k]
}
b <- function(vt){
Reduce('+', lapply(1 :m, num, vg, vt, x, theta, v))/sum(sapply(1 : m, dm, vg, vt, x, theta, v)) * singlelikelihood(vt, x, vg[i], theta, v)
}
#bi <- area * apply(b(vtg), 2, mean)
bi <- vegas (2, length(theta) -3, b, lower = c(0.01, 0.01), upper = c(0.99, 0.99), abs.tol = 0.01)$value
mb[, i] <<- bi
return(NULL)
}
projscore <- function(vg, vq, theta, vt, x, a, v= 0){
num <- function(k, vg, vt, x, theta, v){
(singlescore(vt, theta, x, vg[k], v) - a[, k]) * singlelikelihood(vt, x, vg[k], theta, v) * vq[k]
}
dm <- function(k, vg, vt, x, theta, v){
singlelikelihood(vt, x, vg[k], theta, v)* vq[k]
}
apply(sapply(1 :m, num, vg, vt, x, theta, v), 1, sum)/sum(sapply(1 : m, dm, vg, vt, x, theta, v))
}
creata <- function(i, theta, cmptresp, p, mx, cmptv){
apply(ij, 1, Amatx, vg, vq, theta, mx[i, ], v = cmptv[i])
lapply(1 :m, bmatx, vg, vq, theta, mx[i,], v= cmptv[i])
invA <- try(ginv(mA))
  if(inherits(invA, "try-error")){
browser()
}
a <- t(invA %*% t(mb))
}
completescore <- function(i, theta, cmptresp, cn, p, ma, cmptcovm, cmptv){
## apply(ij, 1, Amatx, vg, vq, theta, cmptcovm[i, ], v = cmptv[i])
## lapply(1 :m, bmatx, vg, vq, theta, cmptcovm[i,], v= cmptv[i])
## invA <- try(ginv(mA))
## if(class(invA) == "try-error"){
## browser()
## }
## a <- t(invA %*% t(mb))
a <- ma[[which(apply(mx, 1, identical, cmptcovm[i, ]))]]
pjscore <- projscore(vg, vq, theta, cmptresp[i,c("y1", "y2")], cmptcovm[i, ], a, v = cmptv[i])
if(is.nan(sum(pjscore))){
browser()
}
pjscore
}
missingscore <- function(i, theta, missresp, cmptresp, mn, cn, p, misscovm, cmptcovm, cmptscore, missv ){
if(missresp[i, "d1"] == 1 & missresp[i, "d2"] == 0){
cn <- missresp[i, "y2"]
y1 <- missresp[i, "y1"]
x <- misscovm[i, ]
ix <- cmptresp[, "y2"] >= cn & cmptresp[, "y1"] < cn
if(sum(ix) > 0)
missscore <- sum(cmptscore[ix,, drop = F ]* kert(y1, cmptresp[ix, "y1"], ht) * kerx(x, cmptcovm[ix, ], hx)) / (sum( kert(y1, cmptresp[ix, "y1"], ht) * kerx(x, cmptcovm[ix, ], hx)) + 0.0001)
else
missscore <- rep(0, q)
}else if(missresp[i, "d1"] == 0 & missresp[i, "d2"] == 0) {
cn <- missresp[i, "y2"]
y1 <- missresp[i, "y1"]
x <- misscovm[i, ]
ix <- cmptresp[, "y1"] >= cn
if(sum(ix) > 0)
missscore <- apply(cmptscore[ix, , drop = F]* kerx(x, cmptcovm[ix, ], ht), 2, sum) / sum( kerx(x, cmptcovm[ix, ], hx) + 0.0001)
else
missscore <- rep(0, q)
}
return(missscore)
}
estm1 <- function(theta, resp, covm, n, p, mv = rep(1e-5, n)){
colnames(resp) <- c("y1", "d1", "y2", "d2")
cmptix <- resp[, "d2"] == 1
covm <- matrix(covm, n, p)
cn <- sum(cmptix)
missix <- resp[, "d2"] == 0
mn <- sum(missix)
cmptresp <- resp[cmptix, ]
cmptcovm <- covm[cmptix, , drop = F]
cmptv <- mv[cmptix]
missresp <- resp[missix, ]
misscovm <- covm[missix, , drop = F]
missv <- mv[missix]
cmptscore <- do.call(rbind, lapply(1 : cn,completescore, theta, cmptresp, cn, p, cmptcovm, cmptv))
#browser()
missscore <- do.call(rbind, lapply(1 : mn, missingscore, theta, missresp, cmptresp, mn, cn, p, misscovm, cmptcovm, cmptscore, missv))
#browser()
score <- sum((apply(rbind(cmptscore, missscore), 2, sum) )^2)
}
estm <- function(theta, resp, covm, n, p, mv = rep(1e-5, n)){
print(theta)
colnames(resp) <- c("y1", "d1", "y2", "d2")
cmptix <- resp[, "d2"] == 1
covm <- matrix(covm, n, p)
cn <- sum(cmptix)
missix <- resp[, "d2"] == 0
mn <- sum(missix)
cmptresp <- resp[cmptix, ]
cmptcovm <- covm[cmptix, , drop = F]
cmptv <- mv[cmptix]
missresp <- resp[missix, ]
misscovm <- covm[missix, , drop = F]
missv <- mv[missix]
ma <- lapply(1 : length(mx), creata, theta, cmptresp, p, mx, cmptv)
cmptscore <- do.call(rbind, lapply(1 : cn,completescore, theta, cmptresp, cn, p, ma, cmptcovm, cmptv))
#browser()
if(mn > 0){
missscore <- do.call(rbind, lapply(1 : mn, missingscore, theta, missresp, cmptresp, mn, cn, p, misscovm, cmptcovm, cmptscore, missv))
#browser()
score <- apply(rbind(cmptscore, missscore), 2, sum)
}else{
score <- apply((cmptscore), 2, sum)
}
score <- c(score[1:3], 0, 0, 0, score[4:length(score)]) /n
}
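# simuRsk(): simulates one subject from the semi-competing-risks model with a binary covariate,
# uniform frailty g ~ U(0.5, 1.5), and uniform censoring on (cen1, cen2).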
simuRsk <- function(i, n, p, theta, cen1, cen2 ,covm = NULL){
if(is.null(covm)){
covm <- matrix(rbinom(1, 1, 0.5), 1, p)
}
kappa1 <- theta[1] ^ 2
kappa2 <- theta[2] ^ 2
kappa3 <- theta[3] ^ 2
alpha1 <- theta[4]
alpha2 <- theta[5]
alpha3 <- theta[6]
beta1 <- theta[7 : (6 + p)]
beta2 <- theta[(7 + p) : (6 + 2 * p)]
beta3 <- theta[(7 + 2* p) : (6 + 3 * p)]
x <- covm
g <- runif(1, 0.5, 1.5)
lb1 <- exp((- t(beta1)%*%x - log(g))/kappa1)
lb2 <- exp((- t(beta2)%*%x - log(g))/kappa2)
lb3 <- exp((- t(beta3)%*%x - log(g))/kappa3)
a1 <- 1/kappa1
a2 <- 1/kappa2
a3 <- 1/kappa3
p1g2 <- lb2 /(lb1 + lb2)
r <- rbinom(1, 1, p1g2)
c <- runif(1, cen1, cen2)
if(r == 1){
u <- runif(1)
t2 <- (- log(1- u) / (lb1 + lb2))^(1/a2)
t1 = t2 + 3
}else{
u <- runif(1)
t1 <- (- log(1- u) / (lb1 + lb2))^(1/a1)
u <- runif(1)
t2 <- (-log((1 - u) * exp(-lb3 * t1 ^ a3)) / lb3) ^ (1/a3)
}
y2 = min(t2, c)
y1 = min(t1, y2)
d1 <- as.numeric(t1 < y2)
d2 <- as.numeric(y2 < c)
simdata <- cbind(y1, d1, y2, d2)
colnames(simdata) <- c("y1", "d1", "y2", "d2")
return(c(simdata, covm, g))
}
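# kert()/kerx(): Gaussian kernels in time and in the covariates, used by missingscore() to
# borrow the complete-case scores for censored subjects.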
kert <- function(t1, vt2, h){
dnorm((t1 - vt2)/h)
}
kerx <- function(x1, vx2, h){
vx2 <- matrix(vx2, ncol = p)
x1 <- matrix(rep(x1, nrow(vx2)), ncol = p, byrow = T)
h <- matrix(rep(h, nrow(vx2)), ncol = p, byrow = T)
apply(dnorm((vx2 - x1)/h), 1, prod)
}
#a <- solve(mA) %*% mb
set.seed(2013)
survData <- do.call(rbind, lapply(1:n, simuRsk, n, p, theta, 10000, 300000))
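# Map event times to (0, 1) via 1 - exp(-t); likelihood/score undo this with -log(1 - vt).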
resp <- cbind(1 - exp(-survData[, 1]), survData[, 2], 1 - exp(-survData[, 3]), survData[, 4])
colnames(resp) <- c("y1", "d1", "y2", "d2")
covm <- survData[, 5]
estm2 <- function(...){
estm(...)^2
}
rt <- c(max(resp[, 1]), max(resp[, 3]))
area <- prod(rt)
set.seed(2014)
vta <- runif(100000, 0, 1)
vtb <- runif(100000, 0, 1)
vtg <- cbind(vta, vtb)
findint <- function(vv){
set.seed(2014)
va <- vv[1]
vb <- vv[2]
#vc <- vv[3]
#vd <- vv[4]
vt <- cbind(runif(10000, va, 1), runif(10000, vb, 1))
res <- try(sum(abs(apply(score(vt, theta, covm[1], vg[1]) / (dunif(vt[,1], va, 1) * dunif(vt[, 2], vb, 1)), 2, mean) - d$integral)))
if(is.nan(res)){
browser()
}
return(res)
}
ng <- 32
cx <- gaussLegendre(ng, 0.01, 0.99)
x <- cx$x
wx <- cx$w
cy <- gaussLegendre(ng, 0.01, 0.99)
y <- cy$x
wy <- cy$w
mgrid <- meshgrid(x, y)
my2d <- function (f, mgrid, nf, ...)
{
fun <- match.fun(f)
f <- function(vt) fun(vt, ...)
mZ <- as.matrix(f(cbind(as.vector(mgrid$X), as.vector(mgrid$Y))), ncol = nf)
temp <- function(i){
Z <- matrix(mZ[, i], ng, ng)
Q <- c( wx %*% Z %*% as.matrix(wy))
}
Q <- sapply(1 : nf, temp)
return(Q)
}
#dfsane(c(rep(0.5, 6), rep(-0.5, 3)), estm, method = 2, control = list(tol = 1.e-5, noimp = 100 ), quiet = FALSE, resp, covm, n, p, rep(min(resp[, 1] /2), n))
mx <- matrix(c(0, 1), ncol = p)
#multiroot(estm, c(rep(1, 6), rep(-0.5, 3)), maxiter = 100, rtol = 1e-6, atol = 1e-8, ctol = 1e-8,useFortran = TRUE, positive = FALSE,jacfunc = NULL, jactype = "fullint", verbose = FALSE, bandup = 1, banddown = 1,resp, covm, n, p)
theta<- c(0.08241974, -0.06403001, 0.21495395, 0.50000000, 0.50000000, 0.50000000, -0.44144138, -0.50645970, -0.85097759)
Z<- rnorm(1000, 0, 1)
p <- rbinom(1000, 1, pnorm(Z, 0, 1))
g <- rgamma(1000, 1, 1/0.5) * p + (2 + rgamma(1000, 1, 1/6)) * (1 - p)
plot(density(g), main = "")
|
/accleft/accleft.r
|
no_license
|
homebovine/harvard
|
R
| false | false | 17,232 |
r
|
#!/usr/bin/env Rscript
#test how to use anova in windows:
suppressPackageStartupMessages(library("argparse"))
suppressPackageStartupMessages(library("reshape2"))
suppressPackageStartupMessages(library("zoo"))
suppressPackageStartupMessages(library("ggplot2"))
# create parser object
parser <- ArgumentParser()
# specify our desired options
parser$add_argument("-e", "--rseed", type="integer", default=0,
help="random number generator seed (default=0).")
parser$add_argument("-r", "--reps", type="integer", default=20,
help="Number of replicate control individuals (default=20).")
parser$add_argument("-R", "--sperm_reps", type="integer", default=20,
help="Number of replicate sperm samples (default=20).")
parser$add_argument("-g", "--gensize", type="integer", default=2000,
help="Number of heterozygous SNPs in the genome (default=2000).")
parser$add_argument("-t", "--treatsize", type="integer", default=1000,
help="Number of heterozygous SNPs in the distorted region (default=1000).")
parser$add_argument("-c", "--chroms", type="integer", default=4,
help="Number of chromosomes per genome(default=4).")
parser$add_argument("-b", "--bps_per_hetsnp", type="integer", default=2000,
help="Basepairs per heterozygous SNP (default=2000).")
parser$add_argument("-d", "--distortion_frac", type="double", default=0.1,
help="Degree of distortion as a fraction of allele frequency(default=0.1).")
parser$add_argument("-a", "--average_coverage", type="double", default=1.75,
help="Average genome coverage (default=1.75).")
parser$add_argument("-O", "--simulation_data_out", default="out_sim.txt",
help="Path to simulation data output file (default=out_sim.txt).")
parser$add_argument("-p", "--pdf_out", default="out.pdf",
help="Path to pdf output file (default=out.pdf).")
parser$add_argument("-m", "--pdf_title", default="2Mb sliding window ANOVA (simulated)",
help="Title of plot.")
# get command line options, if help option encountered print help and exit,
# otherwise if options not found on command line then set defaults,
args <- parser$parse_args()
rseed = args$rseed
gensize = args$gensize
treatsize = args$treatsize
winsize = args$winsize
winstep = args$winstep
bps_per_hetsnp = args$bps_per_hetsnp
distortion_frac = args$distortion_frac
txt_sim_out = args$simulation_data_out
pdf_out = args$pdf_out
pdf_title = args$pdf_title
reps = args$reps
sperm_reps = args$sperm_reps
nchroms = args$chroms
avgcov = args$average_coverage
# set random seed
set.seed(seed = rseed)
# generate mean for each locus
means <- rnorm(gensize)
# generate a set of identical sperm samples
b <- t(sapply(means, function(x){rep(x, sperm_reps)}))
# generate a set of identical blood samples
a <- t(sapply(means, function(x){rep(x, reps)}))
colnames(a) = 1:ncol(a)
colnames(b) = 1:ncol(b)
# melt sperm and blood samples, name them
a=melt(a)
a$tissue = rep("sperm", nrow(a))
b=melt(b)
b$tissue = rep("blood", nrow(b))
# combine blood and sperm samples
new2_ab = as.data.frame(rbind(a,b))
# specify chromosomes for all samples
new2_ab$chrom = rep(
rep(
seq(1,nchroms),
each=gensize / nchroms
),
nrow(new2_ab) / gensize
)
# generate per-chromosome gc bias values, add to data
gcs = rnorm(n=nchroms)
new2_ab$gc = sapply(new2_ab$chrom, function(x){gcs[x]})
# make sure chroms are factors
new2_ab$chrom = factor(new2_ab$chrom)
# assign a unique sample number to each sample
new2_ab$sample = rep(seq(1,nrow(new2_ab) / gensize), each = gensize)
# name pos and indiv columns correctly
colnames(new2_ab)[1] = "pos"
colnames(new2_ab)[2] = "indiv"
# generate and apply sample biases
biases = 0.5 + rnorm(n=length(levels(factor(new2_ab$sample))), sd=0.1)
new2_ab$bias = sapply(new2_ab$sample, function(x){biases[x]})
# generate coverage counts at each locus, with bias based on sample
new2_ab$count = rpois(nrow(new2_ab), (rep(avgcov, nrow(new2_ab)) + new2_ab$bias))
# generate allele counts based on binomial draws from coverage
new2_ab$hits = rbinom(nrow(new2_ab), new2_ab$count, new2_ab$bias)
# make sure 1 region of the genome is selected, and give it a bias toward 1 allele
# this region of the genome should be found in only 1 chromosome of 1 individual
selectedstart = (gensize-treatsize) + 1
selectedend = gensize
selectedrange = selectedstart:selectedend
new2_ab$hits[selectedrange] = rbinom((selectedend - selectedstart) + 1, new2_ab$count[selectedrange], new2_ab$bias[selectedrange] + (distortion_frac * sample(c(1,-1), (selectedend - selectedstart) + 1, replace=TRUE)))
write.table(new2_ab, txt_sim_out)
|
/Distortion_2019/simulation/full_binomdat_sim_unphased.R
|
no_license
|
jgbaldwinbrown/jgbutils
|
R
| false | false | 4,572 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Datasets}
\alias{Datasets}
\alias{data_all}
\title{data_all}
\format{
An object of class \code{list} of length 22.
}
\usage{
data(data_all)
}
\description{
See wichita
}
\details{
See description.
}
\keyword{datasets}
|
/man/Datasets.Rd
|
no_license
|
FranzKrah/ClimInd
|
R
| false | true | 324 |
rd
|
## directory to download reports
dl.dir <- file.path(getwd(), paste0("downloads.", format(Sys.time(), "%Y%m%d.%H%M%S")))
dir.create(dl.dir)
## connect to server and navigate to page
require(RSelenium)
rd <- rsDriver(browser="chrome", extraCapabilities=list(chromeOptions=list(prefs=list("download.default_directory"=dl.dir))))
cl <- rd$client
cl$navigate("https://dashboards.lordashcroftpolls.com/login.aspx?target=1rH46DzH68RfFl7AknVWbbl4nsH0s%2f%2fj5uXrUWFycQ4%3d")
## dropdown for selecting constituencies
dropdown <- cl$findElement(using="css", "button.ui-multiselect")
dd.elems <- cl$findElements(using="css", "ul.ui-multiselect-checkboxes > li > label")
update.dd <- cl$findElement(using="css", "span#btnUpdateCharts")
dd.elem.name <- function(e) {
## have to open and close dropdown
dropdown$clickElement()
nm <- e$findChildElement(value="span")$getElementText()
dropdown$clickElement()
stopifnot(length(nm) == 1)
nm[[1]]
}
select.const <- function(e) {
dropdown$clickElement()
e$clickElement()
update.dd$clickElement()
}
## should be 633 elements: 1 "no selection" + 632 constituencies
stopifnot(length(dd.elems) == 633)
## drop first "no selection"
stopifnot(dd.elem.name(dd.elems[[1]]) == "No selection")
dd.elems <- dd.elems[-1]
## export
export.btn <- cl$findElement(using="css", "div#btnOpenExportPanel")
export.btn$clickElement(); export.btn$clickElement() ## open and close to load panel
excel.btn <- cl$findElement(using="css", "input#downloadSelectedPPTExcel")
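## dl(): trigger the Excel export, wait (up to max.wait seconds) for the new download link,
## click it, then wait for a new Welcome*.zip to appear in dl.dir; returns the new file path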
dl <- function(max.wait=30) {
## generate and wait for link
export.btn$clickElement()
len <- length(cl$findElements(using="css", "div#downloadReportsPowerpoint > a"))
excel.btn$clickElement()
for (i in seq(max.wait)) {
lnk <- cl$findElements(using="css", "div#downloadReportsPowerpoint > a")
if (length(lnk) > len) break
Sys.sleep(1)
}
if (length(lnk) <= len) {
excel.btn$clickElement()
stop("timeout: waiting for link")
}
lnk <- lnk[[length(lnk)]]
## download and wait
prev.f <- list.files(dl.dir, "^Welcome.*\\.zip$", full.names=TRUE)
lnk$clickElement()
export.btn$clickElement()
for (i in seq(max.wait)) {
f <- list.files(dl.dir, "^Welcome.*\\.zip$", full.names=TRUE)
if (length(f) > length(prev.f)) return(setdiff(f, prev.f))
Sys.sleep(1)
}
stop("timeout: waiting for download")
}
## generate and download all
for (i in seq_along(dd.elems)) {
nm <- dd.elem.name(dd.elems[[i]])
cat(nm, "\n")
select.const(dd.elems[[i]])
f <- dl()
print(f)
}
|
/scraper.R
|
no_license
|
johnlaing/ashcroft.polls.scraper
|
R
| false | false | 2,621 |
r
|
## pattern scaling
## run timeshift.r
library(stringi)
library(dplyr)
setwd('~/Desktop/PatternScaling/xtreme_indices')
ndxs <- list.files(pattern = 'RData_allscenarios')
index_names = c('cdd','fd','gsl','r10mm','r95ptot','rx5day',
'sdii','tnn','txx','wsdi')
## scenarios
scenarios = c('1pt5degC','2pt0degC','RCP45','RCP85')
highscenarios = scenarios[3:4]
lowscenarios = scenarios[1:2]
source('timeshift.R')
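## timeshift.R is expected to define the annglbtas.<scenario> temperature series and the
## tshift_cutoffs table used below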
## all global temp objs
glbtemps <- ls(pattern = "^annglbtas*")
## take average global temperature for beginning and end of century
temp_avgs <- c()
for(g in scenarios){
scen = paste('annglbtas',g,sep = '.')
glbtemp = eval(parse(text = scen))
change <- apply(tail(glbtemp,20),2,mean) - apply(glbtemp[1:20,],2,mean)
temp_avgs <- rbind(temp_avgs,c(g,change))
}
########################################################
# #
# pattern scaling loop #
# each iteration we approximate #
# 1.5C and 2.0 values for one index #
# #
########################################################
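## pattern = (last-20-year minus first-20-year local change) / (change in global mean temperature);
## multiplying the pattern by a target scenario's GAT change gives the scaled approximation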
i = 1
for (ndx in ndxs)
{
print(paste('pattern scaling:',ndx))
load(ndx)
for (scen in highscenarios)
{
print(paste(index_names[i],scen,sep = '.'))
x = paste(index_names[i],scen,sep = '.')
mx = eval(parse(text = x))
## mean at each gridpoint of first 20 years
first_mean = apply(mx[,,1:20,],c(1,2,4),mean, na.rm=TRUE)
## mean at each gridpoint of last 20 years
second_mean = apply(mx[,,(dim(mx)[3]-20):dim(mx)[3],],c(1,2,4),mean,na.rm = TRUE)
## century change
change = second_mean - first_mean
## gat
gat = as.numeric(temp_avgs[temp_avgs[,1] == scen,][2:11])
## grid (pattern) of local change per degree of warming
pattern = aperm(change,c(3,1,2)) / rep(gat,288*192)
## get GAT for the scenarios
gat1.5 = as.numeric(temp_avgs[temp_avgs[,1] == "1pt5degC",][2:11])
gat2 = as.numeric(temp_avgs[temp_avgs[,1] == "2pt0degC",][2:11])
## now scale the pattern to the scenario we want to approximate
scaled1.5 = pattern * rep(gat1.5,288*192)
scaled2 = pattern * rep(gat2,288*192)
scaled1.5 = round(aperm(scaled1.5,c(2,3,1)),digits = 2)
scaled2 = round(aperm(scaled2,c(2,3,1)),digits = 2)
cutoffs1.5 = as.numeric(filter(tshift_cutoffs, hscen == scen, lscen == '1pt5degC')[3:4])
cutoffs2 = as.numeric(filter(tshift_cutoffs, hscen == scen, lscen == '2pt0degC')[3:4])
scaled1.5_tshift = apply(mx[,,cutoffs1.5[1]:cutoffs1.5[2],],c(1,2,4),mean,na.rm = TRUE) - apply(mx[,,1:20,],c(1,2,4),mean,na.rm = TRUE)
scaled2_tshift = apply(mx[,,cutoffs2[1]:cutoffs2[2],],c(1,2,4),mean,na.rm = TRUE) - apply(mx[,,1:20,],c(1,2,4),mean,na.rm = TRUE)
assign(paste(index_names[i],'scaled_1.5','from',scen,sep = '_'),scaled1.5)
assign(paste(index_names[i],'scaled_2','from',scen,sep = '_'),scaled2)
assign(paste(index_names[i],'timeshift_scaled_1.5','from',scen,sep = '_'),scaled1.5_tshift)
assign(paste(index_names[i],'timeshift_scaled_2','from',scen,sep = '_'),scaled2_tshift)
}
to_rm = ls(pattern = paste0('^',index_names[i],'\\.'))
rm(list = to_rm)
i = i + 1
}
|
/scaling.R
|
no_license
|
armbuster/Pattern-Scaling-Research
|
R
| false | false | 3,413 |
r
|
library(jstor)
### Name: find_article
### Title: Defunct: Extract meta information for articles
### Aliases: find_article
### ** Examples
## Not run:
##D
##D find_article(jstor_example("sample_with_references.xml"))
## End(Not run)
|
/data/genthat_extracted_code/jstor/examples/find_article.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 242 |
r
|
## ----setup, echo = FALSE, include=FALSE---------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ----logo, echo=FALSE, fig.height=8.5, fig.pos="H", fig.align='center'--------
knitr::include_graphics('img/logo.png')
## ----libraries, echo=TRUE, message=FALSE--------------------------------------
library(waydown)
# To calculate some trajectories
library(deSolve)
# To plot our results
library(ggplot2)
# To arrange our plots in panels
library(latticeExtra)
library(gridExtra)
# For nicer plots
library(colorRamps)
## ----Allee-def----------------------------------------------------------------
r <- 1
A <- 0.5
K <- 1
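# growth rate with a strong Allee effect: negative below the threshold A, positive between A and K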
f <- function(x) { r * x * (x/A - 1) * (1 - x/K) }
## ----Allee-points-------------------------------------------------------------
xs <- seq(0, 1.25, by = 0.01)
## ----Allee-algorithm, cache = TRUE--------------------------------------------
Vs <- approxPot1D(f, xs)
## ----Allee-plot---------------------------------------------------------------
plot(xs, Vs,
type = 'l', xlab = 'N', ylab = 'V')
## ----Four-def-----------------------------------------------------------------
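# two uncoupled double-well (bistable) equations; stable equilibria at x = +/-1 and y = +/-1, unstable at 0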
f <- function(x) {c(-x[1]*(x[1]^2 - 1),
-x[2]*(x[2]^2 - 1))}
## ----Four-points--------------------------------------------------------------
xs <- seq(-1.5, 1.5, by = 0.025)
ys <- seq(-1.5, 1.5, by = 0.025)
## ----Four-algorithm, cache = TRUE---------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----Four-extra, include=FALSE------------------------------------------------
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(-1, -1, 0, 1, 1),
y_eq = c(-1, 1, 0, -1, 1),
equilibrium = factor(c('stable', 'stable', 'unstable', 'stable', 'stable')))
## ----Four-plot, echo=FALSE, message=FALSE, warning=FALSE----------------------
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
## ----Four-check---------------------------------------------------------------
max(result$err) == 0
## ----Curl-def-----------------------------------------------------------------
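# a purely rotational (non-gradient) field: no true potential exists, so the error map is the informative output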
f <- function(x) {c(-x[2],
x[1])}
## ----Curl-points--------------------------------------------------------------
xs <- seq(-2, 2, by = 0.05)
ys <- seq(-2, 2, by = 0.05)
## ----Curl-algorithm, cache = TRUE---------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----Curl-extra, include=FALSE------------------------------------------------
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
## ----Curl-plot, echo=FALSE, message=FALSE, warning=FALSE----------------------
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
# geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
## ----Wadd-def-----------------------------------------------------------------
# Parameters
bx <- 0.2
ax <- 0.125
kx <- 0.0625
rx <- 1
by <- 0.05
ay <- 0.1094
ky <- 0.0625
ry <- 1
n <- 4
# Dynamics
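# two mutually repressing genes (a toggle switch): each variable decays linearly and is
# repressed by the other through a Hill-type term with exponent n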
f <- function(x) {c(bx - rx*x[1] + ax/(kx + x[2]^n),
by - ry*x[2] + ay/(ky + x[1]^n))}
## ----Wadd-points--------------------------------------------------------------
xs <- seq(0, 4, by = 0.05)
ys <- seq(0, 4, by = 0.05)
## ----Wadd-algorithm, cache = TRUE---------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----Wadd-extra, include=FALSE------------------------------------------------
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
#
# Estimated with Wolfram Alpha
# Prompt: 0 = 0.2 - x + 0.125/(0.0625 + y^4); 0 = 0.05 - y + 0.1094/(0.0625 + x^4)
eqPoints <- data.frame(x_eq = c(0.213416, 0.559865, 2.19971),
y_eq = c(1.74417, 0.730558, 0.0546602),
equilibrium = factor(c('stable', 'unstable', 'stable')))
## ----Wadd-plot, echo=FALSE, message=FALSE, warning=FALSE----------------------
nbins <- 25
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
## ----Selkov-def---------------------------------------------------------------
# Parameters
a <- 0.1
b <- 0.5
# Dynamics
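# Selkov glycolysis model; depending on b the system approaches a fixed point or a limit cycle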
f <- function(x) {c(-x[1] + a*x[2] + x[1]^2*x[2],
b - a*x[2] - x[1]^2*x[2])}
## ----Selkov-solution, echo = FALSE--------------------------------------------
# Package desolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
roi <- c(0, 2.5, 0, 2.5)
init_state <- c(1, .05)
ts <- seq(0, 1000, by = 0.01)
bs <- c(0.1, 0.6, 1.3)
for (b in bs) {
out <- ode(y = init_state, times = ts, func = f_dyn, parms = c(a = a, b = b))
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
xs <- seq(roi[1], roi[2], by = 0.05)
ys <- seq(roi[3], roi[4], by = 0.05)
result <- approxPot2D(f, xs, ys)
# Get the limit cycle attractor
attr <- dplyr::filter(as.data.frame(out), time > 0)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle(sprintf("Error map. b = %.3f ", b)) +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
}
## ----VL-def-------------------------------------------------------------------
# Parameters
r <- 1
k <- 10
h <- 2
e <- 0.2
m <- 0.1
# Auxiliary function
g <- function(x) {1/(h + x)}
# Dynamics
f <- function(x) {c(r*x[1]*(1 - x[1]/k) -g(x[1])*x[1]*x[2],
e*g(x[1])*x[1]*x[2] - m*x[2])}
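# (For orientation: Lotka-Volterra-type predator-prey dynamics with logistic
# prey growth and a saturating Holling type II functional response g(x);
# x[1] is prey biomass and x[2] is predator biomass, as labelled in the
# trajectory plot below.)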
## ----VL-solution, echo = FALSE------------------------------------------------
# Package deSolve requires a slightly different syntax
f_dyn <- function(t, state, parameters) {
with(as.list(c(state, parameters)),{
# rate of change
df <- f(state)
dX <- df[1]
dY <- df[2]
# return the rate of change
list(c(dX, dY))
}) # end with(as.list ...
}
parms <- c(r =r,
k = k,
h = h,
e = e,
m = m)
init_state <- c(1,2)
ts <- seq(0, 300, by = 0.01)
out <- ode(y = init_state, times = ts, func = f_dyn, parms = parms)
colnames(out) <- c("time", "x", "y")
out <- as.data.frame(out)
plot(out$x, out$y, type = 'l', asp = 1,
main = 'Trajectory', xlab = 'x (prey biomass)', ylab = 'y (predator biomass)')
## ----VL-points----------------------------------------------------------------
xs <- seq(0, 10, by = 0.05)
ys <- seq(0, 5, by = 0.05)
## ----VL-algorithm, cache = TRUE-----------------------------------------------
result <- approxPot2D(f, xs, ys)
## ----VL-extra, echo = FALSE---------------------------------------------------
# Get the limit cycle attractor
attr <- dplyr::filter(as.data.frame(out), time > 200)
# Transform result into dataframe
data <- expand.grid(X = xs, Y = ys)
data$V <- as.vector(result$V)
data$err <- as.vector(result$err)
# Input equilibrium points (calculated externally)
eqPoints <- data.frame(x_eq = c(0),
y_eq = c(0),
equilibrium = factor(c('unstable')))
## ----VL-plot, echo=FALSE, message=FALSE, warning=FALSE------------------------
nbins <- 15
plotV <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = V)) +
geom_contour(data = data, aes(x = X, y = Y, z = V), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::matlab.like(nbins)) +
xlab("x") + ylab("y") + ggtitle("Approximate potential") +
theme_bw()
plotErr <- ggplot() +
geom_tile(data = data, aes(x = X, y = Y, fill = err)) +
geom_contour(data = data, aes(x = X, y = Y, z = err), colour = 'white', alpha = 0.5, bins = nbins) +
geom_point(data = eqPoints, aes(x = x_eq, y = y_eq, color = equilibrium)) +
geom_path(data = attr, aes(x = x, y = y)) +
coord_fixed() +
scale_fill_gradientn(colours = colorRamps::green2red(nbins), limits = c(0,1)) +
xlab("x") + ylab("y") + ggtitle("Error map") +
theme_bw()
grid.arrange(plotV, plotErr, ncol = 2)
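## Optional numeric summary (added for illustration, not in the original
## vignette): the fraction of grid cells where the error measure returned by
## approxPot2D is above 0.5, i.e. where the approximate potential for this
## predator-prey example should be interpreted with caution.
mean(result$err > 0.5)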
|
/inst/doc/examples.R
|
permissive
|
cran/waydown
|
R
| false | false | 12,358 |
r
|
\name{standardColors}
\alias{standardColors}
\title{Colors this library uses for labeling modules.}
\description{
Returns the vector of color names in the order they are assigned by other functions in this library.
}
\usage{
standardColors(n = NULL)
}
\arguments{
\item{n}{Number of colors requested. If \code{NULL}, all (approximately 450) colors will be returned. Any
invalid value, such as a number less than one or greater than the maximum (\code{length(standardColors())}),
will trigger an error. }
}
\value{
A vector of character color names of the requested length.
}
\author{
Peter Langfelder, \email{Peter.Langfelder@gmail.com}
}
\examples{
standardColors(10);
}
\keyword{color}
\keyword{misc}
|
/man/standardColors.Rd
|
no_license
|
cran/WGCNA
|
R
| false | false | 697 |
rd
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
library(C50)
library(modeldata)
## ----credit-data--------------------------------------------------------------
library(modeldata)
data(credit_data)
## ----credit-vars--------------------------------------------------------------
vars <- c("Home", "Seniority")
str(credit_data[, c(vars, "Status")])
# a simple split
set.seed(2411)
in_train <- sample(1:nrow(credit_data), size = 3000)
train_data <- credit_data[ in_train,]
test_data <- credit_data[-in_train,]
## ----tree-mod-----------------------------------------------------------------
library(C50)
tree_mod <- C5.0(x = train_data[, vars], y = train_data$Status)
tree_mod
## ----tree-summ----------------------------------------------------------------
summary(tree_mod)
## ----tree-plot, fig.width = 10------------------------------------------------
plot(tree_mod)
## ----tree-boost---------------------------------------------------------------
tree_boost <- C5.0(x = train_data[, vars], y = train_data$Status, trials = 3)
summary(tree_boost)
## ----rule-mod-----------------------------------------------------------------
rule_mod <- C5.0(x = train_data[, vars], y = train_data$Status, rules = TRUE)
rule_mod
summary(rule_mod)
## ----pred---------------------------------------------------------------------
predict(rule_mod, newdata = test_data[1:3, vars])
predict(tree_boost, newdata = test_data[1:3, vars], type = "prob")
## ----cost---------------------------------------------------------------------
cost_mat <- matrix(c(0, 2, 1, 0), nrow = 2)
rownames(cost_mat) <- colnames(cost_mat) <- c("bad", "good")
cost_mat
cost_mod <- C5.0(x = train_data[, vars], y = train_data$Status,
costs = cost_mat)
summary(cost_mod)
# more samples predicted as "bad"
table(predict(cost_mod, test_data[, vars]))
# than previously (tree_mod, fit without costs, shown for comparison)
table(predict(tree_mod, test_data[, vars]))
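## Illustrative addition (not part of the original vignette): cross-tabulate
## predictions against the observed Status on the held-out data, to see how
## the cost matrix shifts the confusion matrix rather than just the counts.
table(predicted = predict(cost_mod, test_data[, vars]),
      observed = test_data$Status)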
|
/inst/doc/C5.0.R
|
no_license
|
cran/C50
|
R
| false | false | 1,969 |
r
|
#' @description
#' Simple interface for the Keywords Everywhere API.
#' @keywords internal
#' @importFrom httr GET POST add_headers
#' @importFrom jsonlite fromJSON
"_PACKAGE"
|
/R/kwewr.R
|
no_license
|
retowyss/kwewr
|
R
| false | false | 176 |
r
|
library(testthat)
library(devtools)
library(doParallel)
library(StepwiseTest)
library(matrixcalc)
library(foreach)
library(mvtnorm)
# # THIS TEST TAKES A WHILE BECAUSE IT RUNS 500 RESAMPLES
# # COMMENT IT OUT IF YOU WANT TO AVOID IT
# # test that increasing the correlation between outcomes increases width of null interval
# test_that("corr_tests #2", {
#
# ######## Low Correlation Between Outcomes ########
# N = 250
#
# cor = make_corr_mat( nX = 3,
# nY = 100,
# rho.XX = 0,
# rho.YY = 0.05,
# rho.XY = 0,
# prop.corr = 1 )
#
# d = sim_data( n = N, cor = cor )
# all.covars = names(d)[ grep( "X", names(d) ) ]
# C = all.covars[ !all.covars == "X1" ]
# Y = names(d)[ grep( "Y", names(d) ) ]
#
# res1 = corr_tests( d,
# X = "X1",
# C = C,
# Ys = Y,
# B = 500,
# alpha = 0.1,
# alpha.fam=0.1,
# method = c( "nreject", "bonferroni", "holm", "minP", "Wstep", "romano" ) )
#
# ######## Check Results of First Sample ########
# # check inference: excess hits
# expect_equal( as.numeric(res1$samp.res$rej) - as.numeric(res1$null.int[2]),
# res1$excess.hits )
#
# # check inference: critical value from global test
# expect_equal( as.numeric( quantile( res1$nrej.bt, 1-0.1 ) ),
# as.numeric( res1$global.test$crit[ res1$global.test$method == "nreject"] ) )
#
# # check p-value of global test
# expect_equal( sum( res1$nrej.bt >= res1$samp.res$rej ) / length( res1$nrej.bt ),
# res1$global.test$pval[ res1$global.test$method == "nreject"] )
#
#
# # check results from original sample
# # do analysis manually
# alpha = 0.1
#
# rej.man = 0
# tvals.man = c()
# bhats.man = c()
# pvals.man = c()
# resid.man = matrix( NA, nrow = nrow(d), ncol = length(Y) )
#
# for ( i in 1:length(Y) ) {
# m = lm( d[[ Y[i] ]] ~ X1 + X2 + X3, data = d )
# bhats.man[i] = coef(m)[["X1"]]
# tvals.man[i] = summary(m)$coefficients["X1","t value"]
# pvals.man[i] = summary(m)$coefficients["X1", "Pr(>|t|)"]
# resid.man[,i] = residuals(m)
#
# # did we reject it?
# if ( summary(m)$coefficients["X1", "Pr(>|t|)"] < alpha ) rej.man = rej.man + 1
# }
#
# # check bhats
# expect_equal( bhats.man, res1$samp.res$bhats )
# expect_equal( tvals.man, res1$samp.res$tvals )
# expect_equal( pvals.man, res1$samp.res$pvals )
# expect_equal( as.numeric(as.matrix(resid.man)),
# as.numeric(as.matrix(res1$samp.res$resid)) )
#
# expect_equal( sum( pvals.man < alpha ),
# sum( res1$samp.res$rej ) )
#
# # check other global tests
# expect_equal( res1$global.test$pval[ res1$global.test$method == "Wstep" ],
# min( adj_Wstep( p = res1$samp.res$pvals, p.bt = res1$pvals.bt ) ) )
#
# expect_equal( res1$global.test$pval[ res1$global.test$method == "minP" ],
# min( adj_minP( p = res1$samp.res$pvals, p.bt = res1$pvals.bt ) ) )
#
# expect_equal( res1$global.test$pval[ res1$global.test$method == "bonferroni" ],
# min( p.adjust( res1$samp.res$pvals, method="bonferroni" ) ) )
#
# expect_equal( res1$global.test$pval[ res1$global.test$method == "holm" ],
# min( p.adjust( res1$samp.res$pvals, method="holm" ) ) )
#
# expect_equal( res1$global.test$reject[ res1$global.test$method == "romano" ],
# any( FWERkControl( res1$samp.res$tvals, as.matrix( res1$tvals.bt ), k = 1, alpha = .1 )$Reject == 1 ) )
#
# ######## Higher Correlation Between Outcomes ########
# cor = make_corr_mat( nX = 3,
# nY = 100,
# rho.XX = 0,
# rho.YY = 0.25,
# rho.XY = 0,
# prop.corr = 1 )
#
# d = sim_data( n = N, cor = cor )
# all.covars = names(d)[ grep( "X", names(d) ) ]
# C = all.covars[ !all.covars == "X1" ]
# Y = names(d)[ grep( "Y", names(d) ) ]
#
# res2 = corr_tests( d,
# X = "X1",
# C = C,
# Ys = Y,
# B = 500,
# alpha = 0.1,
# alpha.fam = 0.1,
# method = c( "nreject", "bonferroni", "holm", "minP", "Wstep", "romano" ) )
#
#
# ######## Tests ########
# # null interval should be wider for the second one
# expect_equal( as.logical( res2$null.int[2] >= res1$null.int[2] ), TRUE )
#
# # p-value should be larger for the second one
# expect_equal( as.logical( res2$global.test$pval[ res2$global.test$method == "nreject" ] >=
# res1$global.test$pval[ res1$global.test$method == "nreject" ] ), TRUE )
#
# } )
# only checks a few things:
# two of the global tests
# and the average number of rejections in resamples
test_that( "corr_tests #1", {
library(carData)
data(Soils)
X = "pH"
C = c("Na", "Conduc")
Y = c("N", "Dens", "P", "Ca", "Mg", "K")
res = corr_tests( Soils,
X = X,
Ys = Y,
B = 200,
alpha = 0.1,
method = c( "nreject", "bonferroni", "holm", "minP", "Wstep", "romano" ) )
# should be about equal
expect_equal( mean(res$nrej.bt),
.10*length(Y),
tolerance = 0.1 )
# Bonferroni: should be exactly equal
expect_equal( min( res$samp.res$pvals * length(Y) ),
res$global.test$pval[2] )
# Holm: should be exactly equal
expect_equal( min( p.adjust( res$samp.res$pvals, method = "holm" ) ),
res$global.test$pval[3] )
} )
###################### TEST FNS FOR APPLYING OUR METRICS ######################
# fix_input with extra covariates
# X1 is extra and should be removed
test_that("fix_input #2", {
cor = make_corr_mat( nX = 1,
nY = 4,
rho.XX = 0,
rho.YY = 0.25,
rho.XY = 0,
prop.corr = 1 )
d = sim_data( n = 20, cor = cor )
all.covars = names(d)[ grep( "X", names(d) ) ]
C = all.covars[ !all.covars == "X1" ]
##### Add Bad Input ######
# insert missing data
d[1,4] = NA
# insert a decoy variable that should be removed in analysis
d$X20 = rnorm( n = nrow(d) )
d$X21 = rnorm( n = nrow(d) )
# make one of the covariates not mean-centered
d$X1 = d$X1 + 2
d = fix_input( X="X1",
C=NA,
Ys=names(d)[ grep( "Y", names(d) ) ],
d = d )
# check that it caught bad input
expect_equal( c( "X20", "X21" ) %in% names(d),
c(FALSE, FALSE) )
expect_equal( any( is.na(d) ),
FALSE )
} )
# fix_input with extra covariates
test_that("fix_input #1", {
cor = make_corr_mat( nX = 5,
nY = 10,
rho.XX = -0.06,
rho.YY = 0.1,
rho.XY = -0.1,
prop.corr = 8/40 )
d = sim_data( n = 20, cor = cor )
all.covars = names(d)[ grep( "X", names(d) ) ]
C = all.covars[ !all.covars == "X1" ]
##### Add Bad Input ######
# insert missing data
d[1,4] = NA
# insert a decoy variable that should be removed in analysis
d$X20 = rnorm( n = nrow(d) )
d$X21 = rnorm( n = nrow(d) )
# make one of the covariates not mean-centered
d$X5 = d$X5 + 2
d = fix_input( X="X1",
C=C,
Ys=names(d)[ grep( "Y", names(d) ) ],
d = d )
# check that it caught bad input
expect_equal( c( "X20", "X21" ) %in% names(d),
c(FALSE, FALSE) )
expect_equal( any( is.na(d) ),
FALSE )
} )
# fit_model doesn't need a test because we test it through the dataset_result tests
# without centering test stats
test_that("dataset_result #1", {
cor = make_corr_mat( nX = 5,
nY = 2,
rho.XX = -0.06,
rho.YY = 0.1,
rho.XY = -0.1,
prop.corr = 8/40 )
d = sim_data( n = 50, cor = cor )
# try to confuse fn by choosing a different X as covariate of interest
Ys = names(d)[ grep( "Y", names(d) ) ]
X = "X2"
all.covars = names(d)[ grep( "X", names(d) ) ]
C = all.covars[ !all.covars == X ]
# do analysis manually
alpha = 0.05
rej.man = 0
tvals.man = c()
bhats.man = c()
pvals.man = c()
resid.man = matrix( NA, nrow = 50, ncol = 2 )
for ( i in 1:length(Ys) ) {
m = lm( d[[ Ys[i] ]] ~ X1 + X2 + X3 + X4 + X5, data = d )
bhats.man[i] = coef(m)[[X]]
tvals.man[i] = summary(m)$coefficients[X,"t value"]
pvals.man[i] = summary(m)$coefficients[X, "Pr(>|t|)"]
resid.man[,i] = residuals(m)
# did we reject it?
if ( summary(m)$coefficients[X, "Pr(>|t|)"] < alpha ) rej.man = rej.man + 1
}
# with function
samp.res = dataset_result( d = d,
X = X,
C = C,
Ys = Ys, # all outcome names
alpha = alpha,
center.stats = FALSE,
bhat.orig = NA )
resid.man = as.data.frame(resid.man)
names(resid.man) = Ys
expect_equal( rej.man, samp.res$rej )
expect_equal( bhats.man, samp.res$bhat )
expect_equal( tvals.man, samp.res$tvals )
expect_equal( pvals.man, samp.res$pvals )
expect_equal( as.matrix(resid.man), as.matrix(samp.res$resid) )
} )
# with centered test stats
test_that("dataset_result #2", {
cor = make_corr_mat( nX = 5,
nY = 20,
rho.XX = 0.16,
rho.YY = 0.1,
rho.XY = 0.1,
prop.corr = 1 )
d = sim_data( n = 50, cor = cor )
# try to confuse fn by choosing a different X as covariate of interest
Ys = names(d)[ grep( "Y", names(d) ) ]
X = "X2"
all.covars = names(d)[ grep( "X", names(d) ) ]
C = all.covars[ !all.covars == X ]
# do analysis manually
# choose an unusual alpha level to make sure it's working
alpha = 0.4
rej.man = 0
tvals.man = c()
bhats.man = c()
pvals.man = c()
resid.man = matrix( NA, nrow = 50, ncol = length(Ys) )
# fake original coefficients
bhat.orig = rnorm( n=length(Ys), mean = 0.8, sd = 2 )
for ( i in 1:length(Ys) ) {
m = lm( d[[ Ys[i] ]] ~ X1 + X2 + X3 + X4 + X5, data = d )
bhats.man[i] = coef(m)[[X]] - bhat.orig[i]
df = 50 - 5 - 1
se = summary(m)$coefficients[X, "Std. Error"]
tvals.man[i] = bhats.man[i] / se
pvals.man[i] = 2 * ( 1 - pt( abs( tvals.man[i] ), df = df ) )
resid.man[,i] = residuals(m)
# did we reject it?
if ( pvals.man[i] < alpha ) rej.man = rej.man + 1
}
# with function
samp.res = dataset_result( d = d,
X = X,
C = C,
Ys = Ys, # all outcome names
alpha = alpha,
center.stats = TRUE,
bhat.orig = bhat.orig )
resid.man = as.data.frame(resid.man)
names(resid.man) = Ys
expect_equal( rej.man, samp.res$rej )
expect_equal( bhats.man, samp.res$bhat )
expect_equal( tvals.man, samp.res$tvals )
expect_equal( pvals.man, samp.res$pvals )
expect_equal( as.matrix(resid.man), as.matrix(samp.res$resid) )
} )
###################### TEST FNS FOR SIMULATING DATA ######################
test_that("cell_corr #1", {
expect_equal( -0.1,
cell_corr( vname.1 = "X1",
vname.2 = "Y3",
rho.XX = 0,
rho.YY = 0.25,
rho.XY = -0.1,
nY = 6,
prop.corr = 1 ) )
expect_equal( 0.25,
cell_corr( vname.1 = "Y1",
vname.2 = "Y3",
rho.XX = 0,
rho.YY = 0.25,
rho.XY = -0.1,
nY = 6,
prop.corr = 1 ) )
expect_equal( 0,
cell_corr( vname.1 = "X2",
vname.2 = "Y3",
rho.XX = 0,
rho.YY = 0.25,
rho.XY = -0.1,
nY = 6,
prop.corr = 1 ) )
expect_equal( -0.1,
cell_corr( vname.1 = "X1",
vname.2 = "Y2",
rho.XX = 0,
rho.YY = 0.25,
rho.XY = -0.1,
nY = 10,
prop.corr = .2 ) )
expect_equal( 0,
cell_corr( vname.1 = "X1",
vname.2 = "Y3",
rho.XX = 0,
rho.YY = 0.25,
rho.XY = -0.1,
nY = 10,
prop.corr = .2 ) )
} )
test_that("make_corr_mat #1", {
# sanity checks
cor = make_corr_mat( nX = 1,
nY = 40,
rho.XX = 0,
rho.YY = 0.25,
rho.XY = 0.1,
prop.corr = 8/40 )
# do we have the right number of each type of correlation?
# only look at first row (correlations of X1 with everything else)
expect_equal( c( 1, rep(0.10, 8), rep(0, 40-8) ),
as.numeric( cor[1,] ) )
} )
test_that("make_corr_mat #2", {
cor = make_corr_mat( nX = 2,
nY = 40,
rho.XX = 0.35,
rho.YY = 0.25,
rho.XY = 0.1,
prop.corr = 8/40 )
d = sim_data( n = 10000, cor = cor )
# rho.XX correlations
expect_equal( cor(d$X1, d$X2), 0.35, tolerance = 0.05 )
# rho.XY correlations for non-null ones
names = paste( "Y", seq(1,8,1), sep="" )
expect_equal( as.numeric( cor( d$X1, d[, names] ) ),
rep( 0.1, 8 ),
tolerance = 0.05 )
# rho.XY correlations for null ones
names = paste( "Y", seq(9,40,1), sep="" )
expect_equal( as.numeric( cor( d$X1, d[, names] ) ),
rep( 0, 40-8 ),
tolerance = 0.05 )
# plot empirical vs. real correlations
#plot( as.numeric(cor(d)), as.numeric(as.matrix(cor)) ); abline( a = 0, b = 1, col="red")
} )
###################### TEST WESTFALL FNS ######################
test_that("adj_minP #1", {
# sanity check
B = 200
n.tests = 10
# generate fake p-values under strong null
p.bt = matrix( runif(B*n.tests, 0, 1), nrow = n.tests)
# generate fake p-values from real dataset
p = runif( n.tests, 0, .1)
p.adj = adj_minP( p, p.bt )
#plot(p, p.adj)
# manually adjust second p-value
mins = apply( p.bt, MARGIN = 2, FUN = min )
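# (mins[j] is the smallest p-value across all tests in bootstrap draw j, so
# the share of draws with mins <= p[2] is exactly the single-step minP
# adjusted p-value for hypothesis 2.)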
expect_equal( prop.table( table( mins <= p[2] ) )[["TRUE"]],
p.adj[2] )
})
test_that("adjust_Wstep #1", {
# # Sanity Check
# nX = 1
# nY = 3
# B = 5
#
# library(matrixcalc)
# library(mvtnorm)
#
# cor = make_corr_mat( nX = nX,
# nY = nY,
# rho.XX = 0,
# rho.YY = 0.25,
# rho.XY = 0.05,
# prop.corr = 1 )
#
# d = sim_data( n = 1000, cor = cor )
#
# samp.res = dataset_result( X = "X1",
# C = NA,
# Ys = c("Y1", "Y2", "Y3"),
# d = d,
# alpha = 0.05,
# center.stats = FALSE )
#
#
# # do 5 bootstraps
# resamps = resample_resid( X = "X1",
# C = NA,
# Ys = c("Y1", "Y2", "Y3"),
# d = d,
# alpha = 0.05,
# resid = samp.res$resid,
# bhat.orig = samp.res$bhats,
# B=5,
# cores = 8 )
# p.bt = t( resamps$p.bt )
# pvals = samp.res$pvals
pvals = c(0.00233103655078803, 0.470366742594242, 0.00290278216035089
)
p.bt = structure(c(0.308528665936264, 0.517319402377912, 0.686518314693482,
0.637306248855186, 0.106805510862352, 0.116705315041494, 0.0732076817175753,
0.770308936364482, 0.384405349738909, 0.0434358213611965, 0.41497067850141,
0.513471489744384, 0.571213377144122, 0.628054979652722, 0.490196884985226
), .Dim = c(5L, 3L))
# indicators of which hypothesis the sorted p-vals go with
sort(pvals)
r = c(1,3,2)
qstar = matrix( NA, nrow = nrow(p.bt), ncol = ncol(p.bt) )
for (i in 1:nrow(p.bt)) {
qstar[i,3] = p.bt[ i, r[3] ]
qstar[i,2] = min( qstar[i,3], p.bt[ i, r[2] ] )
qstar[i,1] = min( qstar[i,2], p.bt[ i, r[1] ] )
}
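# Each row of qstar now holds the running minima of that bootstrap draw's
# p-values, taken from the least significant hypothesis down to the most
# significant one -- the step-down construction behind Westfall-Young
# adjusted p-values, which is what adj_Wstep() is checked against below.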
less = t( apply( qstar, MARGIN = 1,
function(row) row <= sort(pvals) ) )
p.tilde = colMeans(less)
# enforce monotonicity
p.tilde.sort = sort(p.tilde)
p.tilde.sort[2] = max( p.tilde.sort[1], p.tilde.sort[2] )
p.tilde.sort[3] = max( p.tilde.sort[2], p.tilde.sort[3] )
# put back in original order
p.adj = p.tilde.sort[r]
expect_equal( p.adj, adj_Wstep( p = pvals, p.bt = t(p.bt) ) )
})
###################### TEST RESAMPLE_RESID ######################
# generate data NOT under null and
# check that mean p-value is .5 in resamples
# and that we have the expected number of rejections
test_that("resample_resid #1", {
# Sanity Check
nX = 1
nY = 3
B = 5
library(matrixcalc)
library(mvtnorm)
cor = make_corr_mat( nX = nX,
nY = nY,
rho.XX = 0,
rho.YY = 0.25,
rho.XY = 0.05,
prop.corr = 1 )
d = sim_data( n = 1000, cor = cor )
# mean-center them
d = as.data.frame( apply( d, 2, function(col) col - mean(col) ) )
# bookmark
samp.res = dataset_result( X = "X1",
C = NA,
Ys = c("Y1", "Y2", "Y3"),
d = d,
alpha = 0.05,
center.stats = FALSE )
# do 500 bootstraps
resamps = resample_resid( X = "X1",
C = NA,
Ys = c("Y1", "Y2", "Y3"),
d = d,
alpha = 0.05,
resid = samp.res$resid,
bhat.orig = samp.res$bhats,
B=500,
cores = 8 )
expect_equal( mean(resamps$p.bt), .5, tolerance = 0.03 )
expect_equal( mean(resamps$rej.bt), .05*nY, tolerance = 0.03 )
} )
|
/NRejections/tests/testthat/testthat.R
|
no_license
|
mayamathur/NRejections
|
R
| false | false | 18,836 |
r
|
#' Martingale Difference Divergence
#'
#' \code{mdd} measures conditional mean dependence of \code{Y} given \code{X},
#' where each contains one variable (univariate) or more variables (multivariate).
#'
#' @param X A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#' @param Y A vector, matrix or data frame, where rows represent samples, and columns represent variables.
#' @param compute The method for computation, including
#' \itemize{
#' \item \code{C}: computation implemented in C code;
#' \item \code{R}: computation implemented in R code.
#' }
#' @param center The approach for centering, including
#' \itemize{
#' \item \code{U}: U-centering which leads to an unbiased estimator;
#' \item \code{D}: double-centering which leads to a biased estimator.
#' }
#'
#' @return \code{mdd} returns the squared martingale difference divergence of \code{Y} given \code{X}.
#'
#' @references Shao, X., and Zhang, J. (2014).
#' Martingale difference correlation and its use in high-dimensional variable screening.
#' Journal of the American Statistical Association, 109(507), 1302-1318.
#' \url{http://dx.doi.org/10.1080/01621459.2014.887012}.
#' @references Park, T., Shao, X., and Yao, S. (2015).
#' Partial martingale difference correlation.
#' Electronic Journal of Statistics, 9(1), 1492-1517.
#' \url{http://dx.doi.org/10.1214/15-EJS1047}.
#'
#' @importFrom stats dist
#'
#' @include cmdm_functions.R
#'
#' @export
#'
#' @examples
#' # X, Y are vectors with 10 samples and 1 variable
#' X <- rnorm(10)
#' Y <- rnorm(10)
#'
#' mdd(X, Y, compute = "C")
#' mdd(X, Y, compute = "R")
#'
#' # X, Y are 10 x 2 matrices with 10 samples and 2 variables
#' X <- matrix(rnorm(10 * 2), 10, 2)
#' Y <- matrix(rnorm(10 * 2), 10, 2)
#'
#' mdd(X, Y, center = "U")
#' mdd(X, Y, center = "D")
mdd <- function(X, Y, compute = "C", center = "U") {
X <- as.matrix(X)
Y <- as.matrix(Y)
n <- nrow(X)
if (n != nrow(Y)) {
stop("The dimensions of X and Y do not agree.")
}
p <- ncol(X)
q <- ncol(Y)
if (compute == "C") {
X <- as.vector(X)
Y <- as.vector(Y)
if (center == "U") {
mdd <- .C("MDD_UCenter",
N = as.integer(n),
P = as.integer(p),
Q = as.integer(q),
X = as.double(X),
Y = as.double(Y),
V = as.double(numeric(1)),
PACKAGE = "EDMeasure")$V
} else if (center == "D") {
mdd <- .C("MDD_DCenter",
N = as.integer(n),
P = as.integer(p),
Q = as.integer(q),
X = as.double(X),
Y = as.double(Y),
V = as.double(numeric(1)),
PACKAGE = "EDMeasure")$V
} else {
stop("Invalid center. Read ?mdd for proper syntax.")
}
} else if (compute == "R") {
if (center == "U") {
A <- u.center(X)
B <- u.center(0.5 * as.matrix(dist(Y))^2)
mdd <- u.inner(A, B)
} else if (center == "D") {
A <- d.center(X)
B <- d.center(0.5 * as.matrix(dist(Y))^2)
mdd <- d.inner(A, B)
} else {
stop("Invalid center. Read ?mdd for proper syntax.")
}
} else {
stop("Invalid compute. Read ?mdd for proper syntax.")
}
return(mdd)
}
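# Illustrative consistency check (not part of the package; kept as comments
# because top-level code in an R/ source file would run at install time).
# With the compiled routines available, the R and C implementations are
# expected to agree up to floating-point error for either centering scheme:
#   set.seed(1)
#   X <- matrix(rnorm(30 * 2), 30, 2)
#   Y <- matrix(rnorm(30 * 3), 30, 3)
#   all.equal(mdd(X, Y, compute = "R", center = "U"),
#             mdd(X, Y, compute = "C", center = "U"))
#   all.equal(mdd(X, Y, compute = "R", center = "D"),
#             mdd(X, Y, compute = "C", center = "D"))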
|
/R/mdd.R
|
no_license
|
cran/EDMeasure
|
R
| false | false | 3,410 |
r
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
## A function that calculates the mean of a pollutant (sulfate or
## nitrate) across a specified list of monitors.
##
## Args:
## 'directory' : a character vector of length 1 indicating
## the location of the CSV files
## 'pollutant' : a character vector of length 1 indicating
## the name of the pollutant for which we will
## calculate the mean
## 'id' : an integer vector indicating the monitor ID numbers
## to be used
##
## Returns:
##   The mean of the pollutant across all monitors listed
## in the 'id' vector (ignoring NA values)
# Get a list of CSV files to process
files_list <- list.files(directory, full.names = TRUE)
# Initialize empty data frame
data <- data.frame()
# Iterate over each of the specified monitor results and merge them into
# one frame
for(i in id) {
temp_data <- read.csv(files_list[i])
data <- rbind(data, temp_data)
}
# Extract the column for which the mean is being calculated
values <- data[[pollutant]]
# Calculate the mean of the given column, ignoring the missing values
result <- mean(values, na.rm=TRUE)
# Return the result
result
}
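## Example usage (illustrative; assumes a "specdata" directory of monitor CSV
## files with "sulfate" and "nitrate" columns, as in the original assignment):
## pollutantmean("specdata", "sulfate", 1:10)
## pollutantmean("specdata", "nitrate", 70:72)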
|
/assignment01/pollutantmean.R
|
permissive
|
ksokolovic/R-Programming
|
R
| false | false | 1,339 |
r
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
## A function calculates the mean of a pollutant (sulfate or
## nitrate) across a specified list of monitors.
##
## Args:
## 'directory' : a character vector of length 1 indicating
## the location of the CSV files
## 'pollutant' : a character vector of length 1 indicating
## the name of the pollutant for which we will
## calculate the mean
## 'id' : an integer vector indicating the monitor ID numbers
## to be used
##
## Returns:
## The mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
# Get a list of CSV files to process
files_list <- list.files(directory, full.names = TRUE)
# Initialize empty data frame
data <- data.frame()
# Iterate over each of the specified monitor results and merge them into
# one frame
for(i in id) {
temp_data <- read.csv(files_list[i])
data <- rbind(data, temp_data)
}
# Extract the column for which the mean is being calculated
values <- data[[pollutant]]
# Calculate the mean of the given column, ignoring the missing values
result <- mean(values, na.rm=TRUE)
# Return the result
result
}
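# Example call (illustrative; it assumes a "specdata" directory of monitor CSV files
# named 001.csv ... 332.csv, each containing 'sulfate' and 'nitrate' columns):
#
#   pollutantmean("specdata", "sulfate", 1:10)
#   pollutantmean("specdata", "nitrate", 70:72)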
|
# 2.2 ATE and OLS estimation
library(Matching)
library(stargazer)
data(lalonde)
attach(lalonde)
mean(re78[treat==1]) - mean(re78[treat==0])
# Neyman test
# covered in the previous class
reg1 <- lm(re78~treat)
stargazer(reg1,type="text")
c(mean(age[treat==1]),mean(age[treat==0]))
t.test(
x = age[treat==1],
y = age[treat==0]
)
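# Sanity check (illustrative): with a single binary regressor, the OLS slope on
# 'treat' in reg1 equals the simple difference in means computed above.
all.equal(unname(coef(reg1)["treat"]),
          mean(re78[treat==1]) - mean(re78[treat==0]))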
|
/clase03_lalonde.R
|
no_license
|
rsf94/taller_econometria
|
R
| false | false | 333 |
r
|
# 2.2 ATE and OLS estimation
library(Matching)
library(stargazer)
data(lalonde)
attach(lalonde)
mean(re78[treat==1]) - mean(re78[treat==0])
# Neyman test
# covered in the previous class
reg1 <- lm(re78~treat)
stargazer(reg1,type="text")
c(mean(age[treat==1]),mean(age[treat==0]))
t.test(
x = age[treat==1],
y = age[treat==0]
)
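# Sanity check (illustrative): with a single binary regressor, the OLS slope on
# 'treat' in reg1 equals the simple difference in means computed above.
all.equal(unname(coef(reg1)["treat"]),
          mean(re78[treat==1]) - mean(re78[treat==0]))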
|
#' Make study function
#'
#' This is the main study function and runs the entire study.
#' @param data_path Path to data set. Should be a character vector of length 1. Defaults to c("./extdata/sample.csv")
#' @param bs_samples The number of bootstrap samples to be generated as int. Defaults to 5
#' @export
make.study <- function(
data_path = c("./extdata/sample.csv"),
bs_samples = 5
)
{
    ## Set seed for reproducibility
set.seed(123)
## Load all required packages (remove when turned into package)
load.required.packages()
## Import study data
study_data <- read.csv(data_path, stringsAsFactors = FALSE)
    ## Drop observations collected before all centres started collecting triage
## category data and observations later than one month prior to creating
## this dataset
study_data <- drop.observations(study_data, test = TRUE)
## Get data dictionary
data_dictionary <- get.data.dictionary()
## Keep only variables relevant to this study
study_data <- keep.relevant.variables(study_data, data_dictionary)
## Define 999 as missing
study_data[study_data == 999] <- NA
## Prepare study data using the data dictionary
study_data <- prepare.study.data(study_data, data_dictionary, test = TRUE)
## Set patients to dead if dead at discharge or at 24 hours
## and alive if coded alive and admitted to other hospital
study_data <- set.to.outcome(study_data)
## Replace age >89 with 90 and make age numeric
study_data$age[study_data$age == ">89"] <- "90"
study_data$age <- as.numeric(study_data$age)
## Collapse mechanism of injury
study_data <- collapse.moi(study_data)
## Add time between injury and arrival and drop date and time variables from
## study data
study_data <- add.time.between.injury.and.arrival(study_data, data_dictionary)
## Apply exclusion criteria, i.e. drop observations with missing outcome
## data and save exclusions to results list
results <- list() # List to hold results
study_data <- apply.exclusion.criteria(study_data)
## Create missing indicator variables and save table of number of missing
## values per variable
study_data <- add.missing.indicator.variables(study_data)
## Prepare data for SuperLearner predictions
prepped_data <- prep.data.for.superlearner(study_data, test = TRUE)
## Create table of sample characteristics
tables <- create.table.of.sample.characteristics(prepped_data, data_dictionary)
results$table_of_sample_characteristics <- tables$formatted
results$raw_table_of_sample_characteristics <- tables$raw
## Transform factors into dummy variables
prepped_data <- to.dummy.variables(prepped_data)
## Train and review SuperLearner on study sample
study_sample <- predictions.with.superlearner(prepped_data)
## Bootstrap samples
samples <- generate.bootstrap.samples(study_data,
bs_samples)
## Prepare samples
prepped_samples <- prep.bssamples(samples)
## Train and review SuperLearner on bootstrap samples
samples <- train.predict.bssamples(prepped_samples)
    ## Create list of analyses to conduct
funcList <- list(list(func = 'model.review.AUROCC',
model_or_pe = c('pred_cat',
'tc'),
diffci_or_ci = "diff"),
list(func = 'model.review.reclassification',
model_or_pe = c('NRI+',
'NRI'),
diffci_or_ci = "ci"))
## Generate confidence intervals around point estimates from funcList
CIs <- lapply(funcList,
function(i) generate.confidence.intervals(study_sample,
func = get(i$func),
model_or_pointestimate = i$model_or_pe,
samples = samples,
diffci_or_ci = i$diffci_or_ci))
## Set names of cis
names(CIs) <- c('AUROCC',
'reclassification')
## Compile manuscript
compile.manuscript(results, "superlearner_vs_clinicians_manuscript")
}
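## For context, a generic percentile-bootstrap sketch (illustrative only; this is
## not the package implementation, and the call pattern of model.review.AUROCC
## shown here is an assumption):
##
##   ests <- sapply(samples, function(s) model.review.AUROCC(s, c("pred_cat", "tc")))
##   quantile(ests, probs = c(0.025, 0.975))  # empirical 95% interval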
|
/R/make.study.r
|
no_license
|
martingerdin/SupaLarna
|
R
| false | false | 4,403 |
r
|
#' Make study function
#'
#' This is the main study function and runs the entire study.
#' @param data_path Path to data set. Should be a character vector of length 1. Defaults to c("./extdata/sample.csv")
#' @param bs_samples The number of bootstrap samples to be generated as int. Defaults to 5
#' @export
make.study <- function(
data_path = c("./extdata/sample.csv"),
bs_samples = 5
)
{
    ## Set seed for reproducibility
set.seed(123)
## Load all required packages (remove when turned into package)
load.required.packages()
## Import study data
study_data <- read.csv(data_path, stringsAsFactors = FALSE)
    ## Drop observations collected before all centres started collecting triage
## category data and observations later than one month prior to creating
## this dataset
study_data <- drop.observations(study_data, test = TRUE)
## Get data dictionary
data_dictionary <- get.data.dictionary()
## Keep only variables relevant to this study
study_data <- keep.relevant.variables(study_data, data_dictionary)
## Define 999 as missing
study_data[study_data == 999] <- NA
## Prepare study data using the data dictionary
study_data <- prepare.study.data(study_data, data_dictionary, test = TRUE)
## Set patients to dead if dead at discharge or at 24 hours
## and alive if coded alive and admitted to other hospital
study_data <- set.to.outcome(study_data)
## Replace age >89 with 90 and make age numeric
study_data$age[study_data$age == ">89"] <- "90"
study_data$age <- as.numeric(study_data$age)
## Collapse mechanism of injury
study_data <- collapse.moi(study_data)
## Add time between injury and arrival and drop date and time variables from
## study data
study_data <- add.time.between.injury.and.arrival(study_data, data_dictionary)
## Apply exclusion criteria, i.e. drop observations with missing outcome
## data and save exclusions to results list
results <- list() # List to hold results
study_data <- apply.exclusion.criteria(study_data)
## Create missing indicator variables and save table of number of missing
## values per variable
study_data <- add.missing.indicator.variables(study_data)
## Prepare data for SuperLearner predictions
prepped_data <- prep.data.for.superlearner(study_data, test = TRUE)
## Create table of sample characteristics
tables <- create.table.of.sample.characteristics(prepped_data, data_dictionary)
results$table_of_sample_characteristics <- tables$formatted
results$raw_table_of_sample_characteristics <- tables$raw
## Transform factors into dummy variables
prepped_data <- to.dummy.variables(prepped_data)
## Train and review SuperLearner on study sample
study_sample <- predictions.with.superlearner(prepped_data)
## Bootstrap samples
samples <- generate.bootstrap.samples(study_data,
bs_samples)
## Prepare samples
prepped_samples <- prep.bssamples(samples)
## Train and review SuperLearner on bootstrap samples
samples <- train.predict.bssamples(prepped_samples)
    ## Create list of analyses to conduct
funcList <- list(list(func = 'model.review.AUROCC',
model_or_pe = c('pred_cat',
'tc'),
diffci_or_ci = "diff"),
list(func = 'model.review.reclassification',
model_or_pe = c('NRI+',
'NRI'),
diffci_or_ci = "ci"))
## Generate confidence intervals around point estimates from funcList
CIs <- lapply(funcList,
function(i) generate.confidence.intervals(study_sample,
func = get(i$func),
model_or_pointestimate = i$model_or_pe,
samples = samples,
diffci_or_ci = i$diffci_or_ci))
## Set names of cis
names(CIs) <- c('AUROCC',
'reclassification')
## Compile manuscript
compile.manuscript(results, "superlearner_vs_clinicians_manuscript")
}
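## For context, a generic percentile-bootstrap sketch (illustrative only; this is
## not the package implementation, and the call pattern of model.review.AUROCC
## shown here is an assumption):
##
##   ests <- sapply(samples, function(s) model.review.AUROCC(s, c("pred_cat", "tc")))
##   quantile(ests, probs = c(0.025, 0.975))  # empirical 95% interval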
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{emr}
\alias{emr}
\title{Amazon EMR}
\usage{
emr(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Amazon EMR is a web service that makes it easier to process large
amounts of data efficiently. Amazon EMR uses Hadoop processing combined
with several Amazon Web Services services to do tasks such as web
indexing, data mining, log file analysis, machine learning, scientific
simulation, and data warehouse management.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- emr(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.analytics:emr_add_instance_fleet]{add_instance_fleet} \tab Adds an instance fleet to a running cluster\cr
\link[paws.analytics:emr_add_instance_groups]{add_instance_groups} \tab Adds one or more instance groups to a running cluster\cr
\link[paws.analytics:emr_add_job_flow_steps]{add_job_flow_steps} \tab AddJobFlowSteps adds new steps to a running cluster\cr
\link[paws.analytics:emr_add_tags]{add_tags} \tab Adds tags to an Amazon EMR resource, such as a cluster or an Amazon EMR Studio\cr
\link[paws.analytics:emr_cancel_steps]{cancel_steps} \tab Cancels a pending step or steps in a running cluster\cr
\link[paws.analytics:emr_create_security_configuration]{create_security_configuration} \tab Creates a security configuration, which is stored in the service and can be specified when a cluster is created\cr
\link[paws.analytics:emr_create_studio]{create_studio} \tab Creates a new Amazon EMR Studio\cr
\link[paws.analytics:emr_create_studio_session_mapping]{create_studio_session_mapping} \tab Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group\cr
\link[paws.analytics:emr_delete_security_configuration]{delete_security_configuration} \tab Deletes a security configuration\cr
\link[paws.analytics:emr_delete_studio]{delete_studio} \tab Removes an Amazon EMR Studio from the Studio metadata store\cr
\link[paws.analytics:emr_delete_studio_session_mapping]{delete_studio_session_mapping} \tab Removes a user or group from an Amazon EMR Studio\cr
\link[paws.analytics:emr_describe_cluster]{describe_cluster} \tab Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on\cr
\link[paws.analytics:emr_describe_job_flows]{describe_job_flows} \tab This API is no longer supported and will eventually be removed\cr
\link[paws.analytics:emr_describe_notebook_execution]{describe_notebook_execution} \tab Provides details of a notebook execution\cr
\link[paws.analytics:emr_describe_release_label]{describe_release_label} \tab Provides Amazon EMR release label details, such as the releases available in the Region where the API request is run, and the available applications for a specific Amazon EMR release label\cr
\link[paws.analytics:emr_describe_security_configuration]{describe_security_configuration} \tab Provides the details of a security configuration by returning the configuration JSON\cr
\link[paws.analytics:emr_describe_step]{describe_step} \tab Provides more detail about the cluster step\cr
\link[paws.analytics:emr_describe_studio]{describe_studio} \tab Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on\cr
\link[paws.analytics:emr_get_auto_termination_policy]{get_auto_termination_policy} \tab Returns the auto-termination policy for an Amazon EMR cluster\cr
\link[paws.analytics:emr_get_block_public_access_configuration]{get_block_public_access_configuration} \tab Returns the Amazon EMR block public access configuration for your Amazon Web Services account in the current Region\cr
\link[paws.analytics:emr_get_cluster_session_credentials]{get_cluster_session_credentials} \tab Provides temporary, HTTP basic credentials that are associated with a given runtime IAM role and used by a cluster with fine-grained access control activated\cr
\link[paws.analytics:emr_get_managed_scaling_policy]{get_managed_scaling_policy} \tab Fetches the attached managed scaling policy for an Amazon EMR cluster\cr
\link[paws.analytics:emr_get_studio_session_mapping]{get_studio_session_mapping} \tab Fetches mapping details for the specified Amazon EMR Studio and identity (user or group)\cr
\link[paws.analytics:emr_list_bootstrap_actions]{list_bootstrap_actions} \tab Provides information about the bootstrap actions associated with a cluster\cr
\link[paws.analytics:emr_list_clusters]{list_clusters} \tab Provides the status of all clusters visible to this Amazon Web Services account\cr
\link[paws.analytics:emr_list_instance_fleets]{list_instance_fleets} \tab Lists all available details about the instance fleets in a cluster\cr
\link[paws.analytics:emr_list_instance_groups]{list_instance_groups} \tab Provides all available details about the instance groups in a cluster\cr
\link[paws.analytics:emr_list_instances]{list_instances} \tab Provides information for all active Amazon EC2 instances and Amazon EC2 instances terminated in the last 30 days, up to a maximum of 2,000\cr
\link[paws.analytics:emr_list_notebook_executions]{list_notebook_executions} \tab Provides summaries of all notebook executions\cr
\link[paws.analytics:emr_list_release_labels]{list_release_labels} \tab Retrieves release labels of Amazon EMR services in the Region where the API is called\cr
\link[paws.analytics:emr_list_security_configurations]{list_security_configurations} \tab Lists all the security configurations visible to this account, providing their creation dates and times, and their names\cr
\link[paws.analytics:emr_list_steps]{list_steps} \tab Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request or filter by StepStates\cr
\link[paws.analytics:emr_list_studios]{list_studios} \tab Returns a list of all Amazon EMR Studios associated with the Amazon Web Services account\cr
\link[paws.analytics:emr_list_studio_session_mappings]{list_studio_session_mappings} \tab Returns a list of all user or group session mappings for the Amazon EMR Studio specified by StudioId\cr
\link[paws.analytics:emr_modify_cluster]{modify_cluster} \tab Modifies the number of steps that can be executed concurrently for the cluster specified using ClusterID\cr
\link[paws.analytics:emr_modify_instance_fleet]{modify_instance_fleet} \tab Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID\cr
\link[paws.analytics:emr_modify_instance_groups]{modify_instance_groups} \tab ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group\cr
\link[paws.analytics:emr_put_auto_scaling_policy]{put_auto_scaling_policy} \tab Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster\cr
\link[paws.analytics:emr_put_auto_termination_policy]{put_auto_termination_policy} \tab Auto-termination is supported in Amazon EMR releases 5\cr
\link[paws.analytics:emr_put_block_public_access_configuration]{put_block_public_access_configuration} \tab Creates or updates an Amazon EMR block public access configuration for your Amazon Web Services account in the current Region\cr
\link[paws.analytics:emr_put_managed_scaling_policy]{put_managed_scaling_policy} \tab Creates or updates a managed scaling policy for an Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_auto_scaling_policy]{remove_auto_scaling_policy} \tab Removes an automatic scaling policy from a specified instance group within an Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_auto_termination_policy]{remove_auto_termination_policy} \tab Removes an auto-termination policy from an Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_managed_scaling_policy]{remove_managed_scaling_policy} \tab Removes a managed scaling policy from a specified Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_tags]{remove_tags} \tab Removes tags from an Amazon EMR resource, such as a cluster or Amazon EMR Studio\cr
\link[paws.analytics:emr_run_job_flow]{run_job_flow} \tab RunJobFlow creates and starts running a new cluster (job flow)\cr
\link[paws.analytics:emr_set_termination_protection]{set_termination_protection} \tab SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error\cr
\link[paws.analytics:emr_set_visible_to_all_users]{set_visible_to_all_users} \tab The SetVisibleToAllUsers parameter is no longer supported\cr
\link[paws.analytics:emr_start_notebook_execution]{start_notebook_execution} \tab Starts a notebook execution\cr
\link[paws.analytics:emr_stop_notebook_execution]{stop_notebook_execution} \tab Stops a notebook execution\cr
\link[paws.analytics:emr_terminate_job_flows]{terminate_job_flows} \tab TerminateJobFlows shuts a list of clusters (job flows) down\cr
\link[paws.analytics:emr_update_studio]{update_studio} \tab Updates an Amazon EMR Studio configuration, including attributes such as name, description, and subnets\cr
\link[paws.analytics:emr_update_studio_session_mapping]{update_studio_session_mapping} \tab Updates the session policy attached to the user or group for the specified Amazon EMR Studio
}
}
\examples{
\dontrun{
svc <- emr()
svc$add_instance_fleet(
Foo = 123
)
}
}
|
/man/emr.Rd
|
no_license
|
cran/paws
|
R
| false | true | 11,114 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{emr}
\alias{emr}
\title{Amazon EMR}
\usage{
emr(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
Amazon EMR is a web service that makes it easier to process large
amounts of data efficiently. Amazon EMR uses Hadoop processing combined
with several Amazon Web Services services to do tasks such as web
indexing, data mining, log file analysis, machine learning, scientific
simulation, and data warehouse management.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- emr(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.analytics:emr_add_instance_fleet]{add_instance_fleet} \tab Adds an instance fleet to a running cluster\cr
\link[paws.analytics:emr_add_instance_groups]{add_instance_groups} \tab Adds one or more instance groups to a running cluster\cr
\link[paws.analytics:emr_add_job_flow_steps]{add_job_flow_steps} \tab AddJobFlowSteps adds new steps to a running cluster\cr
\link[paws.analytics:emr_add_tags]{add_tags} \tab Adds tags to an Amazon EMR resource, such as a cluster or an Amazon EMR Studio\cr
\link[paws.analytics:emr_cancel_steps]{cancel_steps} \tab Cancels a pending step or steps in a running cluster\cr
\link[paws.analytics:emr_create_security_configuration]{create_security_configuration} \tab Creates a security configuration, which is stored in the service and can be specified when a cluster is created\cr
\link[paws.analytics:emr_create_studio]{create_studio} \tab Creates a new Amazon EMR Studio\cr
\link[paws.analytics:emr_create_studio_session_mapping]{create_studio_session_mapping} \tab Maps a user or group to the Amazon EMR Studio specified by StudioId, and applies a session policy to refine Studio permissions for that user or group\cr
\link[paws.analytics:emr_delete_security_configuration]{delete_security_configuration} \tab Deletes a security configuration\cr
\link[paws.analytics:emr_delete_studio]{delete_studio} \tab Removes an Amazon EMR Studio from the Studio metadata store\cr
\link[paws.analytics:emr_delete_studio_session_mapping]{delete_studio_session_mapping} \tab Removes a user or group from an Amazon EMR Studio\cr
\link[paws.analytics:emr_describe_cluster]{describe_cluster} \tab Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on\cr
\link[paws.analytics:emr_describe_job_flows]{describe_job_flows} \tab This API is no longer supported and will eventually be removed\cr
\link[paws.analytics:emr_describe_notebook_execution]{describe_notebook_execution} \tab Provides details of a notebook execution\cr
\link[paws.analytics:emr_describe_release_label]{describe_release_label} \tab Provides Amazon EMR release label details, such as the releases available in the Region where the API request is run, and the available applications for a specific Amazon EMR release label\cr
\link[paws.analytics:emr_describe_security_configuration]{describe_security_configuration} \tab Provides the details of a security configuration by returning the configuration JSON\cr
\link[paws.analytics:emr_describe_step]{describe_step} \tab Provides more detail about the cluster step\cr
\link[paws.analytics:emr_describe_studio]{describe_studio} \tab Returns details for the specified Amazon EMR Studio including ID, Name, VPC, Studio access URL, and so on\cr
\link[paws.analytics:emr_get_auto_termination_policy]{get_auto_termination_policy} \tab Returns the auto-termination policy for an Amazon EMR cluster\cr
\link[paws.analytics:emr_get_block_public_access_configuration]{get_block_public_access_configuration} \tab Returns the Amazon EMR block public access configuration for your Amazon Web Services account in the current Region\cr
\link[paws.analytics:emr_get_cluster_session_credentials]{get_cluster_session_credentials} \tab Provides temporary, HTTP basic credentials that are associated with a given runtime IAM role and used by a cluster with fine-grained access control activated\cr
\link[paws.analytics:emr_get_managed_scaling_policy]{get_managed_scaling_policy} \tab Fetches the attached managed scaling policy for an Amazon EMR cluster\cr
\link[paws.analytics:emr_get_studio_session_mapping]{get_studio_session_mapping} \tab Fetches mapping details for the specified Amazon EMR Studio and identity (user or group)\cr
\link[paws.analytics:emr_list_bootstrap_actions]{list_bootstrap_actions} \tab Provides information about the bootstrap actions associated with a cluster\cr
\link[paws.analytics:emr_list_clusters]{list_clusters} \tab Provides the status of all clusters visible to this Amazon Web Services account\cr
\link[paws.analytics:emr_list_instance_fleets]{list_instance_fleets} \tab Lists all available details about the instance fleets in a cluster\cr
\link[paws.analytics:emr_list_instance_groups]{list_instance_groups} \tab Provides all available details about the instance groups in a cluster\cr
\link[paws.analytics:emr_list_instances]{list_instances} \tab Provides information for all active Amazon EC2 instances and Amazon EC2 instances terminated in the last 30 days, up to a maximum of 2,000\cr
\link[paws.analytics:emr_list_notebook_executions]{list_notebook_executions} \tab Provides summaries of all notebook executions\cr
\link[paws.analytics:emr_list_release_labels]{list_release_labels} \tab Retrieves release labels of Amazon EMR services in the Region where the API is called\cr
\link[paws.analytics:emr_list_security_configurations]{list_security_configurations} \tab Lists all the security configurations visible to this account, providing their creation dates and times, and their names\cr
\link[paws.analytics:emr_list_steps]{list_steps} \tab Provides a list of steps for the cluster in reverse order unless you specify stepIds with the request or filter by StepStates\cr
\link[paws.analytics:emr_list_studios]{list_studios} \tab Returns a list of all Amazon EMR Studios associated with the Amazon Web Services account\cr
\link[paws.analytics:emr_list_studio_session_mappings]{list_studio_session_mappings} \tab Returns a list of all user or group session mappings for the Amazon EMR Studio specified by StudioId\cr
\link[paws.analytics:emr_modify_cluster]{modify_cluster} \tab Modifies the number of steps that can be executed concurrently for the cluster specified using ClusterID\cr
\link[paws.analytics:emr_modify_instance_fleet]{modify_instance_fleet} \tab Modifies the target On-Demand and target Spot capacities for the instance fleet with the specified InstanceFleetID within the cluster specified using ClusterID\cr
\link[paws.analytics:emr_modify_instance_groups]{modify_instance_groups} \tab ModifyInstanceGroups modifies the number of nodes and configuration settings of an instance group\cr
\link[paws.analytics:emr_put_auto_scaling_policy]{put_auto_scaling_policy} \tab Creates or updates an automatic scaling policy for a core instance group or task instance group in an Amazon EMR cluster\cr
\link[paws.analytics:emr_put_auto_termination_policy]{put_auto_termination_policy} \tab Auto-termination is supported in Amazon EMR releases 5\cr
\link[paws.analytics:emr_put_block_public_access_configuration]{put_block_public_access_configuration} \tab Creates or updates an Amazon EMR block public access configuration for your Amazon Web Services account in the current Region\cr
\link[paws.analytics:emr_put_managed_scaling_policy]{put_managed_scaling_policy} \tab Creates or updates a managed scaling policy for an Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_auto_scaling_policy]{remove_auto_scaling_policy} \tab Removes an automatic scaling policy from a specified instance group within an Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_auto_termination_policy]{remove_auto_termination_policy} \tab Removes an auto-termination policy from an Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_managed_scaling_policy]{remove_managed_scaling_policy} \tab Removes a managed scaling policy from a specified Amazon EMR cluster\cr
\link[paws.analytics:emr_remove_tags]{remove_tags} \tab Removes tags from an Amazon EMR resource, such as a cluster or Amazon EMR Studio\cr
\link[paws.analytics:emr_run_job_flow]{run_job_flow} \tab RunJobFlow creates and starts running a new cluster (job flow)\cr
\link[paws.analytics:emr_set_termination_protection]{set_termination_protection} \tab SetTerminationProtection locks a cluster (job flow) so the Amazon EC2 instances in the cluster cannot be terminated by user intervention, an API call, or in the event of a job-flow error\cr
\link[paws.analytics:emr_set_visible_to_all_users]{set_visible_to_all_users} \tab The SetVisibleToAllUsers parameter is no longer supported\cr
\link[paws.analytics:emr_start_notebook_execution]{start_notebook_execution} \tab Starts a notebook execution\cr
\link[paws.analytics:emr_stop_notebook_execution]{stop_notebook_execution} \tab Stops a notebook execution\cr
\link[paws.analytics:emr_terminate_job_flows]{terminate_job_flows} \tab TerminateJobFlows shuts a list of clusters (job flows) down\cr
\link[paws.analytics:emr_update_studio]{update_studio} \tab Updates an Amazon EMR Studio configuration, including attributes such as name, description, and subnets\cr
\link[paws.analytics:emr_update_studio_session_mapping]{update_studio_session_mapping} \tab Updates the session policy attached to the user or group for the specified Amazon EMR Studio
}
}
\examples{
\dontrun{
svc <- emr()
svc$add_instance_fleet(
Foo = 123
)
}
}
|
###################################################################################################
#HOMEWORK 04#
###################################################################################################
Kamilar_Cooper <- read.csv("~/Desktop/Development/Assignment_4/Kamilar_Cooper.csv")
View(Kamilar_Cooper)
KC <- Kamilar_Cooper
#Remove any Nas
KC <- na.omit(KC)
#Run a basic linear model to see the initial pattern
plot(data = KC, log(HomeRange_km2) ~ log(Body_mass_female_mean))
# Run a linear model of the relationship
m1 <- lm(log(HomeRange_km2) ~ log(Body_mass_female_mean), data = KC)
m1
#model output: Beta0(Intercept) = -9.354
# Beta 1 = 1.024
print(coef(summary(m1))[,"Std. Error"])
#Standard Error from m1 linear model:
#Intercept = 1.6380707
#log(Body_Mass_Female) = 0.1868741
confint(m1)
#OUTPUT:
# 2.5 % 97.5 %
# (Intercept) -12.7511475 -5.956846
#log(Body_mass_female_mean) 0.6364542 1.411560
# Question 2: Bootstrapping
#from https://www.rdocumentation.org/packages/simpleboot/versions/1.1-7/topics/lm.boot
#library(simpleboot)
#lm.object <- lm(log(HomeRange_km2) ~ log(Body_mass_female_mean), data = KC)
#R <- 1000
#m2 <- lm.boot(lm.object, R, rows = TRUE)
#from https://rdrr.io/cran/car/man/Boot.html
library(car)
data=na.omit(KC)
m2 <- Boot(m1, R=1000, method=c("case"))
summary(m2)
#Output
#Number of bootstrap replications R = 999
# original bootBias bootSE bootMed
#(Intercept) -9.354 -0.319302 1.73792 -9.5455
#log(Body_mass_female_mean) 1.024 0.036161 0.19585 1.0460
confint(m2)
#Output
#Bootstrap bca confidence intervals
# 2.5 % 97.5 %
# (Intercept) -12.3244897 -5.921240
#log(Body_mass_female_mean) 0.6406987 1.352249
hist(m2)
#### Question 3 #####
#Estimate the standard error for each of your β coefficients as the standard deviation of the sampling distribution from your bootstrap.
summary(m2)
#coef(summary(m2))[,"bootSE"]
#coef(summary(m2))
# I want to pull out the results for each iteration of the bootstrap.
#Should be a matrix with 1000 rows (1000 iterations), and 2 columns, one for each B coefficient
m2_rep_results <- m2$t
m2_rep_results <- as.data.frame(m2_rep_results)
# Now calculate the Standard Error from the sampling distribution
#m2_rep_se <- sd(m2_rep_results$Intercept)/sqrt(length(m2_rep_results$Intercept))
#m2_Intercept_se <- sd(m2_rep_results[, 1])/sqrt(length(m2_rep_results[, 1]))
#print(m2_rep_se)
#SE for the "Intercept" sampling distribution is 0.05498
#m2_bodymass_se <- sd(m2_rep_results[, 2])/sqrt(length(m2_rep_results[, 2]))
#print(m2_bodymass_se)
#SE for log(Body_Mass_Female) sampling distribution is 0.05498529
sd_m2_intercept <- sd(m2_rep_results$`(Intercept)`)
sd_m2_bodymass <- sd(m2_rep_results$`log(Body_mass_female_mean)`)
se_m2_intercept <- sd_m2_intercept/sqrt(length(m2_rep_results))
print(se_m2_intercept)
#Output: 1.150876
se_m2_bodymass <- sd_m2_bodymass/sqrt(length(m2_rep_results))
print(se_m2_bodymass)
#Output: 0.1289406
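# Note on the two estimates above: the assignment defines the bootstrap SE as the
# standard deviation of the sampling distribution itself, i.e. sd_m2_intercept and
# sd_m2_bodymass (compare the bootSE column reported by summary(m2)).
# length(m2_rep_results) returns the number of data-frame columns (2), not the number
# of bootstrap replicates; nrow(m2_rep_results) would give the replicate count.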
#### Question 4 ######
# Also determine the 95% CI for each of your β coefficients based on the appropriate quantiles from your sampling distribution.
# 'boot' R package should have already generated the confidence interval. I just need to call it.
library(boot)
boot.ci(m2, conf=0.95, type="basic")
#95% CI = (-12.251, -6.010)
boot.ci(m2, conf=0.95, type="bca")
#95% CI = (-12.647, -6.336 )
|
/Homework_04.R
|
no_license
|
naivers/Ivers_Nick_Homework-04
|
R
| false | false | 3,576 |
r
|
###################################################################################################
#HOMEWORK 04#
###################################################################################################
Kamilar_Cooper <- read.csv("~/Desktop/Development/Assignment_4/Kamilar_Cooper.csv")
View(Kamilar_Cooper)
KC <- Kamilar_Cooper
#Remove any Nas
KC <- na.omit(KC)
#Run a basic linear model to see the initial pattern
plot(data = KC, log(HomeRange_km2) ~ log(Body_mass_female_mean))
# Run a linear model of the relationship
m1 <- lm(log(HomeRange_km2) ~ log(Body_mass_female_mean), data = KC)
m1
#model output: Beta0(Intercept) = -9.354
# Beta 1 = 1.024
print(coef(summary(m1))[,"Std. Error"])
#Standard Error from m1 linear model:
#Intercept = 1.6380707
#log(Body_Mass_Female) = 0.1868741
confint(m1)
#OUTPUT:
# 2.5 % 97.5 %
# (Intercept) -12.7511475 -5.956846
#log(Body_mass_female_mean) 0.6364542 1.411560
# Question 2: Bootstrapping
#from https://www.rdocumentation.org/packages/simpleboot/versions/1.1-7/topics/lm.boot
#library(simpleboot)
#lm.object <- lm(log(HomeRange_km2) ~ log(Body_mass_female_mean), data = KC)
#R <- 1000
#m2 <- lm.boot(lm.object, R, rows = TRUE)
#from https://rdrr.io/cran/car/man/Boot.html
library(car)
data=na.omit(KC)
m2 <- Boot(m1, R=1000, method=c("case"))
summary(m2)
#Output
#Number of bootstrap replications R = 999
# original bootBias bootSE bootMed
#(Intercept) -9.354 -0.319302 1.73792 -9.5455
#log(Body_mass_female_mean) 1.024 0.036161 0.19585 1.0460
confint(m2)
#Output
#Bootstrap bca confidence intervals
# 2.5 % 97.5 %
# (Intercept) -12.3244897 -5.921240
#log(Body_mass_female_mean) 0.6406987 1.352249
hist(m2)
#### Question 3 #####
#Estimate the standard error for each of your β coefficients as the standard deviation of the sampling distribution from your bootstrap.
summary(m2)
#coef(summary(m2))[,"bootSE"]
#coef(summary(m2))
# I want to pull out the results for each iteration of the bootstrap.
#Should be a matrix with 1000 rows (1000 iterations), and 2 columns, one for each B coefficient
m2_rep_results <- m2$t
m2_rep_results <- as.data.frame(m2_rep_results)
# Now calculate the Standard Error from the sampling distribution
#m2_rep_se <- sd(m2_rep_results$Intercept)/sqrt(length(m2_rep_results$Intercept))
#m2_Intercept_se <- sd(m2_rep_results[, 1])/sqrt(length(m2_rep_results[, 1]))
#print(m2_rep_se)
#SE for the "Intercept" sampling distribution is 0.05498
#m2_bodymass_se <- sd(m2_rep_results[, 2])/sqrt(length(m2_rep_results[, 2]))
#print(m2_bodymass_se)
#SE for log(Body_Mass_Female) sampling distribution is 0.05498529
sd_m2_intercept <- sd(m2_rep_results$`(Intercept)`)
sd_m2_bodymass <- sd(m2_rep_results$`log(Body_mass_female_mean)`)
se_m2_intercept <- sd_m2_intercept/sqrt(length(m2_rep_results))
print(se_m2_intercept)
#Output: 1.150876
se_m2_bodymass <- sd_m2_bodymass/sqrt(length(m2_rep_results))
print(se_m2_bodymass)
#Output: 0.1289406
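# Note on the two estimates above: the assignment defines the bootstrap SE as the
# standard deviation of the sampling distribution itself, i.e. sd_m2_intercept and
# sd_m2_bodymass (compare the bootSE column reported by summary(m2)).
# length(m2_rep_results) returns the number of data-frame columns (2), not the number
# of bootstrap replicates; nrow(m2_rep_results) would give the replicate count.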
#### Question 4 ######
# Also determine the 95% CI for each of your β coefficients based on the appropriate quantiles from your sampling distribution.
# 'boot' R package should have already generated the confidence interval. I just need to call it.
library(boot)
boot.ci(m2, conf=0.95, type="basic")
#95% CI = (-12.251, -6.010)
boot.ci(m2, conf=0.95, type="bca")
#95% CI = (-12.647, -6.336 )
|
##################################################
# UI
##################################################
#' @import shiny
#' @import shinydashboard
#' @import leaflet
#' @import shiny
#' @import ggplot2
#' @import shinyMobile
mobile_app_ui <- function(request) {
tagList(
mobile_golem_add_external_resources(),
f7Page(
init = f7Init(
skin = 'ios', # c("ios", "md", "auto", "aurora"),
theme = 'light', #c("dark", "light"),
filled = TRUE
),
title = "Databrew's COVID-19 epidemic curve explorer",
f7SingleLayout(
navbar = f7Navbar(
title = "Databrew's COVID-19 epidemic curve explorer",
hairline = TRUE,
shadow = TRUE
),
toolbar = f7Toolbar(
position = "bottom",
f7Link(label = "Databrew", src = "https://databrew.cc", external = TRUE),
f7Link(label = "Blog post on COVID-19 epidemic curves", src = "https://www.databrew.cc/posts/covid.html", external = TRUE)
),
# main content
f7Shadow(
intensity = 10,
hover = TRUE,
f7Card(
plotOutput('day0'),
selectInput('country', 'Country/Countries',
multiple = TRUE,
choices = sort(unique(sort(unique(covid19::df_country$country)))),
selected = c('Italy', 'Spain', 'France', 'US')),
# f7Stepper('day0', '"Critical mass": number of cases to be considered start of outbreak (day 0)', min = 1, max = 500, value = 150, step = 5),
sliderInput('day0', '"Critical mass" adjustment: Number of cases to be considered "day 0"',
min = 1,
max = 500,
value = 150,
# scale = TRUE,
step = 1),
f7Toggle('deaths', 'Deaths instead of cases?',
checked = FALSE),
f7Toggle('pop', 'Adjust by population?',
checked = FALSE),
height = 300,
)
),
f7Shadow(
intensity = 10,
hover = TRUE,
f7Card(
sliderInput('time_before', 'Number of days to show before "critical mass"',
min = -20,
max = 0,
value = 0,
# scale = TRUE,
step = 1),
br(),
f7Toggle('ylog', 'Logarithmic y-axis?',
checked = TRUE),
br(),
f7Toggle('cumulative', 'Cumulative cases?',
checked = TRUE),
br(),
f7Toggle('add_markers', 'Add visual markers at "critical mass"?',
checked = TRUE),
br(),
f7Stepper('line_size', 'Line thickness', min = 0.5, max = 4, value = 1, step = 0.5),
br(),
)
)
)
)
)
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application.
#'
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
mobile_golem_add_external_resources <- function(){
# addResourcePath(
# 'www', system.file('app/www', package = 'covid19')
# )
share <- list(
title = "Databrew's COVID-19 Data Explorer",
url = "https://datacat.cc/covid19/",
image = "http://www.databrew.cc/images/blog/covid2.png",
description = "Comparing epidemic curves across countries",
twitter_user = "data_brew"
)
tags$head(
# Facebook OpenGraph tags
tags$meta(property = "og:title", content = share$title),
tags$meta(property = "og:type", content = "website"),
tags$meta(property = "og:url", content = share$url),
tags$meta(property = "og:image", content = share$image),
tags$meta(property = "og:description", content = share$description),
# Twitter summary cards
tags$meta(name = "twitter:card", content = "summary"),
tags$meta(name = "twitter:site", content = paste0("@", share$twitter_user)),
tags$meta(name = "twitter:creator", content = paste0("@", share$twitter_user)),
tags$meta(name = "twitter:title", content = share$title),
tags$meta(name = "twitter:description", content = share$description),
tags$meta(name = "twitter:image", content = share$image),
# golem::activate_js(),
# golem::favicon(),
# Add here all the external resources
# Google analytics script
includeHTML(system.file('app/www/google-analytics-mini.html', package = 'covid19')),
includeScript(system.file('app/www/script.js', package = 'covid19')),
includeScript(system.file('app/www/mobile.js', package = 'covid19')),
# includeScript('inst/app/www/script.js'),
# includeScript('www/google-analytics.js'),
# If you have a custom.css in the inst/app/www
tags$link(rel="stylesheet", type="text/css", href="www/custom.css")
# tags$link(rel="stylesheet", type="text/css", href="www/custom.css")
)
}
##################################################
# SERVER
##################################################
#' @import shiny
#' @import leaflet
mobile_app_server <- function(input, output, session) {
output$day0 <- renderPlot({
plot_day_zero(countries = input$country,
ylog = input$ylog,
day0 = input$day0,
cumulative = input$cumulative,
time_before = input$time_before,
line_size = input$line_size,
add_markers = input$add_markers,
deaths = input$deaths,
pop = input$pop)
})
}
mobile_app <- function(){
  # Detect the system. If on AWS, don't launch browser
is_aws <- grepl('aws', tolower(Sys.info()['release']))
shinyApp(ui = mobile_app_ui,
server = mobile_app_server,
           options = list('launch.browser' = !is_aws))
}
|
/R/mobile_app.R
|
permissive
|
griu/covid19
|
R
| false | false | 6,057 |
r
|
##################################################
# UI
##################################################
#' @import shiny
#' @import shinydashboard
#' @import leaflet
#' @import shiny
#' @import ggplot2
#' @import shinyMobile
mobile_app_ui <- function(request) {
tagList(
mobile_golem_add_external_resources(),
f7Page(
init = f7Init(
skin = 'ios', # c("ios", "md", "auto", "aurora"),
theme = 'light', #c("dark", "light"),
filled = TRUE
),
title = "Databrew's COVID-19 epidemic curve explorer",
f7SingleLayout(
navbar = f7Navbar(
title = "Databrew's COVID-19 epidemic curve explorer",
hairline = TRUE,
shadow = TRUE
),
toolbar = f7Toolbar(
position = "bottom",
f7Link(label = "Databrew", src = "https://databrew.cc", external = TRUE),
f7Link(label = "Blog post on COVID-19 epidemic curves", src = "https://www.databrew.cc/posts/covid.html", external = TRUE)
),
# main content
f7Shadow(
intensity = 10,
hover = TRUE,
f7Card(
plotOutput('day0'),
selectInput('country', 'Country/Countries',
multiple = TRUE,
choices = sort(unique(sort(unique(covid19::df_country$country)))),
selected = c('Italy', 'Spain', 'France', 'US')),
# f7Stepper('day0', '"Critical mass": number of cases to be considered start of outbreak (day 0)', min = 1, max = 500, value = 150, step = 5),
sliderInput('day0', '"Critical mass" adjustment: Number of cases to be considered "day 0"',
min = 1,
max = 500,
value = 150,
# scale = TRUE,
step = 1),
f7Toggle('deaths', 'Deaths instead of cases?',
checked = FALSE),
f7Toggle('pop', 'Adjust by population?',
checked = FALSE),
height = 300,
)
),
f7Shadow(
intensity = 10,
hover = TRUE,
f7Card(
sliderInput('time_before', 'Number of days to show before "critical mass"',
min = -20,
max = 0,
value = 0,
# scale = TRUE,
step = 1),
br(),
f7Toggle('ylog', 'Logarithmic y-axis?',
checked = TRUE),
br(),
f7Toggle('cumulative', 'Cumulative cases?',
checked = TRUE),
br(),
f7Toggle('add_markers', 'Add visual markers at "critical mass"?',
checked = TRUE),
br(),
f7Stepper('line_size', 'Line thickness', min = 0.5, max = 4, value = 1, step = 0.5),
br(),
)
)
)
)
)
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application.
#'
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
mobile_golem_add_external_resources <- function(){
# addResourcePath(
# 'www', system.file('app/www', package = 'covid19')
# )
share <- list(
title = "Databrew's COVID-19 Data Explorer",
url = "https://datacat.cc/covid19/",
image = "http://www.databrew.cc/images/blog/covid2.png",
description = "Comparing epidemic curves across countries",
twitter_user = "data_brew"
)
tags$head(
# Facebook OpenGraph tags
tags$meta(property = "og:title", content = share$title),
tags$meta(property = "og:type", content = "website"),
tags$meta(property = "og:url", content = share$url),
tags$meta(property = "og:image", content = share$image),
tags$meta(property = "og:description", content = share$description),
# Twitter summary cards
tags$meta(name = "twitter:card", content = "summary"),
tags$meta(name = "twitter:site", content = paste0("@", share$twitter_user)),
tags$meta(name = "twitter:creator", content = paste0("@", share$twitter_user)),
tags$meta(name = "twitter:title", content = share$title),
tags$meta(name = "twitter:description", content = share$description),
tags$meta(name = "twitter:image", content = share$image),
# golem::activate_js(),
# golem::favicon(),
# Add here all the external resources
# Google analytics script
includeHTML(system.file('app/www/google-analytics-mini.html', package = 'covid19')),
includeScript(system.file('app/www/script.js', package = 'covid19')),
includeScript(system.file('app/www/mobile.js', package = 'covid19')),
# includeScript('inst/app/www/script.js'),
# includeScript('www/google-analytics.js'),
# If you have a custom.css in the inst/app/www
tags$link(rel="stylesheet", type="text/css", href="www/custom.css")
# tags$link(rel="stylesheet", type="text/css", href="www/custom.css")
)
}
##################################################
# SERVER
##################################################
#' @import shiny
#' @import leaflet
mobile_app_server <- function(input, output, session) {
output$day0 <- renderPlot({
plot_day_zero(countries = input$country,
ylog = input$ylog,
day0 = input$day0,
cumulative = input$cumulative,
time_before = input$time_before,
line_size = input$line_size,
add_markers = input$add_markers,
deaths = input$deaths,
pop = input$pop)
})
}
mobile_app <- function(){
  # Detect the system. If on AWS, don't launch browser
is_aws <- grepl('aws', tolower(Sys.info()['release']))
shinyApp(ui = mobile_app_ui,
server = mobile_app_server,
           options = list('launch.browser' = !is_aws))
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 2.73876647344422e+189, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615831338-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 362 |
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 2.73876647344422e+189, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
#' Framework 7 searchbar
#'
#' Searchbar to filter elements in a page.
#'
#' @param id Necessary when using \link{f7SearchbarTrigger}. NULL otherwise.
#' @param placeholder Searchbar placeholder.
#' @param expandable Whether to enable the searchbar with a target link,
#' in the navbar. See \link{f7SearchbarTrigger}.
#' @param inline Useful to add a \link{f7Searchbar} in a \link{f7Appbar}.
#' Notice that utilities like \link{f7HideOnSearch} and \link{f7NotFound} are not
#' compatible with this mode.
#' @export
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(shinyMobile)
#'
#' cars <- rownames(mtcars)
#'
#' shinyApp(
#' ui = f7Page(
#' title = "Simple searchbar",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar",
#' hairline = FALSE,
#' shadow = TRUE,
#' subNavbar = f7SubNavbar(
#' f7Searchbar(id = "search1")
#' )
#' ),
#' f7Block(
#' "This block will be hidden on search.
#' Lorem ipsum dolor sit amet, consectetur adipisicing elit."
#' ) %>% f7HideOnSearch(),
#' f7List(
#' lapply(seq_along(cars), function(i) {
#' f7ListItem(cars[i])
#' })
#' ) %>% f7Found(),
#'
#' f7Block(
#' p("Nothing found")
#' ) %>% f7NotFound()
#'
#' )
#' ),
#' server = function(input, output) {}
#' )
#'
#' # Expandable searchbar with trigger
#' cities <- names(precip)
#'
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "Expandable searchbar",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar with trigger",
#' hairline = FALSE,
#' shadow = TRUE,
#' f7SearchbarTrigger(targetId = "search1"),
#' subNavbar = f7SubNavbar(
#' f7Searchbar(id = "search1", expandable = TRUE)
#' )
#' ),
#' f7Block(
#' "This block will be hidden on search.
#' Lorem ipsum dolor sit amet, consectetur adipisicing elit."
#' ) %>% f7HideOnSearch(),
#' f7List(
#' lapply(seq_along(cities), function(i) {
#' f7ListItem(cities[i])
#' })
#' ) %>% f7Found(),
#'
#' f7Block(
#' p("Nothing found")
#' ) %>% f7NotFound()
#'
#' )
#' ),
#' server = function(input, output) {}
#' )
#'
#' # Searchbar in \link{f7Appbar}
#' shinyApp(
#' ui = f7Page(
#' title = "Searchbar in appbar",
#' f7Appbar(
#' f7Searchbar(id = "search1", inline = TRUE)
#' ),
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar in f7Appbar",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' f7List(
#' lapply(seq_along(cities), function(i) {
#' f7ListItem(cities[i])
#' })
#' ) %>% f7Found()
#' )
#' ),
#' server = function(input, output) {}
#' )
#' }
f7Searchbar <- function(id, placeholder = "Search", expandable = FALSE, inline = FALSE) {
searchBarCl <- "searchbar"
if (expandable) searchBarCl <- paste0(searchBarCl, " searchbar-expandable")
if (inline) {
shiny::tags$div(
class = "searchbar searchbar-inline",
id = id,
shiny::tags$div(
class = "searchbar-input-wrap",
shiny::tags$input(type = "search", placeholder = placeholder),
shiny::tags$i(class = "searchbar-icon"),
shiny::tags$span(class = "input-clear-button")
)
)
} else {
shiny::tags$form(
class = searchBarCl,
id = id,
shiny::tags$div(
class = "searchbar-inner",
shiny::tags$div(
class = "searchbar-input-wrap",
shiny::tags$input(type = "search", placeholder = placeholder),
shiny::tags$i(class = "searchbar-icon"),
shiny::tags$span(class = "input-clear-button")
),
shiny::tags$span(class = "searchbar-disable-button", "Cancel")
)
)
}
}
#' Framework 7 searchbar trigger
#'
#' Element that triggers the searchbar.
#'
#' @param targetId Id of the \link{f7Searchbar}.
#' @export
#'
#' @examples
#' if (interactive()) {
#'
#' }
f7SearchbarTrigger <- function(targetId) {
shiny::tags$a(
class = "link icon-only searchbar-enable",
`data-searchbar` = paste0("#", targetId),
shiny::tags$i(class = "icon f7-icons if-not-md", "search"),
shiny::tags$i(class = "icon material-icons md-only", "search")
)
}
#' Utility to hide a given tag on search
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to hide.
#' @export
f7HideOnSearch <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-hide-on-search")
return(tag)
}
#' Utility to hide a given tag when \link{f7Searchbar} is enabled.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to hide.
#' @export
f7HideOnEnable <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-hide-on-enable")
return(tag)
}
#' Utility to display an item when the search is unsuccessful.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to use.
#' @export
f7NotFound <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-not-found")
return(tag)
}
#' Utility to display an item when the search is successful.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to display. When using \link{f7Searchbar}, one must
#' wrap the items to be searched inside \link{f7Found}.
#' @export
f7Found <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-found")
return(tag)
}
#' Utility to ignore an item from search.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to ignore.
#' @export
f7SearchIgnore <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-ignore")
return(tag)
}
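# Typical wiring of these helpers (illustrative, mirroring the roxygen examples above;
# it assumes a f7Page UI that already contains a f7Searchbar):
#
#   f7Searchbar(id = "search1")                  # placed in a f7SubNavbar or f7Appbar
#   f7List(...) %>% f7Found()                    # the items the searchbar filters
#   f7Block("Intro text") %>% f7HideOnSearch()   # hidden while a search is active
#   f7Block(p("Nothing found")) %>% f7NotFound() # shown when no item matches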
|
/R/f7Searchbar.R
|
no_license
|
grambretagna/shinyMobile
|
R
| false | false | 5,812 |
r
|
#' Framework 7 searchbar
#'
#' Searchbar to filter elements in a page.
#'
#' @param id Necessary when using \link{f7SearchbarTrigger}. NULL otherwise.
#' @param placeholder Searchbar placeholder.
#' @param expandable Whether to enable the searchbar with a target link,
#' in the navbar. See \link{f7SearchbarTrigger}.
#' @param inline Useful to add a \link{f7Searchbar} in a \link{f7Appbar}.
#' Notice that utilities like \link{f7HideOnSearch} and \link{f7NotFound} are not
#' compatible with this mode.
#' @export
#'
#' @examples
#' if (interactive()) {
#' library(shiny)
#' library(shinyMobile)
#'
#' cars <- rownames(mtcars)
#'
#' shinyApp(
#' ui = f7Page(
#' title = "Simple searchbar",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar",
#' hairline = FALSE,
#' shadow = TRUE,
#' subNavbar = f7SubNavbar(
#' f7Searchbar(id = "search1")
#' )
#' ),
#' f7Block(
#' "This block will be hidden on search.
#' Lorem ipsum dolor sit amet, consectetur adipisicing elit."
#' ) %>% f7HideOnSearch(),
#' f7List(
#' lapply(seq_along(cars), function(i) {
#' f7ListItem(cars[i])
#' })
#' ) %>% f7Found(),
#'
#' f7Block(
#' p("Nothing found")
#' ) %>% f7NotFound()
#'
#' )
#' ),
#' server = function(input, output) {}
#' )
#'
#' # Expandable searchbar with trigger
#' cities <- names(precip)
#'
#' shiny::shinyApp(
#' ui = f7Page(
#' title = "Expandable searchbar",
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar with trigger",
#' hairline = FALSE,
#' shadow = TRUE,
#' f7SearchbarTrigger(targetId = "search1"),
#' subNavbar = f7SubNavbar(
#' f7Searchbar(id = "search1", expandable = TRUE)
#' )
#' ),
#' f7Block(
#' "This block will be hidden on search.
#' Lorem ipsum dolor sit amet, consectetur adipisicing elit."
#' ) %>% f7HideOnSearch(),
#' f7List(
#' lapply(seq_along(cities), function(i) {
#' f7ListItem(cities[i])
#' })
#' ) %>% f7Found(),
#'
#' f7Block(
#' p("Nothing found")
#' ) %>% f7NotFound()
#'
#' )
#' ),
#' server = function(input, output) {}
#' )
#'
#' # Searchbar in \link{f7Appbar}
#' shinyApp(
#' ui = f7Page(
#' title = "Searchbar in appbar",
#' f7Appbar(
#' f7Searchbar(id = "search1", inline = TRUE)
#' ),
#' f7SingleLayout(
#' navbar = f7Navbar(
#' title = "f7Searchbar in f7Appbar",
#' hairline = FALSE,
#' shadow = TRUE
#' ),
#' f7List(
#' lapply(seq_along(cities), function(i) {
#' f7ListItem(cities[i])
#' })
#' ) %>% f7Found()
#' )
#' ),
#' server = function(input, output) {}
#' )
#' }
f7Searchbar <- function(id, placeholder = "Search", expandable = FALSE, inline = FALSE) {
searchBarCl <- "searchbar"
if (expandable) searchBarCl <- paste0(searchBarCl, " searchbar-expandable")
if (inline) {
shiny::tags$div(
class = "searchbar searchbar-inline",
id = id,
shiny::tags$div(
class = "searchbar-input-wrap",
shiny::tags$input(type = "search", placeholder = placeholder),
shiny::tags$i(class = "searchbar-icon"),
shiny::tags$span(class = "input-clear-button")
)
)
} else {
shiny::tags$form(
class = searchBarCl,
id = id,
shiny::tags$div(
class = "searchbar-inner",
shiny::tags$div(
class = "searchbar-input-wrap",
shiny::tags$input(type = "search", placeholder = placeholder),
shiny::tags$i(class = "searchbar-icon"),
shiny::tags$span(class = "input-clear-button")
),
shiny::tags$span(class = "searchbar-disable-button", "Cancel")
)
)
}
}
#' Framework 7 searchbar trigger
#'
#' Element that triggers the searchbar.
#'
#' @param targetId Id of the \link{f7Searchbar}.
#' @export
#'
#' @examples
#' if (interactive()) {
#'
#' }
f7SearchbarTrigger <- function(targetId) {
shiny::tags$a(
class = "link icon-only searchbar-enable",
`data-searchbar` = paste0("#", targetId),
shiny::tags$i(class = "icon f7-icons if-not-md", "search"),
shiny::tags$i(class = "icon material-icons md-only", "search")
)
}
#' Utility to hide a given tag on search
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to hide.
#' @export
f7HideOnSearch <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-hide-on-search")
return(tag)
}
#' Utility to hide a given tag when \link{f7Searchbar} is enabled.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to hide.
#' @export
f7HideOnEnable <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-hide-on-enable")
return(tag)
}
#' Utility to display an item when the search is unsuccessful.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to use.
#' @export
f7NotFound <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-not-found")
return(tag)
}
#' Utility to display an item when the search is successful.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to display. When using \link{f7Searchbar}, one must
#' wrap the items to be searched inside \link{f7Found}.
#' @export
f7Found <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-found")
return(tag)
}
#' Utility to ignore an item from search.
#'
#' Use with \link{f7Searchbar}.
#'
#' @param tag tag to ignore.
#' @export
f7SearchIgnore <- function(tag) {
tag$attribs$class <- paste0(tag$attribs$class, " searchbar-ignore")
return(tag)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rank_sites.R
\name{rank_sites_DT}
\alias{rank_sites_DT}
\alias{rank_sites}
\title{Rank sites by EAR}
\usage{
rank_sites_DT(
chemical_summary,
category = "Biological",
mean_logic = FALSE,
sum_logic = TRUE,
hit_threshold = 0.1
)
rank_sites(
chemical_summary,
category,
hit_threshold = 0.1,
mean_logic = FALSE,
sum_logic = TRUE
)
}
\arguments{
\item{chemical_summary}{Data frame from \code{\link{get_chemical_summary}}.}
\item{category}{Character. Either "Biological", "Chemical Class", or "Chemical".}
\item{mean_logic}{Logical. \code{TRUE} displays the mean sample from each site,
\code{FALSE} displays the maximum sample from each site.}
\item{sum_logic}{Logical. \code{TRUE} sums the EARs in a specified grouping,
\code{FALSE} does not. \code{FALSE} may be better for traditional benchmarks as
opposed to ToxCast benchmarks.}
\item{hit_threshold}{Numeric threshold defining a "hit".}
}
\value{
data frame with one row per site, and the max or mean EAR and frequency of
hits based on the category.
}
\description{
The \code{rank_sites_DT} (DT option) and \code{rank_sites} (data frame option) functions
create tables with one row per site. Columns represent the maximum or mean EAR
(depending on the mean_logic argument) for each category ("Chemical Class",
"Chemical", or "Biological") and the frequency of the maximum or mean EAR
exceeding a user specified hit_threshold.
}
\details{
The tables show slightly different results for a single site. Rather than multiple
columns for categories, there is now 1 row per category (since the site is known).
}
\examples{
# This is the example workflow:
path_to_tox <- system.file("extdata", package="toxEval")
file_name <- "OWC_data_fromSup.xlsx"
full_path <- file.path(path_to_tox, file_name)
tox_list <- create_toxEval(full_path)
ACC <- get_ACC(tox_list$chem_info$CAS)
ACC <- remove_flags(ACC)
cleaned_ep <- clean_endPoint_info(end_point_info)
filtered_ep <- filter_groups(cleaned_ep)
chemical_summary <- get_chemical_summary(tox_list, ACC, filtered_ep)
stats_df <- rank_sites(chemical_summary, "Biological")
rank_sites_DT(chemical_summary, category = "Biological")
rank_sites_DT(chemical_summary, category = "Chemical Class")
rank_sites_DT(chemical_summary, category = "Chemical")
}
|
/man/rank_sites_DT.Rd
|
permissive
|
jcmartinmu/toxEval
|
R
| false | true | 2,348 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rank_sites.R
\name{rank_sites_DT}
\alias{rank_sites_DT}
\alias{rank_sites}
\title{Rank sites by EAR}
\usage{
rank_sites_DT(
chemical_summary,
category = "Biological",
mean_logic = FALSE,
sum_logic = TRUE,
hit_threshold = 0.1
)
rank_sites(
chemical_summary,
category,
hit_threshold = 0.1,
mean_logic = FALSE,
sum_logic = TRUE
)
}
\arguments{
\item{chemical_summary}{Data frame from \code{\link{get_chemical_summary}}.}
\item{category}{Character. Either "Biological", "Chemical Class", or "Chemical".}
\item{mean_logic}{Logical. \code{TRUE} displays the mean sample from each site,
\code{FALSE} displays the maximum sample from each site.}
\item{sum_logic}{Logical. \code{TRUE} sums the EARs in a specified grouping,
\code{FALSE} does not. \code{FALSE} may be better for traditional benchmarks as
opposed to ToxCast benchmarks.}
\item{hit_threshold}{Numeric threshold defining a "hit".}
}
\value{
data frame with one row per site, and the max or mean EAR and frequency of
hits based on the category.
}
\description{
The \code{rank_sites_DT} (DT option) and \code{rank_sites} (data frame option) functions
create tables with one row per site. Columns represent the maximum or mean EAR
(depending on the mean_logic argument) for each category ("Chemical Class",
"Chemical", or "Biological") and the frequency of the maximum or mean EAR
exceeding a user specified hit_threshold.
}
\details{
The tables show slightly different results for a single site. Rather than multiple
columns for categories, there is now 1 row per category (since the site is known).
}
\examples{
# This is the example workflow:
path_to_tox <- system.file("extdata", package="toxEval")
file_name <- "OWC_data_fromSup.xlsx"
full_path <- file.path(path_to_tox, file_name)
tox_list <- create_toxEval(full_path)
ACC <- get_ACC(tox_list$chem_info$CAS)
ACC <- remove_flags(ACC)
cleaned_ep <- clean_endPoint_info(end_point_info)
filtered_ep <- filter_groups(cleaned_ep)
chemical_summary <- get_chemical_summary(tox_list, ACC, filtered_ep)
stats_df <- rank_sites(chemical_summary, "Biological")
rank_sites_DT(chemical_summary, category = "Biological")
rank_sites_DT(chemical_summary, category = "Chemical Class")
rank_sites_DT(chemical_summary, category = "Chemical")
}
|
library(shiny)
library(dplyr)
library(ggplot2)
library(cgdsr)
load(file.path("data", "pam50centroids.rda"))
source("utility_functions.R")
ggplot2::theme_set(theme_classic() +
theme(axis.line.x = element_blank()) +
theme(axis.line.y = element_blank()))
colmutcat <- c("(germline)" = "black", "mutated" = "#1070b8")
alphamutcat <- c("(germline)" = 0.5, "mutated" = 1)
shapemutcat <- c("(germline)" = 1, "mutated" = 16)
conn <- CGDS("http://www.cbioportal.org/public-portal/")
subtype_data <- perform_subtype_classification(conn, pam50centroids)
function(input, output) {
conn <- CGDS("http://www.cbioportal.org/public-portal/")
retrieved_tcga_data <- reactive({
input$retrieve_data_button
ids <- split_query_str(isolate(input$query_str))
retrieve_tcga_data(conn, ids)
})
output$retrieved_genes <- renderUI({
p("Data retrieved for genes:",
lapply(retrieved_tcga_data()$ids, function(x)
a(x,
href = paste0("http://www.genecards.org/cgi-bin/carddisp.pl?gene=", x),
target = "_blank")))
})
output$var_y_ui = renderUI({
ids <- retrieved_tcga_data()$ids
selectInput("var_y", "Gene on vertical axis",
choices = ids, selected = ids[1])
})
output$var_x_ui = renderUI({
ids <- retrieved_tcga_data()$ids
selectInput("var_x", "Gene on horizontal axes",
choices = ids, selected = ids[min(2, length(ids))])
})
assembled_graphics_data <- reactive({
ids <- retrieved_tcga_data()$ids
var_x <- input$var_x
var_y <- input$var_y
if (is.null(var_x) | is.null(var_y)) {
var_x <- ids[min(2, length(ids))]
var_y <- ids[1]
}
if (!(var_x %in% ids)) {
var_x <- ids[min(2, length(ids))]
}
if (!(var_y %in% ids)) {
var_y <- ids[1]
}
graphics_data <- retrieved_tcga_data()$data %>%
mutate_(
x_mut = paste0(var_x, "_mutations"),
x_gistic = paste0(var_x, "_gistic"),
x_rna = paste0(var_x, "_rna"),
y = paste0(var_y, "_rna")) %>%
mutate(
x_mutcat =
factor(x_mut == "(germline)",
levels = c(TRUE, FALSE),
labels = c("(germline)", "mutated"))) %>%
'['(c("subjid", "x_mut", "x_mutcat", "x_gistic", "x_rna", "y")) %>%
left_join(subtype_data, by = "subjid")
graphics_data
})
output$tab1 <- renderTable({
tab1 <- assembled_graphics_data() %>%
filter(!is.na(x_mut) & !is.na(y)) %>%
'['("x_mut") %>%
table() %>%
as.data.frame.table()
names(tab1) <- c(paste0(input$var_x, ", AA change(s)"), "n")
tab1
})
output$fig1 <- renderPlot({
if (input$show_mut) {
gg <- assembled_graphics_data() %>%
filter(!is.na(x_mut) & !is.na(y)) %>%
ggplot(aes(x = x_mut, y = y))
} else {
gg <- assembled_graphics_data() %>%
filter(!is.na(x_mut) & !is.na(y)) %>%
ggplot(aes(x = x_mutcat, y = y))
}
if (input$mark_mut) {
gg <- gg +
geom_point(aes(col = x_mutcat, alpha = x_mutcat, shape = x_mutcat),
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
scale_colour_manual(values = colmutcat, na.value = "black", guide = FALSE) +
scale_alpha_manual(values = alphamutcat, na.value = 1, guide = FALSE) +
scale_shape_manual(values = shapemutcat, na.value = 4, guide = FALSE) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", predicted somatic non-silent mutation"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
} else {
gg <- gg +
geom_point(shape = 1, alpha = 0.5,
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", predicted somatic non-silent mutation"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
}
if (input$by_subtype)
gg <- gg + facet_wrap(~ subtype2, nrow = 2, as.table = FALSE)
plot(gg)
})
output$fig2 <- renderPlot({
gg <- assembled_graphics_data() %>%
filter(!is.na(x_gistic) & !is.na(y)) %>%
ggplot(aes(x = x_gistic, y = y))
if (input$mark_mut) {
gg <- gg +
geom_point(aes(col = x_mutcat, alpha = x_mutcat, shape = x_mutcat),
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
scale_colour_manual(values = colmutcat, na.value = "black") +
scale_alpha_manual(values = alphamutcat, na.value = 1) +
scale_shape_manual(values = shapemutcat, na.value = 4) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", putative CNA (GISTIC)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"),
col = input$var_x, alpha = input$var_x, shape = input$var_x)
} else {
gg <- gg +
geom_point(shape = 1, alpha = 0.5,
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", putative CNA (GISTIC)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
}
if (input$by_subtype)
gg <- gg + facet_wrap(~ subtype2, nrow = 2, as.table = FALSE)
plot(gg)
})
output$fig3 <- renderPlot({
gg <- assembled_graphics_data() %>%
filter(!is.na(x_rna) & !is.na(y)) %>%
ggplot(aes(x = x_rna, y = y))
if (input$mark_mut) {
gg <- gg +
geom_point(aes(col = x_mutcat, alpha = x_mutcat, shape = x_mutcat)) +
scale_colour_manual(values = colmutcat, na.value = "black") +
scale_alpha_manual(values = alphamutcat, na.value = 1) +
scale_shape_manual(values = shapemutcat, na.value = 4) +
labs(
x = paste0(input$var_x, ", mRNA expression (log2 RNA-seq)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"),
col = input$var_x, alpha = input$var_x, shape = input$var_x)
} else {
gg <- gg +
geom_point(shape = 1, alpha = 0.5) +
labs(
x = paste0(input$var_x, ", mRNA expression (log2 RNA-seq)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
}
if (input$fig3_smooth_method != "(none)")
gg <- gg + geom_smooth(col = "darkred", method = input$fig3_smooth_method)
if (input$by_subtype)
gg <- gg + facet_wrap(~ subtype2, nrow = 2, as.table = FALSE)
plot(gg)
})
output$tab2 <- renderTable({
graphics_data <- assembled_graphics_data()
r_all <- cor(
graphics_data$x_rna, graphics_data$y,
use = "complete.obs", method = "spearman")
r_subtype <- unlist(lapply(
split(graphics_data, graphics_data$subtype),
function(df) cor(df$x_rna, df$y,
use = "complete.obs", method = "spearman")))
tab2 <- data.frame(
grp = c("(all)", names(r_subtype)),
r = c(r_all, r_subtype))
names(tab2) <- c("Molecular subtype", "r")
tab2
})
}
|
/server.R
|
permissive
|
gsh150801/tcga-brca-explorer
|
R
| false | false | 7,582 |
r
|
library(shiny)
library(dplyr)
library(ggplot2)
library(cgdsr)
load(file.path("data", "pam50centroids.rda"))
source("utility_functions.R")
ggplot2::theme_set(theme_classic() +
theme(axis.line.x = element_blank()) +
theme(axis.line.y = element_blank()))
colmutcat <- c("(germline)" = "black", "mutated" = "#1070b8")
alphamutcat <- c("(germline)" = 0.5, "mutated" = 1)
shapemutcat <- c("(germline)" = 1, "mutated" = 16)
conn <- CGDS("http://www.cbioportal.org/public-portal/")
subtype_data <- perform_subtype_classification(conn, pam50centroids)
function(input, output) {
conn <- CGDS("http://www.cbioportal.org/public-portal/")
retrieved_tcga_data <- reactive({
input$retrieve_data_button
ids <- split_query_str(isolate(input$query_str))
retrieve_tcga_data(conn, ids)
})
output$retrieved_genes <- renderUI({
p("Data retrieved for genes:",
lapply(retrieved_tcga_data()$ids, function(x)
a(x,
href = paste0("http://www.genecards.org/cgi-bin/carddisp.pl?gene=", x),
target = "_blank")))
})
output$var_y_ui = renderUI({
ids <- retrieved_tcga_data()$ids
selectInput("var_y", "Gene on vertical axis",
choices = ids, selected = ids[1])
})
output$var_x_ui = renderUI({
ids <- retrieved_tcga_data()$ids
selectInput("var_x", "Gene on horizontal axes",
choices = ids, selected = ids[min(2, length(ids))])
})
assembled_graphics_data <- reactive({
ids <- retrieved_tcga_data()$ids
var_x <- input$var_x
var_y <- input$var_y
if (is.null(var_x) | is.null(var_y)) {
var_x <- ids[min(2, length(ids))]
var_y <- ids[1]
}
if (!(var_x %in% ids)) {
var_x <- ids[min(2, length(ids))]
}
if (!(var_y %in% ids)) {
var_y <- ids[1]
}
graphics_data <- retrieved_tcga_data()$data %>%
mutate_(
x_mut = paste0(var_x, "_mutations"),
x_gistic = paste0(var_x, "_gistic"),
x_rna = paste0(var_x, "_rna"),
y = paste0(var_y, "_rna")) %>%
mutate(
x_mutcat =
factor(x_mut == "(germline)",
levels = c(TRUE, FALSE),
labels = c("(germline)", "mutated"))) %>%
'['(c("subjid", "x_mut", "x_mutcat", "x_gistic", "x_rna", "y")) %>%
left_join(subtype_data, by = "subjid")
graphics_data
})
output$tab1 <- renderTable({
tab1 <- assembled_graphics_data() %>%
filter(!is.na(x_mut) & !is.na(y)) %>%
'['("x_mut") %>%
table() %>%
as.data.frame.table()
names(tab1) <- c(paste0(input$var_x, ", AA change(s)"), "n")
tab1
})
output$fig1 <- renderPlot({
if (input$show_mut) {
gg <- assembled_graphics_data() %>%
filter(!is.na(x_mut) & !is.na(y)) %>%
ggplot(aes(x = x_mut, y = y))
} else {
gg <- assembled_graphics_data() %>%
filter(!is.na(x_mut) & !is.na(y)) %>%
ggplot(aes(x = x_mutcat, y = y))
}
if (input$mark_mut) {
gg <- gg +
geom_point(aes(col = x_mutcat, alpha = x_mutcat, shape = x_mutcat),
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
scale_colour_manual(values = colmutcat, na.value = "black", guide = FALSE) +
scale_alpha_manual(values = alphamutcat, na.value = 1, guide = FALSE) +
scale_shape_manual(values = shapemutcat, na.value = 4, guide = FALSE) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", predicted somatic non-silent mutation"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
} else {
gg <- gg +
geom_point(shape = 1, alpha = 0.5,
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", predicted somatic non-silent mutation"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
}
if (input$by_subtype)
gg <- gg + facet_wrap(~ subtype2, nrow = 2, as.table = FALSE)
plot(gg)
})
output$fig2 <- renderPlot({
gg <- assembled_graphics_data() %>%
filter(!is.na(x_gistic) & !is.na(y)) %>%
ggplot(aes(x = x_gistic, y = y))
if (input$mark_mut) {
gg <- gg +
geom_point(aes(col = x_mutcat, alpha = x_mutcat, shape = x_mutcat),
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
scale_colour_manual(values = colmutcat, na.value = "black") +
scale_alpha_manual(values = alphamutcat, na.value = 1) +
scale_shape_manual(values = shapemutcat, na.value = 4) +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", putative CNA (GISTIC)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"),
col = input$var_x, alpha = input$var_x, shape = input$var_x)
} else {
gg <- gg +
geom_point(shape = 1, alpha = 0.5,
position = position_jitter(h = 0, w = 0.1)) +
geom_boxplot(col = "darkred", varwidth = TRUE,
fill = "transparent", outlier.colour = "transparent") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
labs(
x = paste0(input$var_x, ", putative CNA (GISTIC)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
}
if (input$by_subtype)
gg <- gg + facet_wrap(~ subtype2, nrow = 2, as.table = FALSE)
plot(gg)
})
output$fig3 <- renderPlot({
gg <- assembled_graphics_data() %>%
filter(!is.na(x_rna) & !is.na(y)) %>%
ggplot(aes(x = x_rna, y = y))
if (input$mark_mut) {
gg <- gg +
geom_point(aes(col = x_mutcat, alpha = x_mutcat, shape = x_mutcat)) +
scale_colour_manual(values = colmutcat, na.value = "black") +
scale_alpha_manual(values = alphamutcat, na.value = 1) +
scale_shape_manual(values = shapemutcat, na.value = 4) +
labs(
x = paste0(input$var_x, ", mRNA expression (log2 RNA-seq)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"),
col = input$var_x, alpha = input$var_x, shape = input$var_x)
} else {
gg <- gg +
geom_point(shape = 1, alpha = 0.5) +
labs(
x = paste0(input$var_x, ", mRNA expression (log2 RNA-seq)"),
y = paste0(input$var_y, ", mRNA expression (log2 RNA-seq)"))
}
if (input$fig3_smooth_method != "(none)")
gg <- gg + geom_smooth(col = "darkred", method = input$fig3_smooth_method)
if (input$by_subtype)
gg <- gg + facet_wrap(~ subtype2, nrow = 2, as.table = FALSE)
plot(gg)
})
output$tab2 <- renderTable({
graphics_data <- assembled_graphics_data()
r_all <- cor(
graphics_data$x_rna, graphics_data$y,
use = "complete.obs", method = "spearman")
r_subtype <- unlist(lapply(
split(graphics_data, graphics_data$subtype),
function(df) cor(df$x_rna, df$y,
use = "complete.obs", method = "spearman")))
tab2 <- data.frame(
grp = c("(all)", names(r_subtype)),
r = c(r_all, r_subtype))
names(tab2) <- c("Molecular subtype", "r")
tab2
})
}
|
setwd("~/Dropbox/@Next/AI/JH_EDA/HW1")
library(readr)
household_power_consumption <- read_delim("household_power_consumption.txt",
";", escape_double = FALSE, locale = locale(date_format = "%d/%m/%Y"),
na = "NA", trim_ws = TRUE)
names(household_power_consumption)<-tolower(names(household_power_consumption))
library(dplyr)
hh_subdata<-filter(household_power_consumption, date>="2007-02-01" & date<="2007-02-02")
rm (household_power_consumption)
hh_subdata$datetime <- as.POSIXct(paste(hh_subdata$date, hh_subdata$time), format="%Y-%m-%d %H:%M:%S")
par(mfrow=c(1,1))
plot(hh_subdata$datetime,as.numeric(hh_subdata$sub_metering_1),type="l", col="black",xlab="",ylab="Energy sub metering")
lines(hh_subdata$datetime,as.numeric(hh_subdata$sub_metering_2), col="red")
lines(hh_subdata$datetime,as.numeric(hh_subdata$sub_metering_3), col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1) ,col=c("black", "red", "blue") )
dev.copy(png,file="plot3.png",width=480,height=480)
dev.off()
|
/plot3.r
|
no_license
|
rjcc/ExData_Plotting1_RJCC
|
R
| false | false | 1,031 |
r
|
setwd("~/Dropbox/@Next/AI/JH_EDA/HW1")
library(readr)
household_power_consumption <- read_delim("household_power_consumption.txt",
";", escape_double = FALSE, locale = locale(date_format = "%d/%m/%Y"),
na = "NA", trim_ws = TRUE)
names(household_power_consumption)<-tolower(names(household_power_consumption))
library(dplyr)
hh_subdata<-filter(household_power_consumption, date>="2007-02-01" & date<="2007-02-02")
rm (household_power_consumption)
hh_subdata$datetime <- as.POSIXct(paste(hh_subdata$date, hh_subdata$time), format="%Y-%m-%d %H:%M:%S")
par(mfrow=c(1,1))
plot(hh_subdata$datetime,as.numeric(hh_subdata$sub_metering_1),type="l", col="black",xlab="",ylab="Energy sub metering")
lines(hh_subdata$datetime,as.numeric(hh_subdata$sub_metering_2), col="red")
lines(hh_subdata$datetime,as.numeric(hh_subdata$sub_metering_3), col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1) ,col=c("black", "red", "blue") )
dev.copy(png,file="plot3.png",width=480,height=480)
dev.off()
|
#Monty Hall Problem
#Keeping your choice
montyhall1<-function(n){
count=0
for (i in 1:n){
#Assigning a car to one door
car=sample(3,1)
#Selecting your door
pick=1
#If your pick matches your car
if (pick==car){
count=count+1
}
}
print(count/n)
}
#Changing your choice
montyhall2<-function(n){
count=0
for (i in 1:n){
car=sample(3,1)
pick=sample(3,1)
v=c(1:3)
monty=v[!v %in% c(car,pick)][1]
newpick=v[!v %in% c(pick,monty)]
if (newpick==car){
count=count+1
}
}
print(count/n)
}
montyhall1(10000)
montyhall2(10000)
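
# Hedged addition (not part of the original script): the same two strategies
# simulated without an explicit loop. Switching wins exactly when the initial
# pick was wrong, so with a large n the estimates should approach 1/3 (stay)
# and 2/3 (switch).
montyhall_vectorised <- function(n) {
  car <- sample(3, n, replace = TRUE)   # door hiding the car, one per game
  pick <- sample(3, n, replace = TRUE)  # contestant's initial pick, one per game
  c(stay = mean(pick == car), switch = mean(pick != car))
}
montyhall_vectorised(10000)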
|
/University/Monty Hall Problem/montyhallproblem.R
|
no_license
|
michaelfilletti/myrepository
|
R
| false | false | 561 |
r
|
#Monty Hall Problem
#Keeping your choice
montyhall1<-function(n){
count=0
for (i in 1:n){
#Assigning a car to one door
car=sample(3,1)
#Selecting your door
pick=1
#If your pick matches your car
if (pick==car){
count=count+1
}
}
print(count/n)
}
#Changing your choice
montyhall2<-function(n){
count=0
for (i in 1:n){
car=sample(3,1)
pick=sample(3,1)
v=c(1:3)
monty=v[!v %in% c(car,pick)][1]
newpick=v[!v %in% c(pick,monty)]
if (newpick==car){
count=count+1
}
}
print(count/n)
}
montyhall1(10000)
montyhall2(10000)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{chart.RollingCorr}
\alias{chart.RollingCorr}
\title{\code{chart.RollingCorr}}
\usage{
chart.RollingCorr(Ra, Rb, width = 12, xaxis = TRUE, legend.loc = NULL,
colorset = (1:12), ylimmin = -1, ylimmax = 1, ..., fill = NA)
}
\arguments{
\item{Ra}{A univariate xts object of returns.}
\item{Rb}{A univariate or multivariate xts object of returns.}
\item{width}{Number of periods to compute correlation over.}
\item{legend.loc}{places a legend into one of nine locations on the chart: bottomright, bottom, bottomleft, left, topleft, top, topright, right, or center.}
\item{xaxis}{If true, draws the x axis}
\item{colorset}{Color palette to use, set by default to rational choices}
\item{...}{any other passthru parameters}
\item{ylimmin}{ylim minimum value}
\item{ylimmax}{ylim maximum value}
\item{fill}{a three-component vector or list (recycled otherwise) providing filling values at the left/within/to the right of the data range. See the fill argument of na.fill for details.}
}
\value{
A univariate xts object representing the average of averages.
}
\description{
chart.RollingCorrelation from PerformanceAnalytics using Spearman method and customized ylim
}
\examples{
data(data)
Ra<-RTL:::data_ret(x=Cl(CL1),returntype=c("relative"))
Rb<-RTL:::data_ret(x=Cl(CL2),returntype=c("relative"))
chart.RollingCorr(Ra=Ra, Rb=Rb, width = 12)
}
\author{
Philippe Cote <coteph@mac.com,philippe.cote@scotiabank.com>, Nima Safain <nima.safaian@gmail.com,nima.safaian@scotiabank.com>
}
|
/man/chart.RollingCorr.Rd
|
no_license
|
bigdatalib/RTL
|
R
| false | false | 1,642 |
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{chart.RollingCorr}
\alias{chart.RollingCorr}
\title{\code{chart.RollingCorr}}
\usage{
chart.RollingCorr(Ra, Rb, width = 12, xaxis = TRUE, legend.loc = NULL,
colorset = (1:12), ylimmin = -1, ylimmax = 1, ..., fill = NA)
}
\arguments{
\item{Ra}{A univariate xts object of returns.}
\item{Rb}{A univariate or multivariate xts object of returns.}
\item{width}{Number of periods to compute correlation over.}
\item{legend.loc}{places a legend into one of nine locations on the chart: bottomright, bottom, bottomleft, left, topleft, top, topright, right, or center.}
\item{xaxis}{If true, draws the x axis}
\item{colorset}{Color palette to use, set by default to rational choices}
\item{...}{any other passthru parameters}
\item{ylimmin}{ylim minimum value}
\item{ylimmax}{ylim maximum value}
\item{fill}{a three-component vector or list (recycled otherwise) providing filling values at the left/within/to the right of the data range. See the fill argument of na.fill for details.}
}
\value{
A univariate xts object representing the average of averages.
}
\description{
chart.RollingCorrelation from PerformanceAnalytics using Spearman method and customized ylim
}
\examples{
data(data)
Ra<-RTL:::data_ret(x=Cl(CL1),returntype=c("relative"))
Rb<-RTL:::data_ret(x=Cl(CL2),returntype=c("relative"))
chart.RollingCorr(Ra=Ra, Rb=Rb, width = 12)
}
\author{
Philippe Cote <coteph@mac.com,philippe.cote@scotiabank.com>, Nima Safain <nima.safaian@gmail.com,nima.safaian@scotiabank.com>
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/getIndex.R
\name{getIndex}
\alias{getIndex}
\title{Group experiments.}
\usage{
getIndex(reg, ids, by.prob = FALSE, by.algo = FALSE, by.repl = FALSE,
by.prob.pars, by.algo.pars, enclos = parent.frame())
}
\arguments{
\item{reg}{[\code{\link{ExperimentRegistry}}]\cr
Registry.}
\item{ids}{[\code{integer}]\cr
If not missing, restrict grouping to this subset of experiment ids.}
\item{by.prob}{[\code{logical}]\cr
Group experiments by problem. Default is \code{FALSE}.}
\item{by.algo}{[\code{logical}]\cr
Group experiments by algorithm. Default is \code{FALSE}.}
\item{by.repl}{[\code{logical}]\cr
Group experiments by replication. Default is \code{FALSE}.}
\item{by.prob.pars}{[R expression]\cr
If not missing, group experiments by this R expression.
The expression is evaluated in the environment of problem parameters and
converted to a factor using \code{as.factor}.}
\item{by.algo.pars}{[R expression]\cr
If not missing, group experiments by this R expression.
The expression is evaluated in the environment of algorithm parameters and
converted to a factor using \code{\link{as.factor}}.}
\item{enclos}{[\code{environment}]\cr
Enclosing frame for evaluation of parameters used by \code{by.prob.pars} and
\code{by.algo.pars}, see \code{\link[base]{eval}}. Defaults to the parent
frame.}
}
\value{
[\code{list}]. List of factors.
}
\description{
Creates a list of \code{\link{factor}} to use in functions like \code{\link{tapply}}, \code{\link{by}}
or \code{\link{aggregate}}.
}
\examples{
# create a registry and add problems and algorithms
reg = makeExperimentRegistry("getIndex", file.dir = tempfile(""))
addProblem(reg, "prob", static = 1)
addAlgorithm(reg, "f0", function(static, dynamic) static)
addAlgorithm(reg, "f1", function(static, dynamic, i, k) static * i^k)
ad = list(makeDesign("f0"), makeDesign("f1", exhaustive = list(i = 1:5, k = 1:3)))
addExperiments(reg, algo.designs = ad)
submitJobs(reg)
# get grouped job ids
ids = getJobIds(reg)
by(ids, getIndex(reg, by.prob = TRUE, by.algo = TRUE), identity)
ids = findExperiments(reg, algo.pattern = "f1")
by(ids, getIndex(reg, ids, by.algo.pars = (k == 1)), identity)
# groupwise reduction
ids = findExperiments(reg, algo.pattern = "f1")
showStatus(reg, ids)
f = function(aggr, job, res) aggr + res
by(ids, getIndex(reg, ids, by.algo.pars = k), reduceResults, reg = reg, fun = f)
by(ids, getIndex(reg, ids, by.algo.pars = i), reduceResults, reg = reg, fun = f)
}
|
/man/getIndex.Rd
|
no_license
|
renozao/BatchExperiments
|
R
| false | false | 2,522 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/getIndex.R
\name{getIndex}
\alias{getIndex}
\title{Group experiments.}
\usage{
getIndex(reg, ids, by.prob = FALSE, by.algo = FALSE, by.repl = FALSE,
by.prob.pars, by.algo.pars, enclos = parent.frame())
}
\arguments{
\item{reg}{[\code{\link{ExperimentRegistry}}]\cr
Registry.}
\item{ids}{[\code{integer}]\cr
If not missing, restrict grouping to this subset of experiment ids.}
\item{by.prob}{[\code{logical}]\cr
Group experiments by problem. Default is \code{FALSE}.}
\item{by.algo}{[\code{logical}]\cr
Group experiments by algorithm. Default is \code{FALSE}.}
\item{by.repl}{[\code{logical}]\cr
Group experiments by replication. Default is \code{FALSE}.}
\item{by.prob.pars}{[R expression]\cr
If not missing, group experiments by this R expression.
The expression is evaluated in the environment of problem parameters and
converted to a factor using \code{as.factor}.}
\item{by.algo.pars}{[R expression]\cr
If not missing, group experiments by this R expression.
The expression is evaluated in the environment of algorithm parameters and
converted to a factor using \code{\link{as.factor}}.}
\item{enclos}{[\code{environment}]\cr
Enclosing frame for evaluation of parameters used by \code{by.prob.pars} and
\code{by.algo.pars}, see \code{\link[base]{eval}}. Defaults to the parent
frame.}
}
\value{
[\code{list}]. List of factors.
}
\description{
Creates a list of \code{\link{factor}} to use in functions like \code{\link{tapply}}, \code{\link{by}}
or \code{\link{aggregate}}.
}
\examples{
# create a registry and add problems and algorithms
reg = makeExperimentRegistry("getIndex", file.dir = tempfile(""))
addProblem(reg, "prob", static = 1)
addAlgorithm(reg, "f0", function(static, dynamic) static)
addAlgorithm(reg, "f1", function(static, dynamic, i, k) static * i^k)
ad = list(makeDesign("f0"), makeDesign("f1", exhaustive = list(i = 1:5, k = 1:3)))
addExperiments(reg, algo.designs = ad)
submitJobs(reg)
# get grouped job ids
ids = getJobIds(reg)
by(ids, getIndex(reg, by.prob = TRUE, by.algo = TRUE), identity)
ids = findExperiments(reg, algo.pattern = "f1")
by(ids, getIndex(reg, ids, by.algo.pars = (k == 1)), identity)
# groupwise reduction
ids = findExperiments(reg, algo.pattern = "f1")
showStatus(reg, ids)
f = function(aggr, job, res) aggr + res
by(ids, getIndex(reg, ids, by.algo.pars = k), reduceResults, reg = reg, fun = f)
by(ids, getIndex(reg, ids, by.algo.pars = i), reduceResults, reg = reg, fun = f)
}
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#' @field bundle_location
#' @field service_location
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfo <- R6::R6Class(
'ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfo',
public = list(
`pid` = NULL,
`title` = NULL,
`description` = NULL,
`properties` = NULL,
`bundle_location` = NULL,
`service_location` = NULL,
initialize = function(`pid`, `title`, `description`, `properties`, `bundle_location`, `service_location`){
if (!missing(`pid`)) {
stopifnot(is.character(`pid`), length(`pid`) == 1)
self$`pid` <- `pid`
}
if (!missing(`title`)) {
stopifnot(is.character(`title`), length(`title`) == 1)
self$`title` <- `title`
}
if (!missing(`description`)) {
stopifnot(is.character(`description`), length(`description`) == 1)
self$`description` <- `description`
}
if (!missing(`properties`)) {
stopifnot(R6::is.R6(`properties`))
self$`properties` <- `properties`
}
if (!missing(`bundle_location`)) {
stopifnot(is.character(`bundle_location`), length(`bundle_location`) == 1)
self$`bundle_location` <- `bundle_location`
}
if (!missing(`service_location`)) {
stopifnot(is.character(`service_location`), length(`service_location`) == 1)
self$`service_location` <- `service_location`
}
},
toJSON = function() {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject <- list()
if (!is.null(self$`pid`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['pid']] <- self$`pid`
}
if (!is.null(self$`title`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['title']] <- self$`title`
}
if (!is.null(self$`description`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['description']] <- self$`description`
}
if (!is.null(self$`properties`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['properties']] <- self$`properties`$toJSON()
}
if (!is.null(self$`bundle_location`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['bundle_location']] <- self$`bundle_location`
}
if (!is.null(self$`service_location`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['service_location']] <- self$`service_location`
}
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject
},
fromJSON = function(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject <- jsonlite::fromJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson)
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`pid`)) {
self$`pid` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`pid`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`title`)) {
self$`title` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`title`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`description`)) {
self$`description` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`description`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`properties`)) {
propertiesObject <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterProperties$new()
propertiesObject$fromJSON(jsonlite::toJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$properties, auto_unbox = TRUE))
self$`properties` <- propertiesObject
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`bundle_location`)) {
self$`bundle_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`bundle_location`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`service_location`)) {
self$`service_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`service_location`
}
},
toJSONString = function() {
sprintf(
'{
"pid": %s,
"title": %s,
"description": %s,
"properties": %s,
"bundle_location": %s,
"service_location": %s
}',
self$`pid`,
self$`title`,
self$`description`,
self$`properties`$toJSON(),
self$`bundle_location`,
self$`service_location`
)
},
fromJSONString = function(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject <- jsonlite::fromJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson)
self$`pid` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`pid`
self$`title` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`title`
self$`description` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`description`
ComDayCqWcmMobileCoreImplRedirectRedirectFilterPropertiesObject <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterProperties$new()
self$`properties` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterPropertiesObject$fromJSON(jsonlite::toJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$properties, auto_unbox = TRUE))
self$`bundle_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`bundle_location`
self$`service_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`service_location`
}
)
)
|
/clients/r/generated/R/ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfo.r
|
permissive
|
shinesolutions/swagger-aem-osgi
|
R
| false | false | 6,119 |
r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#' @field bundle_location
#' @field service_location
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfo <- R6::R6Class(
'ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfo',
public = list(
`pid` = NULL,
`title` = NULL,
`description` = NULL,
`properties` = NULL,
`bundle_location` = NULL,
`service_location` = NULL,
initialize = function(`pid`, `title`, `description`, `properties`, `bundle_location`, `service_location`){
if (!missing(`pid`)) {
stopifnot(is.character(`pid`), length(`pid`) == 1)
self$`pid` <- `pid`
}
if (!missing(`title`)) {
stopifnot(is.character(`title`), length(`title`) == 1)
self$`title` <- `title`
}
if (!missing(`description`)) {
stopifnot(is.character(`description`), length(`description`) == 1)
self$`description` <- `description`
}
if (!missing(`properties`)) {
stopifnot(R6::is.R6(`properties`))
self$`properties` <- `properties`
}
if (!missing(`bundle_location`)) {
stopifnot(is.character(`bundle_location`), length(`bundle_location`) == 1)
self$`bundle_location` <- `bundle_location`
}
if (!missing(`service_location`)) {
stopifnot(is.character(`service_location`), length(`service_location`) == 1)
self$`service_location` <- `service_location`
}
},
toJSON = function() {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject <- list()
if (!is.null(self$`pid`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['pid']] <- self$`pid`
}
if (!is.null(self$`title`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['title']] <- self$`title`
}
if (!is.null(self$`description`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['description']] <- self$`description`
}
if (!is.null(self$`properties`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['properties']] <- self$`properties`$toJSON()
}
if (!is.null(self$`bundle_location`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['bundle_location']] <- self$`bundle_location`
}
if (!is.null(self$`service_location`)) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject[['service_location']] <- self$`service_location`
}
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject
},
fromJSON = function(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject <- jsonlite::fromJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson)
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`pid`)) {
self$`pid` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`pid`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`title`)) {
self$`title` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`title`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`description`)) {
self$`description` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`description`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`properties`)) {
propertiesObject <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterProperties$new()
propertiesObject$fromJSON(jsonlite::toJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$properties, auto_unbox = TRUE))
self$`properties` <- propertiesObject
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`bundle_location`)) {
self$`bundle_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`bundle_location`
}
if (!is.null(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`service_location`)) {
self$`service_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`service_location`
}
},
toJSONString = function() {
sprintf(
'{
"pid": %s,
"title": %s,
"description": %s,
"properties": %s,
"bundle_location": %s,
"service_location": %s
}',
self$`pid`,
self$`title`,
self$`description`,
self$`properties`$toJSON(),
self$`bundle_location`,
self$`service_location`
)
},
fromJSONString = function(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson) {
ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject <- jsonlite::fromJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoJson)
self$`pid` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`pid`
self$`title` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`title`
self$`description` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`description`
ComDayCqWcmMobileCoreImplRedirectRedirectFilterPropertiesObject <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterProperties$new()
self$`properties` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterPropertiesObject$fromJSON(jsonlite::toJSON(ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$properties, auto_unbox = TRUE))
self$`bundle_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`bundle_location`
self$`service_location` <- ComDayCqWcmMobileCoreImplRedirectRedirectFilterInfoObject$`service_location`
}
)
)
|
library(geosphere)
lon1 = -97.040443
lat1 = 32.897480
lon2 = -97.0150
lat2 = 32.9546
distm(c(lon1, lat1), c(lon2, lat2), fun = distHaversine) * 0.000621371
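
# Hedged addition (not in the original script): distm() above returns the
# great-circle (haversine) distance in metres, and multiplying by 0.000621371
# converts it to miles. Wrapping the calculation in a small helper makes it
# reusable for any pair of (longitude, latitude) coordinates.
dist_miles <- function(lon1, lat1, lon2, lat2) {
  distm(c(lon1, lat1), c(lon2, lat2), fun = distHaversine) * 0.000621371
}
dist_miles(lon1, lat1, lon2, lat2)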
|
/usefulFunctions/distance.between.locations.r
|
no_license
|
gsdavis1959/R_examples
|
R
| false | false | 159 |
r
|
library(geosphere)
lon1 = -97.040443
lat1 = 32.897480
lon2 = -97.0150
lat2 = 32.9546
distm(c(lon1, lat1), c(lon2, lat2), fun = distHaversine) * 0.000621371
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert.r
\name{create.capt}
\alias{create.capt}
\title{Creating capture history object.}
\usage{
create.capt(captures, n.traps = NULL)
}
\arguments{
\item{captures}{A data frame of capture records, see 'Details' for
the correct format.}
\item{n.traps}{The total number of traps. If \code{NULL} then the
number of traps is assumed to be the largest value in the
\code{traps} column of the \code{captures} argument.}
}
\description{
Creates a capture history object to use with the function
\code{\link{fit.ascr}}.
}
\details{
The \code{captures} argument to this function is intended to be of
a similar format to the \code{captures} argument to
\link{make.capthist} in the \link{secr} package. That is, users can
use the same \code{captures} data frame with \code{create.capt} and
\code{make.capthist}, which generate capture histories for use with
the \code{ascr} and \link{secr} packages respectively.
As such, the second and fourth columns should provide the ID of the
detection and the trap number of the trap which made the detection
(where the trap number is the row number of the corresponding trap
in the matrix of trap locations). Note that the first and third
columns provide the 'session' and 'occasion' of the detection for
\link{make.capthist}, but as the ascr package does not
presently have the capability to deal with multi-session or
multi-occasion data, these columns are ignored by
\code{create.capt}.
Additional optional columns can specify the additional information
collected over the course of the survey:
\itemize{
\item A column named \code{bearing} containing estimated bearings
from which the detector detected the individual.
\item A column named \code{dist} containing the estimated
distance between the individual detected and the detector.
\item A column named \code{ss} containing the measured signal
strength of the detected acoustic signal (only possible when
detectors are microphones).
\item A column named \code{toa} containing the measured time of
arrival (in seconds) since the start of the survey (or some
other reference time) of the detected acoustic signal (only
possible when the detectors are microphones).
\item A column named \code{mrds} containing the \emph{known} (not
estimated) distance between the individual detected and the
detector.
}
}
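\examples{
# Hypothetical sketch (not taken from the package documentation): a minimal
# captures data frame in the format described above, where the second column
# holds the detection ID and the fourth the trap number, plus an optional
# column of estimated bearings.
captures <- data.frame(session  = 1,
                       id       = c(1, 1, 2),
                       occasion = 1,
                       trap     = c(1, 3, 2),
                       bearing  = c(0.50, 1.20, 2.75))
capt <- create.capt(captures, n.traps = 6)
}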
|
/man/create.capt.Rd
|
no_license
|
cmjt/ascr
|
R
| false | true | 2,458 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert.r
\name{create.capt}
\alias{create.capt}
\title{Creating capture history object.}
\usage{
create.capt(captures, n.traps = NULL)
}
\arguments{
\item{captures}{A data frame of capture records, see 'Details' for
the correct format.}
\item{n.traps}{The total number of traps. If \code{NULL} then the
number of traps is assumed to be the largest value in the
\code{traps} column of the \code{captures} argument.}
}
\description{
Creates a capture history object to use with the function
\code{\link{fit.ascr}}.
}
\details{
The \code{captures} argument to this function is intended to be of
a similar format to the \code{captures} argument to
\link{make.capthist} in the \link{secr} package. That is, users can
use the same \code{captures} data frame with \code{create.capt} and
\code{make.capthist}, which generate capture histories for use with
the \code{ascr} and \link{secr} packages respectively.
As such, the second and fourth columns should provide the ID of the
detection and the trap number of the trap which made the detection
(where the trap number is the row number of the corresponding trap
in the matrix of trap locations). Note that the first and third
columns provide the 'session' and 'occasion' of the detection for
\link{make.capthist}, but as the ascr package does not
presently have the capability to deal with multi-session or
multi-occasion data, these columns are ignored by
\code{create.capt}.
Additional optional columns can specify the additional information
collected over the course of the survey:
\itemize{
\item A column named \code{bearing} containing estimated bearings
from which the detector detected the individual.
\item A column named \code{dist} containing the estimated
distance between the individual detected and the detector.
\item A column named \code{ss} containing the measured signal
strength of the detected acoustic signal (only possible when
detectors are microphones).
\item A column named \code{toa} containing the measured time of
arrival (in seconds) since the start of the survey (or some
other reference time) of the detected acoustic signal (only
possible when the detectors are microphones).
\item A column named \code{mrds} containing the \emph{known} (not
estimated) distance between the individual detected and the
detector.
}
}
|
##MDML Final Project
##Shannon Kay, Jaejin Kim, & Jessica Spencer
##December 13, 2019
##Preprocess Chicago Restaurant Data
##Load required packages and data
library(tidyverse)
chicago <- read_csv("../Food_Inspections.csv")
#1. Drop unnecessary columns and rename variables
chicago <- chicago %>%
select(-`DBA Name`,
-Latitude,
-Longitude,
-Location,
-Risk,
-Address,
-State) %>%
rename(Name = `AKA Name`,
Restaurant_ID = `License #`,
Inspection_Type = `Inspection Type`,
Inspection_ID = `Inspection ID`,
Zip_code = Zip)
#2. Drop rows with 'results' that are not relevant, filter 'facility type' and 'inspection type' to only have relevant types, create Date, Year, Month and Weekday, and filter to relevant years
chicago <- chicago %>%
filter(!Results %in% c("Out of Business", "Not Ready"))%>%
mutate(Facility_Type = tolower(`Facility Type`),
Inspection_Type = tolower(Inspection_Type))%>%
select(-`Facility Type`) %>%
filter(Facility_Type %in% c("bakery", "cafe", "restaurant", "tavern", "deli", "ice cream", "paleteria"),
Inspection_Type %in% c("canvass", "complaint", "license", "suspected food poisoning")) %>%
mutate(Date = lubridate::mdy(`Inspection Date`),
Year = lubridate::year(Date),
Month = lubridate::month(Date),
Weekday = weekdays(Date)) %>%
filter(Year %in% c(2015, 2016, 2017)) %>%
select(-`Inspection Date`)
#3. calculate # of violations per inspection for each restaurant
##grouping by Inspection ID because restaurants with the same name may have multiple locations
##Also documenting the presence of violations #1-14, which are identified as critical violations in the Chicago data dictionary
chicago <- chicago %>%
separate_rows(Violations, sep = "\\|") %>%
group_by(Inspection_ID) %>%
mutate(Number_Violations = n(),
violation_num = as.numeric(substr(Violations,1,3)),
flag = ifelse(violation_num < 15, 1, 0),
critical_flag = sum(flag)) %>%
select(Inspection_ID,
Name,
Restaurant_ID,
City,
Zip_code,
Date,
Year,
Month,
Weekday,
Inspection_Type,
Results,
Facility_Type,
Number_Violations,
critical_flag) %>%
unique()
#4. Change critical flag from the sum of critical violations to an indicator variable
chicago <- chicago %>%
group_by(Inspection_ID) %>%
mutate(critical_flag = ifelse(critical_flag > 0, 1,0)) %>%
ungroup()
#5. NAs didn't get changed to 0s above, so change them separately
chicago$critical_flag <- ifelse(is.na(chicago$critical_flag), 0, chicago$critical_flag)
#6. Create outcome variable- fail is 1
chicago$outcome <- ifelse(chicago$Results %in% "Pass", 0, 1)
#7. Standardize dataset
chicago <- chicago %>%
select(Restaurant_ID,
Name,
City,
Zip_code,
Date,
Year,
Month,
Weekday,
Inspection_Type,
Number_Violations,
critical_flag,
outcome)
#8. write final data to csv
write_csv(chicago, path = "data/pre-processed_chicago_final.csv")
|
/Code/preprocess_chicago.R
|
no_license
|
Jaejin-Kim/Restaurant_Inspection_Forecasting
|
R
| false | false | 3,243 |
r
|
##MDML Final Project
##Shannon Kay, Jaejin Kim, & Jessica Spencer
##December 13, 2019
##Preprocess Chicago Restaurant Data
##Load required packages and data
library(tidyverse)
chicago <- read_csv("../Food_Inspections.csv")
#1. Drop unnecessary columns and rename variables
chicago <- chicago %>%
select(-`DBA Name`,
-Latitude,
-Longitude,
-Location,
-Risk,
-Address,
-State) %>%
rename(Name = `AKA Name`,
Restaurant_ID = `License #`,
Inspection_Type = `Inspection Type`,
Inspection_ID = `Inspection ID`,
Zip_code = Zip)
#2. Drop rows with 'results' that are not relevant, filter 'facility type' and 'inspection type' to only have relevant types, create Date, Year, Month and Weekday, and filter to relevant years
chicago <- chicago %>%
filter(!Results %in% c("Out of Business", "Not Ready"))%>%
mutate(Facility_Type = tolower(`Facility Type`),
Inspection_Type = tolower(Inspection_Type))%>%
select(-`Facility Type`) %>%
filter(Facility_Type %in% c("bakery", "cafe", "restaurant", "tavern", "deli", "ice cream", "paleteria"),
Inspection_Type %in% c("canvass", "complaint", "license", "suspected food poisoning")) %>%
mutate(Date = lubridate::mdy(`Inspection Date`),
Year = lubridate::year(Date),
Month = lubridate::month(Date),
Weekday = weekdays(Date)) %>%
filter(Year %in% c(2015, 2016, 2017)) %>%
select(-`Inspection Date`)
#3. calculate # of violations per inspection for each restaurant
##grouping by Inspection ID because restaurants with the same name may have multiple locations
##Also documenting the presence of violations #1-14, which are identified as critical violations in the Chicago data dictionary
chicago <- chicago %>%
separate_rows(Violations, sep = "\\|") %>%
group_by(Inspection_ID) %>%
mutate(Number_Violations = n(),
violation_num = as.numeric(substr(Violations,1,3)),
flag = ifelse(violation_num < 15, 1, 0),
critical_flag = sum(flag)) %>%
select(Inspection_ID,
Name,
Restaurant_ID,
City,
Zip_code,
Date,
Year,
Month,
Weekday,
Inspection_Type,
Results,
Facility_Type,
Number_Violations,
critical_flag) %>%
unique()
#4. Change critical flag from the sum of critical violations to an indicator variable
chicago <- chicago %>%
group_by(Inspection_ID) %>%
mutate(critical_flag = ifelse(critical_flag > 0, 1,0)) %>%
ungroup()
#5. NAs didn't get changed to 0s above, so change them separately
chicago$critical_flag <- ifelse(is.na(chicago$critical_flag), 0, chicago$critical_flag)
#6. Create outcome variable- fail is 1
chicago$outcome <- ifelse(chicago$Results %in% "Pass", 0, 1)
#7. Standardize dataset
chicago <- chicago %>%
select(Restaurant_ID,
Name,
City,
Zip_code,
Date,
Year,
Month,
Weekday,
Inspection_Type,
Number_Violations,
critical_flag,
outcome)
#8. write final data to csv
write_csv(chicago, path = "data/pre-processed_chicago_final.csv")
|
library(shiny)
# library(ggplot2) # for the diamonds dataset
shinyUI(fluidPage(
title = 'Examples of DataTables',
titlePanel("The Paradox for Accuracy and Kappa Statistic"),
sidebarLayout(
sidebarPanel(
conditionalPanel(
'input.dataset === "Paradox"',
helpText('To see a demonstration, click the "Demo" tab.')
),
conditionalPanel(
'input.dataset === "Demo"',
helpText('Set the values of several parameters:'),
sliderInput("prevalence", "Prevalence (%):",
min = 0, max = 100, value = 70, step = 1),
br(),
sliderInput("sensitivity", "Sensitivity (%):",
min = 0, max = 100, value = 50, step = 1),
br(),
sliderInput("specificity", "Specificity (%):",
min = 0, max = 100, value = 50, step = 1),
br()
) # end of conditional panel
), # end of sidebarPanel
mainPanel(
tabsetPanel(
id = 'dataset',
tabPanel('Paradox',
h5('Introduction:'),
p('The result of a classification problem in machine learning
can be represented by
a confusion matrix. A confusion matrix is a fourfold table
showing the binary agreement between the classifier and the
true data labels (also known as the "gold standard").
As an example, the following table shows a confusion matrix,
where columns indicate the true outcomes from the gold
standard and rows indicate the classification results
from a machine learning algorithm.'),
tableOutput("myConfMatrixDemo"),
p('One typical performance measure to assess a classifier
is the accuracy, which calculates the proportion of the
concordant results among all records. Mathematically, it can
be formulated as:'),
p('P_obs = (TP + TN)/population'),
# h3(withMathJax('$$\\text{P}_{\\text{obs}} = \\frac{\\text{TP + TN}}
# {\\text{Total Population}}.$$')),
p('Usually, a high accuracy indicates a high concordance between the classifier
and the truth. However, in certain cases a high accuracy
                       may be due to the fact that the classifier agrees with the
                       truth just by chance. To adjust for this, in some research areas, particularly in
                       the medical field of diagnosis, researchers use the kappa statistic
to report the model performance. The advantage of the kappa
statistic is that it corrects the amount of agreement that
can be expected to occur by chance. To calculate the kappa statistic,
                       we first compute the expected agreement by chance:'),
p('P_exp = (TP + FN) * (TP + FP)/population^2 +
(FN + TN) * (FP + TN)/population^2'),
# h3(withMathJax('$$\\text{P}_{\\text{exp}} = \\frac{\\text{TP + FN}}
# {\\text{Total Population}} * \\frac{\\text{TP + FP}}
# {\\text{Total Population}} + \\frac{\\text{FN + TN}}
# {\\text{Total Population}} * \\frac{\\text{FP + TN}}
# {\\text{Total Population}}.$$')),
p('Then, the kappa statistic is defined by'),
p('kappa = (p_obs - p_exp)/(1 - p_exp)'),
# h3(withMathJax('$$\\kappa = \\frac{\\text{P}_{\\text{obs}} - \\text{P}_{\\text{exp}}}
# {1 - \\text{P}_{\\text{exp}}}.$$')),
                       p('The kappa statistic takes values between -1 and 1, where a kappa of 0
                       indicates agreement equivalent to chance, a kappa close to 1
                       indicates strong agreement, and a kappa close to -1 indicates strong
                       disagreement. Since the kappa statistic is a chance-corrected version of accuracy, a first
                       reaction is that kappa and accuracy should follow a similar trend on each dataset.
                       However, in many real cases we find that a high accuracy can sometimes
                       be associated with a kappa statistic close to 0. This phenomenon is
                       more likely to happen when the dataset has a strongly disproportionate
                       prevalence (i.e., the original data has a very low percentage of
                       positive cases, or vice versa). In general, for data with a
                       significantly disproportionate prevalence,
                       a low kappa value may not necessarily reflect low rates of
                       overall agreement.')
), # end of tabPanel
tabPanel('Demo',
h5('Instruction:'),
                       p('This demonstration shows how accuracy and the kappa statistic
                       behave for data with different distributions.
Without loss of generality, we set the data size to be 10,000.
There are three tuning parameters in this demonstration - prevalence,
sensitivity, and specificity. Prevalence is the true percent of
                       positive (yes) cases. Sensitivity is the percent of correctly
identified records among all positive (yes) cases. Specificity
is the percent of correctly identified records among all
negative (no) cases. One can customize these parameters to
illustrate outcomes in different scenarios.'),
br(),
                      p('First, one could choose a relatively balanced
                        prevalence with high sensitivity and specificity,
                        then gradually reduce the prevalence and check
                        its impact on the kappa statistic and the accuracy.'),
br(),
h5('Results:'),
p('The confusion matrix is (Column = the truth, or gold standard,
Row = outcomes by the classifier):'),
tableOutput("myConfMatrix"),
br(),
p('The kappa statistic for the confusion matrix is:'),
textOutput("myKappa"),
br(),
p('The accuracy for the confusion matrix is:'),
textOutput("myAccuracy")
) # end of tabPanel
) # end of tabsetPanel
) # end of mainPanel
) # end of sidebarLayout
) # end of fluidPage
) # end of shinyUI
|
/ui.r
|
no_license
|
firefreezing/developing-data-products
|
R
| false | false | 7,347 |
r
|
|
# increase console log limit
options(max.print=1000000)
rm(list = ls())
library(broom)
library(dplyr)
library(foreach)
library(car)
library(Hmisc)
library(survey)
library(mfx)
library(margins)
library(hash)
# library(stargazer)
library(testthat)
library(crayon)
library(readxl)
library(jsonlite)
# library("xlsx") No need anymore xls and xlsx have hard limit on max umber of chars in a cell...
# Run R.version and if you see x86_64 you need to install Java 64 bit
# https://java.com/en/download/manual.jsp
`%notin%` <- Negate(`%in%`)
current_dir_path = dirname(rstudioapi::getActiveDocumentContext()$path)
setwd(current_dir_path)
source('EWAS_analysis_base_functions.R')
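# The helper functions used throughout this script (boxcox_trans_return_lambda,
# logit_trans, is_binary_or_categorical_var, round_df, ...) are presumably defined
# in EWAS_analysis_base_functions.R. A rough sketch of what the two transformation
# helpers are assumed to do (illustration only, not the actual definitions; the
# _sketch suffix and the if (FALSE) guard keep the sourced functions untouched):
if (FALSE) {
  boxcox_trans_return_lambda_sketch <- function(df, var_name) {
    x <- df[[var_name]] - min(df[[var_name]], na.rm = TRUE) + 1e-6  # shift to strictly positive
    lambdas <- seq(-2, 2, by = 0.1)
    loglik <- sapply(lambdas, function(l) {
      y <- if (abs(l) < 1e-8) log(x) else (x^l - 1) / l
      n <- sum(!is.na(y))
      -n / 2 * log(stats::var(y, na.rm = TRUE)) + (l - 1) * sum(log(x), na.rm = TRUE)
    })
    lambda <- lambdas[which.max(loglik)]
    out <- if (abs(lambda) < 1e-8) log(x) else (x^lambda - 1) / lambda
    list(out = as.matrix(out), lambda = lambda)  # matches the $out / $lambda usage below
  }
  logit_trans_sketch <- function(p) log(p / (1 - p))  # assumes p strictly between 0 and 1
}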
######################################################################
########### Settings 1 ############
######################################################################
# This will load all independent variables from Patel's dataset
source('EWAS_analysis_Patel_variables.R')
only_work_on_selected_vars <- TRUE
# Select which nutrient panel the analysis works on
nut_panel = c('12', '58')[2]
dir_reg_analysis <- c(
paste0('caloric_intake_PSJ1', '_', nut_panel, '_nuts'),
paste0('caloric_intake_PSJ1', '_', nut_panel, '_nuts_temp'),
paste0('caloric_intake_PSJ1', '_', nut_panel, '_nuts_y234')
#
)[1]
cat('Path to reg analysis:', bold(dir_reg_analysis), '\n')
survey_year <- 'all'
#### +-+-+-+- IMPORTANT: if set to 1 it WILL NOT RUN regressions
generate_desciptive_statistics <- 0
debug_run <- TRUE
# log <- TRUE
# survey_year_code <- 4
# var <- 'LBXV1A' # Patel marked this is binary. var_desc: Blood 1,1-Dichloroethane (ng/mL)
# var <- 'LBXGLU' # 'PHAFSTHR'
# is_binary_or_categorical_var(var, df, survey_year_code, TRUE)
##########################################
# Select Diet Data Here
##########################################
# path_diet_data = paste0('all_diet_data_1999_2006_',nut_panel,'_nuts_Processing index J1.csv')
# path_diet_data = paste0('all_diet_data_1999_2006_',nut_panel,'_nuts_single_and_ensemble_FPro.csv')
# path_diet_data <- 'all_diet_data_1999_2006_58_nuts_ens_FPS.csv'
path_diet_data <- 'input_data/all_diet_data_1999_2006_58_nuts_single_and_ensemble_FPro.csv'
# path_diet_data <- 'all_diet_data_1999_2018_58_nuts_single_and_ensemble_FPro.csv'
# path_diet_data <- 'all_diet_data_1999_2006_58_nuts_single_and_ensemble_FPro__FNDDS9906_C2009.csv'
nhanesCCNR <- read.csv(path_diet_data)
# table(nhanesCCNR$metabolic.syndrome.examination.and.drug, exclude = NULL)
cat(bold('Diet Data File Name: ', current_dir_path, '/', path_diet_data, sep=''), '\n')
load('input_data/nh_99-06.Rdata')
# we added custom vars like t2d so read it from here
VarDescription <- read_excel('input_data/EWAS_VarDescription.xlsx')
VarDescription <- VarDescription %>% mutate_if(is.character, list(~na_if(.,"NA")))
if (FALSE){
  # Note: these two are not equal because `&` is vectorized (element-wise),
  # while `&&` only evaluates the first element of each condition, so the
  # second expression does not behave like a row filter.
VarDescription[(VarDescription$var == 'LBXV1A') & (VarDescription$series_num == 3), ]
VarDescription[(VarDescription$var == 'LBXV1A') && (VarDescription$series_num == 3), ] # RETURN EMPTY!!!!
}
if (only_work_on_selected_vars == TRUE){
selected_vars_CCNR <- read_excel("input_data/EWAS_exposome_CCNR_selection_modules.xlsx")
selected_vars_CCNR <- selected_vars_CCNR %>%
dplyr::filter(CCNR_selected == 1)
resp_vars_to_work_on <- unique(c(
response_vars$custom_variables_by_CCNR,
selected_vars_CCNR$var
))
} else{
# Run regressions on all variable (both custom CCNR and Patel)
resp_vars_to_work_on <- unique(VarDescription$var)
}
total_independend_vars <- length(resp_vars_to_work_on)
MainTable <- merge(x = MainTable,
y = nhanesCCNR[ , c(
"SEQN",
'num_unique_dishes',
'metabolic.syndrome.only.examination', 'metabolic.syndrome.examination.and.drug',
'LBXACR_lab_detectable', 'LBXGLY_lab_detectable',
# 'framingham_risk_10_years', THIS IS THE PYTHON BUT THE R VERSION IS MORE RELIABLE
'ascvd_10y_accaha_lab', 'ascvd_10y_frs_lab', 'ascvd_10y_frs_simple_lab',
"Total.calories.consumed.mean.both.days",
"HEI2015_TOTAL_SCORE",
"FPro.RW.WFDPI.mean.of.both.days.sum",
"FPro.WFDPI.mean.of.both.days.sum",
"FPro.WCDPI.mean.of.both.days.sum",
"ens_FPro.WFDPI.mean.of.both.days.sum",
"ens_FPro.RW.WFDPI.mean.of.both.days.sum",
"ens_FPro.WCDPI.mean.of.both.days.sum",
"ens_min_FPro.WFDPI.mean.of.both.days.sum",
"ens_min_FPro.RW.WFDPI.mean.of.both.days.sum",
"ens_min_FPro.WCDPI.mean.of.both.days.sum",
# "predNOVA4.consumption.kcal.percentage.over.sum.both.days",
# "predNOVA4.consumption.RW.grams.percentage.over.sum.both.days",
# "predNOVA4.consumption.grams.percentage.over.sum.both.days"
"manualNOVA4.consumption.kcal.percentage.over.sum.both.days"
)],
by = "SEQN")
nrow(MainTable)
####################################################################
# Custom vars by CCNR
####################################################################
MainTable$t2d <- I(MainTable$LBXGLU >= 126)
MainTable$metabolic_syndrome_examination <- MainTable$metabolic.syndrome.only.examination
MainTable$metabolic_syndrome <- MainTable$metabolic.syndrome.examination.and.drug
# keep age in its current form because it will be normalized
MainTable$age <- MainTable$RIDAGEYR
if (survey_year == 'all') {
######
## Create sample weights for 8 years based on
## https://wwwn.cdc.gov/nchs/nhanes/tutorials/module3.aspx
####
MainTable[MainTable$SDDSRVYR == 1, 'WTMEC8YR'] <- MainTable[
MainTable$SDDSRVYR == 1, 'WTMEC4YR'] * (2 / 4)
MainTable[MainTable$SDDSRVYR == 2, 'WTMEC8YR'] <- MainTable[
MainTable$SDDSRVYR == 2, 'WTMEC4YR'] * (2 / 4)
MainTable[MainTable$SDDSRVYR == 3, 'WTMEC8YR'] <- MainTable[
MainTable$SDDSRVYR == 3, 'WTMEC2YR'] * (1 / 4)
MainTable[MainTable$SDDSRVYR == 4, 'WTMEC8YR'] <- MainTable[
MainTable$SDDSRVYR == 4, 'WTMEC2YR'] * (1 / 4)
#dat <- subset(MainTable2, SDDSRVYR < 5 & age >= 18)
cat(bold('Number of rows with weight=0 that will be removed:'),
nrow(MainTable[MainTable$WTMEC8YR == 0, ]), '\n')
nhanesDesign <- svydesign(id = ~SDMVPSU,
strata = ~SDMVSTRA,
weights = ~WTMEC8YR, # Use 8 year weights
nest =T,
data = MainTable
)
# nrow(nhanesDesign)
# svymean(~age, nhanesDesign, ci=FALSE)
#svyby(~age, ~age > 0, design=nhanesDesign, FUN=svymean, ci=TRUE)
sink(paste0("output_console/", dir_reg_analysis, "/R_svydesign_FULL_nhanes.txt")) # Store summary of svydesign
print(summary(nhanesDesign))
sink() # returns output to the console
#### Backup raw ALL DATA
if (debug_run == TRUE) {
path_tmp <- paste0('output_console/', dir_reg_analysis,
'/nhanesDesign_RAW_ALL_dataset_', dir_reg_analysis, '_cohort_',
survey_year, '.csv')
write.csv(nhanesDesign$variables, path_tmp)
cat('Saved RAW ALL Data at: ', bold(path_tmp), '\n')
}
####
#####################
# CORRECT WAY TO SUBSET survey data is
# https://static-bcrf.biochem.wisc.edu/courses/Tabular-data-analysis-with-R-and-Tidyverse/book/12-usingNHANESweights.html
# https://r-survey.r-forge.r-project.org/survey/html/subset.survey.design.html
#####################
ageDesign <- subset(nhanesDesign,
age >= 18 &
WTMEC8YR > 0 &
ens_FPro.WFDPI.mean.of.both.days.sum > 0
)
nrow(ageDesign$variables)
svymean(~age, ageDesign, ci=TRUE)
sink(paste0("output_console/", dir_reg_analysis, "/R_svydesign_ageDesign_nhanes.txt")) # Store summary of svydesign
print(summary(ageDesign))
sink() # returns output to the console
}
######################################################################
######### End Settings 1 ##########
######################################################################
#DEL EM
if (FALSE){
svyhist(~manualNOVA4.consumption.kcal.percentage.over.sum.both.days, nhanesDesign)
svymean(~manualNOVA4.consumption.kcal.percentage.over.sum.both.days, nhanesDesign,
na.rm=TRUE)
svyhist(~manualNOVA4.consumption.kcal.percentage.over.sum.both.days,
nhanesDesign)
svyhist(~logit_trans(manualNOVA4.consumption.kcal.percentage.over.sum.both.days),
nhanesDesign)
svyhist(~ens_FPro.WCDPI.mean.of.both.days.sum,
nhanesDesign)
svyhist(~logit_trans(ens_FPro.WCDPI.mean.of.both.days.sum),
nhanesDesign)
box_cox_out = boxcox_trans_return_lambda(
ageDesign$variables, 'ens_FPro.RW.WFDPI.mean.of.both.days.sum'
)
ageDesign$variables$ens_FPro.RW.WFDPI.mean.of.both.days.sum.boxcox = box_cox_out$out
print(paste('lambda for ens_FPro.RW.WFDPI.mean.of.both.days.sum', box_cox_out$lambda))
svyhist(~ens_FPro.RW.WFDPI.mean.of.both.days.sum,
ageDesign)
svyhist(~ens_FPro.RW.WFDPI.mean.of.both.days.sum.boxcox,
ageDesign)
svyhist(~logit_trans(ens_FPro.RW.WFDPI.mean.of.both.days.sum),
ageDesign)
svyhist(~manualNOVA4.consumption.kcal.percentage.over.sum.both.days, ageDesign)
svymean(~manualNOVA4.consumption.kcal.percentage.over.sum.both.days, ageDesign,
na.rm=TRUE)
}
######################################################################
########### Settings 2 ############
######################################################################
covar <- c(
'FPro.WFDPI.mean.of.both.days.sum', # Diet Processing Score Gram Weighted
'FPro.RW.WFDPI.mean.of.both.days.sum', # Removed Water - Diet Processing Score Gram Weighted
'FPro.WCDPI.mean.of.both.days.sum', # Diet Processing Score Calorie Weighted
"ens_FPro.WFDPI.mean.of.both.days.sum",
"ens_FPro.RW.WFDPI.mean.of.both.days.sum",
"ens_FPro.WCDPI.mean.of.both.days.sum",
"ens_min_FPro.WFDPI.mean.of.both.days.sum",
"ens_min_FPro.RW.WFDPI.mean.of.both.days.sum",
"ens_min_FPro.WCDPI.mean.of.both.days.sum",
'HEI2015_TOTAL_SCORE',
#'predNOVA4.consumption.kcal.percentage.over.sum.both.days',
#'NOVA4.consumption.grams.percentage.over.sum.both.days',
#'NOVA4.consumption.RW.grams.percentage.over.sum.both.days'
'manualNOVA4.consumption.kcal.percentage.over.sum.both.days'
)
logit_transform_vars <- c(
# 'framingham_risk_10_years',
'ascvd_10y_accaha_lab', 'ascvd_10y_frs_lab', 'ascvd_10y_frs_simple_lab'
)
# Adjusting vars
# 'male', 'other_eth' are not added because of singularities
adj <- c('BMXBMI', 'RIDAGEYR', 'female',
'INDFMPIR', #poverty income ratio
'white', 'black', 'mexican', 'other_hispanic'
,'Total.calories.consumed.mean.both.days',
         'current_past_smoking' # 0 = never smoked, 1 = past smoker, 2 = current smoker; NA/none when status cannot be identified
)
# Make sure adjusting vars wont be used as respone variable,
# it can happen for BMXBMI. Also, use this to ignore a response var if needed!
ignore_resp_vars <- c(adj)
resp_vars_to_work_on <- resp_vars_to_work_on[resp_vars_to_work_on %notin% ignore_resp_vars]
# These variables will be transformed AT MODEL LEVEL.
boxcox_vars <- c(
covar, 'BMXBMI', 'RIDAGEYR',
'INDFMPIR' # 'INDFMPIR' is poverty ratio
)
for (patel_tab in keys(response_vars)){
for(patel_var in response_vars[[patel_tab]]){
if (patel_var %in% logit_transform_vars){
next
}
if(is_binary_or_categorical_var(patel_var, ageDesign$variables, 'all', TRUE) == 0){
# Only work on selected variables!
if (patel_var %in% resp_vars_to_work_on){
boxcox_vars <- c(boxcox_vars, patel_var)
}
} else{
cat(blue("Is Binary: ", patel_var), "\n")
}
}
}
boxcox_vars <- unique(boxcox_vars)
# If you want to avoid running all tabs in keys(response_vars),
# you can use this variable to run a selected few, otherwise set it empty.
only_work_on_tabs <- c('Heavy_Metals', 'Any_Disease', 'custom_variables_by_CCNR',
'Pharmaceutical Use', 'Blood_Pressure')
only_work_on_tabs <- c('C_Reactive_Protein', 'Environmental_phenols',
'Total_Cholesterol', 'Urinary_Albumin_and_Creatinine')
only_work_on_tabs <- c('Vitamin_A_E_and_Carotenoids', 'Melamine_Surplus_Urine')
if (TRUE) {only_work_on_tabs <- c()}
######################################################################
######### End Settings 2 ##########
######################################################################
print(paste(
"Number of non-binary vars to be tranformed by BoxCox (at model level): ",
length(boxcox_vars)))
# Apply z transformation on these vars
scale_vars <- unique(c(boxcox_vars, logit_transform_vars))
print(paste(
"Number of non-binary vars to be centered by Z-transformation",
"(at model level after BoxCox or logit transformation): ",
length(scale_vars)))
##################################
# Backup ageDesign data
if (TRUE && debug_run == TRUE) {
path_tmp <- paste0('output_console/', dir_reg_analysis,
'/ageDesign_dataset_', dir_reg_analysis, '_cohort_',
survey_year, '.csv')
write.csv(ageDesign$variables, path_tmp)
cat('Saved ageDesign dataset at: ', bold(path_tmp), '\n')
}
####################################################################
################### Analyze (Run Regressions)#######################
####################################################################
# Check you dont get empty subset
cat(bold('----------------- Year: '), survey_year,
bold(' Subjects {nrow(ageDesign)}: '), nrow(ageDesign), '\n')
table(ageDesign$variables$current_past_smoking)
sum(is.na(ageDesign$variables$current_past_smoking))
resp_var_done_regression <- c()
boxcox_lambda_df <- data.frame(matrix(ncol = 3))
colnames(boxcox_lambda_df) <- c(
'resp_var', 'var', 'lambda')
boxcox_lambda_i <- 1
j = 0
time_start_regs <- Sys.time()
#module_file_name <- keys(response_vars)[1]
#module_file_name <- 'custom_variables_by_CCNR'
#module_file_name <- 'Blood_Pressure'
#module_file_name <- 'Total_Cholesterol'
#module_file_name <- only_work_on_tabs[2]
for (module_file_name in keys(response_vars)) {
skip = FALSE
if(length(only_work_on_tabs) > 0){
skip = TRUE
if (module_file_name %in% only_work_on_tabs){
skip = FALSE
}
}
if (skip == TRUE) { next }
file_name <- module_file_name
cat(bold("\n\n**********WORKING ON TAB:", file_name, ' & year: ',
survey_year, ' **********'), '\n')
response_vars_tab <- response_vars[[module_file_name]]
#########
out_df <- data.frame(matrix(ncol = 16))
colnames(out_df) <- c(
'resp_var', 'resp_var_type', 'N', 'NA_count',
'covariate', 'reg_family', 'num_covars',
'unique_val_counts', 'value_counts',
'coef','std_error', 't_value', 'p_val',
'dispersion', 'coefficients', 'summary')
i <- 1
# resp_var <- c('LBXTHG', 'prostate_cancer_self_report')[2] #DELME !!
# resp_var <- response_vars_tab[3]
for (resp_var in response_vars_tab){
# Only work on the selected variables
if (resp_var %notin% resp_vars_to_work_on){ next; }
###############
#Do not repeat regressions for a variable
###############
if(TRUE){
if (resp_var %in% resp_var_done_regression){
        cat(bold(blue('Already done regressions for response variable')),
bold(resp_var), '\n')
next;
}
resp_var_done_regression <- c(resp_var_done_regression, resp_var)
}
##########################################
phenotypeDesign <- subset(ageDesign,
is.na(ageDesign$variables[[resp_var]]) == FALSE &
is.na(INDFMPIR) == FALSE
)
# nrow(phenotypeDesign)
resp_var_subset = data.table::copy(phenotypeDesign$variables)
cat(bold(
'\n+++++++++[STATS] Response Var:', resp_var, '| Num Subjects:' ,
nrow(phenotypeDesign)
), blue(
      '\nAFTER REMOVING subjects with NA socio-economic status (INDFMPIR):',
red(
nrow(ageDesign$variables %>%
filter(!is.na(ageDesign$variables[[resp_var]]) & is.na(INDFMPIR)))
)
), '+++++++++\n\n')
################################################
## Transformations for this model
################################################
reg_all_vars = c(resp_var, covar, adj)
#var_tmp <- reg_all_vars[1]
for (var_tmp in reg_all_vars) {
if (var_tmp %in% boxcox_vars){
tryCatch(
{
boxcox_trans_out <- boxcox_trans_return_lambda(
phenotypeDesign$variables, var_tmp
)
phenotypeDesign$variables[[var_tmp]] <- boxcox_trans_out$out[,1]
boxcox_lambda_df[boxcox_lambda_i, 'resp_var'] <- resp_var
boxcox_lambda_df[boxcox_lambda_i, 'var'] <- var_tmp
boxcox_lambda_df[boxcox_lambda_i, 'lambda'] <- boxcox_trans_out$lambda
boxcox_lambda_i <- boxcox_lambda_i + 1
            cat(bold('[Transform BoxCox] '), 'on var:', blue(var_tmp),
'lambda', boxcox_trans_out$lambda, '\n')
},
error=function(error_message) {
# message(error_message)
cat(red(bold(
"!!! BoxCox Failed !!! VarName:", var_tmp
))
# , 'error_message:', error_message
, '\n'
)
cat(red("This variable might be empty; length(unique(", var_tmp, "))=",
length(unique(phenotypeDesign$variables[[var_tmp]]))
), ';\n')
return(NA)
}
)
}
}
for (var_tmp in reg_all_vars) {
if (var_tmp %in% logit_transform_vars){
tryCatch(
{
phenotypeDesign$variables[[var_tmp]] <- logit_trans(
phenotypeDesign$variables[[var_tmp]]
)
            cat(bold('[Transform Logit] '), 'on var:', blue(var_tmp), '\n')
},
error=function(error_message) {
message(paste("!!! logit_trans Failed !!! VarName: ", var_tmp))
cat(red("This variable might be empty: unique(", var_tmp, ")=",
unique(phenotypeDesign$variables[[var_tmp]])), '\n')
message(error_message)
return(NA)
}
)
}
}
for (var_tmp in reg_all_vars) {
if (var_tmp %in% scale_vars){
tryCatch(
{
phenotypeDesign$variables[[var_tmp]] <- scale(
phenotypeDesign$variables[[var_tmp]], center = TRUE, scale = TRUE
)
            cat(bold('[Transform Scale] '), 'on var:', blue(var_tmp), '\n')
},
error=function(error_message){
message(paste("!!! Z-Transformation Failed !!! VarName: ", var_tmp))
cat(red("This variable might be empty: unique(", var_tmp, ")=", unique(
MainTable_subset[[var_tmp]])), '\n')
message(error_message)
return(NA)
}
)
}
}
################################################
################################################
################################################
# cov_ <- covar[1]
for (cov_ in covar){
out_df[i, 'resp_var'] <- resp_var
out_df[i, 'N'] <- nrow(phenotypeDesign)
out_df[i, 'NA_count'] <- nrow(
ageDesign$variables[is.na(ageDesign$variables[[resp_var]]), ]
)
out_df[i, 'covariate'] <- cov_
out_df[i, 'unique_val_counts'] <- length(unique(phenotypeDesign$variables[[resp_var]]))
# Check if an adjusting variable is binary convert it to factor
adj_vars_prepped = c()
# adj_var <- adj[1]
for(adj_var in adj) {
adj_var_type <- is_binary_or_categorical_var(adj_var, resp_var_subset, survey_year, FALSE)
# print(paste(adj_var, adj_var_type))
if (adj_var_type > 0){
##########################################
# TODO MAYBE filter a covar if it has not enough levels.
# adj_var_length <- length(unique(phenotypeDesign$variables[[adj_var]]))
# in other words, put condition on 'adj_var_length'
##########################################
if(length(unique(phenotypeDesign$variables[[adj_var]])) > 1 ){
adj_vars_prepped <- c(adj_vars_prepped, paste0('factor(', adj_var, ')'))
} else {
cat(bold('!!! Adjusting var "', adj_var,
'" removed because not enough levels to be factored.'), '\n')
}
}else{
adj_vars_prepped <- c(adj_vars_prepped, adj_var)
}
}
######
# Check if independent variable is binary, convert it to factor.
      # Use resp_var_subset (a copy of this model's data) to assess the variable type
######
resp_var_type <- is_binary_or_categorical_var(resp_var, resp_var_subset, survey_year, TRUE)
out_df[i, 'resp_var_type'] <- resp_var_type
if (resp_var_type > 0){
doForm <- as.formula(paste0(
'factor(', resp_var, ')', '~', paste(c(cov_, adj_vars_prepped), collapse = '+')
))
##############
value_counts <- as.data.frame(table(phenotypeDesign$variables[[resp_var]]))
names(value_counts) <- substring(names(value_counts), first = 1, last = 1)
value_counts <- value_counts[order(-value_counts$F),]
out_df[i, 'value_counts'] <- capture_output(toJSON(value_counts), width=800, print=TRUE)
} else {
doForm <- as.formula(paste(resp_var, '~', paste(c(cov_, adj_vars_prepped), collapse = '+')))
############## Store value count for numerical variables as well
value_counts <- as.data.frame(table(phenotypeDesign$variables[[resp_var]]))
names(value_counts) <- substring(names(value_counts), first = 1, last = 1)
value_counts <- value_counts[order(-value_counts$F),]
out_df[i, 'value_counts'] <- capture_output(toJSON(value_counts), width=800, print=TRUE)
}
out_df[i, 'num_covars'] <- length(adj_vars_prepped) + 1
print(doForm)
reg_family = gaussian()
if(resp_var_type > 0){
reg_family = quasibinomial(link = logit)
}
out_df[i, 'reg_family'] <- trimws(capture_output(reg_family, width=800, print=TRUE))
tryCatch(
{
reg <- svyglm(formula = doForm , design=phenotypeDesign, family=reg_family)
reg_sum <- summary(reg)
out_df[i, 'coef'] <- reg_sum$coefficients[2,][1]
out_df[i, 'std_error'] <- reg_sum$coefficients[2,][2]
out_df[i, 't_value'] <- reg_sum$coefficients[2,][3]
out_df[i, 'p_val'] <- reg_sum$coefficients[2,][4]
last_reg_output <- paste(
capture_output(doForm, width=800, print=TRUE),
capture_output(reg_sum, width = 800, print=TRUE),
sep = "\n"
)
# Save all output of regression
out_df[i, 'summary'] <- last_reg_output
############# Save Coef ############
out_df[i, 'coefficients'] <- toJSON(
as.data.frame(reg_sum$coefficients),
digits=10
)
out_df[i, 'dispersion'] <- reg_sum$dispersion
},
error=function(error_message) {
message(paste("!!! ERROR !!!!"))
cat(red(bold(error_message)))
out_df[i, 'summary'] <- paste(error_message, sep = "\n")
return(NA)
}
)
i <- i + 1
j <- j + 1
if (j %% 10 == 0){
cat(bold(blue(
#round(j/(total_independend_vars * length(covar)), 3) * 100,
round(j/(1577 * length(covar)), 3) * 100, ## see below comments why I used 1577!
'% of regressions (',
(total_independend_vars * length(covar)),
'total) completed from survey year ', survey_year , '...\n'
)))
}
}
}
out_df$sig <- out_df$p_val <= 0.05
round_df(out_df, 3)
write.csv(out_df, paste0('output_console/', dir_reg_analysis, '/',
survey_year ,'/reg_analysis_boxcox_', file_name , '.csv'))
print(paste0('output_console/', dir_reg_analysis, '/',
survey_year ,'/reg_analysis_boxcox_', file_name , '.csv'))
}
cat('########## DONE REGRESSIONS ##############\n')
path_lambda_boxcox <- paste0('output_console/', dir_reg_analysis,
'/ageDesign_lambda_boxcox_cohort_', survey_year, '.csv')
cat(bold('EXPORT Lambda Box Cox --> ', path_lambda_boxcox), '\n')
write.csv(boxcox_lambda_df, path_lambda_boxcox)
cat('Regs started:', format(time_start_regs), 'and ended:',
format(Sys.time())
)
|
/EWAS_survey_regression_on_NHANES_1999_2006.R
|
no_license
|
menicgiulia/MLFoodProcessing
|
R
| false | false | 25,186 |
r
|
|
## Analysis of SNA
#
#========================================================
# ---
# ## title: Analysis of social network analysis metrics
# author: Marie Gilbertson
# date: "01/03/2019"
#---
# ## Preamble
#
# What this code does:
# 1. Calculates ranked correlation for node-level SNA metrics between complete and sample networks
# Note: correlations use only the individuals from the complete network that appear in the corresponding sample network;
# therefore, networks of the same size are always compared.
# 2. Exports correlation files for each node-level SNA metric for each simulation
# 3. Combines complete and sample metrics for network-level metrics into one dataset and exports (to be
# plotted later)
#
# Social network analysis metrics: Degree, Strength, Betweenness, Transitivity,
# Density, Proportion Isolates, Modularity
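#
# As a minimal sketch of the node-level comparison idea used below (illustration
# only; the object names and values here are hypothetical, the real metric files
# are loaded inside the loops):
#   ids <- sort(sample(1:100, 50)) # individuals retained in a sample network
#   complete.deg <- data.frame(id = 1:100, Deg = rpois(100, 5)) # degree in the complete network
#   sample.deg <- data.frame(id = ids, Deg = rpois(50, 4)) # degree re-measured in the sample network
#   match.deg <- complete.deg[complete.deg$id %in% sample.deg$id, ] # same individuals, same id order
#   cor(sample.deg$Deg, match.deg$Deg, method = "spearman") # ranked (Spearman) correlation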
##### Clear Environment
remove(list=ls())
#### Load R libraries
library(igraph)
library(dplyr) # for left_join()
library(beepr)
library(ggplot2)
#### Set simulations to analyze
# the following are for setting up reading in files and looping through different simulation variations
# the following may therefore vary pending file naming system
nsims <- 500
start.type <- "Random Start" # set starting location type. Options are: "Random Start", "Lattice Start", or "Cluster Start"
h.type <- "H15" # set step length distribution. Options are: "H15", "H34", "H60", "SAC1", "SAC3", "SAC4"
# can compare complete and sample network metrics with different contact definitions
comp.cont.type <- "100m" # set contact threshold type for the COMPLETE network. Options are: "100m", "10m", or "1m"
samp.cont.type <- "100m" # set contact threshold type for the SAMPLE networks to compare to the complete network. Options are: "100m", "10m", or "1m"
### function for only keeping KDE results from q24h and q72h individual sampling levels
fix.KDE <- function(metric.data){
s.t <- subset(metric.data, metric.data$contact.type=="space-time")
kde <- subset(metric.data, metric.data$contact.type=="KDE UDOI")
kde <- subset(kde, kde$ind.sample=="q24h" | kde$ind.sample=="q72h")
new.data <- rbind(s.t, kde)
return(new.data)
}
############ Calculate ranked correlation for each node-level metric ##############
# set node-level metrics and sampling levels to analyze
nl.metrics <- c("Deg", "Str", "Btw", "Clust")
ind.sample <- c("q1m", "q15m", "q60m", "q3h", "q12h", "q24h", "q72h")
pop.sample <- seq(100, 10, -10)
contact.type <- c("space-time", "KDE UDOI")
# Loop through by simulation number
for(i in 1:nsims){
#### Set simulation number
print(paste("Simulation", i, sep = " "))
sim <- i
# loop through different node-level SNA metrics
for(j in 1:length(nl.metrics)){
metric <- nl.metrics[j]
# read in complete network data for given metric
# complete.name <- paste(<insert naming structure>, ".Rdata", sep = "")
complete.metric <- get(load(file = complete.name))
# read in sampled network data for given metric
# sample.name <- paste(<insert naming structure>, ".Rdata", sep = "")
sample.metric <- get(load(file = sample.name))
# set up empty object to store results
# uses dynamic allocation which is less efficient but functional for these purposes
full.cor <- NULL
# loop through different sampling levels for given metric and calculate the ranked correlation
for(q in 1:length(contact.type)){
ct <- contact.type[q]
for(r in 1:length(pop.sample)){
ps <- pop.sample[r]
for(s in 1:length(ind.sample)){
is <- ind.sample[s]
samp.met_temp <- subset(sample.metric, sample.metric$ind.sample==is & sample.metric$pop.sample==ps & sample.metric$contact.type==ct)
# pull out the metric values from the full network that match the ids of sampled individuals
match_cmp.met <- complete.metric[complete.metric$id %in% samp.met_temp$id,]
# calculate ranked correlation coefficient between the sampled metric calculation and the metric for those individuals from the complete network
smp.cor <- data.frame(cor(samp.met_temp[,2], match_cmp.met[,2], method = "spearman"))
colnames(smp.cor) <- "cor"
# add sampling info for tracking purposes
smp.cor$ind.sample <- is
smp.cor$pop.sample <- ps
smp.cor$contact.type <- ct
smp.cor$metric <- metric
# save for next round
full.cor <- rbind(full.cor, smp.cor)
}
}
}
# only keep KDE results for q24h and q72h
full.cor <- fix.KDE(full.cor)
# add sim.num for tracking purposes
full.cor$sim.num <- sim
# save correlation data
# cor.name <- paste(<insert naming structure>, ".Rdata", sep = "")
save(full.cor, file = cor.name)
}
}
beep(4)
########### Combine complete and sample metrics for all network-level metrics ##############
# set network-level metrics and sampling levels to analyze
nw.metrics <- c("Dens", "Iso", "Mod")
ind.sample <- c("q1m", "q15m", "q60m", "q3h", "q12h", "q24h", "q72h")
pop.sample <- seq(100, 10, -10)
contact.type <- c("space-time", "KDE UDOI")
# Loop through by simulation number
for(i in 1:nsims){
#### Set simulation number
print(paste("Simulation", i, sep = " "))
sim <- i
# loop through different network-level SNA metrics
for(j in 1:length(nw.metrics)){
metric <- nw.metrics[j]
# read in complete network data for given metric
    # complete.name <- paste(<insert naming structure>, sep = "")
complete.metric <- get(load(file = complete.name))
# read in sampled network data for given metric
    # sample.name <- paste(<insert naming structure>, ".Rdata", sep = "")
sample.metric <- get(load(file = sample.name))
if(metric!="Mod"){
# add metric value for the complete network to the sample dataset
sample.metric$complete <- complete.metric
# reorder columns for ease of assessment
sample.metric <- sample.metric[,c(5, 1:4)]
# add simulation number for tracking purposes
sample.metric$sim.num <- sim
}else{
# modularity has results for several metrics, so needs to be assessed differently
colnames(complete.metric) <- paste("c.", colnames(complete.metric), sep="")
# add metric value for the complete network to the sample dataset
sample.metric <- cbind(sample.metric, complete.metric)
# add simulation number for tracking purposes
sample.metric$sim.num <- sim
}
# save combined data
# conc.name <- paste(<insert naming structure>, ".Rdata", sep = "")
save(sample.metric, file = conc.name)
}
}
beep(4)
|
/Analysis_of_SNA.R
|
no_license
|
mjones029/Telemetry_Network_Simulations
|
R
| false | false | 6,677 |
r
|
## Analysis of SNA
#
#========================================================
# ---
# ## title: Analysis of social network analysis metrics
# author: Marie Gilbertson
# date: "01/03/2019"
#---
# ## Preamble
#
# What this code does:
# 1. Calculates ranked correlation for node-level SNA metrics between complete and sample networks
# Note: correlations are using only individuals from complete network that are in corresponding sample network;
# therefore, always comparing networks of the same size.
# 2. Exports correlation files for each node-level SNA metric for each simulation
# 3. Combines complete and sample metrics for network-level metrics into one dataset and exports (to be
# plotted later)
#
# Social network analysis metrics: Degree, Strength, Betweenness, Transitivity,
# Density, Proportion Isolates, Modularity
##### Clear Environment
remove(list=ls())
#### Load R libraries
library(igraph)
library(dplyr) # for left_join()
library(beepr)
library(ggplot2)
#### Set simulations to analyze
# the following are for setting up reading in files and looping through different simulation variations
# the following may therefore vary pending file naming system
nsims <- 500
start.type <- "Random Start" # set starting location type. Options are: "Random Start", "Lattice Start", or "Cluster Start"
h.type <- "H15" # set step length distribution. Options are: "H15", "H34", "H60", "SAC1", "SAC3", "SAC4"
# can compare complete and sample network metrics with different contact definitions
comp.cont.type <- "100m" # set contact threshold type for the COMPLETE network. Options are: "100m", "10m", or "1m"
samp.cont.type <- "100m" # set contact threshold type for the SAMPLE networks to compare to the complete network. Options are: "100m", "10m", or "1m"
### function for only keeping KDE results from q24h and q72h individual sampling levels
fix.KDE <- function(metric.data){
s.t <- subset(metric.data, metric.data$contact.type=="space-time")
kde <- subset(metric.data, metric.data$contact.type=="KDE UDOI")
kde <- subset(kde, kde$ind.sample=="q24h" | kde$ind.sample=="q72h")
new.data <- rbind(s.t, kde)
return(new.data)
}
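# usage (applied below to the concatenated correlation results), e.g.:
# full.cor <- fix.KDE(full.cor)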
############ Calculate ranked correlation for each node-level metric ##############
# set node-level metrics and sampling levels to analyze
nl.metrics <- c("Deg", "Str", "Btw", "Clust")
ind.sample <- c("q1m", "q15m", "q60m", "q3h", "q12h", "q24h", "q72h")
pop.sample <- seq(100, 10, -10)
contact.type <- c("space-time", "KDE UDOI")
# Loop through by simulation number
for(i in 1:nsims){
#### Set simulation number
print(paste("Simulation", i, sep = " "))
sim <- i
# loop through different node-level SNA metrics
for(j in 1:length(nl.metrics)){
metric <- nl.metrics[j]
# read in complete network data for given metric
# complete.name <- paste(<insert naming structure>, ".Rdata", sep = "")
complete.metric <- get(load(file = complete.name))
# read in sampled network data for given metric
# sample.name <- paste(<insert naming structure>, ".Rdata", sep = "")
sample.metric <- get(load(file = sample.name))
# set up empty object to store results
# results are grown with rbind() inside the loop, which is less efficient but functional for these purposes
full.cor <- NULL
# loop through different sampling levels for given metric and calculate the ranked correlation
for(q in 1:length(contact.type)){
ct <- contact.type[q]
for(r in 1:length(pop.sample)){
ps <- pop.sample[r]
for(s in 1:length(ind.sample)){
is <- ind.sample[s]
samp.met_temp <- subset(sample.metric, sample.metric$ind.sample==is & sample.metric$pop.sample==ps & sample.metric$contact.type==ct)
# pull out the metric values from the full network that match the ids of sampled individuals
match_cmp.met <- complete.metric[complete.metric$id %in% samp.met_temp$id,]
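# note: the correlation below assumes both data frames list individuals in the same id order
# (subsetting with %in% keeps complete.metric's own row order); if the saved files do not
# guarantee matching order, merge or sort both by id before correlating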
# calculate ranked correlation coefficient between the sampled metric calculation and the metric for those individuals from the complete network
smp.cor <- data.frame(cor(samp.met_temp[,2], match_cmp.met[,2], method = "spearman"))
colnames(smp.cor) <- "cor"
# add sampling info for tracking purposes
smp.cor$ind.sample <- is
smp.cor$pop.sample <- ps
smp.cor$contact.type <- ct
smp.cor$metric <- metric
# save for next round
full.cor <- rbind(full.cor, smp.cor)
}
}
}
# only keep KDE results for q24h and q72h
full.cor <- fix.KDE(full.cor)
# add sim.num for tracking purposes
full.cor$sim.num <- sim
# save correlation data
# cor.name <- paste(<insert naming structure>, ".Rdata", sep = "")
save(full.cor, file = cor.name)
}
}
beep(4)
########### Combine complete and sample metrics for all network-level metrics ##############
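# Before looping, a minimal sketch of the merge done for the non-modularity metrics:
# the complete-network value is attached as a new column beside every sampled estimate,
# then moved to the front for easy comparison. Toy values and column names only
# (hypothetical; real columns come from the saved .Rdata files):
samp.toy <- data.frame(Dens = c(0.10, 0.12), ind.sample = "q1m",
                       pop.sample = c(100, 90), contact.type = "space-time")
samp.toy$complete <- 0.11          # value from the complete network
samp.toy <- samp.toy[, c(5, 1:4)]  # complete value first, mirroring the loop below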
# set network-level metrics and sampling levels to analyze
nw.metrics <- c("Dens", "Iso", "Mod")
ind.sample <- c("q1m", "q15m", "q60m", "q3h", "q12h", "q24h", "q72h")
pop.sample <- seq(100, 10, -10)
contact.type <- c("space-time", "KDE UDOI")
# Loop through by simulation number
for(i in 1:nsims){
#### Set simulation number
print(paste("Simulation", i, sep = " "))
sim <- i
# loop through different network-level SNA metrics
for(j in 1:length(nw.metrics)){
metric <- nw.metrics[j]
# read in complete network data for given metric
# complete.name <- paste(<insert naming structure>, ".Rdata", sep = "")
complete.metric <- get(load(file = complete.name))
# read in sampled network data for given metric
# sample.name <- paste(<insert naming structure>, ".Rdata", sep = "")
sample.metric <- get(load(file = sample.name))
if(metric!="Mod"){
# add metric value for the complete network to the sample dataset
sample.metric$complete <- complete.metric
# reorder columns for ease of assessment
sample.metric <- sample.metric[,c(5, 1:4)]
# add simulation number for tracking purposes
sample.metric$sim.num <- sim
}else{
# modularity has results for several metrics, so needs to be assessed differently
colnames(complete.metric) <- paste("c.", colnames(complete.metric), sep="")
# add metric value for the complete network to the sample dataset
sample.metric <- cbind(sample.metric, complete.metric)
# add simulation number for tracking purposes
sample.metric$sim.num <- sim
}
# save combined data
# conc.name <- paste(<insert naming structure>, ".Rdata", sep = "")
save(sample.metric, file = conc.name)
}
}
beep(4)
|