content (large_string, lengths 0-6.46M) | path (large_string, lengths 3-331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5-125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 classes) | text (string, lengths 0-6.46M)
---|---|---|---|---|---|---|---|---|---|
context("Check that comparing to Ernest KS test results works")
communities = load_paper_data()
community_pairs = setup_community_combinations(communities)
test_that("KS value comparison works", {
  ks_results = lapply(community_pairs, FUN = ks_bsd)
  compared_results = compare_ernest_ks_values(ks_results)
  expect_true(is.data.frame(compared_results))
  expect_false(anyNA(compared_results))
  expect_silent(plot_crosscomm_ks_pvals(compared_results))
})
test_that("Loading appendix A works", {
  appendixA <- load_ernest_appendixA()
  expect_true(is.data.frame(appendixA))
  expect_true(nrow(appendixA) == 9)
})
| /tests/testthat/test06_ernest_ks_tests.R | permissive | INNFINITEMINDS/replicate-becs | R | false | false | 628 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in r/util.R
\name{seq_log}
\alias{seq_log}
\alias{seq_log_range}
\alias{seq_range}
\title{Sequence in log space}
\usage{
seq_log(from, to, length.out)
seq_log_range(r, length.out)
seq_range(r, length.out)
}
\arguments{
\item{from}{Starting point}
\item{to}{Ending point}
\item{length.out}{Number of points to generate}
\item{r}{Range (i.e., c(from, to))}
}
\description{
Sequence in log space
}
\details{
Unlike the billions of options for \code{seq}, only
\code{length.out} is supported here, and both \code{from} and
\code{to} must be provided. For completeness, \code{seq_range}
generates a range in non-log space.
}
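\examples{
# Sketch only: assumes seq_log() places length.out points equally spaced
# on the log scale between from and to
seq_log(1, 100, length.out = 5)
}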
\author{
Rich FitzJohn
}
| /man/seq_log.Rd | no_license | elchudi/plant | R | false | true | 723 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.time.r
\name{as.time}
\alias{as.time}
\title{Convert a vector of frequencies into a vector of times.}
\usage{
as.time(freq)
}
\arguments{
\item{freq}{Vector of frequencies.}
}
\value{
Vector of times.
}
\description{
Convert a vector of frequencies into a vector of times.
}
\examples{
as.time(c(-3, -2, -1, +1, +2, +3))
}
| /man/as.time.Rd | permissive | dwysocki/random-noise-generation | R | false | true | 406 | rd |
#******************************************************************
#Title: TrainTestDataSetsNorm.R Script
#Date: 2016
#Author: Shane Coleman
#Date Created: March 2016
#Modified: User Created R Script (TrainTestDataSetsNorm.R)
#Modified / Refactored Code Referenced Below
#******************************************************************
set.seed(11)
testDataSetNorm <- read.csv("MusicDataNorm.csv",header = FALSE)
head(testDataSetNorm)
#Set the header names to null, represented as NA
names(testDataSetNorm) <- NULL
#************************************************************************************************
#Title: How to split data into training/testing sets using sample function in R program
#Site Owner / Sponsor: stackoverflow.com
#Date: 2014
#Author: dickoa
#Availability: http://stackoverflow.com/questions/17200114/how-to-split-data-into-training-testing-sets-using-sample-function-in-r-program
#Date Accessed: March 2016
#Modified: Code refactored (Data frames and variable names altered)
#50% of whole data set: MusicDataNorm.csv, stored in variable 'testDataSetNorm'
sizeDataSet <- floor(0.50 * nrow(testDataSetNorm))
rowsLengthSize <- sample(seq_len(nrow(testDataSetNorm)),size = sizeDataSet)
train <- testDataSetNorm[rowsLengthSize,]
head(train)
test <- testDataSetNorm[-rowsLengthSize,]
head(test)
#************************************************************************************************
write.csv(train, file = "TrainNorm.csv",row.names = FALSE)
write.csv(test, file = "TestNorm.csv",row.names = FALSE)
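# Quick sanity check (a sketch, not in the original script): the two halves
# should partition the rows of the full normalised data set
stopifnot(nrow(train) + nrow(test) == nrow(testDataSetNorm))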
| /TrainTestDataSetsNorm.R | no_license | ShaneColeman/FYP_RStudio | R | false | false | 1,544 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sig-vdiagram.R
\docType{methods}
\name{vdiagram}
\alias{vdiagram}
\alias{vdiagram,character-method}
\alias{vdiagram,list-method}
\title{v-diagram report}
\usage{
vdiagram(data, ...)
\S4method{vdiagram}{list}(data, ...)
\S4method{vdiagram}{character}(data, ...)
}
\arguments{
\item{data}{local data (as list) or URL}
\item{...}{additional parameters passed to GET or authenticateUser}
}
\examples{
library(jsonlite)
library(gsplot)
json_file <- system.file('extdata','vdiagram','vdiagram-example.json', package = 'repgen')
data <- fromJSON(json_file)
vdiagram(data, 'Author Name')
\dontrun{
url <- paste0('http://nwissddvasvis01.cr.usgs.gov/service/timeseries/reports/swreviewvdiagram/?',
'station=01350000&dischargeIdentifier=Discharge.ft\%5E3\%2Fs&stageIdentifier=',
'Gage+height.ft.Work.DD002&dailyDischargeIdentifier=',
'Discharge.ft\%5E3\%2Fs.Mean&ratingModelIdentifier=Gage+height-Discharge.STGQ&waterYear=2014')
vdiagram(data = url, verbose = TRUE) # plot to screen with auth
}
}
| /man/vdiagram.Rd | permissive | ee-usgs/repgen | R | false | true | 1,065 | rd |
# Week 6 Spring 2018 - R Solutions #
# ********************************************************************************
# PMSFS/McCourtMPP/Semester3Fall2017MPP/StataRecitations
# Merging, missing data, string replace
# *Q1
schools <- read.csv("userssharedsdfschoolimprovement2010grants.csv", header = T, na.strings =c("", "NA"))
# Load in package for reading .dta files
library(haven)
# *Q2
unique(schools$State)
# Only 50 unique values, including DC. Which state is missing? Hawaii (HI for short)
# Merge the data sets
stateabbs <- read_dta("state_name_abbreviation.dta")
colnames(stateabbs)[2] <- "State"
schools$State <- as.character(schools$State)
library(dplyr)
schools_full <- full_join(schools, stateabbs, by='State')
# We use full join above to ensure that it is not dropping Hawaii. If we want it to automatically drop the unmatched, we could've just used left join.
# See what wasn't matched.
rowSums(is.na(schools_full))
schools_full[832,]
# Delete the unmatched row
schools_full <- schools_full[-832,]
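# Alternative check (a sketch, not in the original): dplyr::anti_join() lists
# unmatched rows directly instead of inspecting row indices by hand
anti_join(stateabbs, schools, by = "State")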
schools_full$X2010.11.Award.Amount <- as.numeric(gsub("\\$", "", schools_full$X2010.11.Award.Amount))
# *Q3
schools_full$X2010.11.Award.Amount <- as.numeric(gsub("\\$", "", schools_full$X2010.11.Award.Amount))
# *Q4
schools_full$Model.Selected <- as.factor(schools_full$Model.Selected)
table(schools_full$Model.Selected)
# *Q5
d <- data.frame(schools_full$X2010.11.Award.Amount, schools_full$Model.Selected)
str(d)
summary(d)
library(mice)
md.pattern(d)
# Not as useful in R.
#
# *Q6
# * Dealing with missing data
summary(d, na.rm = T)
# egen numbermissing = rowmiss(grantamt model)
schools_full$na_count <- rowSums(is.na(schools_full[c("X2010.11.Award.Amount", "Model.Selected")]))
table(schools_full$na_count)
schools_full$nomiss <- ifelse(schools_full$na_count == 0, 0, ifelse(schools_full$na_count > 0, 1, NA))
table(schools_full$nomiss)
| /StataR Recitations/Week 6 R Merges and Missing.R | no_license | christianmconroy/Teaching-Materials | R | false | false | 1,873 | r |
test_that("map_mwi returns a leaflet htmlwidget", {
data("wastd_data")
themap <- map_mwi(wastd_data$animals, sites = wastd_data$sites)
testthat::expect_equal(class(themap), c("leaflet", "htmlwidget"))
})
test_that("map_mwi tolerates NULL data", {
data("wastd_data")
themap <- map_mwi(NULL, sites = wastd_data$sites)
testthat::expect_equal(class(themap), c("leaflet", "htmlwidget"))
})
# use_r("map_mwi")
| /tests/testthat/test-map_mwi.R | no_license | dbca-wa/wastdr | R | false | false | 418 | r |
test_that("map_mwi returns a leaflet htmlwidget", {
data("wastd_data")
themap <- map_mwi(wastd_data$animals, sites = wastd_data$sites)
testthat::expect_equal(class(themap), c("leaflet", "htmlwidget"))
})
test_that("map_mwi tolerates NULL data", {
data("wastd_data")
themap <- map_mwi(NULL, sites = wastd_data$sites)
testthat::expect_equal(class(themap), c("leaflet", "htmlwidget"))
})
# use_r("map_mwi")
|
require("gplots")
# This takes the experiments passed in and compares their best results with a Welch two-sample t-test
ttestdata <- function(experimentArray)
{
baseDir <- "/Users/jonathanbyrne/results"
#no. of files to read from
experimentDir <- paste(baseDir, experimentArray[1] , sep="/")
files = list.files(path=experimentDir, pattern = ".dat")
noOfRuns <-length(files)
noOfExperiments <- length(experimentArray)
#create vector to store last result in each run
tmpArray <- matrix(NaN,nrow=noOfRuns) #this has a row for each run
#create an array to hold the two samples you want to compare
compareArray <- matrix(,nrow=noOfRuns,ncol=noOfExperiments)
for(i in 1:noOfExperiments)
{
#specify the directory
experimentDir <- paste(baseDir, experimentArray[i] , sep="/")
print(experimentDir)
    #create a list of all the files in the folder
files = list.files(path=experimentDir, pattern = ".dat")
cnt <- 0
    #take the last (best) element from each run
for(file in files)
{
cnt <- cnt+1
localFile = paste(experimentDir,file, sep = "/")
tmpExperiment <- read.table(localFile);
      tmpArray[cnt] <- tail(tmpExperiment$V2, n=1) #store the final (best) value for this run
}
compareArray[,i] <-tmpArray
}
t.test(compareArray[,1],compareArray[,2],paired = FALSE,var.equal=FALSE)
}
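# Usage sketch (the directory names under baseDir are illustrative, not from the original):
# ttestdata(c("experiment_a", "experiment_b"))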
| /Rscripts/oldScripts/ttestdata.r | no_license | squeakus/bitsandbytes | R | false | false | 1,505 | r |
require("gplots")
# This will take the experiments passed in and graph the mean of the best reults
ttestdata <- function(experimentArray)
{
baseDir <- "/Users/jonathanbyrne/results"
#no. of files to read from
experimentDir <- paste(baseDir, experimentArray[1] , sep="/")
files = list.files(path=experimentDir, pattern = ".dat")
noOfRuns <-length(files)
noOfExperiments <- length(experimentArray)
#create vector to store last result in each run
tmpArray <- matrix(NaN,nrow=noOfRuns) #this has a row for each run
#create an array to hold the two samples you want to compare
compareArray <- matrix(,nrow=noOfRuns,ncol=noOfExperiments)
for(i in 1:noOfExperiments)
{
#specify the directory
experimentDir <- paste(baseDir, experimentArray[i] , sep="/")
print(experimentDir)
#create alist of all the files in the folder
files = list.files(path=experimentDir, pattern = ".dat")
cnt <- 0
#take last element from the run to do histogram
for(file in files)
{
cnt <- cnt+1
localFile = paste(experimentDir,file, sep = "/")
tmpExperiment <- read.table(localFile);
tmpArray[cnt] <- tail(tmpExperiment$V2, n=1) #add the first row(best) to the column
}
compareArray[,i] <-tmpArray
}
t.test(compareArray[,1],compareArray[,2],paired = FALSE,var.equal=FALSE)
}
|
# This is sample Jags and R code for the Supplemental Instruction project that
# was conducted with a Bayesian framework. The code works by creating a txt
# file containing Jags code that specifies the models and priors. The R code
# sets the initial values, parameters, data used, numbers of iterations, and
# other arguments. For the sake of space, only the code for different models
# was included since the number of iterations can be changed in the code
# provided below.
## Jags Code for Model with Normal Priors with Both Random Intercept Terms
sink("projectmodel.txt")
cat("
model{
for(i in 1:N) {
y[i] ~ dbern(p[i])
logit(p[i]) <- inprod(x[i,],alpha[]) + beta1[term[i]] + beta2[class[i]]
}
for(j in 1:J) {
alpha[j] ~ dnorm( m[j], prec[j] )
alphastep[j] <- step(alpha[j])
}
for(k in 1:K) {
beta1[k] ~ dnorm(0, dinv1)
}
for(l in 1:L) {
beta2[l] ~ dnorm(0, dinv2)
}
dinv1 ~ dgamma(da1,db1)
dinv2 ~ dgamma(da2,db2)
}", fill = TRUE)
sink()
## Sample R Code for Parameters in Normal Model with Both Random Intercept Terms
set.seed(234)
sidata = list(y = mod$dfw, term = mod$term, class = mod$class,
x = matrix(data = c(rep(1, 12730),
mod[, 1]-mean(mod[, 1]),
mod[, 2]-mean(mod[, 2]),
mod[, 7], mod[, 8], mod[, 9]),
byrow = F, ncol = 6),
N = 12730, J = 6, K = 11, L = 19,
da1 = 8, db1 = 18, da2 = 8, db2 = 18,
m = c(-1.39, -1.11, -0.69, 0.0, -0.69, 0.095),
prec = c(1, 4, 4, 4, 16, 16))
siinits = rep(list(list(alpha = c(-1.39, -1.11, -0.69, 0.0, -0.69, 0.095),
beta1 = as.vector(rnorm(11)),
beta2 = as.vector(rnorm(19)),
dinv1=2.25, dinv2 = 2.25)), 5)
siparameters = c("alpha", "alphastep", "beta1", "beta2",
"dinv1", "dinv2")
si.sim_50 = jags(sidata, siinits, siparameters, "projectmodel.txt",
n.chains = 5, n.iter = 55000, n.burnin = 0, n.thin = 1)
Output_50 = AddBurnin(si.sim_50$BUGSoutput$sims.array,
burnin = 5000, n.thin = 1)
## Jags Code for Model with t-Distribution Priors with Both Random Intercept Terms
sink("projectmodelt.txt")
cat("
model{
for(i in 1:N) {
y[i] ~ dbern(p[i])
logit(p[i]) <- inprod(x[i,],alpha[]) + beta1[term[i]] + beta2[class[i]]
}
for(j in 1:J) {
alpha[j] ~ dt( m[j], prec[j], dff)
alphastep[j] <- step(alpha[j])
}
for(k in 1:K) {
beta1[k] ~ dt(0, dinv1, dft)
}
for(l in 1:L) {
beta2[l] ~ dt(0, dinv2, dfc)
}
dinv1 ~ dgamma(da1,db1)
dinv2 ~ dgamma(da2,db2)
dff ~ dgamma(2, .1)
dft ~ dgamma(2, .1)
dfc ~ dgamma(2, .1)
}", fill = TRUE)
sink()
## Sample R Code for Parameters in T Model with Both Random Intercept Terms
set.seed(234)
sidata_t = list(y = mod$dfw, term = mod$term, class = mod$class,
x = matrix(data = c(rep(1, 12730),
mod[, 1]-mean(mod[, 1]),
mod[, 2]-mean(mod[, 1]),
mod[, 7], mod[, 8], mod[, 9]),
byrow = F, ncol = 6),
N = 12730, J = 6, K = 11, L = 19,
da1 = 8, db1 = 18, da2 = 8, db2 = 18,
m=c(-1.39, -1.11, -0.69, 0.0, -0.69, 0.095),
prec = c(1, 4 ,4, 4, 16, 16))
siinits_t = rep(list(list(alpha = c(-1.39, -1.11, -0.69, 0.0, -0.69, 0.095),
beta1 = as.vector(rnorm(11)),
beta2 = as.vector(rnorm(19)),
dinv1 = 2.25, dinv2 = 2.25)), 5)
siparameters_t = c("alpha", "alphastep", "beta1", "beta2",
"dinv1", "dinv2 ")
si.sim_t_50 = jags(sidata_t, siinits_t, siparameters_t, "projectmodelt.txt",
n.chains = 5, n.iter = 55000, n.burnin = 0, n.thin = 1)
Output_t_50 = AddBurnin(si.sim_t_50$BUGSoutput$sims.array,
burnin = 5000, n.thin = 1)
## Jags Code for Model with Normal Priors and No Random Intercept for Term
sink("projectmodel_noterm.txt")
cat("
model{
for(i in 1:N) {
y[i] ~ dbern(p[i])
logit(p[i]) <- inprod(x[i,],alpha[]) + beta2[class[i]]
}
for(j in 1:J) {
alpha[j] ~ dnorm( m[j], prec[j] )
alphastep[j] <- step(alpha[j])
}
for(l in 1:L) {
beta2[l] ~ dnorm(0, dinv2)
}
dinv2 ~ dgamma(da2,db2)
}", fill = TRUE)
sink()
## Sample R Code for Parameters in Normal Model and No Random Intercept for Term
set.seed(234)
sidata_noterm = list(y = mod$dfw, class = mod$class,
x = matrix(data = c(rep(1, 12730),
mod[, 1]-mean(mod[, 1]),
mod[, 2]-mean(mod[, 2]),
mod[, 7], mod[, 8], mod[, 9]),
byrow = F, ncol = 6),
N = 12730, J = 6, L = 19, da2 = 8.0, db2 = 18.0,
m = c(-1.39, -1.11, -0.69, 0.0, -0.69, 0.095),
prec = c(1, 4, 4, 4, 16, 16))
siinits_noterm = rep(list(list(alpha = c(-1.39, -1.11, -0.69,
0.0, -0.69, 0.095),
beta2 = as.vector(rnorm(19)),
dinv2 = 2.25)), 5)
siparameters_noterm = c("alpha", "alphastep", "beta2", "dinv2")
si.sim_noterm = jags(sidata_noterm, siinits_noterm, siparameters_noterm,
"projectmodel_noterm.txt", n.chains = 5, n.iter = 55000,
n.burnin = 0, n.thin = 1)
Output_noterm = AddBurnin(si.sim_noterm$BUGSoutput$sims.array,
burnin = 5000, n.thin = 1)
## Jags Code for Model with t-Distribution Priors and No Random Intercept for Term
sink("projectmodelt_noterm.txt")
cat("
model{
for(i in 1:N) {
y[i] ~ dbern(p[i])
logit(p[i]) <- inprod(x[i,],alpha[]) + beta2[class[i]]
}
for(j in 1:J) {
alpha[j] ~ dt( m[j], prec[j], dff)
alphastep[j] <- step(alpha[j])
}
for(l in 1:L) {
beta2[l] ~ dt(0, dinv2, dfc)
}
dinv2 ~ dgamma(da2,db2)
dff ~ dgamma(2, .1)
dfc ~ dgamma(2, .1)
}", fill = TRUE)
sink()
## Sample R Code for Parameters in T Model and No Random Intercept for Term
set.seed(234)
sidata_noterm = list(y = mod$dfw, class = mod$class,
x = matrix(data = c(rep(1 ,12730),
mod[, 1]-mean(mod[, 1]),
mod[, 2]-mean(mod[, 1]),
mod[, 7], mod[, 8], mod[, 9]),
byrow = F, ncol = 6),
N = 12730, J = 6, L = 19, da2 = 8.0, db2 = 18.0,
m = c(-1.39, -1.11, -0.69, 0.0, -0.69, 0.095),
prec = c(1 ,4, 4, 4, 16, 16))
siinits_noterm = rep(list(list(alpha = c(-1.39, -1.11, -0.69,
0.0, -0.69, 0.095),
beta2 = as.vector(rnorm(19)),
dinv2 = 2.25)), 5)
siparameters_noterm = c("alpha", "alphastep", "beta2", "dinv2")
si.sim_t_noterm = jags(sidata_noterm, siinits_noterm, siparameters_noterm,
"projectmodelt_noterm.txt", n.chains = 5, n.iter = 55000,
n.burnin = 0, n.thin = 1)
Output_t_noterm = AddBurnin(si.sim_t_noterm$BUGSoutput$sims.array,
burnin = 5000, n.thin = 1)
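## A shorter run is a sketch of how the iteration count mentioned above can be changed
## (same call signature as the runs above; commented out so it is not run by default):
## si.sim_check = jags(sidata, siinits, siparameters, "projectmodel.txt",
##                     n.chains = 5, n.iter = 5500, n.burnin = 0, n.thin = 1)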
| /sample_jags_R_code.R | no_license | raguilar2/supplemental_instruction_bayes | R | false | false | 7,614 | r |
## The function "makeCacheMatrix" creates a special "matrix" object that can cache its inverse.
## The function "cacheSolve" computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the inverse
## from the cache.
makeCacheMatrix <- function(x = matrix()) {
  s <- NULL
  ## set() replaces the stored matrix with y and clears the cached inverse
  set <- function(y) {
    x <<- y
    s <<- NULL
  }
  ## get() returns the stored matrix
  get <- function() x
  ## setinv() caches the inverse supplied by cacheSolve()
  setinv <- function(solve) s <<- solve
  ## getinv() returns the cached inverse (NULL if not yet computed)
  getinv <- function() s
  ## return a list containing the four accessor functions above
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
cacheSolve <- function(x, ...) {
  ## check if the inverse has already been calculated and cached (and the matrix has not changed)
  s <- x$getinv()
  ## if a cached inverse is found, return it
  if(!is.null(s)) {
    message("getting cached data for the matrix inversion")
    return(s)
  }
  ## otherwise retrieve the matrix from the makeCacheMatrix object
  data <- x$get()
  ## compute the inverse of the matrix
  s <- solve(data, ...)
  ## cache the inverse so later calls can reuse it
  x$setinv(s)
  s
}
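## Minimal usage sketch (illustrative 2x2 matrix, not part of the original assignment)
m_demo <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m_demo)   # computes the inverse and caches it
cacheSolve(m_demo)   # second call returns the cached inverse and prints the message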
| /cachematrix.R | no_license | cbriody1/ProgrammingAssignment2 | R | false | false | 1,422 | r |
## The function "makeCacheMatrix" creates a special "matrix" object that can cache its inverse.
## The function "cacheSolve" computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the inverse
## from the cache.
makeCacheMatrix <- function(x = matrix()) {
s <- NULL
## create set function which takes floating variable y
set <- function(y) {
x <<- y
s <<- NULL
}
##if matrix not found, then create it with this function
get <- function() x
##set inverse of the matrix using solve function
setinv <- function(solve) s <<- solve
## return inverse of the matrix
getinv <- function() s
##create list that contains the above four mentioned functions
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
cacheSolve <- function(x, ...) {
## check If the inverse has already been calculated and stored in cache and the matrix has not changed
s <- x$getinv()
##if inverse is found, retrieve from cache
if(!is.null(s)) {
message("getting cached data for the matrix inversion")
return(s)
}
##if not found in cache go to makeCacheMatrix function and retrive matrix
data <- x$get()
## create the inverse of matrix
s <- solve(data, ...)
##set inverse so it is stored in cache
x$setinv(s)
s
}
|
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252122755e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
| /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613104496-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 344 | r |
library(tempSyntaxPackage)
resultsFile <- file.path("results_objects", "TTestBayesianOneSample.rds")
if (!file.exists(resultsFile)) {
path <- normalizePath("../../jaspTTests")
renv::activate(path)
library(jaspTools)
jaspTools::setPkgOption("module.dirs", path)
jaspTools::setPkgOption("reinstall.modules", FALSE)
options <- jaspTools::analysisOptions("TTestBayesianOneSample")
options$variables <- c("contNormal", "contGamma")
options$descriptives <- TRUE
options$descriptivesPlots <- TRUE
options$descriptivesPlotsCredibleInterval <- 0.65
options$plotPriorAndPosterior <- TRUE
results <- runAnalysis("TTestBayesianOneSample", "test.csv", options)
saveRDS(results, file = resultsFile)
} else {
library(jaspTools)
results <- readRDS(resultsFile)
}
simp <- simplifyResults(results)
simp
options("jaspDebug" = TRUE)
simp$`Bayesian One Sample T-Test`
print(simp, short = TRUE, indent = 2)
simp$`Bayesian One Sample T-Test`
simp$`Inferential Plots`$contNormal$`Prior and Posterior`$`Prior and Posterior`
simp$Descriptives
simp$`Descriptives Plots`
simp$`Inferential Plots`$contNormal$`Prior and Posterior`$`Prior and Posterior`
simp$`Inferential Plots`$contGamma$`Prior and Posterior`$`Prior and Posterior`
simp$`Descriptives Plots`$contNormal$contNormal
simp$`Descriptives Plots`$contNormal
options("jaspDebug" = FALSE)
print(simp, short = TRUE, indent = 2)
ttt <- simp$descriptivesContainer$Descriptives
ttt
attributes(ttt)
meta <- attr(ttt, "meta")
meta$fields$overTitle[2:3] <- paste(letters[1:26], collapse = "")
attr(ttt, "meta") <- meta
ttt
subset_cols_jaspTableWrapper <- function(tbl, idx) {
tblNew <- tbl[, idx]
attributes(tblNew)$meta <- attributes(tbl)$meta
attributes(tblNew)$meta$fields <- attributes(tblNew)$meta$fields[idx, ]
tblNew
}
ttt
dim(ttt)
idx <- c(2:3, 6:7)
eee
subset_cols_jaspTableWrapper(ttt, 2:3)
simp$ttestContainer$`Bayesian One Sample T-Test`
simp$ttestContainer
names(simp)
| /test_analyses/testTTests.R | no_license | vandenman/temporarySyntaxStuff | R | false | false | 1,957 | r |
complete <- function(directory, id = 1:332) {
############################## Function prototype from Coursera #####################
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
######################################################################################
## My code starts here:
## The question here is the definition of "complete"
## In this work, we assume that "complete" means neither "sulfate" nor "nitrate" is NA
## 1. Obtain file index
monitorNames <- dir(directory)
## 2. initialize the vector of complete-observation counts and a counter
nobs <- vector(length = length(id))
mCount <- 0
## 3. access each monitor
for (fIndex in id) {
  mCount <- mCount + 1
  ## read the two critical columns
  fData <- read.csv(paste(c(directory, monitorNames[fIndex]),
                          collapse = .Platform$file.sep))[c("sulfate", "nitrate")]
  ## count how many rows are both non-NA
  nobs[mCount] <- sum(!is.na(fData[, 1]) & !is.na(fData[, 2]))
}
## 4. combine the "id" and "nobs" vectors into the result data frame "res"
res <- cbind(id, nobs)
names(res) <- c("id", "nobs")
res <- as.data.frame(res)
res
}
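## Usage sketch (the directory name "specdata" is an assumption, not given above):
## complete("specdata", c(1, 2, 3))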
| /Assignment2Airpolute/complete.R | no_license | cwangED/RProgrammingAssignment | R | false | false | 1,597 | r |
# features - names of 561 features
# (561 obs. of 2 variables: V1 = index, V2 = feature label)
# activities - names of 6 activities
# (6 obs. of 2 variables: V1 = index, V2 = activity label)
#
# train and test dataset with 7352 and 2947 entries respectively
#
# *_data - datasets with all 561 features
# (7352/2947 obs. of 561 variables)
# *_labl - activity label for each dataset (6 activities)
# (7352/2947 obs. of 1 variable)
# *_subj - subject label for each dataset (30 subjects)
# (7352/2947 obs. of 1 variable)
# load all data
features <- read.table("UCI HAR Dataset/features.txt")
activities <- read.table("UCI HAR Dataset/activity_labels.txt")
train_data <- read.table("UCI HAR Dataset/train/X_train.txt")
train_labl <- read.table("UCI HAR Dataset/train/y_train.txt")
train_subj <- read.table("UCI HAR Dataset/train/subject_train.txt")
test_data <- read.table("UCI HAR Dataset/test/X_test.txt")
test_labl <- read.table("UCI HAR Dataset/test/y_test.txt")
test_subj <- read.table("UCI HAR Dataset/test/subject_test.txt")
# combine training and test data
data <- rbind(train_data, test_data)
labl <- rbind(train_labl, test_labl)
subj <- rbind(train_subj, test_subj)
# use descriptive variable names
names(data) <- features$V2
names(subj) <- c("subject")
names(activities) <- c("V1", "activity")
# restrict to mean and deviation features
meanOrStd_logical <- sapply(c("mean", "std"), grepl, features[,2], ignore.case=TRUE)
selected_features <- features[meanOrStd_logical[,1] | meanOrStd_logical[,2], 1]
data <- data[selected_features]
# combine all data in one data frame
data <- cbind(subj, labl, data)
# replace activity index with labels
data <- merge(activities, data)
last <- length(names(data))
data <- data[,c(3,2,4:last)] # reorder subject and activity
# compute means on features for each subject/activity combination
library(plyr)
means = ddply(data, c("subject","activity"), function(x) colMeans(x[3:88]))
write.table(means, "feature_average.txt", row.names = FALSE)
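# Sanity check (a sketch, not in the original): the tidy table written above can be read back with
tidy_means <- read.table("feature_average.txt", header = TRUE)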
| /run_analysis.R | no_license | goerlitz/GettingCleaningDataCourseProject | R | false | false | 2,080 | r |
# Load required packages
library(tidyverse)
library(data.table)
library(lubridate)
library(RColorBrewer)
library(mgcv)
library(ggpubr)
library(mgcViz)
library(surveillance)
library(broom)
library(readr)
library(rstan)
library(nleqslv)
# Load helper functions
source("./functions.R")
col_vec = brewer.pal(3, "Dark2")
## Read and edit data
# Bavaria
# Daily PCR-test data
load("../data/200921_test_bav.RData")
dat_test_bav = dat_test_bav %>% filter(date>=ymd("2020-03-16"))
# Person-specific case data
dat_cases_ind_bav = read_tsv("../data/2020_09_21_bav_synth.csv")
# Derive daily case numbers
dat_cases_bav = dat_cases_ind_bav %>%
group_by(date=rep_date_local) %>% summarise(n_cases = n()) %>%
select(date, n_cases)
case_test_dat_bav = dat_test_bav %>% rename(n_pcr=Gesamtzahl, n_pcr_pos=Positive) %>%
left_join(dat_cases_bav) %>% arrange(date) %>% mutate(t=1:n())
# Germany
load("../data/201001_cases_tests_germany.RData")
case_test_dat_ger = case_test_dat_ger %>% mutate(date = as.Date(paste(2020, KW, 3, sep="-"), "%Y-%U-%u")-7) %>%
mutate(n_pcr_100k = n_pcr/83000000*100000/7,
n_pcr_pos_100k = n_pcr_pos/83000000*100000/7,
n_cases_100k = n_cases/83000000*100000/7)
# Adjust Bavarian case data by population size
case_test_dat_bav = case_test_dat_bav %>%
mutate(n_pcr_100k = n_pcr/13080000*100000,
n_pcr_pos_100k = n_pcr_pos/13080000*100000,
n_cases_100k = n_cases/13080000*100000)
# Plot number cases and number of tests
plot_dat_fig_1 = rbind(case_test_dat_ger %>%
mutate(region="Germany") %>%
select(date,
n_pcr_100k,
n_pcr_pos_100k,
n_cases_100k, region),
case_test_dat_bav %>%
mutate(region="Bavaria") %>%
select(date,
n_pcr_100k,
n_pcr_pos_100k,
n_cases_100k, region))
fig_1_1 = ggplot(plot_dat_fig_1 %>%
mutate(region = factor(region,
levels = c("Germany", "Bavaria"),
labels = c("Germany", "Bavaria")))) +
geom_line(aes(date, n_pcr_100k, lty = region)) +
theme_bw() +
xlab("Date") +
ylab("Number tests\nper 100k") +
theme(legend.title = element_blank(),
legend.position = "bottom") +
scale_x_date(date_breaks = "1 month")
fig_1_2 = ggplot(plot_dat_fig_1 %>%
select(date, n_pcr_pos_100k, n_cases_100k, region) %>%
pivot_longer(cols = c("n_pcr_pos_100k", "n_cases_100k")) %>%
mutate(name = factor(name, levels = c("n_pcr_pos_100k", "n_cases_100k"),
labels = c("Positive tests",
"Reported cases")),
region = factor(region, levels = c("Germany", "Bavaria")))) +
geom_line(aes(date, value, col = name, lty = region)) +
theme_bw() +
xlab("Date") +
ylab("Number\nper 100k") +
theme(legend.title = element_blank(),
legend.position = "bottom") +
scale_x_date(date_breaks = "1 month")
fig_1 = ggarrange(fig_1_1,
fig_1_2, nrow = 2, labels = "AUTO", common.legend = F,
legend = "bottom")
ggsave(fig_1, filename = "../results/figures/fig_1_test_case_num_synth.pdf", width = 8, height = 5, dpi = 500)
# Numbers
# Number PCR Tests Germany/Bavaria
sum(case_test_dat_ger$n_pcr)
sum(case_test_dat_bav$n_pcr)
# per 100
sum(case_test_dat_ger$n_pcr)/83000000*100
sum(case_test_dat_bav$n_pcr)/13080000*100
# Cases/pos Test Bavaria
sum(case_test_dat_bav$n_cases)
sum(case_test_dat_bav$n_pcr_pos)
# Cases/pos Test Ger
sum(case_test_dat_ger$n_cases)
sum(case_test_dat_ger$n_pcr_pos)
# Estimate test model
mod_ger = est_pos_test_model(case_test_dat_ger, max_lag = 2, date_start = ymd("2020-04-29"))
summary(mod_ger[[1]])
mod_bav = est_pos_test_model(case_test_dat_bav, max_lag = 7, date_start = ymd("2020-05-01"))
summary(mod_bav[[1]])
# Figure two, predicted number of examined persons and reported number of PCR tests
plot_dat_fig_2 = rbind(mod_ger[[2]] %>% select(date, n_pcr, n_exam_pers) %>%
pivot_longer(cols = c("n_pcr", "n_exam_pers")) %>%
mutate(name = factor(name, levels = c("n_pcr", "n_exam_pers"),
labels = c("PCR tests\nreported by labs",
"Examined persons\nmodel derived"))) %>%
mutate(region="Germany",
value = value/83000000/7*100000),
mod_bav[[2]] %>% select(date, n_pcr, n_exam_pers) %>%
pivot_longer(cols = c("n_pcr", "n_exam_pers")) %>%
mutate(name = factor(name, levels = c("n_pcr", "n_exam_pers"),
labels = c("PCR tests\nreported by labs",
"Examined persons\nmodel derived"))) %>%
mutate(region="Bavaria",
value = value/13080000*100000)) %>%
mutate(region = factor(region, levels = c("Germany", "Bavaria")))
fig_2 = ggplot(plot_dat_fig_2) +
geom_line(aes(date, value, col = name, lty = region)) + theme_bw() +
xlab("Date") +
ylab("Number\nper 100k") +
facet_grid(rows = "region") +
scale_linetype_discrete(guide=FALSE) +
theme(legend.position = "bottom", legend.title = element_blank())
ggsave(fig_2, filename = "../results/figures/fig_2_test_exam_num_synth.pdf", width = 8, height = 5, dpi = 500)
# Supplemental Figure 1 - Effects of model
viz_ger = getViz(mod_ger[[1]])
ger_beta_0 = plot(sm(viz_ger, 1)) + l_fitLine() + l_ciLine() +
scale_x_continuous(breaks=seq(1,21, by=4), labels = paste0("CW",seq(18,38, by=4))) +
ylim(-.2,1.2) + ylab("Time-varying linear effect Lag-0") + xlab("Time")
ger_beta_1 = plot(sm(viz_ger, 2)) + l_fitLine() + l_ciLine() +
scale_x_continuous(breaks=seq(1,21, by=4), labels = paste0("CW",seq(18,38, by=4))) +
ylim(-.2,1.2) + ylab("Time-varying linear effect Lag-1") + xlab("Time")
plots_ger = gridPrint(ger_beta_0, ger_beta_1, left = "Germany", ncol=3)
viz_bav = getViz(mod_bav[[1]])
bav_beta_0 = plot(sm(viz_bav, 1)) + l_fitLine() + l_ciLine() +
scale_x_continuous(breaks=seq(1,143, by=28), labels = format(seq(ymd("2020-05-01"), ymd("2020-09-20"), by = 28), '%m-%d')) +
ylim(-.2,1.2) + ylab("Time-varying linear effect Lag-0") + xlab("Time")
bav_beta_1 = plot(sm(viz_bav, 2)) + l_fitLine() + l_ciLine() +
scale_x_continuous(breaks=seq(1,143, by=28), labels = format(seq(ymd("2020-05-01"), ymd("2020-09-20"), by = 28), '%m-%d')) +
ylim(-.2,1.2) + ylab("Time-varying linear effect Lag-1") + xlab("Time")
bav_beta_6 = plot(sm(viz_bav, 3)) + l_fitLine() + l_ciLine() +
scale_x_continuous(breaks=seq(1,143, by=28), labels = format(seq(ymd("2020-05-01"), ymd("2020-09-20"), by = 28), '%m-%d')) +
ylim(-.2,1.2) + ylab("Time-varying linear effect Lag-6") + xlab("Time")
plots_bav = gridPrint(bav_beta_0, bav_beta_1, bav_beta_6, ncol=3, left="Bavaria")
supp_fig_1 = gridPrint(plots_ger, plots_bav)
ggsave(supp_fig_1, filename = "../results/figures/supp_fig_1_synth.pdf", width = 12, height = 6)
# Adjust case numbers for different assumptions on sensitivity and specificity
# Prepare dataset for adjustment
adj_case_ger = expand_grid(date=mod_ger[[2]]$date, sens=c(1,.9,.7), spec=c(1,.999,.997,.995)) %>%
left_join(mod_ger[[2]] %>% dplyr::select(date, n_cases, n_exam_pers))
adj_case_bav = expand_grid(date=mod_bav[[2]]$date, sens=c(1,.9,.7), spec=c(1,.999,.997,.995)) %>%
left_join(mod_bav[[2]] %>% dplyr::select(date, n_cases, n_exam_pers))
# Use the helper function adjust_n_case() to derive misclassification-adjusted case counts
adj_case_ger = adj_case_ger %>%
mutate(n_cases_adj = adjust_n_case(n_cases, n_exam_pers, sens = sens, spec=spec)) %>%
mutate(sens = factor(sens, levels = c("1","0.9", "0.7"),
labels = paste0("Sens: ", c("1","0.9", "0.7"))),
spec = factor(spec, levels = c("1", "0.999", "0.997", "0.995"),
labels = paste0("Spec: ", c("1", "0.999", "0.997", "0.995"))))
adj_case_bav = adj_case_bav %>%
mutate(n_cases_adj = adjust_n_case(n_cases, n_exam_pers, sens = sens, spec=spec)) %>%
mutate(sens = factor(sens, levels = c("1","0.9", "0.7"),
labels = paste0("Sens: ", c("1","0.9", "0.7"))),
spec = factor(spec, levels = c("1", "0.999", "0.997", "0.995"),
labels = paste0("Spec: ", c("1", "0.999", "0.997", "0.995"))))
# Plot results
plot_dat_fig_3 = rbind(adj_case_ger %>% mutate(n_cases_adj = n_cases_adj/81000000*100000/7,
region="Germany"),
adj_case_bav %>% mutate(n_cases_adj = n_cases_adj/13080000*100000,
region="Bavaria")) %>%
mutate(region=factor(region, levels = c("Germany", "Bavaria")))
fig_3 = ggplot(plot_dat_fig_3 %>% filter(sens!= "Sens: 1",
spec!= "Spec: 1") %>%
mutate(type=NA)) +
geom_line(aes(date, n_cases_adj, col = spec), lwd=1, alpha=.75) +
geom_line(aes(date, n_cases_adj, linetype = type, size=type),
data = plot_dat_fig_3 %>% filter(sens== "Sens: 1",
spec== "Spec: 1") %>%
select(date, n_cases_adj, region) %>%
mutate(type="Unadjusted"), alpha=1) +
facet_grid(cols = vars(sens), rows = vars(region), scales = "free_y") +
xlab("Date") +
ylab("Number per 100k") +
theme_bw() +
theme(legend.title = element_blank(),
legend.position = "bottom") +
scale_linetype_manual(values=c("Unadjusted"=1)) +
scale_size_manual(values=c("Unadjusted"=.5)) +
guides(col = guide_legend(order=1),
linetype = guide_legend(order=2),
size=FALSE)
ggsave(fig_3, filename = "../results/figures/fig_3_adj_case_numbers_2_synth.pdf", width = 8, height = 5, dpi = 500)
# Figure 4 - plot fraction of false-positives of all reported cases for different values of specificity
plot_dat_fig_4 = plot_dat_fig_3 %>% filter(sens== "Sens: 0.7", spec== "Spec: 1") %>%
select(date, region, n_cases = n_cases_adj) %>%
right_join(plot_dat_fig_3 %>% filter(sens== "Sens: 0.7") %>%
select(date, region, n_cases_adj, spec),
by = c("date", "region")) %>%
mutate(frac_fp=1-(n_cases_adj/n_cases)) %>%
filter(spec!="Spec: 1")
fig_4 = ggplot(plot_dat_fig_4) +
geom_line(aes(date, frac_fp, col = spec), lwd=0.3) +
geom_smooth(aes(date, frac_fp, col=spec), se = FALSE) +
facet_grid(rows=vars(region)) +
xlab("Date") + ylab("Fraction") + theme_bw() +
theme(legend.title = element_blank(), legend.pos = "bottom")
ggsave(fig_4, filename = "../results/figures/fig_4_frac_reported_adjusted_synth.pdf", width = 8,
height = 5, dpi = 500)
# Summary Bavarian data upper bound false-positives
# (Numbers do not match numbers in original data due to artificial reporting dates)
adj_case_bav %>% filter(spec=="Spec: 0.995",
sens=="Sens: 1",
date<ymd("2020-08-01"),
date>=ymd("2020-06-01")) %>%
summarise(min_date = min(date),
max_date = max(date),
n=n(),
abs_zero = sum(n_cases_adj==0),
frac_zero = mean(n_cases_adj==0))
# Fraction of positive PCR-tests reported by labs in June/July
case_test_dat_bav %>% filter(date>=ymd("2020-06-01"), date < ymd("2020-08-01")) %>% summarise(sum(n_pcr_pos)/sum(n_pcr))
# Prepare data for misclassification adjusted nowcasting
source("analysis_fun_stanmodel_mc.R")
# Perform imputation of missing disease onsets based on Weibull-GAMLSS
imputation = perform_imputation(dat_cases_ind_bav, type = "week_weekday_age")
imputed_data = imputation[[1]]
# Adjust case numbers for false-positive cases assuming spec < 1
imputed_dat_adj_999 = adjust_case_data(imputed_data = imputed_data,
adj_case_bav %>%
filter(sens== "Sens: 1" & spec== "Spec: 0.999") %>%
select(date, n_cases_adj))
imputed_dat_adj_997 = adjust_case_data(imputed_data = imputed_data,
adj_case_bav %>%
filter(sens== "Sens: 1" & spec== "Spec: 0.997") %>%
select(date, n_cases_adj))
imputed_dat_adj_995 = adjust_case_data(imputed_data = imputed_data,
adj_case_bav %>%
filter(sens== "Sens: 1" & spec== "Spec: 0.995") %>%
select(date, n_cases_adj))
save(imputed_dat_adj_999, file = "../results/nowcast_bav/imp_dat_adj_0999_synth.RData")
save(imputed_dat_adj_997, file = "../results/nowcast_bav/imp_dat_adj_0997_synth.RData")
save(imputed_dat_adj_995, file = "../results/nowcast_bav/imp_dat_adj_0995_synth.RData")
save(imputed_data, file = "../results/nowcast_bav/imp_dat_synth.RData")
| /code/1_analysis_adj_case_counts.R | no_license | FelixGuenther/mc_covid_cases_public | R | false | false | 13,381 | r |
library(shiny)
library(leaflet) # provides leafletOutput(), renderLeaflet(), leaflet() and the %>% pipe used below
ui <- fluidPage(
  leafletOutput("mymap"),
  p()
)
server <- function(input, output, session) {
####################
cities <- read.csv(textConnection("
positiondata,Lat,Long,sumrate
1,24.97155,121.54907,10
2,24.97283,121.53409,30
3,24.97415,121.53824,40
4,24.98033,121.54408,5
5,24.97088,121.53706,79
6,24.97156,121.53532,88
"))
coldata<-c("#B80000","#008F00","#52008F","#8F008F","#E0E000","#F700F7")
####################
output$mymap <- renderLeaflet(
leaflet(cities) %>% addTiles() %>%
addCircles(lng = ~Long, lat = ~Lat, weight = 1,
radius = ~sumrate * 30, popup = ~positiondata, color = ~coldata)
)
}
shinyApp(ui, server)
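# To run the app locally, both the shiny and leaflet packages must be installed;
# assuming this script is saved as app.R in its own directory, it can be launched
# with shiny::runApp("<directory containing app.R>").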
|
/ShinyGIS.R
|
no_license
|
jerrywu2013/RGIS
|
R
| false | false | 689 |
r
|
getwd()
bank <- read.csv("C:/Users/us/Desktop/Projects/Intermediate Analytics/Assignment 4/banking_data.csv")
str(bank)
##################
summary(bank)
is.factor(bank$marital)
head(bank)
###################
install.packages("Amelia")
library(Amelia)
missmap(bank,main="Missing Values vs Observed", col = c("BLACK","LIGHT BLUE" ), legend= FALSE)
sapply(bank,function(x) sum(is.na(x)))
sapply(bank, function(x) length(unique(x)))
contrasts(bank$marital)
contrasts(bank$default)
contrasts(bank$housing)
contrasts(bank$contact)
contrasts(bank$poutcome)
contrasts(bank$y)
#######################
###Intercept only model
c_only <- glm(y~1,family = binomial, data = bank)
c_only
coef(c_only)## -2.063912
##function to convert a logit into a probability
logit2prob <- function(logit){
odds<- exp(logit)
prob<- odds/(1+odds)
return(prob)
}
##function call
prob<- logit2prob(coef(c_only))
prob ## 0.1126542
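## Quick check: for an intercept-only binomial GLM the fitted probability equals the
## raw event proportion, so `prob` should match mean(bank$y == "yes")
## (assuming "yes" is the positive level of y).
# mean(bank$y == "yes")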
install.packages("gmodels")
library("gmodels")
##CrossTable
CrossTable(bank$y)
##Manual Probability Calculation for Yes and No of Term Deposit
yes_logit<- coef((c_only)[1])
y_prob<- logit2prob(yes_logit)
n_prob<- 1- y_prob
y_prob ##0.1126542
n_prob ##0.8873458
###############################################################################################
##Handling NA values
##Instead of omitting NA values or using a KNN imputation method,
##we have utilised the categorization method by introducing a category : "UNKNOWN"
str(bank$education)
bank$education[is.na(bank$education)]<- 999 ##Replace NA by introducing new integer value: 999
str(bank$occupation)
bank$occupation[is.na(bank$occupation)]<- 999 ##Replace NA by introducing new integer value: 999
bank$education <- factor(bank$education,labels=c("LOW","INTERMEDIATE","HIGH","UNKNOWN"))
str(bank$education)
levels(bank$education)
target1<- glm(y~education, data= bank, family='binomial')
summary(target1)
##y = intercept + b1x1+b2x2+b3x3
y1<- -2.34201+0.44785*1 ##high
y2<- -2.34201 ##low
##OddsRatio
#Manual
##y= log(p/1-p) ; p/1-p = exp(y);
h_odd<- exp(y1)
l_odd<- exp(y2)
odds_h_l <- h_odd/l_odd
odds_h_l##High_education vs Low_education OddsRatio = 1.564944
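## Equivalent shortcut: the HIGH vs LOW odds ratio is exp() of the educationHIGH
## coefficient of target1, i.e. exp(0.44785) = 1.5649, which should agree with odds_h_l.
# exp(coef(target1)["educationHIGH"])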
tab<- table(bank$y,bank$education, exclude= c("INTERMEDIATE", "UNKNOWN"))
tab
##OddsRatio
##using package epiR
install.packages("epiR")
library(epiR)
epi.2by2(tab,method = "cohort.count",conf.level = 0.95)
##Probability of Highly educated clients taking Term Deposit
#method 1
h_prob<- logit2prob(y1)
h_prob ##0.1307709
#method 2
h1<-exp(-2.34201+0.44785)/(1+exp(-2.34201+0.44785))
h1 ##0.1307709 (matches method 1)
##Probability of Lowly educated clients taking Term Deposit
#method 1
l_prob<- logit2prob(y2)
l_prob
#method 2
l2<- exp(-2.34201)/(1+exp(-2.34201))
l2
CrossTable(bank$education,bank$y)
##
#bank2<- subset(bank,bank$education =="LOW" | bank$education=="HIGH",select = X:y)
#bank2
#target<- glm(y~education, data= bank2, family='binomial')
#target
#coef(target)
#summary(target)
#logit2prob(coef(target))
##
##
#exp(0.45201)
#exp(-1.88896-0.45201)/(1+exp(-1.88896-0.45201))
#exp(-1.88896)/(1+exp(-1.88896))
##
##################################################################################################
#Highest day of the week for Term Deposit Sign Up
bank[bank$day==1,]$day<- "MONDAY"
bank[bank$day==2,]$day<- "TUESDAY"
bank[bank$day==3,]$day<- "WEDNESDAY"
bank[bank$day==4,]$day<- "THURSDAY"
bank[bank$day==5,]$day<- "FRIDAY"
bank$day<- as.factor(bank$day)
is.factor(bank$day)
str(bank$day)
day_model<- glm(y~day, data=bank, family="binomial")
summary(day_model)
coef(day_model)
fri_p<- logit2prob(coef(day_model)[1])
fri_p ##0.1080874
mon_p<- exp( -2.11042809-0.09255)/(1+exp( -2.11042809-0.09255))
mon_p ##0.09948337
thurs_p<-exp( -2.11042809+0.12919566)/(1+exp( -2.11042809+0.12919566))
thurs_p ##0.1211875
tue_p<- exp( -2.11042809+0.09699519)/(1+exp( -2.11042809+0.09699519))
tue_p ##0.1177998
wed_p<- exp( -2.11042809+0.08608609)/(1+exp( -2.11042809+0.08608609))
wed_p ##0.1166708
highest_day<- thurs_p
highest_day ##0.1211875
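## The per-day probabilities can also be obtained in a single call, avoiding hand-typed
## coefficients; the result should match the manual values above:
# predict(day_model, newdata = data.frame(day = levels(bank$day)), type = "response")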
######################################################################################
#Logistic Regression for Predicting Term Deposit sign up
set.seed(12345)
split<- sample(seq_len(nrow(bank)),size= floor(0.80*nrow(bank)))
train_data11<- bank[split, ]
test_data11<- bank[-split, ]
head(train_data11)
head(test_data11)
summary(glm(y~., data= train_data11,family=binomial(link='logit')))
sat_mod<- glm(y~., data= train_data11,family=binomial(link='logit'))
library("MASS")
stepAIC(sat_mod, direction="backward")
fin_stepAIC_mod<- glm(formula = y ~ X + age + marital + education + default + housing +
contact + day + duration + campaign + pdays + poutcome,
family = binomial(link = "logit"),
data = train_data11)
summary(fin_stepAIC_mod)
## Marital status and housing still contribute little predictive value,
## so we drop them from the final model below.
anova(sat_mod, glm(y~.-contact,data = train_data11, family = binomial (link= "logit")),test = "Chisq")
anova(sat_mod,test="Chisq")
fin_mod <- glm(y~ X+age+education+default+contact+day+duration+campaign+pdays+poutcome,
family = binomial(link="logit"),
data=train_data11)
summary(fin_mod)
fin_mod
install.packages("ROCR")
library("ROCR")
test_data11$pred_data<- predict(fin_mod,test_data11,type='response')
test_data11$predictedY<- ifelse(test_data11$pred_data>0.5, 1,0)
test_data11$pred_num<- ifelse(test_data11$pred_data>0.5, 1,0)
test_data11$predictedY<- factor(test_data11$predictedY, levels=c("0","1"),labels=c("no","yes"))
str(test_data11$y)
str(test_data11$predictedY)
##confusion_matrix<- confusionMatrix(test_data11$y, test_data11$predictedY, threshold= 0.5)
c_tab<- table(test_data11$predictedY, test_data11$y)
c_tab
(7085+381)/sum(c_tab)
install.packages('InformationValue')
library(InformationValue)
misClasificError<- mean(test_data11$predictedY != test_data11$y)
Accuracy<- 1-misClasificError
mis<- misClassError(as.integer(test_data11$y), as.integer(test_data11$predictedY), threshold = 0.5)
accur<- 1-mis
accur
##plot TPR vs FPR (ROC curve)
p<- predict(fin_mod, test_data11,type="response")
pr <- prediction(predictions = p,test_data11$y)
perf<- performance(pr,measure = "tpr",x.measure="fpr")
perf
plot(perf)
## AREA UNDER CURVE
auc<- performance(pr, measure = "auc")
auc<- auc@y.values[[1]]
auc
##ROC
plotROC(as.integer(test_data11$y),test_data11$pred_data)
sensitivity(as.numeric(test_data11$y),as.numeric(test_data11$predictedY), threshold = 0.5)
install.packages("pROC")
library("pROC")
x<- rnorm(1000)
pre<- exp(5*x)/(1+exp(5*x))
y<- 1 *(runif(1000)< pre)
modd<- glm(y~x, family="binomial")
predpr<- predict(modd, type= "response")
rocc<- roc(y~predpr)
plot(rocc)
pre2<- exp(0*x)/(1+exp(0*x))
y2<- 1 *(runif(1000)< pre2)
modd2<- glm(y2~x, family="binomial")
predpr2<- predict(modd2, type= "response")
rocc2<- roc(y2~predpr2)
plot(rocc2)
###################################################################################
optimalCutoff(as.numeric(test_data11$y),as.numeric(test_data11$predictedY),optimiseFor = "misclasserror", returnDiagnostics = TRUE)
###################################
#tree model trial.
install.packages("tree")
library(tree)
y_pred_num<- ifelse(p>0.5,"yes","no")
y_predicted<- factor(y_pred_num,levels=c("no","yes"))
y_observed<- test_data11$y
mean(y_predicted == y_observed, na.rm = TRUE)
# fit a classification tree before plotting it (tree.model was not defined above);
# assuming the intended model uses the same training data as the logistic fit
tree.model<- tree(y~., data= train_data11)
plot(tree.model)
text(tree.model)
|
/Banking_Data_pr.R
|
no_license
|
kamathnikhil/Bank-Marketing-Campaign
|
R
| false | false | 7,736 |
r
|
|
#' @title Functions to subset ClusterExperiment Objects
#' @description These functions are used to subset ClusterExperiment objects,
#' either by removing samples, genes, or clusterings
#' @name subset
#' @param x a ClusterExperiment object.
#' @inheritParams ClusterExperiment-class
#' @inheritParams getClusterIndex
#' @return A \code{\link{ClusterExperiment}} object.
#' @details \code{removeClusterings} removes the clusters given by
#' \code{whichClusters}. If the \code{primaryCluster} is one of the clusters
#' removed, the \code{primaryClusterIndex} is set to 1 and the dendrogram and
#' coclustering matrix are discarded and orderSamples is set to
#' \code{1:NCOL(x)}.
#' @return \code{removeClusterings} returns a \code{ClusterExperiment} object,
#' unless all clusters are removed, in which case it returns a
#' \code{\link{SingleCellExperiment}} object.
#' @examples
#' #load CE object
#' data(rsecFluidigm)
#' # remove the mergeClusters step from the object
#' clusterLabels(rsecFluidigm)
#' test<-removeClusterings(rsecFluidigm,whichClusters="mergeClusters")
#' clusterLabels(test)
#' tableClusters(rsecFluidigm)
#' test<-removeClusters(rsecFluidigm,whichCluster="mergeClusters",clustersToRemove=c("m01","m04"))
#' tableClusters(test,whichCluster="mergeClusters")
#' @export
#' @aliases removeClusterings,ClusterExperiment-method
setMethod(
f = "removeClusterings",
signature = signature("ClusterExperiment"),
definition = function(x, whichClusters) {
whichClusters<-getClusterIndex(object=x,whichClusters=whichClusters,noMatch="throwError")
if(length(whichClusters)==NCOL(clusterMatrix(x))){
warning("All clusters have been removed. Will return just a Summarized Experiment Object")
#make it Summarized Experiment
return(as(x,"SingleCellExperiment"))
}
newClLabels<-clusterMatrix(x)[,-whichClusters,drop=FALSE]
newClusterInfo<-clusteringInfo(x)[-whichClusters]
newClusterType<-clusterTypes(x)[-whichClusters]
newClusterColors<-clusterLegend(x)[-whichClusters]
orderSamples<-orderSamples(x)
if(primaryClusterIndex(x) %in% whichClusters)
pIndex<-1
else
pIndex<-match(primaryClusterIndex(x),seq_len(NCOL(clusterMatrix(x)))[-whichClusters])
## Fix CoClustering information
## Erase if any are part of clusters to remove
coMat<-coClustering(x)
typeCoCl<-.typeOfCoClustering(x)
if(typeCoCl=="indices"){
if(any(coMat %in% whichClusters)){
warning("removing clusterings that were used in makeConsensus (i.e. stored in CoClustering slot). Will delete the coClustering slot")
coMat<-NULL
}
else{
#Fix so indexes the right clustering...
coMat<-match(coMat,
seq_len(NCOL(clusterMatrix(x)))[-whichClusters])
}
}
#fix merge info:
#erase merge info if either dendro or merge index deleted.
if(mergeClusterIndex(x) %in% whichClusters | x@merge_dendrocluster_index %in% whichClusters){
x<-.eraseMerge(x)
merge_index<-x@merge_index
merge_dendrocluster_index<-x@merge_dendrocluster_index
}
else{
merge_index<-match(x@merge_index, seq_len(NCOL(clusterMatrix(x)))[-whichClusters])
merge_dendrocluster_index<-match(x@merge_dendrocluster_index, seq_len(NCOL(clusterMatrix(x)))[-whichClusters])
}
#fix dendro info
dend_samples <- x@dendro_samples
dend_cl <- x@dendro_clusters
dend_ind<-dendroClusterIndex(x)
if(dendroClusterIndex(x) %in% whichClusters){
dend_cl<-NULL
dend_samples<-NULL
dend_ind<-NA_real_
}
else{
dend_ind<-match(dend_ind,seq_len(NCOL(clusterMatrix(x)))[-whichClusters])
}
retval<-ClusterExperiment(
as(x,"SingleCellExperiment"),
clusters=newClLabels,
transformation=transformation(x),
clusterTypes=newClusterType,
      clusterInfo=newClusterInfo,
primaryIndex=pIndex,
dendro_samples=dend_samples,
dendro_clusters=dend_cl,
dendro_index=dend_ind,
merge_index=merge_index,
merge_dendrocluster_index=merge_dendrocluster_index,
merge_cutoff=x@merge_cutoff,
merge_nodeProp=x@merge_nodeProp,
merge_nodeMerge=x@merge_nodeMerge,
merge_method=x@merge_method,
merge_demethod=x@merge_demethod,
coClustering=coMat,
orderSamples=orderSamples,
clusterLegend=newClusterColors,
checkTransformAndAssay=FALSE
)
return(retval)
}
)
#' @details \code{removeClusters} creates a new clustering that unassigns samples
#' in the clusters \code{clustersToRemove} (in the clustering defined by
#' \code{whichClusters}) and assigns them to -1 (unassigned)
#' @param clustersToRemove numeric vector identifying the clusters to remove
#' (whose samples will be reassigned to -1 value).
#' @rdname subset
#' @inheritParams addClusterings
#' @aliases removeClusters
#' @export
setMethod(
f = "removeClusters",
signature = c("ClusterExperiment"),
definition = function(x,whichCluster,clustersToRemove,makePrimary=FALSE,clusterLabels=NULL) {
whCl<-getSingleClusterIndex(x,whichCluster)
cl<-clusterMatrix(x)[,whCl]
leg<-clusterLegend(x)[[whCl]]
if(is.character(clustersToRemove)){
m<- match(clustersToRemove,leg[,"name"] )
if(any(is.na(m)))
stop("invalid names of clusters in 'clustersToRemove'")
clustersToRemove<-as.numeric(leg[m,"clusterIds"])
}
if(is.numeric(clustersToRemove)){
if(any(!clustersToRemove %in% cl)) stop("invalid clusterIds in 'clustersToRemove'")
if(any(clustersToRemove== -1)) stop("cannot remove -1 clusters using this function. See 'assignUnassigned' to assign unassigned samples.")
cl[cl %in% clustersToRemove]<- -1
}
else stop("clustersToRemove must be either character or numeric")
if(is.null(clusterLabels)){
currlabel<-clusterLabels(x)[whCl]
clusterLabels<-paste0(currlabel,"_unassignClusters")
}
if(clusterLabels %in% clusterLabels(x))
stop("must give a 'clusterLabels' value that is not already assigned to a clustering")
newleg<-leg
if(!"-1" %in% leg[,"clusterIds"] & any(cl== -1)){
newleg<-rbind(newleg,c("-1","white","-1"))
}
whRm<-which(as.numeric(newleg[,"clusterIds"]) %in% clustersToRemove )
if(length(whRm)>0){
newleg<-newleg[-whRm,,drop=FALSE]
}
newCl<-list(newleg)
#names(newCl)<-clusterLabels
return(addClusterings(x, cl, clusterLabels = clusterLabels,clusterLegend=newCl,makePrimary=makePrimary))
}
)
#' @details Note that when subsetting the data, the dendrogram information and
#' the co-clustering matrix are lost.
#' @aliases [,ClusterExperiment,ANY,ANY,ANY-method [,ClusterExperiment,ANY,character,ANY-method
#' @param i,j A vector of logical or integer subscripts, indicating the rows and columns to be subsetted for \code{i} and \code{j}, respectively.
#' @param drop A logical scalar that is ignored.
#' @rdname subset
#' @export
setMethod(
f = "[",
signature = c("ClusterExperiment", "ANY", "character"),
definition = function(x, i, j, ..., drop=TRUE) {
j<-match(j, colnames(x))
callGeneric()
}
)
#' @rdname subset
#' @export
setMethod(
f = "[",
signature = c("ClusterExperiment", "ANY", "logical"),
definition = function(x, i, j, ..., drop=TRUE) {
j<-which(j)
callGeneric()
}
)
#' @rdname subset
#' @export
setMethod(
f = "[",
signature = c("ClusterExperiment", "ANY", "numeric"),
definition = function(x, i, j, ..., drop=TRUE) {
# The following doesn't work once I added the logical and character choices.
# #out <- callNextMethod() #
    # out<-selectMethod("[",c("SingleCellExperiment","ANY","numeric"))(x,i,j) #have to explicitly give the inheritance... not great.
###Note: Could fix subsetting, so that if subset on genes, but same set of samples, doesn't do any of this...
#Following Martin Morgan advice, do "new" rather than @<- to create changed object
#need to subset cluster matrix and convert to consecutive integer valued clusters:
#pull names out so can match it to the clusterLegend.
# subMat<-clusterMatrixNamed(x)[j, ,drop=FALSE]
subMat<-clusterMatrixNamed(x, whichClusters=1:nClusterings(x))[j, ,drop=FALSE] #changed from default "all" because "all" puts primary cluster first so changes order...
    #pull out integers in case the "-1"/"-2" entries have been given different names.
intMat<-clusterMatrix(x)[j,,drop=FALSE]
intMat<-.makeIntegerClusters(as.matrix(intMat))
#danger if not unique names
whNotUniqueNames<-vapply(clusterLegend(x),
FUN=function(mat){
length(unique(mat[,"name"]))!=nrow(mat)
},FUN.VALUE=TRUE)
if(any(whNotUniqueNames)){
warning("Some clusterings do not have unique names; information in clusterLegend will not be transferred to subset.")
subMatInt<-x@clusterMatrix[j, whNotUniqueNames,drop=FALSE]
subMat[,whNotUniqueNames]<-subMatInt
}
nms<-colnames(subMat)
if(nrow(subMat)>0){
      ## Fix the clusterLegend slot, in case a level was lost, and match the new integer values
      ## shouldn't need to give colors, but the function requires the argument
out<-.makeColors(clMat=subMat, clNumMat=intMat, distinctColors=FALSE,colors=massivePalette,
matchClusterLegend=clusterLegend(x),matchTo="name")
newMat<-out$numClusters
colnames(newMat)<-nms
newClLegend<-out$colorList
#fix order of samples so same
newOrder<-rank(x@orderSamples[j])
}
else{
newClLegend<-list()
newOrder<-NA_real_
newMat<-subMat
}
    out<- ClusterExperiment( object=as(selectMethod("[",c("SingleCellExperiment","ANY","numeric"))(x,i,j),"SingleCellExperiment"), #have to explicitly give the inheritance... not great.
clusters = newMat,
transformation=x@transformation,
primaryIndex = x@primaryIndex,
clusterTypes = x@clusterTypes,
clusterInfo=x@clusterInfo,
orderSamples=newOrder,
clusterLegend=newClLegend,
checkTransformAndAssay=FALSE
)
return(out)
}
)
#' @rdname subset
#' @return \code{subsetByCluster} subsets the object by clusters in a clustering
#' and returns a ClusterExperiment object with only those samples
#' @param clusterValue values of the cluster to match to for subsetting
#' @param matchTo whether to match to the cluster name
#' (\code{"name"}) or internal cluster id (\code{"clusterIds"})
#' @export
#' @aliases subsetByCluster
setMethod(
f = "subsetByCluster",
signature = "ClusterExperiment",
definition = function(x,clusterValue,whichCluster="primary",matchTo=c("name","clusterIds")) {
whCl<-getSingleClusterIndex(x,whichCluster)
matchTo<-match.arg(matchTo)
if(matchTo=="name"){
cl<-clusterMatrixNamed(x)[,whCl]
}
else cl<-clusterMatrix(x)[,whCl]
return(x[,which(cl %in% clusterValue)])
}
)
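## Illustrative use of subsetByCluster(), analogous to the removeClusters example above
## (not run; assumes the rsecFluidigm example object and its "mergeClusters" clustering):
## rsec_m01 <- subsetByCluster(rsecFluidigm, clusterValue = "m01", whichCluster = "mergeClusters")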
|
/R/subset.R
|
no_license
|
epurdom/clusterExperiment
|
R
| false | false | 11,961 |
r
|
|
## TODO: figure out a better approach for alignment of dendrogram / image axis labels
# f: SPC with diagnostic boolean variables
# v: named variables
# k: number of groups to highlight
# id: id to print next to dendrogram
diagnosticPropertyPlot <- function(f, v, k, grid.label='pedon_id', dend.label='pedon_id') {
# get internal, unique ID
id <- idname(f)
# extract site data
s <- site(f)
# keep only those variables that exist
v <- names(s)[na.omit(match(v, names(s)))]
## TODO: why would there be NA in here?
# filter NA
no.na.idx <- which(complete.cases(s[, v]))
s <- s[no.na.idx, ]
# save diagnostic properties
m <- s[, v]
# optionally check for any vars that are all FALSE and kick them out
vars.not.missing <- apply(m, 2, any)
# if any are all FALSE, then remove from m and v
if(any(!vars.not.missing)) {
not.missing <- which(vars.not.missing)
m <- m[, not.missing]
v <- v[not.missing]
}
# convert to factors, we have to specify the levels as there are cases with all TRUE or FALSE
m <- as.data.frame(lapply(m, factor, levels=c('FALSE', 'TRUE')))
# make a copy of the matrix for plotting, as numerical data and transpose
m.plot <- t(as.matrix(as.data.frame(lapply(m, as.numeric))))
# compute dissimilarity between profiles
d <- daisy(m, metric='gower')
h.profiles <- as.hclust(diana(d))
# store text labels for dendrogram
h.profiles$labels <- as.character(s[[dend.label]]) # factors will break tiplabels()
p <- as.phylo(h.profiles)
# cut tree at user-specified number of groups
h.cut <- cutree(h.profiles, k=k)
# setup plot layout
layout(matrix(c(1,2), nrow=1, ncol=2), widths=c(1,1))
# get number of vars + number of profiles
n.vars <- ncol(m)
n.profiles <- nrow(m)
# plot profile dendrogram
par(mar=c(1,1,6,1))
plot(p, cex=0.75, label.offset=0.05, y.lim=c(1.125, n.profiles))
tiplabels(pch=15, col=h.cut, cex=1.125, adj=0.52)
## note: transpose converts logical -> character, must re-init factors
# compute dissimilarity between variables
d.vars <- daisy(data.frame(t(m), stringsAsFactors=TRUE), metric='gower')
h.vars <- as.hclust(diana(d.vars))
# order of profiles in dendrogram
o.profiles <- h.profiles$order
# vector of variable names as plotted in dendrogram
o.vars <- h.vars$order
# plot image matrix, with rows re-ordered according to dendrogram
par(mar=c(1,6,6,1))
image(x=1:n.vars, y=1:n.profiles, z=m.plot[o.vars, o.profiles], axes=FALSE, col=c(grey(0.9), 'RoyalBlue'), xlab='', ylab='', ylim=c(0.5, n.profiles+0.5))
axis(side=2, at=1:n.profiles, labels=s[[grid.label]][o.profiles], las=1, cex.axis=0.75)
axis(side=3, at=1:n.vars, labels=v[o.vars], las=2, cex.axis=0.75)
abline(h=1:(n.profiles+1)-0.5)
abline(v=1:(n.vars+1)-0.5)
# return values
rd <- cbind(s[, c(id, grid.label)], g=h.cut)
return(invisible(list(rd=rd, profile.order=o.profiles, var.order=o.vars)))
}
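## Sketch of intended usage (not run); `spc` stands in for a SoilProfileCollection whose
## site() data contain logical diagnostic-feature columns (column names below are hypothetical):
## res <- diagnosticPropertyPlot(spc, v = c('lithic.contact', 'argillic.horizon'), k = 4)
## res$rd # data.frame of profile IDs, grid labels and dendrogram group assignments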
diagnosticPropertyPlot2 <- function(f, v, k, grid.label='pedon_id') {
# get internal, unique ID
id <- idname(f)
# extract site data
s <- site(f)
# keep only those variables that exist
v <- names(s)[na.omit(match(v, names(s)))]
## TODO: why would there be NA in here?
# filter NA
no.na.idx <- which(complete.cases(s[, v]))
s <- s[no.na.idx, ]
# save grid labels
s.gl <- as.character(s[[grid.label]])
# save diagnostic properties
m <- s[, v]
# optionally check for any vars that are all FALSE and kick them out
vars.not.missing <- apply(m, 2, any)
# if any are all FALSE, then remove from m and v
if(any(!vars.not.missing)) {
not.missing <- which(vars.not.missing)
m <- m[, not.missing]
v <- v[not.missing]
}
# convert to factors, we have to specify the levels as there are cases with all TRUE or FALSE
m <- as.data.frame(lapply(m, factor, levels=c('FALSE', 'TRUE')))
# get number of vars + number of profiles
n.vars <- ncol(m)
n.profiles <- nrow(m)
# compute dissimilarity between profiles
d <- daisy(m, metric='gower')
h.profiles <- as.hclust(diana(d))
## note: transpose converts logical -> character, must re-init factors
# compute dissimilarity between variables
d.vars <- daisy(data.frame(t(m), stringsAsFactors=TRUE), metric='gower')
h.vars <- as.hclust(diana(d.vars))
# cut tree at user-specified number of groups
h.cut <- cutree(h.profiles, k=k)
# format for plotting
m.plot <- data.frame(id=s[[id]], m, stringsAsFactors=FALSE)
m.plot.long <- melt(m.plot, id.vars='id')
# convert TRUE/FALSE into factor
m.plot.long$value <- factor(m.plot.long$value, levels=c('FALSE', 'TRUE'))
# order of profiles in dendrogram
o.profiles <- h.profiles$order
# vector of variable names as plotted in dendrogram
o.vars <- h.vars$order
# set factor levels for ordering of level plot
m.plot.long$id <- factor(m.plot.long$id, levels=m.plot$id[o.profiles])
m.plot.long$variable <- factor(m.plot.long$variable, levels=v[o.vars])
# lattice plot
p <- levelplot(value ~ variable * id, data=m.plot.long,
col.regions=c(grey(0.9), 'RoyalBlue'), cuts=1, xlab='', ylab='',
colorkey = FALSE,
scales=list(tck=0, x=list(rot=90), y=list(at=1:length(o.profiles), labels=s.gl[o.profiles])),
legend=list(
right=list(fun=dendrogramGrob, args=list(x = as.dendrogram(h.profiles), side="right", size=15, add=list(
rect=list(fill=h.cut, cex=0.5)))),
top=list(fun=dendrogramGrob, args=list(x=as.dendrogram(h.vars), side="top", size=4))
),
panel=function(...) {
panel.levelplot(...)
# horizontal lines
panel.segments(x0=0.5, y0=1:(n.profiles+1)-0.5, x1=n.vars+0.5, y1=1:(n.profiles+1)-0.5)
# vertical lines
panel.segments(x0=1:(n.vars+1)-0.5, y0=0.5, x1=1:(n.vars+1)-0.5, y1=n.profiles + 0.5)
}
)
# print to graphics device
print(p)
# return values
rd <- cbind(s[, c(id, grid.label)], g=h.cut)
return(invisible(list(rd=rd, profile.order=o.profiles, var.order=o.vars)))
}
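## diagnosticPropertyPlot2() conveys the same information as diagnosticPropertyPlot(), but
## draws the grid with lattice::levelplot() and attaches both dendrograms via
## latticeExtra::dendrogramGrob(); the returned (invisible) list has the same structure
## (rd, profile.order, var.order).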
## failed attempt to include multi-nominal variables
## not going to work with current implementation, mostly due to how colors are mapped to values in image()
# diagnosticPropertyPlot3 <- function(f, v, k, grid.label='pedon_id', dend.label='pedon_id') {
#
# # get internal, unique ID
# id <- idname(f)
#
# # extract site data
# s <- site(f)
#
# # keep only those variables that exist
# v <- names(s)[na.omit(match(v, names(s)))]
#
# ## TODO: why would there be NA in here?
# # filter NA
# no.na.idx <- which(complete.cases(s[, v]))
# s <- s[no.na.idx, ]
#
# # save diagnostic properties
# m <- s[, v]
#
# # keep track of binary / multinominal variables
# binary.vars <- which(sapply(m, class) == 'logical')
# multinom.vars <- which(sapply(m, class) == 'factor')
#
# # setup colors:
# # binary colors, then 'NA' repeated for number of levels in any multinominal data
# binary.cols <- c(grey(0.9), 'RoyalBlue')
# multinom.cols <- rep(NA, times=length(levels(m[, multinom.vars])))
# cols <- c(binary.cols, multinom.cols)
#
# # split: multinominal data are assumed to be a factor
# m.binary <- m[, binary.vars, drop=FALSE]
# if(length(multinom.vars) > 0)
# m.multinom <- m[, multinom.vars, drop=FALSE]
#
# # optionally check for any vars that are all FALSE and kick them out
# vars.not.missing <- sapply(m.binary, any)
#
# # if any are all FALSE, then remove from m and v
# if(any(!vars.not.missing)) {
# not.missing <- which(vars.not.missing)
# m.binary <- m.binary[, not.missing]
# }
#
# # convert binary data into factors, we have to specify the levels as there are cases with all TRUE or FALSE
# m.binary <- as.data.frame(lapply(m.binary, factor, levels=c('FALSE', 'TRUE')))
#
# # merge binary + multinominal data
# if(length(multinom.vars) > 0)
# m <- cbind(m.binary, m.multinom)
# else
# m <- m.binary
#
# # update variable names, in case any were removed due to missingness
# v <- names(m)
#
# # make a copy of the matrix for plotting, as numerical data and transpose
# m.plot <- t(as.matrix(as.data.frame(lapply(m, as.numeric))))
#
# # compute dissimilarity between profiles
# d <- daisy(m, metric='gower')
# h.profiles <- as.hclust(diana(d))
# # store text labels for dendrogram
# h.profiles$labels <- as.character(s[[dend.label]]) # factors will break tiplabels()
# p <- as.phylo(h.profiles)
#
# # cut tree at user-specified number of groups
# h.cut <- cutree(h.profiles, k=k)
#
# # setup plot layout
# layout(matrix(c(1,2), nrow=1, ncol=2), widths=c(1,1))
#
# # get number of vars + number of profiles
# n.vars <- ncol(m)
# n.profiles <- nrow(m)
#
# # plot profile dendrogram
# par(mar=c(1,1,6,1))
# plot(p, cex=0.75, label.offset=0.05, y.lim=c(1.125, n.profiles))
# tiplabels(pch=15, col=h.cut, cex=1.125, adj=0.52)
#
# ## note: transpose converts logical -> character, must re-init factors
# # compute dissimilarity between variables
# d.vars <- daisy(data.frame(t(m), stringsAsFactors=TRUE), metric='gower')
# h.vars <- as.hclust(diana(d.vars))
#
# # order of profiles in dendrogram
# o.profiles <- h.profiles$order
#
# # vector of variable names as plotted in dendrogram
# o.vars <- h.vars$order
#
# # plot image matrix, with rows re-ordered according to dendrogram
# par(mar=c(1,6,6,1))
# image(x=1:n.vars, y=1:n.profiles, z=m.plot[o.vars, o.profiles], axes=FALSE, col=cols, xlab='', ylab='', ylim=c(0.5, n.profiles+0.5))
# axis(side=2, at=1:n.profiles, labels=s[[grid.label]][o.profiles], las=1, cex.axis=0.75)
# axis(side=3, at=1:n.vars, labels=v[o.vars], las=2, cex.axis=0.75)
# abline(h=1:(n.profiles+1)-0.5)
# abline(v=1:(n.vars+1)-0.5)
#
# # return values
# rd <- cbind(s[, c(id, grid.label)], g=h.cut)
# return(invisible(list(rd=rd, profile.order=o.profiles, var.order=o.vars)))
# }
|
/sharpshootR/R/diagnosticPropertyPlot.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 10,277 |
r
|
#' Random Wishart Distributed Matrices
#'
#' Generate \code{n} random matrices, distributed according to the Wishart distribution with parameters \code{Sigma} and \code{df}, W_p(Sigma, df).
#'
#' @inheritParams base::replicate
#' @inheritParams stats::rWishart
#' @param covariance logical on whether a covariance matrix should be generated
#'
#' @return A numeric array of dimension \code{p * p * n}, where each array is a positive semidefinite matrix, a realization of the Wishart distribution W_p(Sigma, df)
#' @export
#'
#' @details If X_1, ..., X_m is a sample of m independent multivariate Gaussians with mean vector 0
#' and covariance matrix Sigma, and X is the m x p matrix whose rows are the X_i, then the distribution of M = X'X is W_p(Sigma, m).
#'
#' @importFrom Matrix rankMatrix
#'
#' @examples rWishart(2, 5, diag(1, 20))
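#' # Added illustration (a sketch, not from the original docs) of the X'X
#' # construction described in the details: the average of many X'X draws,
#' # with X a df-by-p Gaussian sample, should be close to df * Sigma.
#' p <- 3; df <- 10; Sigma <- diag(p)
#' M <- Reduce(`+`, replicate(200, {
#'   X <- matrix(rnorm(df * p), df, p) %*% chol(Sigma)
#'   crossprod(X)
#' }, simplify = FALSE)) / 200
#' round(M / df, 1)  # approximately Sigma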
rWishart <- function(n, df, Sigma, covariance = FALSE, simplify = "array"){
if(rankMatrix(Sigma) < ncol(Sigma)){
ls <- rSingularWishart(n, df, Sigma, covariance, simplify)
}else{
if(df >= ncol(Sigma)){
if(round(df) - df != 0){
ls <- rFractionalWishart(n, df, Sigma, covariance, simplify)
}else{
ls <- rNonsingularWishart(n, df, Sigma, covariance, simplify)
}
}else{
ls <- rPsuedoWishart(n, df, Sigma, covariance, simplify)
}
}
ls
}
|
/R/rWishart.R
|
no_license
|
BenBarnard/rWishart
|
R
| false | false | 1,286 |
r
|
# Helpers for parsing focal observation data (data.table syntax throughout).
library(data.table)

derepeat <- function(behav) {
  # keep the first element of each run of repeated behaviors
  n <- length(behav)
  x <- rep(TRUE, n)
  if (n > 1) {
    for (i in 2:n)
      if (behav[i - 1] == behav[i]) x[i] <- FALSE
  }
  return(x)
}
statvect <- function(time,behav,states) {
out <- array(0,length(states))
for (i in 1:length(behav))
out[match(behav[i],states)] <- time[i]
return(out)
}
statprep <- function(stat,dat,maxsec=630,nsec=10,ctmc=F) {
sdat <- dat[Behavior %in% stat,.(Observation,Behavior,RelativeEventTime,Duration)]
sdat <- sdat[,if ((length(Behavior)>1) & (RelativeEventTime[2] - RelativeEventTime[1]) < 2.6)
.(Behavior[-1],RelativeEventTime[-1],Duration[-1])
else .(Behavior,RelativeEventTime,Duration),by=Observation]
if (!ctmc) {
    sdat[,V3:=pmin(V3, maxsec - V2)]
sdat <- sdat[,.(time=sum(round(V3/nsec))),by=c("Observation","V1")]
sdat <- sdat[,statvect(time,V1,stat),by=Observation]
out <- matrix(sdat$V1,ncol=length(stat),byrow=T)
} else {
sdat <- sdat[sdat[,derepeat(V1),by=Observation]$V1]
sdat[,V2:=V2/(max(V2)+1)]
setnames(sdat,c(2,3),c("Behavior","StartTime"))
sdat[,V3:=NULL]
out <- sdat
}
return(out)
}
countprep <- function(behav,dat) {
pt <- dat[,length(EventName),by=c("Observation","Behavior")]
pt <- dcast.data.table(pt,Observation ~ Behavior,fill=0)
pt <- pt[,which(names(pt) %in% behav),with=F]
return(as.matrix(pt))
}
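
# Quick sanity checks for the helpers above (toy values, not project data):
# derepeat() keeps the first element of each run; statvect() spreads the times
# over the full state vector.
derepeat(c("rest", "rest", "groom", "groom", "rest"))   # TRUE FALSE TRUE FALSE TRUE
statvect(time = c(4, 2), behav = c("groom", "rest"),
         states = c("rest", "groom", "feed"))           # 2 4 0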
|
/parsefocaldata.R
|
no_license
|
thewart/topetho
|
R
| false | false | 1,357 |
r
|
#------------------------------R DataFrame-----------------------------#
#======================== Shinta Aulia Septiani =======================#
#------------Pusat Studi Data Sains(PSDS) Matematika UAD---------------#
## Importing a data frame in R
# Read the CSV file into a data frame
df = read.csv("https://raw.githubusercontent.com/jokoeliyanto/Kelas-Dasar-Pejuang-Data-2.0/main/Super-Store-Dataset.csv")
# Display the data frame
df
## EXERCISES
# 1. Determine which segment has the highest profit!
# See which segments are present
table(df$segment)
# Subset each segment
df_Consumer=df[df['segment']=='Consumer',]
df_Corporate=df[df['segment']=='Corporate',]
df_Home_Office=df[df['segment']=='Home Office',]
print(sum(df_Consumer$profit))
print(sum(df_Corporate$profit))
print(sum(df_Home_Office$profit))
# So the segment with the highest profit is Consumer, with a profit of 128959.2
# 2. Determine which category has the most sales!
# See which categories are present
table(df$category)
# Subset each category
df_Office_Supplies=df[df['category']=='Office Supplies',]
df_Furniture=df[df['category']=='Furniture',]
df_Technology=df[df['category']=='Technology',]
print(sum(df_Office_Supplies$sales))
print(sum(df_Furniture$sales))
print(sum(df_Technology$sales))
# So the category with the most sales is Technology, with sales of 755815.7
# 3. Determine the sub-category with the smallest quantity!
# See which sub-categories are present
table(df$sub_category)
# Subset each sub-category
df_Binders=df[df['sub_category']=='Binders',]
df_Paper=df[df['sub_category']=='Paper',]
df_Furnishings=df[df['sub_category']=='Furnishings',]
df_Phones=df[df['sub_category']=='Phones',]
df_Storage=df[df['sub_category']=='Storage',]
df_Art=df[df['sub_category']=='Art',]
df_Accessories=df[df['sub_category']=='Accessories',]
df_Chairs=df[df['sub_category']=='Chairs',]
df_Appliances=df[df['sub_category']=='Appliances',]
df_Labels=df[df['sub_category']=='Labels',]
df_Tables=df[df['sub_category']=='Tables',]
df_Envelopes=df[df['sub_category']=='Envelopes',]
df_Bookcases=df[df['sub_category']=='Bookcases',]
df_Fasteners=df[df['sub_category']=='Fasteners',]
df_Supplies=df[df['sub_category']=='Supplies',]
df_Machines=df[df['sub_category']=='Machines',]
df_Copiers=df[df['sub_category']=='Copiers',]
print(sum(df_Binders$quantity))
print(sum(df_Paper$quantity))
print(sum(df_Furnishings$quantity))
print(sum(df_Phones$quantity))
print(sum(df_Storage$quantity))
print(sum(df_Art$quantity))
print(sum(df_Accessories$quantity))
print(sum(df_Chairs$quantity))
print(sum(df_Appliances$quantity))
print(sum(df_Labels$quantity))
print(sum(df_Tables$quantity))
print(sum(df_Envelopes$quantity))
print(sum(df_Bookcases$quantity))
print(sum(df_Fasteners$quantity))
print(sum(df_Supplies$quantity))
print(sum(df_Machines$quantity))
print(sum(df_Copiers$quantity))
# So the sub-category with the smallest quantity is Copiers, with a quantity of 218
# 4. Determine the month with the highest profit!
str(df)
class(df$order_date)
# Convert order_date to the Date type
df$order_date <- as.Date(df$order_date, "%m/%d/%Y")
# Check the data types
str(df)
# Add a month column
df$month <- format(df$order_date, format = "%B")
df
# Inspect the contents of the month column
table(df$month)
# Check the data types
str(df)
# Subset each month
df_April=df[df['month']=='April',]
df_August=df[df['month']=='August',]
df_December=df[df['month']=='December',]
df_February=df[df['month']=='February',]
df_January=df[df['month']=='January',]
df_July=df[df['month']=='July',]
df_June=df[df['month']=='June',]
df_March=df[df['month']=='March',]
df_May=df[df['month']=='May',]
df_November=df[df['month']=='November',]
df_October=df[df['month']=='October',]
df_September=df[df['month']=='September',]
print(sum(df_April$profit))
print(sum(df_August$profit))
print(sum(df_December$profit))
print(sum(df_February$profit))
print(sum(df_January$profit))
print(sum(df_July$profit))
print(sum(df_June$profit))
print(sum(df_March$profit))
print(sum(df_May$profit))
print(sum(df_November$profit))
print(sum(df_October$profit))
print(sum(df_September$profit))
# So the month with the highest profit is December
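
# A more concise alternative for the group totals above, using aggregate():
aggregate(profit ~ segment, data = df, FUN = sum)
aggregate(sales ~ category, data = df, FUN = sum)
aggregate(quantity ~ sub_category, data = df, FUN = sum)
aggregate(profit ~ month, data = df, FUN = sum)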
|
/Shinta Aulia Septiani_1800015054_Tugas_R_DataFrame.R
|
no_license
|
shintaaulia/Basic-R-Programming-for-Data-Science
|
R
| false | false | 4,356 |
r
|
#' Standardize counts relative to uncleaved counts.
#'
#' There are two different ways to handle standardization of
#' cleaved vs uncleaved. For each of the cleaved and uncleaved counts,
#' we first convert to the proportion of reads, and then either look at the
#' difference in proportions (additive) or the ratio of the proportions
#' (multiplicative). In the additive case, we might want to normalize
#' the differences based on the differing amounts in the reference library.
#' @param cleaved The cleaved raw counts
#' @param uncleaved The uncleaved raw counts
#' @param ref The raw counts of the reference library. Ideally these should be
#' identical, but the library likely isn't equally weighted
#' @param type Either `additive`, `multiplicative`, or `complex`.
#' @examples
#' df <- data.frame( cleaved = c(20, 10,5),
#' uncleaved = c(5, 5, 2),
#' ref = c(3, 3, 2) )
#' with(df, standardize( cleaved, uncleaved, ref ) )
#' with(df, standardize( cleaved, uncleaved, type='multiplicative' ) )
#' @export
standardize <- function(cleaved, uncleaved, ref=NULL, type='additive'){
# Make a data frame of the input stuff
df <- data.frame( cleaved=cleaved, uncleaved=uncleaved )
# Make the reference group default correct
if( is.null(ref) ){
df$ref = 1
}else{
df$ref = ref
}
  # TODO: add error checking to make sure the reference counts aren't too
  # small; zeros would be a huge problem for the reference.
# standardize for the read depth
df <- df %>%
mutate( cleaved = cleaved / sum( cleaved, na.rm=TRUE ),
uncleaved = uncleaved / sum( uncleaved, na.rm=TRUE ) )
# Now set the background rates for cleaved and uncleaved to be the same.
# background_scale <-
# (df %>% pull(cleaved) %>% shrink( proportion=.5, side='top') %>% mean() ) /
# (df %>% pull(uncleaved) %>% shrink( proportion=.5, side='top') %>% mean() )
# df <- df %>%
# mutate( cleaved = cleaved / background_scale )
# Now make the standardization.
if( type == 'additive' ){
df <- df %>% mutate( signal = (cleaved - uncleaved) / ref )
}else if(type == 'multiplicative'){
df <- df %>% mutate( signal = cleaved / uncleaved )
}else if(type == 'complex'){
# This is for some experimental work
df <- df %>% mutate( signal = cleaved - uncleaved )
}else{
stop("type must be either 'additive', 'multiplicative', 'complex', or 'none' ")
}
# I want the maximum signal to live between 100 and 1000
# if( scale == TRUE ){
# scale <- df %>% select(signal) %>% drop_na() %>% filter( signal < Inf) %>% pull(signal)
# scale <- (1 / max(scale)) %>% log10() %>% ceiling() %>% (function(x){10^x})
# df$signal <- df$signal * scale * 100
# }
return(df$signal)
}
#' Standardize the cleaved and uncleaved as well as create the signal
#'
#' There are several cases where we want to both standardize the cleaved/uncleaved
#' as well as calculate the signal terms
#'
#' @param df A data frame with columns cleaved and uncleaved. If the data frame is
#' already grouped, then all the standardization occurs within a group.
#' @param type Either `additive`, `multiplicative`, or `complex`.
#' @param scale Should we scale the signal to have the maximum between 100 and 1000?
#' @param trim_proportion In the rescaling, what percent of the large values should be
#' removed to get to a background rate.
#' @return A data frame with columns new columns cleaved_Z, uncleaved_Z, and signal. The rows correspond
#' to the rows in the input data.frame.
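#' @examples
#' # Minimal usage sketch (toy column names and grouping are assumptions; the
#' # package's internal shrink() helper and dplyr must be available):
#' \dontrun{
#' toy <- dplyr::group_by(
#'   data.frame(sample    = rep(c("A", "B"), each = 3),
#'              cleaved   = c(20, 10, 5, 8, 4, 2),
#'              uncleaved = c(5, 5, 2, 6, 5, 3)),
#'   sample)
#' full_standardize(toy, type = "additive", scale = FALSE)
#' }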
#' @export
full_standardize <- function(df, type='additive', scale=TRUE, trim_proportion=0.25){
# standardize for the read depth
df <- df %>%
mutate( cleaved_Z = cleaved / sum( cleaved, na.rm=TRUE),
uncleaved_Z = uncleaved / sum(uncleaved, na.rm=TRUE) )
# Now set the background rates for cleaved and uncleaved to be the same.
background_scale <- df %>% summarize(
cleaved_background = cleaved_Z %>% shrink( proportion=trim_proportion, side='top') %>% mean(na.rm=TRUE),
uncleaved_background = uncleaved_Z %>% shrink( proportion=trim_proportion, side='top') %>% mean(na.rm=TRUE) ) %>%
mutate( background_scale = uncleaved_background / cleaved_background ) %>%
mutate( background_scale = ifelse( background_scale == 0, 1, background_scale ),
background_scale = ifelse( is.nan(background_scale), 1, background_scale ),
background_scale = ifelse( is.na(background_scale), 1, background_scale ),
background_scale = ifelse( is.infinite(background_scale), 1, background_scale ))
df <- df %>% left_join(background_scale, by=group_vars(df) ) %>%
mutate( cleaved_Z = cleaved_Z * background_scale ) %>%
select( -cleaved_background, -uncleaved_background, -background_scale )
# Now make the standardization.
if( type == 'additive' ){
df <- df %>% mutate( signal = (cleaved_Z - uncleaved_Z) )
}else if(type == 'multiplicative'){
df <- df %>% mutate( signal = cleaved_Z / uncleaved_Z )
}else if(type == 'complex'){
# This is for some experimental work
df <- df %>% mutate( signal = cleaved_Z - uncleaved_Z )
}else{
stop("type must be either 'additive', 'multiplicative', 'complex', or 'none' ")
}
# I want the maximum signal to live between 100 and 1000
if( scale == TRUE ){
scale_df <-
df %>% select(group_cols(), signal) %>%
drop_na() %>% filter( signal < Inf) %>%
summarize(scale = 1/max(signal) ) %>%
mutate( scale = (scale %>% log10() %>% ceiling()) )
df <- df %>% left_join(scale_df, by=group_vars(.) ) %>%
mutate( signal = signal * 10^scale * 100 ) %>%
select( -scale )
}
return(df)
}
|
/R/Standardization.R
|
no_license
|
PaulDanPhillips/PepSeq
|
R
| false | false | 5,885 |
r
|
library(FlexGAM)
### Name: flexgam
### Title: Estimation of generalized additive model with flexible response
### function
### Aliases: flexgam
### ** Examples
set.seed(1)
n <- 1000
x1 <- runif(n)
x2 <- runif(n)
x3 <- runif(n)
eta_orig <- -1 + 2*sin(6*x1) + exp(x2) + x3
pi_orig <- pgamma(eta_orig, shape=2, rate=sqrt(2))
y <- rbinom(n,size=1,prob=pi_orig)
Data <- data.frame(y,x1,x2,x3)
formula <- y ~ s(x1,k=20,bs="ps") + s(x2,k=20,bs="ps") + x3
# Fix smoothing parameters to save computational time.
control2 <- list("fix_smooth" = TRUE, "quietly" = TRUE, "sm_par_vec" =
c("lambda" = 100, "s(x1)" = 2000, "s(x2)" = 9000))
set.seed(2)
model_2 <- flexgam(formula=formula, data=Data, type="FlexGAM2",
family=binomial(link=logit), control = control2)
print(model_2)
summary(model_2)
plot(model_2, type = "response")
plot(model_2, type = "covariate")
|
/data/genthat_extracted_code/FlexGAM/examples/flexgam.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 906 |
r
|
library(quantmod)
startDate = '1995-01-01'
endDate = '2014-04-30'
MA_DAYS = 200
getSymbols('JPM', from=startDate, to=endDate)
closePrices = Cl(JPM)
# unused leftovers from another symbol; YESBANK.NS is never fetched above
# A = Op(YESBANK.NS)
# B = Hi(YESBANK.NS)
closePrices=na.omit(closePrices)
closePrices = as.numeric(closePrices)
N_DAYS = length(closePrices)
MA = SMA( closePrices, MA_DAYS )
signal = "inCash"
buyPrice = 0.0
sellPrice = 0.0
maWealth = 1.0
for(d in (MA_DAYS+1):N_DAYS)
{
#buy if Stockprice > MA & if not bought yet
if((closePrices[d] > MA[d]) && (signal == "inCash"))
{
buyPrice = closePrices[d]
signal = "inStock"
}
#sell if (Stockprice < MA OR endDate reached)
# & there is something to sell
if(((closePrices[d] < MA[d]) || (d == N_DAYS)) && (signal == "inStock"))
{
sellPrice = closePrices[d]
signal = "inCash"
maWealth = maWealth * (sellPrice / buyPrice)
}
}
plot(closePrices)
bhWealth = closePrices[N_DAYS] / closePrices[(MA_DAYS+1)]
wealthDiffs = bhWealth - maWealth
bhWealth
maWealth
wealthDiffs
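
# Vectorized cross-check of the loop above (a sketch; it should match maWealth
# except in the rare case where a close exactly equals its moving average):
idx    <- (MA_DAYS + 2):N_DAYS
inMkt  <- closePrices[idx - 1] > MA[idx - 1]
dayRet <- closePrices[idx] / closePrices[idx - 1] - 1
maWealthCheck <- prod(1 + inMkt * dayRet)
maWealthCheck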
|
/Faber-Strategy-Single-Stock.R
|
no_license
|
savio2928/R-Code
|
R
| false | false | 1,040 |
r
|
ReadIMX <- function(FileName = NULL,InDirectory=getwd()){
# MapMask <- ReadIMX(FileName,InDirectory);
# read a mask for a tiled UMX to display a ESOM-map
#
# INPUT
# FileName the name of the file to read
#
# OPTIONAL
# InDirectory the directory where *.imx is, default: current dir
#
# OUTPUT
  # MapMask           a binary array: 0 where the map is displayed, 1 otherwise
# author Michael Thrun
if (is.null(FileName)) {
res <- ask2loadFile(".imx")
if (is.null(res))
stop("no file selected")
FileName = res$FileName
InDirectory = res$InDirectory
}
FileName = addext(FileName, 'imx')
CurrentDir = getwd()
setwd(InDirectory)
Z = read.table(
FileName,
comment.char = "#",
header = FALSE,
stringsAsFactors = TRUE,
fill = TRUE,
na.strings = c('NA', 'NaN')
  ) # previously: stringsAsFactors = FALSE
Data = Z[2:nrow(Z),]
Data = as.matrix(Data)
mode(Data) = 'numeric'
Data[which(is.na(Data))] = NaN
rownames(Data) = 1:nrow(Data)
setwd(CurrentDir)
return(Data)
}# end function ReadIMX
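
# Usage sketch (the file name and folder below are placeholders, not shipped data;
# addext() and ask2loadFile() from the same package must be available):
# MapMask <- ReadIMX("MyESOMMap.imx", InDirectory = "C:/ESOM/maps")
# image(MapMask)   # 0 = map area, 1 = masked background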
|
/R/ReadIMX.R
|
no_license
|
aultsch/DataIO
|
R
| false | false | 1,135 |
r
|
# Set query
#' @include utils.R
build_query <- function(...) {
query <- list(...)
query <- compact(query)
query <- fix_query(query)
return(query)
}
# Fix query fields
#' @include utils.R
fix_query <- function(query) {
stopifnot(is.list(query))
if (!grepl("^ga:", query$profile.id))
query$profile.id <- paste0("ga:", query$profile.id)
snames <- c("metrics", "dimensions", "sort")
query[names(query) %in% snames] <- lapply(query[names(query) %in% snames], strip_spaces)
onames <- c("filters", "segment")
query[names(query) %in% onames] <- lapply(query[names(query) %in% onames], strip_ops)
dnames <- c("start.date", "end.date")
query[names(query) %in% dnames] <- lapply(query[names(query) %in% dnames], as.character)
if (!is.empty(query$sampling.level))
query$sampling.level <- toupper(query$sampling.level)
  stopifnot(all(lengths(query) <= 1L))
stopifnot(all(vapply(query, is.vector, logical(1))))
return(query)
}
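
# Illustrative call (kept as comments because the values are placeholders and
# compact()/strip_spaces()/strip_ops()/is.empty() are internal helpers from utils.R):
# q <- build_query(profile.id = "12345678",
#                  start.date = "2019-01-01", end.date = "2019-01-31",
#                  metrics = "ga:sessions, ga:users", dimensions = "ga:date")
# q$profile.id   # "ga:12345678" -- prefix added by fix_query()
# q$metrics      # spaces stripped by strip_spaces()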
|
/R/query.R
|
no_license
|
Timmwardion/RDFA
|
R
| false | false | 1,000 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_functions4Fitting.R
\name{rmarkovchain}
\alias{rmarkovchain}
\title{Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.}
\usage{
rmarkovchain(n, object, what = "data.frame", useRCpp = TRUE,
parallel = FALSE, num.cores = NULL, ...)
}
\arguments{
\item{n}{Sample size}
\item{object}{Either a \code{markovchain} or a \code{markovchainList} object}
\item{what}{It specifies whether either a \code{data.frame} or a \code{matrix}
(each rows represent a simulation) or a \code{list} is returned.}
\item{useRCpp}{Boolean. Should the fast Rcpp implementation be used? Default is yes.}
\item{parallel}{Boolean. Should the parallel implementation be used? Default is no.}
\item{num.cores}{Number of Cores to be used}
\item{...}{additional parameters passed to the internal sampler}
}
\value{
Character Vector, data.frame, list or matrix
}
\description{
Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of
states coming from the underlying stationary distribution.
}
\details{
When a homogeneous process is assumed (\code{markovchain} object), a sequence of
size n is sampled. When a non-homogeneous process is assumed,
n samples are taken, and the process is assumed to last from the beginning to the end of the
non-homogeneous Markov process.
}
\note{
Check the type of input
}
\examples{
# define the markovchain object
statesNames <- c("a", "b", "c")
mcB <- new("markovchain", states = statesNames,
transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1),
nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames)))
# show the sequence
outs <- rmarkovchain(n = 100, object = mcB, what = "list")
#define markovchainList object
statesNames <- c("a", "b", "c")
mcA <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mcB <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mcC <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3,
byrow = TRUE, dimnames = list(statesNames, statesNames)))
mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC))
# show the list of sequence
rmarkovchain(100, mclist, "list")
}
\references{
A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
}
\seealso{
\code{\link{markovchainFit}}
}
\author{
Giorgio Spedicato
}
|
/man/rmarkovchain.Rd
|
no_license
|
bestwpw/markovchain
|
R
| false | true | 2,745 |
rd
|
#' insert.ui2
#'
#' Inserts five rows of inputs (a delete checkbox plus value and label text
#' fields) into the element with id \code{placeholder2}. Meant to be called
#' from within a Shiny server function.
#' @keywords insert
#' @export
#' @examples
#' \dontrun{insert.ui2()}
#' @importFrom magrittr %>%
#'
insert.ui2<-function() {
lapply(1:5,function(x1) {
insertUI (
selector="#placeholder2",
ui=tags$div(fluidRow(column(1,checkboxInput(inputId=paste("cdel",x1,sep=""),label="")),
column(2,textInput(inputId=paste("cval",x1,sep=""),
label="",
value="")),
column(5,textInput(inputId=paste("clab",x1,sep=""),
label="",
value=""#,
# width=paste0(ifelse(ich<3&!is.numeric(ich),60,
# ich*20),"px")
))),
id=paste0("create",x1)))
#inserted<<-c(paste0("ins",x1),inserted)
})
}
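
# Minimal wiring sketch (not part of the original package; the button id and app
# layout are assumptions) showing where insert.ui2() is meant to be called from:
demo_insert_ui2 <- function() {
  ui <- shiny::fluidPage(
    shiny::actionButton("add_rows", "Add rows"),
    shiny::tags$div(id = "placeholder2")
  )
  server <- function(input, output, session) {
    shiny::observeEvent(input$add_rows, insert.ui2())
  }
  shiny::shinyApp(ui, server)
}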
|
/R/insert.ui2.R
|
no_license
|
senickel/surveycleanup
|
R
| false | false | 1,101 |
r
|
library(eplusr)
### Name: Idf
### Title: Read, modify, and run an EnergyPlus model
### Aliases: Idf
### ** Examples
# ===== CREATE =====
# read an IDF file
idf <- read_idf(system.file("extdata/1ZoneUncontrolled.idf", package = "eplusr"),
idd = use_idd(8.8, download = "auto"))
# ===== MODEL BASIC INFO =====
# get version
idf$version()
# get path
idf$path()
# get names of all groups in current model
str(idf$group_name())
# get names of all defined groups in the IDD
str(idf$group_name(all = TRUE))
# get names of all classes in current model
str(idf$class_name())
# get names of all defined classes in the IDD
str(idf$class_name(all = TRUE))
# check if input is a valid group name in current model
idf$is_valid_group("Schedules")
idf$is_valid_group("Compliance Objects")
# check if input is a valid group name in IDD
idf$is_valid_group("Compliance Objects", all = TRUE)
# check if input is a valid class name in current model
idf$is_valid_class("Building")
idf$is_valid_class("ShadowCalculation")
# check if input is a valid class name in IDD
idf$is_valid_class("ShadowCalculation", all = TRUE)
# ===== OBJECT DEFINITION (IDDOBJECT) =====
# get the a list of underlying IddObjects
idf$definition("Version")
# ===== OBJECT INFO =====
# get IDs of objects in classes
idf$object_id(c("Version", "Zone"))
# when `simplify` is TRUE, an integer vector will be returned instead of a
# named list
idf$object_id(c("Version", "Zone"), simplify = TRUE)
# get names of objects in classes
# NA will be returned if targeted class does not have a name attribute
idf$object_name(c("Building", "Zone", "Version"))
# if `simplify` is TRUE, a character vector will be returned instead of a
# named list
idf$object_name(c("Building", "Zone", "Version"), simplify = TRUE)
# get number of objects in classes
idf$object_num(c("Zone", "Schedule:Compact"))
# check if input is a valid object ID, i.e. there is an object whose ID is
# the same as the input integer
idf$is_valid_id(c(51, 1000))
# check if input is a valid object name, i.e., there is an object whose name is
# the same as the input string
idf$is_valid_name(c("Simple One Zone (Wireframe DXF)", "ZONE ONE"))
# ===== OBJECT QUERY =====
# get objects using object IDs or names
idf$object(c(3,10))
# NOTE: object name matching is case-insensitive
idf$object(c("Simple One Zone (Wireframe DXF)", "zone one"))
# the names of returned list are "underscore-style" object names
names(idf$object(c("Simple One Zone (Wireframe DXF)", "zone one")))
# get all objects in classes in a named list
idf$object_in_class("Zone")
names(idf$object_in_class("Zone"))
# OR using shortcuts
idf$Zone
idf[["Zone"]]
# search objects using regular expression
length(idf$search_object("R13"))
names(idf$search_object("R13"))
# search objects using regular expression in a specific class
length(idf$search_object("R13", class = "Construction"))
# get more controls on matching using `stringr::regex()`
names(idf$search_object(stringr::regex("zn.*1.*wall", ignore_case = TRUE)))
# ===== DUPLICATE OBJECTS =====
# duplicate objects in "Construction" class
names(idf$Construction)
idf$dup_object("R13WALL")
# new objects will have the same names as the duplicated objects but with a
# suffix "_1", "_2" and etc.
names(idf$Construction)
# new names can also be explicitly specified
idf$dup_object("R13WALL", new_name = "My-R13Wall")
# duplicate an object multiple times
## Not run: idf$dup_object(rep("R13WALL", time = 10))
# ===== ADD OBJECTS =====
# add two new objects in "RunPeriod" class
idf$add_object(rep("RunPeriod", 2),
value = list(
list("rp_test_1", 1, 1, 2, 1),
list(name = "rp_test_2",
begin_month = 3,
begin_day_of_month = 1,
end_month = 4,
end_day_of_month = 1)
),
comment = list(
list("Comment for new object 1", "Another comment"),
list("Comment for new object 2")),
default = TRUE
)
# ===== INSERT OBJECTS =====
# insert objects from other Idf object
idf_1 <- read_idf(system.file("extdata/1ZoneUncontrolled.idf", package = "eplusr"),
idd = use_idd(8.8, download = "auto"))
idf_1$object_name("Material")
# rename material name from "C5 - 4 IN HW CONCRETE" to "test", otherwise
# insertion will be aborted as there will be two materials with the same name
# in the idf
idf_1$Material$C5_4_IN_HW_CONCRETE$set_value(name = "test")
# insert the object
idf$ins_object(idf_1$Material$test)
# check if material named "test" is there
idf$object_name("Material")
# $ins_object() is useful when importing design days from a ".ddy" file
## Not run: idf$ins_object(read_idf("foo.ddy"))
# ===== SET OBJECTS =====
# set the thickness of newly inserted material "test" to 0.2 m
idf$set_object("test", value = list(thickness = 0.2))
idf$Material$test$Thickness
# set thermal absorptance of all material to 0.85
id_mat <- idf$object_id("Material", simplify = TRUE)
idf$set_object(id_mat,
value = rep(
list(list(thermal_absorptance = 0.85)),
times = length(id_mat)
)
)
# check results
lapply(idf$Material, function (mat) mat$Thermal_Absorptance)
# reset thermal absorptance of all material to the default
idf$set_object(id_mat,
value = rep(
list(list(thermal_absorptance = NA)),
times = length(id_mat)
),
default = TRUE
)
# check results
lapply(idf$Material, function (mat) mat$Thermal_Absorptance)
# ===== DELELTE OBJECTS =====
# delete the added run period "rp_test_1", "rp_test_2" and "test" from above
idf$del_object(c("test", "rp_test_1", "rp_test_2"))
names(idf$Material)
names(idf$RunPeriod)
# In "final" validate level, delete will be aborted if the target obejcts are
# referenced by other objects.
# get objects that referenced material "R13LAYER"
eplusr_option("validate_level")
idf$Material_NoMass$R13LAYER$ref_by_object()
length(idf$Material_NoMass$R13LAYER$ref_by_object())
## Not run: idf$del_object("R13LAYER") # will give an error in "final" validate level
# objects referencing target objects can also be deleted by setting `referenced`
# to TRUE
## Not run: idf$del_object("R13LAYER", referenced = TRUE) # will give an error in "final" validate level
# ===== SEARCH AND REPLACE OBJECT VALUES =====
# get objects whose field values contain "WALL"
idf$search_value("WALL")
length(idf$search_value("WALL"))
names(idf$search_value("WALL"))
# replace values using regular expression
# NOTE: No field validation will be performed! Should be treated as a low-level
# method. Use with caution.
idf$replace_value("WALL", "A_WALL")
# ===== VALIDATE MODEL =====
# CRAN does not like long-time tests
## Not run:
##D # check if there are errors in current model
##D idf$validate()
##D idf$is_valid()
##D
##D # change validate level to "none", which will enable invalid modifications
##D eplusr_option(validate_level = "none")
##D
##D # change the outside layer of floor to an invalid material
##D idf$set_object("FLOOR", list(outside_layer = "wrong_layer"))
##D
##D # change validate level back to "final" and validate the model again
##D eplusr_option(validate_level = "final")
##D
##D idf$validate()
##D idf$is_valid()
##D
##D # get IDs of all objects that contains invalid reference fields
##D idf$validate()$invalid_reference$object_id
##D
##D # fix the error
##D idf$set_object(16, list(outside_layer = idf$Material[[1]]$name()))
##D idf$validate()
##D idf$is_valid()
## End(Not run)
# ===== FORMAT MODEL =====
# get text format of the model
str(idf$string())
# get text format of the model, excluding the header and all comments
str(idf$string(comment = FALSE, header = FALSE))
# ===== SAVE MODEL =====
# check if the model has been modified since read or last saved
idf$is_unsaved()
# save and overwrite current model
## Not run: idf$save(overwrite = TRUE)
# save the model with newly created and modified objects at the top
## Not run: idf$save(overwrite = TRUE, format = "new_top")
# save the model to a new file
idf$save(path = file.path(tempdir(), "test.idf"))
# save the model to a new file and copy all external csv files used in
# "Schedule:File" class into the same folder
idf$save(path = file.path(tempdir(), "test1.idf"), copy_external = TRUE)
# the path of this model will be changed to the saved path
idf$path()
# ===== CLONE MODEL =====
# Idf object are modified in place and has reference semantic.
idf_2 <- idf
idf_2$object_name("Building")
idf$object_name("Building")
# modify idf_2 will also affect idf as well
idf_2$Building[[1]]$set_value(name = "Building_Name_Changed")
idf_2$object_name("Building")
idf$object_name("Building")
# in order to make a copy of an Idf object, use $clone() method
idf_3 <- idf$clone()
idf_3$Building[[1]]$set_value(name = "Building_Name_Changed_Again")
idf_3$object_name("Building")
idf$object_name("Building")
# run the model
## Not run:
##D if (is_avail_eplus(8.8)) {
##D
##D # save the model to tempdir()
##D idf$save(file.path(tempdir(), "test_run.idf"))
##D
##D # use the first epw file in "WeatherData" folder in EnergyPlus v8.8
##D # installation path
##D epw <- list.files(file.path(eplus_config(8.8)$dir, "WeatherData"),
##D pattern = "\\.epw$", full.names = TRUE)[1]
##D basename(epw)
##D # [1] "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"
##D
##D # if `dir` is NULL, the directory of IDF file will be used as simulation
##D # output directory
##D job <- idf$run(epw, dir = NULL)
##D
##D # run simulation in the background
##D idf$run(epw, dir = tempdir(), wait = FALSE)
##D
##D # copy all external files into the directory run simulation
##D idf$run(epw, dir = tempdir(), copy_external = TRUE)
##D
##D # check for simulation errors
##D job$errors()
##D
##D # get simulation status
##D job$status()
##D
##D # get output directory
##D job$output_dir()
##D
##D # re-run the simulation
##D job$run()
##D
##D # get simulation results
##D job$report_data()
##D }
## End(Not run)
# print the text format of model
idf$print(plain = TRUE)
|
/data/genthat_extracted_code/eplusr/examples/Idf.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 10,069 |
r
|
rm(list=ls())
library(dplyr)
##2018Sep27: 30 repetitions, reduce range of topn
##2018July9: Adapted from serodiscordant.R
# Set Common Parameters ----
# seed
global.random.seed <- 1:30
# prep
#prep.scaleup.targets <- seq(20, 60, by=10)/100
prep.scaleup.targets <- c(20, 30, 40, 50, 60)/100
# Set Eigen-Specific Parameters ----
#prep.uptake <- 'eigen' #set in function call
eigen.base.prep.bl.use.prop.lt <- 12.7/100
eigen.base.prep.bl.use.prop.gte <- 14.7/100
eigen.intrv.prep.years.to.increment <- 5
eigen.intrv.prep.yearly.increment.lt <- c(0, prep.scaleup.targets - eigen.base.prep.bl.use.prop.lt)/eigen.intrv.prep.years.to.increment
eigen.intrv.prep.yearly.increment.gte <- c(0, prep.scaleup.targets - eigen.base.prep.bl.use.prop.gte)/eigen.intrv.prep.years.to.increment
#eigen.intrv.prep.topn <- c(0, 1:4, seq(5, 50, by=5))/100
#eigen.intrv.prep.topn <- c(0, 1, seq(10, 50, by=10))/100
eigen.intrv.prep.topn <- c(0.1, 1, 10, 25, 50)/100
# Fix calibration values to "instance_242" #updated on 2018-05-25 ----
acute.mult=5
late.mult=1
min.chronic.infectivity.unadj=0.000922913
prop.steady.sex.acts=0.1893571
prop.casual.sex.acts=0.053
circum.mult=1
## make parameter grid
df <-
expand.grid(
global.random.seed = global.random.seed, #seed
prep.uptake = 'eigen',
eigen.base.prep.bl.use.prop.lt = eigen.base.prep.bl.use.prop.lt,
eigen.intrv.prep.yearly.increment.lt = eigen.intrv.prep.yearly.increment.lt,
eigen.intrv.prep.topn = eigen.intrv.prep.topn,
acute.mult = acute.mult, #fix calibration inputs
late.mult = late.mult,
min.chronic.infectivity.unadj = min.chronic.infectivity.unadj,
prop.steady.sex.acts = prop.steady.sex.acts,
prop.casual.sex.acts = prop.casual.sex.acts,
circum.mult = circum.mult
)
df$eigen.base.prep.bl.use.prop.gte = rep(eigen.base.prep.bl.use.prop.gte, each=length(global.random.seed))
df$eigen.intrv.prep.yearly.increment.gte = rep(eigen.intrv.prep.yearly.increment.gte, each=length(global.random.seed))
run.number <- 1:nrow(df)
df <- cbind(run.number, df)
ndf <- names(df)
l1 <- lapply(1:ncol(df), function(x) paste0(ndf[x],"=",df[[ndf[x]]]))
res <- do.call(paste,c(l1,c(sep = ",")))
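# l1 holds one "name=value" character vector per column of df; pasting them element-wise
# with sep="," turns each row of the parameter grid into a single
# "name=value,name=value,..." string, which is the line format written out below.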
## write as text
write(res, file = "eigen.txt")
## write df as separate csv to make it easier to search
write.csv(df, "eigen.csv")
# Set Degree-Specific Parameters ----
## make parameter grid
df <-
expand.grid(
global.random.seed = global.random.seed, #seed
prep.uptake = 'degree',
degree.base.prep.bl.use.prop.lt = eigen.base.prep.bl.use.prop.lt,
degree.intrv.prep.yearly.increment.lt = eigen.intrv.prep.yearly.increment.lt,
degree.intrv.prep.topn = eigen.intrv.prep.topn,
acute.mult = acute.mult, #fix calibration inputs
late.mult = late.mult,
min.chronic.infectivity.unadj = min.chronic.infectivity.unadj,
prop.steady.sex.acts = prop.steady.sex.acts,
prop.casual.sex.acts = prop.casual.sex.acts,
circum.mult = circum.mult
)
df$degree.base.prep.bl.use.prop.gte = rep(eigen.base.prep.bl.use.prop.gte, each=length(global.random.seed))
df$degree.intrv.prep.yearly.increment.gte = rep(eigen.intrv.prep.yearly.increment.gte, each=length(global.random.seed))
run.number <- 1:nrow(df)
df <- cbind(run.number, df)
ndf <- names(df)
l1 <- lapply(1:ncol(df), function(x) paste0(ndf[x],"=",df[[ndf[x]]]))
res <- do.call(paste,c(l1,c(sep = ",")))
## write as text
write(res, file = "degree.txt")
## write df as separate csv to make it easier to search
write.csv(df, "degree.csv")
|
/transmission_model/swift_proj/data/network-intervention.R
|
no_license
|
khanna7/BARS
|
R
| false | false | 3,507 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Gamma.R
\name{random.Gamma}
\alias{random.Gamma}
\title{Draw a random sample from a Gamma distribution}
\usage{
\method{random}{Gamma}(d, n = 1L, ...)
}
\arguments{
\item{d}{A \code{Gamma} object created by a call to \code{\link[=Gamma]{Gamma()}}.}
\item{n}{The number of samples to draw. Defaults to \code{1L}.}
\item{...}{Unused. Unevaluated arguments will generate a warning to
catch misspellings or other possible errors.}
}
\value{
A numeric vector of length \code{n}.
}
\description{
Draw a random sample from a Gamma distribution
}
\examples{
set.seed(27)
X <- Gamma(5, 2)
X
random(X, 10)
pdf(X, 2)
log_pdf(X, 2)
cdf(X, 4)
quantile(X, 0.7)
cdf(X, quantile(X, 0.7))
quantile(X, cdf(X, 7))
}
|
/man/random.Gamma.Rd
|
permissive
|
nfultz/distributions3
|
R
| false | true | 782 |
rd
|
library(testthat)
library(TAPseq)
test_check("TAPseq")
|
/tests/testthat.R
|
permissive
|
argschwind/TAPseq
|
R
| false | false | 56 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{DetermineWeight_SimClust}
\alias{DetermineWeight_SimClust}
\title{Determines an optimal weight for weighted clustering by similarity weighted
clustering.}
\usage{
DetermineWeight_SimClust(List, type = c("data", "dist", "clusters"),
distmeasure = c("tanimoto", "tanimoto"), normalize = c(FALSE, FALSE),
method = c(NULL, NULL), weight = seq(0, 1, by = 0.01),
nrclusters = NULL, clust = "agnes", linkage = c("flexible", "flexible"),
linkageF = "ward", alpha = 0.625, gap = FALSE, maxK = 15,
names = NULL, StopRange = FALSE, plottype = "new", location = NULL)
}
\arguments{
\item{List}{A list of matrices of the same type. It is assumed the rows are
corresponding with the objects.}
\item{type}{indicates whether the provided matrices in "List" are either data matrices, distance
matrices or clustering results obtained from the data. If type="dist" the calculation of the distance
matrices is skipped and if type="clusters" the single source clustering is skipped.
Type should be one of "data", "dist" or "clusters".}
\item{distmeasure}{A vector of the distance measures to be used on each data matrix. Should be one of "tanimoto", "euclidean", "jaccard", "hamming". Defaults to c("tanimoto","tanimoto").}
\item{normalize}{Logical. Indicates whether to normalize the distance matrices or not, defaults to c(FALSE, FALSE) for two data sets. This is recommended if different distance types are used. More details on normalization in \code{Normalization}.}
\item{method}{A method of normalization. Should be one of "Quantile","Fisher-Yates", "standardize","Range" or any of the first letters of these names. Default is c(NULL,NULL) for two data sets.}
\item{weight}{Optional. A list of different weight combinations for the data sets in List.
If NULL, the weights are determined to be equal for each data set.
It is further possible to fix weights for some data matrices and to
let them vary randomly for the remaining data sets. Defaults to seq(1,0,-0.1). An example is provided in the details.}
\item{nrclusters}{The number of clusters to cut the dendrogram in. This is
necessary for the computation of the Jaccard coefficient. Default is NULL.}
\item{clust}{Choice of clustering function (character). Defaults to "agnes".}
\item{linkage}{Choice of inter group dissimilarity (character) for the individual clusterings. Defaults to c("flexible","flexible").}
\item{linkageF}{Choice of inter group dissimilarity (character) for the final clustering. Defaults to "ward".}
\item{alpha}{The parameter alpha to be used in the "flexible" linkage of the agnes function. Defaults to 0.625 and is only used if the linkage is set to "flexible".}
\item{gap}{Logical. Whether or not to calculate the gap statistic in the
clustering on each data matrix separately. Only if type="data". Default is FALSE.}
\item{maxK}{The maximal number of clusters to consider in calculating the
gap statistic. Only if type="data". Default is 15.}
\item{names}{The labels to give to the elements in List. Default is NULL.}
\item{StopRange}{Logical. Indicates whether the distance matrices with
values not between zero and one should be standardized so that they are. If FALSE
the range normalization is performed. See \code{Normalization}. If TRUE, the
distance matrices are not changed. This is recommended if different types of
data are used such that these are comparable. Default is FALSE.}
\item{plottype}{Should be one of "pdf","new" or "sweave". If "pdf", a
location should be provided in "location" and the figure is saved there. If
"new" a new graphic device is opened and if "sweave", the figure is made
compatible to appear in a sweave or knitr document, i.e. no new device is
opened and the plot appears in the current device or document. Default is "new".}
\item{location}{If plottype is "pdf", a location should be provided in
"location" and the figure is saved there. Default is FALSE.}
}
\value{
The returned value is a list with three elements:
\item{ClustSep}{The result of \code{Cluster} for each single element of
List} \item{Result}{A data frame with the Jaccard coefficients and their
ratios for each weight} \item{Weight}{The optimal weight}
}
\description{
The function \code{DetermineWeight_SimClust} determines an optimal weight
for performing weighted similarity clustering by applying similarity
clustering. For each given weight, each separate clustering is compared to
the clustering on a weighted dissimilarity matrix and a Jaccard coefficient
is calculated. The ratio of the Jaccard coefficients closest to one
indicates an optimal weight.
}
\details{
If the type of List is data, an hierarchical clustering is performed on each
data matrix separately. After obtaining clustering results for the two data
matrices, the distance matrices are extracted. If these are not calculated
with the same distance measure, they are normalized to be in the same range.
For each weight, a weighted linear combination of the distance matrices is
taken and hierarchical clustering is performed once again. The resulting
clustering is compared to each of the separate clustering results and a
Jaccard coefficient is computed. The ratio of the Jaccard coefficients
closets to one, indicates an optimal weight. A plot of all the ratios is
produced with an extra indication for the optimal weight.
The weight combinations should be provided as elements in a list. For three
data matrices an example could be:
weights=list(c(0.5,0.2,0.3),c(0.1,0.5,0.4)). To provide a fixed weight for
some data sets and let it vary randomly for others, the element "x"
indicates a free parameter. An example is weights=list(c(0.7,"x","x")). The
weight 0.7 is now fixed for the first data matrix while the remaining 0.3
weight will be divided over the other two data sets. This implies that every
combination of the sequence from 0 to 0.3 with steps of 0.1 will be reported
and clustering will be performed for each.
}
\examples{
\dontrun{
data(fingerprintMat)
data(targetMat)
MCF7_F = Cluster(fingerprintMat,type="data",distmeasure="tanimoto",normalize=FALSE,
method=NULL,clust="agnes",linkage="flexible",alpha=0.625,gap=FALSE,maxK=55,StopRange=FALSE)
MCF7_T = Cluster(targetMat,type="data",distmeasure="tanimoto",normalize=FALSE,
method=NULL,clust="agnes",linkage="flexible",alpha=0.625,gap=FALSE,maxK=55,StopRange=FALSE)
L=list(MCF7_F,MCF7_T)
MCF7_Weight=DetermineWeight_SimClust(List=L,type="clusters",weight=seq(0,1,by=0.01),
nrclusters=c(7,7),distmeasure=c("tanimoto","tanimoto"),normalize=c(FALSE,FALSE),
method=c(NULL,NULL),clust="agnes",linkage=c("flexible","flexible"),linkageF="ward",
alpha=0.625,gap=FALSE,maxK=50,names=c("FP","TP"),StopRange=FALSE,plottype="new",location=NULL)
}
}
\references{
\insertRef{PerualilaTan2016}{IntClust}
}
|
/man/DetermineWeight_SimClust.Rd
|
no_license
|
cran/IntClust
|
R
| false | true | 6,959 |
rd
|
# Normal-distribution probabilities: pnorm(q, mean, sd) returns P(X <= q) for X ~ Normal(mean, sd)
pnorm(140/1000, mean = 124/1000, sd = 20/1000)
1-0.7881446
pnorm(90/1000, mean = 124/1000, sd = 20/100)
pnorm(200, mean = 219, sd = 50)
pnorm(250, mean = 219, sd = 50)
pnorm(120, mean = 90, sd = 38) - pnorm(65, mean = 90, sd = 38)
pnorm(120, mean = 90, sd = 38)
pnorm(65, mean = 90, sd = 38)
1 - pnorm(180, mean = 90, sd = 38)
1 - pnorm(240, mean = 90, sd = 38)
pnorm(240, mean = 90, sd = 38)
|
/Biostats7.R
|
no_license
|
will-olu/Python-Scripts
|
R
| false | false | 412 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{districts_r}
\alias{districts_r}
\title{Polygons of the districts of Vietnam with population size}
\format{An object of class \code{SpatialPolygonsDataFrame}}
\usage{
data(districts_r)
}
\description{
Shape files of the districts of Vietnam after the 2008 merging of the
provinces of Ha Tay and Ha Noi, with 2009 census population sizes in high
polygon resolution. All the Vietnamese names are expressed in UNICODE.
}
\keyword{datasets}
|
/man/districts_r.Rd
|
no_license
|
choisy/censusVN2009
|
R
| false | true | 548 |
rd
|
# Calculate centrality script
# author: Rafael Leano
# last-update: Apr 21, 2015
args <- commandArgs(T)
if("--help" %in% args | length(args) != 3L ){
cat("
Usage:
centrality.r <workdir> <infile> <outfile>
(order must be kept)
")
q()
}
dir <- args[1]
infile <- args[2]
outfile <- args[3]
setwd(dir)
contents <- read.csv(infile, header=T)
m <- as.matrix(contents)
rownames(m) <- colnames(m) <- colnames(contents)
try(install.packages('igraph', repos="http://cran.fhcrc.org"))
library(igraph)
g <- graph.adjacency(m)
evcent <- evcent(g)$vector
write(evcent, outfile, sep="\n")
cat(paste("Success! wrote file [", outfile, "]", sep=""))
|
/rcode/centrality.r
|
no_license
|
rleano/phd.SNaRK
|
R
| false | false | 664 |
r
|
77408a5c1822ccfe8a7dc1f52ce0f957 b14_PR_9_5.qdimacs 9057 26536
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Sauer-Reimer/ITC99/b14_PR_9_5/b14_PR_9_5.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 62 |
r
|
library(RMySQL)
library(stringdist)
library(openxlsx)
# Open the database connection
con <- dbConnect(MySQL(), user="root", host="127.0.0.1", dbname="pelanggan")
# Select the kode_pelanggan, nama_lengkap and alamat columns from data_pelanggan
s <- "SELECT kode_pelanggan, nama_lengkap, alamat FROM data_pelanggan"
# Send the query
send <- tryCatch(dbSendQuery(con, s), finally = print("Query Ok MEn"))
# Fetch the data
data.pelanggan <- fetch(send, n=-1)
#Clear Result
dbClearResult(send)
# Initialise the variable for the final result (hasil.akhir)
hasil.akhir <- NULL
# Initialise the grouping_no variable with value 1
grouping_no <- 1
while(length(data.pelanggan$nama_lengkap)>0)
{
# The reference name and address are taken from the first remaining item
referensi.nama <- data.pelanggan$nama_lengkap[1]
referensi.alamat <- data.pelanggan$alamat[1]
# Compute the string distance between the reference and every name/address item;
# use method "cosine" for names and method "lv" (Levenshtein) for addresses
jarak.teks.nama <- stringdist(referensi.nama, data.pelanggan$nama, method="cosine")
jarak.teks.alamat <- stringdist(referensi.alamat, data.pelanggan$alamat, method="lv")
# Filter the distances with thresholds:
# - less than or equal to 0.15 for names
# - less than 15 for addresses
# The result is stored in the variable filter.jarak
filter.jarak <- (jarak.teks.nama <= 0.15 & jarak.teks.alamat < 15)
# Subset data.pelanggan with the filter and take the three columns,
# storing them in three temporary variables
kode_pelanggan.temp <- data.pelanggan[filter.jarak,]$kode_pelanggan
nama.temp <- data.pelanggan[filter.jarak,]$nama
alamat.temp <- data.pelanggan[filter.jarak,]$alamat
# Construct a temporary data frame for this group
var.temp <- data.frame(grouping=grouping_no, kode_pelanggan=kode_pelanggan.temp, nama=nama.temp, alamat=alamat.temp, jumlah_record=length(kode_pelanggan.temp))
# Append the temporary data frame to the accumulated result
hasil.akhir <- rbind(hasil.akhir, var.temp)
# Drop the records that were just grouped from data.pelanggan
data.pelanggan <- data.pelanggan[!filter.jarak,]
# Increment the grouping number for the next iteration
grouping_no <- grouping_no + 1
}
# Write the result to the file staging.duplikat.awal3.xlsx
write.xlsx(hasil.akhir, file = "staging.duplikat.awal3.xlsx")
# Close the MySQL connections
ttp <- dbListConnections(MySQL())
for(con in ttp) dbDisconnect(con)
|
/Data Exploration in Data Science using R/Melakukan Grouping Duplikat dari Dataset Awal.R
|
no_license
|
rhedi/Data_Science
|
R
| false | false | 2,374 |
r
|
# obtain beta matrix for each batch i by g
# quality check on matrix to make sure var is tight
# obtain beta vector
#' Prepends a prefix to string values that contain a digit (0-9)
prepend_ifnumeric <- function(x, prefix) {
x <- as.character(x)
has_numeric_prefix <- grepl("[0-9]", x)
x[has_numeric_prefix] <- paste0(prefix, x[has_numeric_prefix])
x
}
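# e.g. prepend_ifnumeric(c("1", "remission", "27"), "B") returns c("B1", "remission", "B27");
# note that grepl("[0-9]", x) flags values containing a digit anywhere, not only at the start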
# TODO: compute beta: using median vs all samples
#' Compute batch scaling factor for each feature
#' If batch scaling factors are unable to be computed due to missing values,
#' NA will be assigned as the value of beta
estimate_parameters <- function(X, metadata) {
# probeset i, sample j, batch k, class g
#' If vector has less than 3 non-zero values: return NA
#' Median of vector with all zero values results in NA
median_nonzero <- function(x, ...) {
if (length(x[x != 0]) < 3) {
      cnt <<- cnt + 1 # <<- so the shared counter in the enclosing function is updated
return(NA)
}
median(x[x != 0], ...)
}
mean_nonzero <- function(x, ...) {
if (length(x[x != 0]) < 3) {
      cnt <<- cnt + 1 # <<- so the shared counter in the enclosing function is updated
return(NA)
}
mean(x[x != 0], ...)
}
cnt <- 0
metadata <- metadata[colnames(X), , drop = FALSE] # metadata only of samples in X
sample_classes <- prepend_ifnumeric(metadata$class_info, "C")
sample_batches <- prepend_ifnumeric(metadata$batch_info, "B")
n_classes <- length(unique(sample_classes))
n_batches <- length(unique(sample_batches))
# initialise arrays
beta <- mu <- array(
NA, dim = c(nrow(X), n_batches, n_classes),
dimnames = list(rownames(X), unique(sample_batches), unique(sample_classes))
)
ref_mu <- array(
NA, dim = c(nrow(X), n_classes),
dimnames = list(rownames(X), unique(sample_classes))
)
X_classes <- split.default(X, sample_classes)
# Estimation of beta
for (g in names(X_classes)) {
X_g <- X_classes[[g]]
batch_g <- prepend_ifnumeric(metadata[colnames(X_g), "batch_info"], "B")
X_batches <- split.default(X_g, batch_g)
mu_g <- sapply(X_batches, apply, 1, mean_nonzero, na.rm = TRUE)
ref_mu_g <- apply(mu_g, 1, median_nonzero, na.rm = TRUE)
if (any(is.na(ref_mu_g)))
warning(sprintf(
"Class %s: %d out of %d features have median of batch medians with value zero.",
as.character(g), sum(is.na(ref_mu_g)), nrow(X)
))
beta_g <- data.frame(mu_g / ref_mu_g)
for (k in colnames(mu_g)) {
mu[, k, g] <- mu_g[, k]
beta[, k, g] <- beta_g[, k]
}
ref_mu[, g] <- ref_mu_g
}
# beta_arr <- abind::abind(list_betas, rev.along = 0) # beta for all classes
beta_hat <- apply(beta, c(1, 2), mean, na.rm = FALSE) #
beta_hat[is.nan(beta_hat)] <- NA # mean of vector with all NA values returns NaN
cat(sprintf(
"No. of NAs in beta_hat: %d/%d\n",
sum(is.na(beta_hat)), length(beta_hat)
))
beta_hat[is.na(beta_hat)] <- 1 # replace NA with 1 (i.e. do not correct if beta = NA)
beta_sigma2 <- apply(beta, c(1, 2), var, na.rm = FALSE)
## Estimating outlier gamma
# Compute class pair ratios
n_pairs <- 1
# Assume: 2 classes have been chosen for beta
pair_classes <- c("A", "B")
rho <- mu[, , pair_classes[1]] / mu[, , pair_classes[2]]
rho_mu <- apply(rho, 1, median_nonzero, na.rm = TRUE)
cat(sprintf("No. of features with missing rho means = %d\n", sum(is.na(rho_mu))))
rho_sigma <- apply(rho, 1, sd, na.rm = TRUE)
# 1: estimate gamma from rho
gamma_1 <- rho / rho_mu
cat(sprintf(
"Median was computed for %d non-zero vectors with length < 3.\n", cnt
))
list(
X = X,
beta = beta,
beta_hat = beta_hat, # mean of beta
beta_sigma2 = beta_sigma2,
mu = mu, # mean of each (batch, class)
ref_mu = ref_mu,
rho = rho,
gamma_1 = gamma_1,
sample_batches = sample_batches,
sample_classes = sample_classes
)
}
correct_batch_effects <- function(
obj,
beta.var.threshold = 0.05,
gamma.threshold = 1.7
) {
median_nonzero <- function(x, ...) {
if (length(x[x != 0]) < 3) {
cnt <- cnt + 1
return(NA)
}
median(x[x != 0], ...)
}
mean_nonzero <- function(x, ...) {
if (length(x[x != 0]) < 3) {
cnt <- cnt + 1
return(NA)
}
mean(x[x != 0], ...)
}
X <- obj$X
beta <- obj$beta
beta_hat <- obj$beta_hat # mean of beta
beta_sigma2 <- obj$beta_sigma2
mu <- obj$mu # mean of each (batch, class)
ref_mu <- obj$ref_mu
rho <- obj$rho
gamma_1 <- obj$gamma_1
sample_batches <- obj$sample_batches
sample_classes <- obj$sample_classes
# TODO: beware of all rhos for genes is NA
# Identifying outlier (batch, class, feature) that requires gamma correction
# Identification using gamma_1 value only
# gamma_1 cannot be zero or NA
is_outlier <- gamma_1 != 0 &
(gamma_1 > gamma.threshold | gamma_1 < (1 / gamma.threshold))
outlier_indices <- which(is_outlier, arr.ind = TRUE)
pair_classes <- c("A", "B") # assuming there only is one pair
beta_1 <- beta[, , pair_classes[1]]
beta_2 <- beta[, , pair_classes[2]]
outlier_beta <- cbind(beta_1[outlier_indices], beta_2[outlier_indices])
# class with beta closer to 1 is reference class
outlier_class <- apply(abs(log(outlier_beta)), 1, which.max)
# 2: estimate gamma from beta
outliers <- cbind(
outlier_indices,
rho = rho[outlier_indices],
beta_1 = beta_1[outlier_indices],
beta_2 = beta_2[outlier_indices],
gamma_1 = gamma_1[outlier_indices],
gamma_2 = beta_1[outlier_indices] / beta_2[outlier_indices],
outlier_class = outlier_class
)
# use gamma_1 as gamma_2 may be biased by possible errors in estimating ref in classes
# Correcting gamma_1 -> Update X and beta
combinations <- split.data.frame(
outliers, list(outliers[, "col"], outliers[, "outlier_class"])
)
cat(sprintf("No. of gamma outliers = %d\n", nrow(outliers)))
# iterating through all (batch, outlier_class) combinations
for (outlier_kg in combinations) {
if (dim(outlier_kg)[1] == 0) next
k <- unique(sample_batches)[outlier_kg[1, "col"]]
class_idx <- outlier_kg[1, "outlier_class"]
g <- pair_classes[class_idx]
# subset target patients
sids <- colnames(X)[sample_batches == k & sample_classes == g]
gamma_kg <- outlier_kg[, "gamma_1"]
stopifnot(length(gamma_kg) == length(rownames(outlier_kg)))
if (class_idx == 2) {
# if outlier_class == 2 -> multiply by gamma_1
X[rownames(outlier_kg), sids] <- X[rownames(outlier_kg), sids] * gamma_kg
} else if (class_idx == 1) {
# if outlier_class == 1 -> divide by gamma_1
# gamma defined as "1"/"2"
X[rownames(outlier_kg), sids] <- X[rownames(outlier_kg), sids] / gamma_kg
} else {
warning("Gamma was not corrected!")
}
}
# TODO: Do we update mu to recalculate some betas, and then identify betas to correct?
# TODO: Can directly scale mus
# # Correction using beta_hat
# X_batches <- split.default(X, batch)
# if (sum(is.na(beta_hat)) > 0 | sum(beta_hat == 0, na.rm = TRUE) > 0)
# stop("Beta_hat matrix contains zeros or NAs.")
# for (k in colnames(beta_hat)) {
# X_batches[[k]] <- X_batches[[k]] / beta_hat[, k]
# }
# X1 <- do.call(cbind, unname(X_batches))
# X1 <- X1[, colnames(X)]
list(X = X, outliers = outliers)
}
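# Hypothetical usage sketch (the object names below are assumptions, not part of this file):
# X is a features x samples data.frame and metadata a data.frame whose rownames are the
# sample names, with columns class_info and batch_info as used by estimate_parameters().
# params <- estimate_parameters(X, metadata)
# corrected <- correct_batch_effects(params, gamma.threshold = 1.7)
# X_corrected <- corrected$X # batch-corrected matrix; corrected$outliers lists gamma outliers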
|
/R/batch.R
|
no_license
|
dblux/relapse_prediction
|
R
| false | false | 7,173 |
r
|
#Nicola's Moorea ITS2 analysis
#Almost entirely based on DADA2 ITS Pipeline 1.8 Walkthrough:
#https://benjjneb.github.io/dada2/ITS_workflow.html
#with edits by Carly D. Kenkel and modifications for my data by Nicola Kriefall
#7/23/19
#~########################~#
##### PRE-PROCESSING #######
#~########################~#
#fastq files should have R1 & R2 designations for PE reads
#Also - some pre-trimming. Retain only PE reads that match amplicon primer. Remove reads containing Illumina sequencing adapters
##in Terminal home directory:
##following instructions of installing BBtools from https://jgi.doe.gov/data-and-tools/bbtools/bb-tools-user-guide/installation-guide/
##1. download BBMap package, sftp to installation directory
##2. untar:
#tar -xvzf BBMap_(version).tar.gz
##3. test package:
#cd bbmap
#~/bin/bbmap/stats.sh in=~/bin/bbmap/resources/phix174_ill.ref.fa.gz
## my adaptors, which I saved as "adaptors.fasta"
# >forward
# AATGATACGGCGACCAC
# >forwardrc
# GTGGTCGCCGTATCATT
# >reverse
# CAAGCAGAAGACGGCATAC
# >reverserc
# GTATGCCGTCTTCTGCTTG
##Note: Illumina should have cut these out already, normal if you don't get any
##primers for ITS:
# >forward
# GTGAATTGCAGAACTCCGTG
# >reverse
# CCTCCGCTTACTTATATGCTT
##Still in terminal - making a sample list based on the first phrase before the underscore in the .fastq name
#ls *R1_001.fastq | cut -d '_' -f 1 > samples.list
##cuts off the extra words in the .fastq files
#for file in $(cat samples.list); do mv ${file}_*R1*.fastq ${file}_R1.fastq; mv ${file}_*R2*.fastq ${file}_R2.fastq; done
##gets rid of reads that still have the adaptor sequence, shouldn't be there, I didn't have any
#for file in $(cat samples.list); do ~/bin/bbmap/bbduk.sh in1=${file}_R1.fastq in2=${file}_R2.fastq ref=adaptors.fasta out1=${file}_R1_NoIll.fastq out2=${file}_R2_NoIll.fastq; done &>bbduk_NoIll.log
##only keeping reads that start with the primer
#for file in $(cat samples.list); do ~/bin/bbmap/bbduk.sh in1=${file}_R1_NoIll.fastq in2=${file}_R2_NoIll.fastq k=15 restrictleft=21 literal=GTGAATTGCAGAACTCCGTG,CCTCCGCTTACTTATATGCTT outm1=${file}_R1_NoIll_ITS.fastq outu1=${file}_R1_check.fastq outm2=${file}_R2_NoIll_ITS.fastq outu2=${file}_R2_check.fastq; done &>bbduk_ITS.log
##higher k = more reads removed, but can't surpass k=20 or 21
##using cutadapt to remove adapters & reads with Ns in them
#module load cutadapt
# for file in $(cat samples.list)
# do
# cutadapt -g GTGAATTGCAGAACTCCGTG -G CCTCCGCTTACTTATATGCTT -o ${file}_R1.fastq -p ${file}_R2.fastq --max-n 0 ${file}_R1_NoIll_ITS.fastq ${file}_R2_NoIll_ITS.fastq
# done &> clip.log
##-g regular 5' forward primer
##-G regular 5' reverse primer
##-o forward out
##-p reverse out
##-max-n 0 means 0 Ns allowed
##this overwrote my original renamed files
##did sftp of *_R1.fastq & *_R2.fastq files to the folder to be used in dada2
#~########################~#
##### DADA2 BEGINS #########
#~########################~#
#installing/loading packages:
#if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
#BiocManager::install("dada2", version = "3.8")
library(dada2)
#packageVersion("dada2")
#I have version 1.10.1 - tutorial says 1.8 but I think that's OK, can't find a version 1.10 walkthrough
library(ShortRead)
#packageVersion("ShortRead")
library(Biostrings)
#packageVersion("Biostrings")
path <- "/Volumes/TOSHIBA EXT/WorkLaptop_2020_03/Desktop/its2/files_rd3" # CHANGE ME to the directory containing the fastq files after unzipping.
fnFs <- sort(list.files(path, pattern = "_R1.fastq.gz", full.names = TRUE))
fnRs <- sort(list.files(path, pattern = "_R2.fastq.gz", full.names = TRUE))
get.sample.name <- function(fname) strsplit(basename(fname), "_")[[1]][1]
sample.names <- unname(sapply(fnFs, get.sample.name))
head(sample.names)
sample.names
#### check for primers ####
FWD <- "GTGAATTGCAGAACTCCGTG" ## CHANGE ME to your forward primer sequence
REV <- "CCTCCGCTTACTTATATGCTT" ## CHANGE ME...
allOrients <- function(primer) {
# Create all orientations of the input sequence
require(Biostrings)
dna <- DNAString(primer) # The Biostrings works w/ DNAString objects rather than character vectors
orients <- c(Forward = dna, Complement = complement(dna), Reverse = reverse(dna),
RevComp = reverseComplement(dna))
return(sapply(orients, toString)) # Convert back to character vector
}
FWD.orients <- allOrients(FWD)
REV.orients <- allOrients(REV)
FWD.orients
REV.orients
fnFs.filtN <- file.path(path, "filtN", basename(fnFs)) # Put N-filterd files in filtN/ subdirectory
fnRs.filtN <- file.path(path, "filtN", basename(fnRs))
filterAndTrim(fnFs, fnFs.filtN, fnRs, fnRs.filtN, maxN = 0, multithread = TRUE)
primerHits <- function(primer, fn) {
# Counts number of reads in which the primer is found
nhits <- vcountPattern(primer, sread(readFastq(fn)), fixed = FALSE)
return(sum(nhits > 0))
}
rbind(FWD.ForwardReads = sapply(FWD.orients, primerHits, fn = fnFs.filtN[[2]]),
FWD.ReverseReads = sapply(FWD.orients, primerHits, fn = fnRs.filtN[[2]]),
REV.ForwardReads = sapply(REV.orients, primerHits, fn = fnFs.filtN[[2]]),
REV.ReverseReads = sapply(REV.orients, primerHits, fn = fnRs.filtN[[2]]))
#no primers - amazing
###### Visualizing raw data
#First, lets look at quality profile of R1 reads
plotQualityProfile(fnFs[c(1,2,3,4)])
plotQualityProfile(fnFs[c(90,91,92,93)])
#Then look at quality profile of R2 reads
plotQualityProfile(fnRs[c(1,2,3,4)])
plotQualityProfile(fnRs[c(90,91,92,93)])
#quality starts to drop off around 220 bp for forward reads and 200 bp for reverse reads; those are used (approximately) as the truncation lengths in filter&trim below
# Make directory and filenames for the filtered fastqs
filt_path <- file.path(path, "trimmed")
if(!file_test("-d", filt_path)) dir.create(filt_path)
filtFs <- file.path(filt_path, paste0(sample.names, "_F_filt.fastq.gz"))
filtRs <- file.path(filt_path, paste0(sample.names, "_R_filt.fastq.gz"))
#changing a bit from default settings - maxEE=1 (1 max expected error, more conservative), truncating at 220 bp (forward) and 200 bp (reverse); trimLeft (20 F, 21 R) would cut off primers but is left commented out since the primer check above found none
out <- filterAndTrim(fnFs, filtFs, fnRs, filtRs,
truncLen=c(220,200), #leaves overlap
maxN=0, #DADA does not allow Ns
maxEE=c(1,1), #allow 1 expected error per read, where EE = sum(10^(-Q/10)); more conservative, helps the error model converge
truncQ=2,
minLen = 50,
#trimLeft=c(20,21), #N nucleotides to remove from the start of each read
rm.phix=TRUE, #remove reads matching phiX genome
matchIDs=TRUE, #enforce matching between id-line sequence identifiers of F and R reads
compress=TRUE, multithread=TRUE) # On Windows set multithread=FALSE
head(out)
tail(out)
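#quick illustration of the expected-error (EE) criterion used in maxEE above:
#EE = sum(10^(-Q/10)) over a read's per-base quality scores, so maxEE=1 drops reads whose
#summed error probabilities exceed 1 (hypothetical quality vectors below, just to show the math)
q.good <- rep(30, 200) #a 200 bp read at Q30 throughout
sum(10^(-q.good/10)) #0.2 expected errors - would pass maxEE=1
q.bad <- q.good; q.bad[150:200] <- 15 #same read with a low-quality Q15 tail
sum(10^(-q.bad/10)) #~1.76 expected errors - would be filtered out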
#~############################~#
##### Learn Error Rates ########
#~############################~#
#setDadaOpt(MAX_CONSIST=30) #if necessary, increase number of cycles to allow convergence
errF <- learnErrors(filtFs, multithread=TRUE)
errR <- learnErrors(filtRs, multithread=TRUE)
#sanity check: visualize estimated error rates
#error rates should decline with increasing qual score
#red line is based on definition of quality score alone
#black line is estimated error rate after convergence
#dots are observed error rate for each quality score
plotErrors(errF, nominalQ=TRUE)
plotErrors(errR, nominalQ=TRUE)
#~############################~#
##### Dereplicate reads ########
#~############################~#
#Dereplication combines all identical sequencing reads into “unique sequences” with a corresponding “abundance”: the number of reads with that unique sequence.
#Dereplication substantially reduces computation time by eliminating redundant comparisons.
#DADA2 retains a summary of the quality information associated with each unique sequence. The consensus quality profile of a unique sequence is the average of the positional qualities from the dereplicated reads. These quality profiles inform the error model of the subsequent denoising step, significantly increasing DADA2’s accuracy.
derepFs <- derepFastq(filtFs, verbose=TRUE)
derepRs <- derepFastq(filtRs, verbose=TRUE)
# Name the derep-class objects by the sample names
names(derepFs) <- sample.names
names(derepRs) <- sample.names
#~###############################~#
##### Infer Sequence Variants #####
#~###############################~#
dadaFs <- dada(derepFs, err=errF, multithread=TRUE)
dadaRs <- dada(derepRs, err=errR, multithread=TRUE)
#now, look at the dada class objects by sample
#will tell how many 'real' variants in unique input seqs
#By default, the dada function processes each sample independently, but pooled processing is available with pool=TRUE and that may give better results for low sampling depths at the cost of increased computation time. See our discussion about pooling samples for sample inference.
dadaFs[[1]]
dadaRs[[1]]
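#hedged example of the pooled alternative mentioned above (not run here; pooling is slower
#but can recover rare variants shared across samples):
#dadaFs.pool <- dada(derepFs, err=errF, multithread=TRUE, pool=TRUE)
#dadaRs.pool <- dada(derepRs, err=errR, multithread=TRUE, pool=TRUE)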
#~############################~#
##### Merge paired reads #######
#~############################~#
#To further cull spurious sequence variants
#Merge the denoised forward and reverse reads
#Paired reads that do not exactly overlap are removed
mergers <- mergePairs(dadaFs, derepFs, dadaRs, derepRs, verbose=TRUE)
# Inspect the merger data.frame from the first sample
head(mergers[[1]])
summary((mergers[[1]]))
#We now have a data.frame for each sample with the merged $sequence, its $abundance, and the indices of the merged $forward and $reverse denoised sequences. Paired reads that did not exactly overlap were removed by mergePairs.
#~##################################~#
##### Construct sequence table #######
#~##################################~#
#a higher-resolution version of the “OTU table” produced by classical methods
seqtab <- makeSequenceTable(mergers)
dim(seqtab)
rowSums(seqtab)
# Inspect distribution of sequence lengths
table(nchar(getSequences(seqtab)))
#mostly at 300 bp, which is just what was expected [271-303 bp]
plot(table(nchar(getSequences(seqtab))))
#The sequence table is a matrix with rows corresponding to (and named by) the samples, and
#columns corresponding to (and named by) the sequence variants.
#Sequences that are much longer or shorter than expected may be the result of non-specific priming, and may be worth removing
#if I wanted to remove some lengths - not recommended by dada2 for its2 data
#seqtab2 <- seqtab[,nchar(colnames(seqtab)) %in% seq(268,327)]
table(nchar(getSequences(seqtab)))
dim(seqtab)
plot(table(nchar(getSequences(seqtab))))
#~############################~#
##### Remove chimeras ##########
#~############################~#
#The core dada method removes substitution and indel errors, but chimeras remain.
#Fortunately, the accuracy of the sequences after denoising makes identifying chimeras easier
#than it is when dealing with fuzzy OTUs: all sequences which can be exactly reconstructed as
#a bimera (two-parent chimera) from more abundant sequences.
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE, verbose=TRUE)
dim(seqtab.nochim)
#Identified 38 bimeras out of 156 input sequences.
sum(seqtab.nochim)/sum(seqtab)
#0.9989
#The fraction of chimeras varies based on factors including experimental procedures and sample complexity,
#but can be substantial.
#~############################~#
##### Track Read Stats #########
#~############################~#
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(dadaFs, getN), sapply(mergers, getN), rowSums(seqtab), rowSums(seqtab.nochim)) #seqtab (not the unused seqtab2) holds the tabled counts
colnames(track) <- c("input", "filtered", "denoised", "merged", "tabled", "nonchim")
rownames(track) <- sample.names
head(track)
tail(track)
write.csv(track,file="its2_reads.csv",row.names=TRUE,quote=FALSE)
#plotting for no reason
#manually added raw counts from the raw .fastq file using the following loop in Terminal:
# for file in *R1_001.fastq
# do
# echo $file >> raw_r1_names
# grep @M0 $file | wc -l >> raw_r1_counts
# done
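#a hedged R alternative for read counting (uses the renamed files already listed in fnFs above,
#not the raw *_R1_001.fastq files the Terminal loop used; loads each file, so slow for big runs)
raw.counts <- sapply(fnFs, function(f) length(ShortRead::readFastq(f))) #reads per forward file
head(raw.counts)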
setwd("~/moorea_holobiont/mr_ITS2/")
reads <- read.csv("its2_reads_renamed.csv")
#counts1 = raw
#counts2 = input
#counts3 = filtered
#counts4 = denoised
#counts5 = merged
#counts6 = nonchim
reread <- reshape(reads, varying = c("counts1","counts2", "counts3", "counts4", "counts5", "counts6"), timevar = "day",idvar = "sample", direction = "long", sep = "")
reread$sample <- as.factor(reread$sample)
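#a hedged tidyr equivalent of the reshape() call above (not run; assumes tidyr >= 1.0 is installed;
#names_prefix strips "counts" so 'day' matches the 1-6 coding used below)
#library(tidyr)
#reread2 <- pivot_longer(reads, cols = starts_with("counts"), names_to = "day",
#                        names_prefix = "counts", values_to = "counts")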
library(ggplot2) #not loaded yet at this point in the script
ggplot(reread,aes(x=day,y=counts,color=sample))+
  geom_point()+
  geom_path()
library(Rmisc)
reread.se <- summarySE(data=reread,measurevar="counts",groupvars=c("day"))
ggplot(reread.se,aes(x=day,y=counts))+
geom_point()+
geom_path()+
geom_errorbar(aes(ymin=counts-se,ymax=counts+se),width=0.3)
#~############################~#
##### Assign Taxonomy ##########
#~############################~#
taxa <- assignTaxonomy(seqtab.nochim, "~/Downloads/GeoSymbio_ITS2_LocalDatabase_verForPhyloseq.fasta",tryRC=TRUE,minBoot=70,verbose=TRUE)
unname(head(taxa))
#to come back to later
saveRDS(seqtab.nochim, file="~/Desktop/its2/mrits2_seqtab.nochim.rds")
saveRDS(taxa, file="~/Desktop/its2/mrits2_taxa.rds")
write.csv(seqtab.nochim, file="mrits2_seqtab.nochim.csv")
write.csv(taxa, file="~/Desktop/its2/mrits2_taxa.csv")
#### Reading in prior data files ####
setwd("~/moorea_holobiont/mr_ITS2")
seqtab.nochim <- readRDS("mrits2_seqtab.nochim.rds")
taxa <- readRDS("mrits2_taxa.rds")
#~############################~#
##### handoff 2 phyloseq #######
#~############################~#
#BiocManager::install("phyloseq")
library('phyloseq')
library('ggplot2')
library('Rmisc')
#import dataframe holding sample information
samdf<-read.csv("mrits_sampledata.csv")
head(samdf)
rownames(samdf) <- samdf$Sample
# Construct phyloseq object (straightforward from dada2 outputs)
# ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
# sample_data(samdf),
# tax_table(taxa))
#
# ps
#phyloseq object with shorter names - doing this one instead of one above
ids <- paste0("sq", seq(1, length(colnames(seqtab.nochim))))
#making output fasta file for lulu step & maybe other things, before giving new ids to sequences
#path='~/moorea_holobiont/mr_ITS2/mrits2.fasta'
#uniquesToFasta(seqtab.nochim, path, ids = ids, mode = "w", width = 20000)
colnames(seqtab.nochim)<-ids
taxa2 <- cbind(taxa, rownames(taxa)) #retaining raw sequence info before renaming
rownames(taxa2)<-ids
ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps
#removing sample 87 from ps object, no data
ps_no87 <- subset_samples(ps, Sample != "87")
ps_no87
#removing 87 from other things
samdf.no87 <- samdf[-92,]
seqtab.no87 <- seqtab.nochim[-92,]
#~##########################################~#
###### Apply LULU to cluster ASVs ############
#~##########################################~#
##Necessary pre-steps after producing ASV table and associated fasta file:
##Produce a match list using BLASTn
##IN TERMINAL#
##First produce a blastdatabase with the OTUs
#module load blast+
#makeblastdb -in mrits2.fasta -parse_seqids -dbtype nucl
##Then blast the OTUs against the database to produce the match list
#blastn -db mrits2.fasta -outfmt '6 qseqid sseqid pident' -out match_list.txt -qcov_hsp_perc 80 -perc_identity 84 -query mrits2.fasta
##HSP = high scoring pair
##perc_identity = percent of nucleotides in the highly similar pairings that match
##transfer match_list.txt to R working directory
##BACK IN R#
#first, read in ASV table
#install.packages("remotes")
library("remotes")
#install_github("https://github.com/tobiasgf/lulu.git")
library("lulu")
#rarefying
library(vegan)
#back to lulu sequence
ASVs <- as.data.frame(t(seqtab.no87))
#just made this in terminal
matchList <- read.table("match_list.txt")
#Now, run the LULU curation
curated_result <- lulu(ASVs, matchList,minimum_match=99,minimum_relative_cooccurence=0.99)
#default: minimum_relative_cooccurence = 0.95 default, changed to 0.70 to see what happens, nothing
#default: minimum_match = 84 default, only 1 OTU different between 97 & 84
summary(curated_result)
#19 otus with match of 99%
#Pull out the curated OTU list, re-transpose
lulu.out <- data.frame(t(curated_result$curated_table))
#Continue on to your favorite analysis
#write.csv(lulu.out,"~/moorea_holobiont/mr_ITS2/lulu_output.csv")
lulu.out <- read.csv("~/moorea_holobiont/mr_ITS2/lulu_output.csv",row.names=1)
#ps object with lulu
ps.lulu <- phyloseq(otu_table(lulu.out, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps.lulu
#### rarefy ####
library(vegan)
rarecurve(lulu.out,step=100,label=FALSE) #after clustering
total <- rowSums(lulu.out)
total
subset(total, total <1994)
summary(total)
row.names.remove <- c("117","311","402","414","505","513","530","58","72","76")
lulu.out.rare <- lulu.out[!(row.names(lulu.out) %in% row.names.remove),]
samdf.rare <- samdf.no87[!(row.names(samdf.no87) %in% row.names.remove), ]
#85 samples left
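#rrarefy() subsamples reads at random, so the rarefied table differs slightly between runs;
#fixing a seed first makes it reproducible (1994 is arbitrary, chosen to echo the depth)
set.seed(1994)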
seq.rare <- rrarefy(lulu.out.rare,sample=1994)
rarecurve(seq.rare,step=100,label=FALSE)
rarecurve(seqtab.no87,step=100,label=FALSE) #before trimming
#save
#write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994.csv")
write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_rd2.csv")
#read back in
seq.rare <- read.csv("~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_rd2.csv",row.names=1,header=TRUE)
#phyloseq object
ps.rare <- phyloseq(otu_table(seq.rare, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps.rare
#### Alpha diversity #####
library(ggplot2)
#install.packages("extrafontdb")
library(extrafontdb)
library(extrafont)
library(Rmisc)
library(cowplot)
library(ggpubr)
#font_import(paths = "/Library/Fonts/")
loadfonts()
#Visualize alpha-diversity - ***Should be done on raw, untrimmed dataset***
#total species diversity in a landscape (gamma diversity) is determined by two different things, the mean species diversity in sites or habitats at a more local scale (alpha diversity) and the differentiation among those habitats (beta diversity)
#Shannon: entropy of the relative abundances - the uncertainty in predicting the identity of a randomly drawn read.
#Maximal (ln of the number of types) when all types are equally common; shrinks as abundances become
#more uneven; approaches zero when practically all abundance sits in one type, and is exactly zero with a single type.
#Simpson: the probability that two reads drawn at random belong to the same type (weighted mean of the
#proportional abundances), so it takes small values in diverse samples and large values in low-diversity ones.
#Because that runs counter to intuition for a "diversity" index, the inverse Simpson (1/lambda) or
#Gini-Simpson (1 - lambda) transformations are usually reported instead; inverse Simpson is used below.
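#quick toy check of how the two indices behave (vegan::diversity; values approximate):
toy <- c(sq1=90, sq2=5, sq3=5) #one dominant ASV
vegan::diversity(toy, index="shannon") #~0.39 - low entropy when one type dominates
vegan::diversity(toy, index="invsimpson") #~1.2 - inverse Simpson approaches 1 under dominance
vegan::diversity(c(sq1=34, sq2=33, sq3=33), index="shannon") #~1.1, close to ln(3), when types are ~equal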
plot_richness(ps_no87, x="site", measures=c("Shannon", "Simpson"), color="zone") + theme_bw()
df.div <- estimate_richness(ps.rare, split=TRUE, measures =c("Shannon","InvSimpson","Observed"))
#df.div <- estimate_richness(ps.rare, split=TRUE, measures =c("Shannon","InvSimpson","Observed"))
df.div
df.div$Sample <- rownames(df.div)
df.div$Sample <- gsub("X","",df.div$Sample)
df.div <- merge(df.div,samdf,by="Sample") #add sample data
#write.csv(df.div,file="~/moorea_holobiont/mr_ITS2/mrits_div_lulu.rare.csv") #saving
df.div <- read.csv("~/moorea_holobiont/mr_ITS2/mrits_div_lulu.rare.csv") #reading back in
df.div$zone <- gsub("Forereef","FR",df.div$zone)
df.div$zone <- gsub("Backreef","BR",df.div$zone)
quartz()
gg.sh <- ggplot(df.div, aes(x=zone, y=Shannon,color=zone,shape=zone))+
#geom_errorbar(aes(ymin=InvSimpson-se,ymax=InvSimpson+se),position=position_dodge(0.5),lwd=0.4,width=0.4)+
#geom_point(aes(colour=zone, shape=zone),size=4,position=position_dodge(0.5))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Shannon diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)+
facet_wrap(~site)+
ylim(-0.01, 1.7)
gg.sh
gg.si <- ggplot(df.div, aes(x=zone, y=InvSimpson,color=zone,shape=zone))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Inv. Simpson diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)+
facet_wrap(~site)+
ylim(1,4.4)
gg.si
gg.ob <- ggplot(df.div, aes(x=zone, y=Observed,color=zone,shape=zone))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("ASV number")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(text=element_text(family="Times"),legend.position="none")+
geom_jitter(alpha=0.5)+
facet_wrap(~site)
gg.ob
quartz()
ggarrange(gg.sh,gg.si,legend="none")
#different version - with overall panel
gg.total <- ggplot(df.div, aes(x=zone, y=InvSimpson,color=zone,shape=zone))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Inv. Simpson diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)
quartz()
ggarrange(gg.total,gg.si,widths=c(0.45,1),labels=c("(b)","(c)"))
#stats
library(bestNormalize)
bestNormalize(df.div$InvSimpson)
obs.norm <- orderNorm(df.div$InvSimpson)
df.div$obs.norm <- obs.norm$x.t
shapiro.test(df.div$obs.norm)
#nothing worked
wilcox.test(Shannon~zone,data=df.div)
wilcox.test(InvSimpson~zone,data=df.div)
wilcox.test(Observed~zone,data=df.div)
mnw <- subset(df.div,site=="MNW")
mse <- subset(df.div,site=="MSE")
tah <- subset(df.div,site=="TNW")
wilcox.test(Shannon~zone,data=mnw)
#p = 0.05286 . rare
wilcox.test(InvSimpson~zone,data=mnw)
#p = 0.04687 * rare
wilcox.test(Observed~zone,data=mnw)
#no
#W = 103.5, p-value = 0.506
summary(aov(Shannon~zone,data=mse))
wilcox.test(Shannon~zone,data=mse)
#p = 0.0031 ** rare
wilcox.test(InvSimpson~zone,data=mse)
#p = 0.01 * rare
wilcox.test(Observed~zone,data=mse)
#p = 0.01 * rare
wilcox.test(Shannon~zone,data=tah)
#p = 0.49
wilcox.test(InvSimpson~zone,data=tah)
#p = 0.65
wilcox.test(Observed~zone,data=tah)
#p = 0.59
#install.packages("dunn.test")
library(dunn.test)
dunn.test(df.div$Shannon,df.div$site_zone,method="bh")
#### div stats with size ####
row.names(df.div) <- df.div$Sample
size <- read.csv("~/moorea_holobiont/mr_ITS2/mr_size.csv",header=TRUE,row.names=1)
row.names.remove <- c(109)
size2 <- size[!(row.names(size) %in% row.names.remove),]
df.div.size <- merge(df.div,size2,by=0)
lm.sh.size <- lm(size~Shannon,data=df.div.size)
summary(lm.sh.size)
row.names(df.div) <- df.div$Sample
df.div.size <- merge(df.div,size2,by=0)
lm.si.size <- lm(size~InvSimpson,data=df.div.size)
summary(lm.si.size)
row.names(df.div) <- df.div$Sample
df.div.size <- merge(df.div,size2,by=0)
lm.ob.size <- lm(size~Observed,data=df.div.size)
summary(lm.ob.size)
plot(size~Shannon,data=df.div.size)
#just cladocopium
ps.rare.c <- subset_taxa(ps.rare,Phylum==" Clade C")
df.div.c <- estimate_richness(ps.rare.c, split=TRUE, measures =c("Shannon","InvSimpson","Observed"))
df.div.c$Sample <- rownames(df.div.c)
df.div.c$Sample <- gsub("X","",df.div.c$Sample)
df.div.c <- merge(df.div.c,samdf,by="Sample") #add sample data
ggplot(df.div.c, aes(x=zone, y=Observed,color=zone,shape=zone))+
#geom_errorbar(aes(ymin=InvSimpson-se,ymax=InvSimpson+se),position=position_dodge(0.5),lwd=0.4,width=0.4)+
#geom_point(aes(colour=zone, shape=zone),size=4,position=position_dodge(0.5))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Shannon diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)+
  facet_wrap(~site)
#ylim(-0.01, 1.7) #not applied; that range suited the Shannon panels, not observed ASV counts
##checking out how diversity works with variables - nothing interesting
#df.size <- read.csv("mr_size.csv",header=TRUE)
#df.div$coral_id <- row.names(df.div)
#
# mergeddf <- merge(df.div, df.size, by="coral_id", sort=FALSE)
# plot(Shannon~size,data=mergeddf)
# shapiro.test(mergeddf$Shannon)
# shapiro.test(mergeddf$size)
# kruskal.test(Shannon~size,data=mergeddf)
## no significant relationship between coral size & diversity! interesting
## now by sample host heterozygosity
# df.het <- read.table("host_het.txt") #read in data
# df.het$het <- df.het$V3/(df.het$V2+df.het$V3) #heterozygosity calculations
# df.het$V1 <- sub("TO","TNWO",df.het$V1) #just renaming some sites
# df.het$V1 <- sub("TI","TNWI",df.het$V1) #just renaming some sites
# df.het$site <- substr(df.het$V1, 0, 4)
# df.het$site <- as.factor(df.het$site)
# df.het$site <- sub("I","-B", df.het$site)
# df.het$site <- sub("O","-F", df.het$site)
# str(df.het)
## just getting sample names on the same page
# df.het$V1 <- sub(".trim.bt2.bam.out.saf.idx.ml","",df.het$V1)
# df.het$coral_id <- substr(df.het$V1, 6, 8)
# df.div$coral_id <- row.names(df.div)
#
# mergeddf2 <- merge(df.div, df.het, by="coral_id", sort=TRUE)
# plot(Shannon~het,data=mergeddf2)
# kruskal.test(Shannon~het,data=mergeddf2)
## not significant!
#### MCMC.OTU to remove underrepresented ASVs ####
library(MCMC.OTU)
#added a column with a blank name in the beginning, with 1-95 in the column, mcmc.otu likes this
#also removed the X from the beginning of sample names
lulu.rare.mcmc <- read.csv("~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_rd2_mcmc.csv",header=TRUE)
#& reading back in things
goods <- purgeOutliers(lulu.rare.mcmc,count.columns=3:21,otu.cut=0.001,zero.cut=0.02) #rare
#otu.cut = 0.1% of reads represented by ASV
#zero.cut = present in more than 1 sample (2% of samples)
colnames(goods)
#sq 1, 2, 3, 6, 7, 12, 18, 24, 32 with min 99% matching in lulu
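#rough manual check of what those two cutoffs keep (a sketch based on the comments above,
#not purgeOutliers' exact internals):
count.cols <- lulu.rare.mcmc[,3:21]
rel.total <- colSums(count.cols)/sum(count.cols) #share of all reads per ASV
prev <- colSums(count.cols > 0)/nrow(count.cols) #fraction of samples where the ASV occurs
names(which(rel.total >= 0.001 & prev > 0.02)) #should roughly match colnames(goods)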
#not rare
lulu.mcmc <- read.csv("~/moorea_holobiont/mr_ITS2/lulu_output_mcmc.csv",header=TRUE)
goods <- purgeOutliers(lulu.mcmc,count.columns=3:21,otu.cut=0.001,zero.cut=0.02) #not rare
#otu.cut = 0.1% of reads represented by ASV
#zero.cut = present in more than 1 sample (2% of samples)
colnames(goods)
#sq 1, 2, 3, 6, 7, 12, 18, 24, 32 with min 99% matching in lulu
rownames(goods) <- goods$sample
counts <- goods[,3:11]
#mcmc.otu removed 3 undersequenced samples: "513" "530" "76", "87" bad to begin with
remove <- c("513","530","76","87")
samdf.mcmc <- samdf[!row.names(samdf)%in%remove,]
#write.csv(samdf.mcmc,"~/Desktop/mr_samples.csv")
write.csv(counts,file="seqtab_lulu.trimmed.csv")
write.csv(counts,file="seqtab_lulu.rare.trimmed.csv")
counts <- read.csv("seqtab_lulu.trimmed.csv",row.names=1,header=TRUE)
counts <- read.csv("seqtab_lulu.rare.trimmed.csv",row.names=1,header=TRUE)
ps.mcmc <- phyloseq(otu_table(counts, taxa_are_rows=FALSE),
sample_data(samdf.mcmc),
tax_table(taxa2))
ps.mcmc
ps.rare.mcmc <- phyloseq(otu_table(counts, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps.rare.mcmc #9 taxa
#### Bar plot - clustered, trimmed ####
#bar plot
ps_glom <- tax_glom(ps.rare.mcmc, "Class")
ps1 <- merge_samples(ps_glom, "site_zone")
ps2 <- transform_sample_counts(ps1, function(x) x / sum(x))
plot_bar(ps2, fill="Class",x="site_zone")
ps.mcmc.melt <- psmelt(ps.rare.mcmc)
ps.mcmc.melt$newclass <- paste(ps.mcmc.melt$Class,ps.mcmc.melt$OTU,sep="_")
#boxplot
ggplot(ps.mcmc.melt,aes(x=site_zone,y=Abundance,color=newclass))+
geom_boxplot()
#individually
sq3 <- subset(ps.mcmc.melt,newclass==" C3k_sq3")
ggplot(sq3,aes(x=site_zone,y=Abundance,color=Class))+
geom_boxplot()
library(dplyr)
ps.all <- transform_sample_counts(ps.rare.mcmc, function(OTU) OTU/sum(OTU))
pa <- psmelt(ps.all)
tb <- psmelt(ps.all)%>%
filter(!is.na(Abundance))%>%
group_by(site_zone,OTU)%>%
summarize_at("Abundance",mean)
#some more grouping variables
tb <- psmelt(ps.all)%>%
filter(!is.na(Abundance))%>%
group_by(site,zone,site_zone,OTU)%>%
summarize_at("Abundance",mean)
ggplot(tb,aes(x=site_zone,y=Abundance,fill=OTU))+
geom_bar(stat="identity", colour="black")+
theme_cowplot()
#renaming
#ps.all@tax_table
# Taxonomy Table: [9 taxa by 4 taxonomic ranks]:
# Kingdom Phylum Class
# sq1 "Symbiodinium" " Clade C" " C3k" - 1
# sq2 "Symbiodinium" " Clade C" " Cspc" - 1
# sq3 "Symbiodinium" " Clade C" " C3k" - 2
# sq6 "Symbiodinium" " Clade C" " C3k" - 3
# sq7 "Symbiodinium" " Clade A" " A1"
# sq12 "Symbiodinium" " Clade C" NA - 1
# sq18 "Symbiodinium" " Clade C" NA - 2
# sq24 "Symbiodinium" " Clade C" " C3k" - 4
# sq32 "Symbiodinium" " Clade C" " Cspc" - 2
tb$sym <- gsub("sq12","C. - 1",tb$OTU)
tb$sym <- gsub("sq18","C. - 2",tb$sym)
tb$sym <- gsub("sq1","C3k - 1",tb$sym)
tb$sym <- gsub("sq24","C3k - 4",tb$sym)
tb$sym <- gsub("sq2","Cspc - 1",tb$sym)
tb$sym <- gsub("sq32","Cspc - 2",tb$sym)
tb$sym <- gsub("sq3","C3k - 2",tb$sym)
tb$sym <- gsub("sq6","C3k - 3",tb$sym)
tb$sym <- gsub("sq7","A1",tb$sym)
tb$zone <- gsub("Forereef","FR",tb$zone)
tb$zone <- gsub("Backreef","BR",tb$zone)
tb$site <- gsub("MNW","Mo'orea NW",tb$site)
tb$site <- gsub("MSE","Mo'orea SE",tb$site)
tb$site <- gsub("TNW","Tahiti NW",tb$site)
tb$sym <- factor(tb$sym, levels=c("C3k - 1","C3k - 2","C3k - 3","C3k - 4","Cspc - 1", "Cspc - 2","C. - 1","C. - 2","A1"))
quartz()
gg.bp <- ggplot(tb,aes(x=zone,y=Abundance,fill=sym))+
geom_bar(stat="identity")+
theme_cowplot()+
#theme(text=element_text(family="Times"))+
xlab('Reef zone')+
# scale_fill_manual(name="Sym.",values=c("seagreen1","seagreen2","seagreen3","seagreen4","blue","darkblue","orange","yellow","purple"))
scale_fill_manual(name="Algal symbiont",values=c("#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff","#C70039", "#8F0C3F","#d4e21aff"))+
facet_wrap(~site)
gg.bp
#"#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff","#440154FF", "#48196bff","#d4e21aff"
quartz()
ggarrange(gg.bp,
ggarrange(gg.sh,gg.si,ncol=2,labels=c("(b)","(c)"),common.legend=T,legend="none"),nrow=2,labels="(a)")
#getting raw average relative abundances
ps.rel <- transform_sample_counts(ps_no87, function(x) x / sum(x))
plot_bar(ps.rel,fill="Class")
ps.glom <- tax_glom(ps.rel, "Class")
c3k <- subset_taxa(ps.glom,Class==" C3k")
c3k.otu <- as.data.frame(c3k@otu_table)
mean(c3k.otu$sq1)
#all the seqs - 9 total
cspc <- subset_taxa(ps.rel,Class==" Cspc")
#rel abundance
cspc <- subset_taxa(ps.glom,Class==" Cspc")
cspc.otu <- as.data.frame(cspc@otu_table)
mean(cspc.otu$sq2)
#background ones
back <- subset_taxa(ps.rel,is.na(Class))
all.otu <- ps2@otu_table
# Taxonomy Table: [6 taxa by 4 taxonomic ranks]:
# Kingdom Phylum Class
# sq1 "Symbiodinium" " Clade C" " C3k" NA
# sq2 "Symbiodinium" " Clade C" " Cspc" NA
# sq7 "Symbiodinium" " Clade A" " A1" NA
# sq33 "Symbiodinium" " Clade A" " A3" NA
# sq66 "Symbiodinium" " Clade B" " B1" NA
# sq81 "Symbiodinium" " Clade C" " C116" NA
all.otu2 <- as.data.frame(all.otu)
mean(all.otu2$sq1)
ps.rel <- transform_sample_counts(ps.mcmc, function(x) x / sum(x))
plot_bar(ps.rel,fill="Class")
ps.glom <- tax_glom(ps.rel, "Class")
#c3k.mcmc <- subset_taxa(ps.glom,Class==" C3k")
#c3k.otu <- as.data.frame(c3k.mcmc@otu_table)
#mean(c3k.otu$sq1)
#cs.mcmc <- subset_taxa(ps.glom,Class==" Cspc")
#cs.otu <- as.data.frame(cs.mcmc@otu_table)
#mean(cs.otu$sq2)
#range(cs.otu$sq2)
#### Bar plots by individual sqs ####
#adding sqs to taxa2
taxa3 <- data.frame(taxa2)
taxa3$sqs <- c(rownames(taxa3))
#renaming sqs
taxa3$sqs <- gsub("sq12","C. - 1",taxa3$sqs)
taxa3$sqs <- gsub("sq18","C. - 2",taxa3$sqs)
taxa3$sqs <- gsub("sq1","C3k - 1",taxa3$sqs)
taxa3$sqs <- gsub("sq24","C3k - 4",taxa3$sqs)
taxa3$sqs <- gsub("sq2","Cspc - 1",taxa3$sqs)
taxa3$sqs <- gsub("sq32","Cspc - 2",taxa3$sqs)
taxa3$sqs <- gsub("sq3","C3k - 2",taxa3$sqs)
taxa3$sqs <- gsub("sq6","C3k - 3",taxa3$sqs)
taxa3$sqs <- gsub("sq7","A1",taxa3$sqs)
taxa3 <- as.matrix(taxa3)
#renaming reef zones
samdf.new <- samdf
samdf.new$zone <- gsub("Forereef","FR",samdf.new$zone)
samdf.new$zone <- gsub("Backreef","BR",samdf.new$zone)
ps.rare.mcmc.newnames <- phyloseq(otu_table(counts, taxa_are_rows=FALSE),
sample_data(samdf.new),
tax_table(taxa3))
ps.rare.mcmc.newnames #9 taxa
ps.mnw <- subset_samples(ps.rare.mcmc.newnames,site=="MNW")
quartz()
bar.mnw <- plot_bar(ps.mnw,x="zone",y="Abundance",fill="sqs")+
facet_wrap(~sqs,scales="free",ncol=5)+
ggtitle("(a) Mo'orea NW")+
theme_cowplot()+
scale_fill_manual(name="Algal symbiont",values=c("#d4e21aff","#C70039","#8F0C3F","#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff"))+
xlab("")
ps.mse <- subset_samples(ps.rare.mcmc.newnames,site=="MSE")
bar.mse <- plot_bar(ps.mse,x="zone",y="Abundance",fill="sqs")+
facet_wrap(~sqs,scales="free",ncol=5)+
ggtitle("(b) Mo'orea SE")+
theme_cowplot()+
scale_fill_manual(name="Algal symbiont",values=c("#d4e21aff","#C70039","#8F0C3F","#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff"))+
xlab("")
bar.mse
ps.tnw <- subset_samples(ps.rare.mcmc.newnames,site=="TNW")
bar.tnw <- plot_bar(ps.tnw,x="zone",y="Abundance",fill="sqs")+
facet_wrap(~sqs,scales="free",ncol=5)+
ggtitle("(c) Tahiti NW")+
theme_cowplot()+
scale_fill_manual(name="Algal symbiont",values=c("#d4e21aff","#C70039","#8F0C3F","#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff"))+
xlab("Reef zone")
quartz()
ggarrange(bar.mnw,bar.mse,bar.tnw,ncol=1,common.legend=TRUE,legend="none")
#presence absence
counts_pres <- counts
counts_pres[counts_pres>0] <- 1
ps.pres <- phyloseq(otu_table(counts_pres, taxa_are_rows=FALSE),
                    sample_data(samdf),
                    tax_table(taxa3)) #taxa3 carries the renamed "sqs" column used in facet_grid below
ps.pres #9 taxa
ps.mnw.pres <- subset_samples(ps.pres,site=="MNW")
plot_bar(ps.mnw.pres,x="zone",y="Abundance",fill="Class",facet_grid=~sqs)
ps.mse.pres <- subset_samples(ps.pres,site=="MSE")
plot_bar(ps.mse.pres,x="zone",y="Abundance",fill="Class",facet_grid=~sqs)
ps.tnw.pres <- subset_samples(ps.pres,site=="TNW")
plot_bar(ps.tnw.pres,x="zone",y="Abundance",fill="Class",facet_grid=~sqs)
#### Pcoa - clustered & trimmed ####
library(vegan)
library(cowplot)
#install.packages("ggforce")
#library(ggforce)
#not sure if I need this one^
#all
#original overview
ps.rare.mcmc.noa <- subset_taxa(ps.rare.mcmc,Phylum==" Clade C")
plot_ordination(ps.rare.mcmc.noa, ordinate(ps.rare.mcmc.noa, "PCoA"), color = "zone") +
geom_point()+
stat_ellipse(level=0.8,aes(lty=zone),geom="polygon",alpha=0.1)
#not sure what this was
#p3 = plot_ordination(GP1, GP.ord, type="biplot", color="SampleType", shape="Phylum", title="biplot")
#site
gg.site.alone <- plot_ordination(ps.rare.mcmc, ordinate(ps.rare.mcmc, "PCoA"), color="site", shape="site")+
geom_point(size=2)+
stat_ellipse(level=0.8,aes(lty=site),geom="polygon",alpha=0.1)+
xlab('Axis 1 (57.1%)')+
ylab('Axis 2 (35.2%)')+
theme_cowplot()+
scale_linetype_manual(values=c("longdash","dotted","dotdash"),labels=c("MNW","MSE","TNW"))+
scale_color_manual(values=c("darkslategray3","darkslategray4","#000004"),labels=c("MNW","MSE","TNW"))+
scale_shape_manual(values=c(8,4,9),labels=c("MNW","MSE","TNW"))+
labs(shape="Site",color="Site",linetype="Site")
quartz()
gg.site.alone
gg.site.taxa <- plot_ordination(ps.rare.mcmc, ordinate(ps.rare.mcmc, "PCoA"), type="biplot", color="site", shape="Class")+
geom_point(size=2)+
#stat_ellipse(level=0.8,aes(lty=site),geom="polygon",alpha=0.1)+
xlab('Axis 1 (57.1%)')+
ylab('Axis 2 (35.2%)')+
theme_cowplot()
gg.site.taxa
#### BIPLOT - VERY COOL ####
plot_ordination(ps.rare.mcmc, ordinate(ps.rare.mcmc, "PCoA"), type="biplot", color="site", shape="Phylum", title="biplot")
#by site
ps.mnw <- subset_samples(ps.rare.mcmc,site=="MNW")
#ps.mnw <- subset_samples(ps.trim,site=="mnw")
ord.mnw <- ordinate(ps.mnw, "PCoA", "bray")
plot_ordination(ps.mnw, ord.mnw, type="biplot", color="zone", shape="Class", title="biplot")#+
#  theme(legend.position="none") #left commented to match the '#+' above; uncomment both to hide the legend
gg.mnw <- plot_ordination(ps.mnw, ord.mnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea NW")+
#annotate(geom="text", x=0.7, y=0.2, label="p < 0.01**",size=4)+ #rarefied
#annotate(geom="text", x=-0.25, y=0.6, label="p < 0.01**",size=4)+ #not rarefied
xlab("Axis 1 (69.4%)")+ #rarefied
ylab("Axis 2 (25.6%)")+ #rarefied
#xlab("Axis 1 (34.3%)")+#non-rarefied
#ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mnw
ps.mse <- subset_samples(ps.rare.mcmc,site=="MSE")
#ps.mse <- subset_samples(ps.trim,site=="mse")
ord.mse <- ordinate(ps.mse, "PCoA", "bray")
gg.mse <- plot_ordination(ps.mse, ord.mse,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea SE")+
#annotate(geom="text", x=-0.4, y=0.6, label="p < 0.01**",size=4)+ #rarefied
#annotate(geom="text", x=-0.25, y=0.6, label="p < 0.01**",size=4)+ #not rarefied
xlab("Axis 1 (89.9%)")+ #rarefied
ylab("Axis 2 (8.9%)")+ #rarefied
#xlab("Axis 1 (34.3%)")+#non-rarefied
#ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mse
ps.tnw <- subset_samples(ps.rare.mcmc,site=="TNW")
#ps.tnw <- subset_samples(ps.trim,site=="TNW")
ord.tnw <- ordinate(ps.tnw, "PCoA", "bray")
gg.tnw <- plot_ordination(ps.tnw, ord.tnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Tahiti NW")+
xlab("Axis 1 (62.9%)")+ #rarefied
ylab("Axis 2 (29.6%)")+ #rarefied
#xlab("Axis 1 (34.3%)")+#non-rarefied
#ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.tnw
quartz()
ggarrange(gg.mnw,gg.mse,gg.tnw,nrow=1,common.legend=TRUE,legend='right',labels=c("(a)","(b)","(c)"))
#### pcoa - rel abundance ####
ps.mcmc.rel <- transform_sample_counts(ps.mcmc, function(x) x / sum(x))
#by site
ps.mnw <- subset_samples(ps.mcmc.rel,site=="MNW")
#ps.mnw <- subset_samples(ps.trim,site=="mnw")
ord.mnw <- ordinate(ps.mnw, "PCoA", "bray")
gg.mnw <- plot_ordination(ps.mnw, ord.mnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea NW")+
xlab("Axis 1 (68.0%)")+#non-rarefied
ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mnw
ps.mse <- subset_samples(ps.mcmc.rel,site=="MSE")
ord.mse <- ordinate(ps.mse, "PCoA", "bray")
gg.mse <- plot_ordination(ps.mse, ord.mse,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea SE")+
xlab("Axis 1 (87.7%)")+#non-rarefied
ylab("Axis 2 (9.7%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mse
ps.tnw <- subset_samples(ps.mcmc.rel,site=="TNW")
ord.tnw <- ordinate(ps.tnw, "PCoA", "bray")
gg.tnw <- plot_ordination(ps.tnw, ord.tnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Tahiti NW")+
xlab("Axis 1 (60.3%)")+#non-rarefied
ylab("Axis 2 (31.9%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.tnw
quartz()
ggarrange(gg.mnw,gg.mse,gg.tnw,nrow=1,common.legend=TRUE,legend='right',labels=c("(a)","(b)","(c)"))
# ps.tah <- subset_samples(ps.mcmc,site=="TNW")
# gg.tah <- plot_ordination(ps.tah, ordinate(ps.tah, "PCoA"), color = "zone")+
# geom_point()+
# stat_ellipse(level=0.95)
#### Deseq differentially abundant ####
library(DESeq2)
#checking if any significant
ps.mnw = subset_samples(ps.rare.mcmc, site=="MNW")
ds.mnw = phyloseq_to_deseq2(ps.mnw, ~ zone)
dds.mnw <- estimateSizeFactors(ds.mnw,type="poscounts")
stat.mnw = DESeq(dds.mnw, test="Wald", fitType="parametric")
res = results(stat.mnw, cooksCutoff = FALSE)
alpha = 0.05
sigtab.mnw = res[which(res$padj < alpha), ]
sigtab.mnw = cbind(as(sigtab.mnw, "data.frame"), as(tax_table(ps.mnw)[rownames(sigtab.mnw), ], "matrix"))
head(sigtab.mnw)
dim(sigtab.mnw)
#none
ps.mse = subset_samples(ps.rare.mcmc, site=="MSE")
ds.mse = phyloseq_to_deseq2(ps.mse, ~ zone)
dds.mse <- estimateSizeFactors(ds.mse,type="poscounts")
stat.mse = DESeq(dds.mse, test="Wald", fitType="parametric")
res = results(stat.mse, cooksCutoff = FALSE)
alpha = 0.05
sigtab.mse = res[which(res$padj < alpha), ]
sigtab.mse = cbind(as(sigtab.mse, "data.frame"), as(tax_table(ps.mse)[rownames(sigtab.mse), ], "matrix"))
head(sigtab.mse)
dim(sigtab.mse)
#sq 2 & 3
ps.t = subset_samples(ps.rare, site=="TNW")
ds.t = phyloseq_to_deseq2(ps.t, ~ zone)
dds.t <- estimateSizeFactors(ds.t,type="poscounts")
stat.t = DESeq(dds.t, test="Wald", fitType="parametric")
res = results(stat.t, cooksCutoff = FALSE)
alpha = 0.05
sigtab.t = res[which(res$padj < alpha), ]
sigtab.t = cbind(as(sigtab.t, "data.frame"), as(tax_table(ps.t)[rownames(sigtab.t), ], "matrix"))
dim(sigtab.t)
#sq 7
#### Heat map ####
#install.packages("pheatmap")
library(pheatmap)
library(dplyr)
#transform to relative abundance rather than absolute
counts.sam <- counts #work on a copy so 'counts' stays all-numeric for the ordination/stats sections below
counts.sam$Sample <- rownames(counts.sam)
newnames <- merge(samdf,counts.sam, by="Sample")
sq1 <- newnames %>%
group_by(site_zone) %>%
summarise(sq1 = sum(sq1))
sq2 <- newnames %>%
group_by(site_zone) %>%
summarise(sq2 = sum(sq2))
sq3 <- newnames %>%
group_by(site_zone) %>%
summarise(sq3 = sum(sq3))
sq6 <- newnames %>%
group_by(site_zone) %>%
summarise(sq6 = sum(sq6))
sq7 <- newnames %>%
group_by(site_zone) %>%
summarise(sq7 = sum(sq7))
sq12 <- newnames %>%
group_by(site_zone) %>%
summarise(sq12 = sum(sq12))
sq18 <- newnames %>%
group_by(site_zone) %>%
summarise(sq18 = sum(sq18))
sq24 <- newnames %>%
group_by(site_zone) %>%
summarise(sq24 = sum(sq24))
sq32 <- newnames %>%
group_by(site_zone) %>%
summarise(sq32 = sum(sq32))
allsq <- newnames %>%
group_by(site_zone) %>%
summarise(all = sum(sq1, sq2, sq3, sq6, sq7, sq12, sq18, sq24, sq32))
df1 <- merge(sq1, sq2, by="site_zone")
df2 <- merge(df1,sq3,by="site_zone")
df3 <- merge(df2,sq6,by="site_zone")
df4 <- merge(df3,sq7,by="site_zone")
df5 <- merge(df4,sq12,by="site_zone")
df6 <- merge(df5,sq18,by="site_zone")
df7 <- merge(df6,sq24,by="site_zone")
df.all <- merge(df7,sq32,by="site_zone")
rownames(df.all) <- df.all$site_zone
df.counts <- df.all[,2:10]
df.counts.t <- t(df.counts)
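#a hedged, more concise equivalent of the per-ASV summing above (assumes dplyr >= 1.0 for across());
#should reproduce df.counts.t up to row/column order
sq.sums <- newnames %>%
  group_by(site_zone) %>%
  summarise(across(starts_with("sq"), sum)) %>%
  as.data.frame()
rownames(sq.sums) <- sq.sums$site_zone
df.counts.t.alt <- t(sq.sums[,-1])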
#relative abundance
dat <- scale(df.counts.t, center=F, scale=colSums(df.counts.t))
quartz()
pheatmap(dat,colorRampPalette(c('white','chartreuse3','darkgreen'))(50),cluster_cols=F)
#simpler per-sample version of the heat map, skipping the per-site/zone summing above
counts.t <- t(counts)
dat2 <- scale(counts.t, center=F, scale=colSums(counts.t))
pheatmap(dat2,colorRampPalette(c('white','blue'))(50))
#### rarefy #####
library(vegan)
rarecurve(counts,step=100,label=FALSE) #after clustering & trimming
total <- rowSums(counts)
total
subset(total, total <1994)
summary(total)
row.names.remove <- c("117","311","402","414","505","58","72")
counts.rare <- counts[!(row.names(counts) %in% row.names.remove),]
samdf.rare <- samdf.mcmc[!(row.names(samdf.mcmc) %in% row.names.remove), ]
#85 samples left
seq.rare <- rrarefy(counts.rare,sample=1994)
rarecurve(seq.rare,step=100,label=FALSE)
rarecurve(seqtab.no87,step=100,label=FALSE)#before clustering & trimming
#save
#write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994.csv")
write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_all.csv")
#read back in
seq.rare <- read.csv("~/moorea_holobiont/mr_ITS2/seqtab.rare_1994.csv",row.names=1)
#phyloseq object
ps.rare <- phyloseq(otu_table(seq.rare, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps_glom <- tax_glom(ps.rare, "Class")
ps0 <- transform_sample_counts(ps_glom, function(x) x / sum(x))
ps1 <- merge_samples(ps0, "site_zone")
ps2 <- transform_sample_counts(ps1, function(x) x / sum(x))
plot_bar(ps2, fill="Class")
plot_ordination(ps.rare, ordinate(ps.rare,method="PCoA"), color = "zone")+
geom_point()+
facet_wrap(~site)+
theme_cowplot()+
stat_ellipse()
#ugly
#### PCoA - rarefied, clustered, trimmed ####
library(ape) #for pcoa()
df.seq <- as.data.frame(seq.rare)
all.log=logLin(data=df.seq) #logLin() comes from MCMC.OTU, loaded above
# computing Manhattan distances (sum of all log-fold-changes) and performing PCoA:
all.dist=vegdist(all.log,method="manhattan")
all.pcoa=pcoa(all.dist)
write.csv(counts,"~/Desktop/mr_counts_rare.csv")
write.csv(samdf.rare,"~/Desktop/mr_samples_rare.csv")
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.rare)
pcoa.all$site <- gsub("MNW","Moorea NW",pcoa.all$site)
pcoa.all$site <- gsub("MSE","Moorea SE",pcoa.all$site)
pcoa.all$site <- gsub("TNW","Tahiti NW",pcoa.all$site)
quartz()
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
facet_wrap(~site)+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 (54.14%)")+
ylab("Axis 2 (33.58%)")
#looks the same as before rarefying?
#now by site
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
#3 outliers: 178, 116, 113
row.names.remove <- c("178","116","113")
pcoa.mnw.less <- pcoa.mnw[!(pcoa.mnw$Sample %in% row.names.remove),]
gg.mnw <- ggplot(pcoa.mnw.less,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
#### Stats ####
library(vegan)
#help on adonis here:
#https://thebiobucket.blogspot.com/2011/04/assumptions-for-permanova-with-adonis.html#more
#all
#dist.seqtab <- vegdist(seqtab.no87)
#anova(betadisper(dist.seqtab,samdf.no87$zone))
adonis(seqtab.no87 ~ zone, strata=samdf.no87$site, data=samdf.no87, permutations=999)
#0.01 **
#clustered but not trimmed
adonis(lulu.out ~ zone, strata=samdf.no87$site, data=samdf.no87, permutations=999)
#p 0.009
samdf.rare <- data.frame(sample_data(ps.rare))
#clustered, trimmed, rarefied
#stats by site
samdf.rare.mcmc <- data.frame(ps.rare.mcmc@sam_data)
rownames(samdf.rare.mcmc) == rownames(counts) #check sample order matches - good
dist.rare <- vegdist(counts)
bet <- betadisper(dist.rare,samdf.rare.mcmc$site)
anova(bet)
permutest(bet, pairwise = FALSE, permutations = 99)
plot(bet)
adonis(counts ~ site, data=samdf.rare.mcmc, permutations=999)
#0.061 .
#install.packages("remotes")
#remotes::install_github("Jtrachsel/funfuns")
library("funfuns")
pairwise.adonis(counts, factors = samdf.rare.mcmc$site, permutations = 999)
# pairs F.Model R2 p.value p.adjusted
# 1 MNW vs MSE 1.545093 0.02685012 0.122 0.122
# 2 MNW vs TNW 5.737211 0.09936766 0.001 0.001
# 3 MSE vs TNW 13.668578 0.19619431 0.001 0.001
#relative abundance, does this by columns so must transform
t.relabun <- scale(t(counts), center=F, scale=colSums(t(counts)))
#un-transform
relabun <- t(t.relabun)
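#sanity check: after un-transposing, each sample (row) of the relative-abundance table should sum to 1
range(rowSums(relabun)) #both values should be 1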
adonis(relabun ~ zone, strata=samdf.mcmc$site, data=samdf.mcmc, permutations=999)
#0.02 *
#rarefied, clustered, trimmed
dist.rare <- vegdist(seq.rare)
#dist.rare <- vegdist(counts.rare)
bet <- betadisper(dist.rare,samdf.rare$site)
#significant
anova(bet)
permutest(bet, pairwise = FALSE, permutations = 99)
plot(bet)
adonis(counts.rare ~ zone, strata=samdf.rare$site, data=samdf.rare, permutations=999)
adonis(seq.rare ~ zone, strata=samdf.rare$site, data=samdf.rare, permutations=999)
#0.07 . - 0.1
#Moorea NW
mnw.rare <- subset(samdf.rare.mcmc,site=="MNW")
mnw.seq <- counts[(rownames(counts) %in% mnw.rare$Sample),]
dist.mnw <- vegdist(mnw.seq)
bet.mnw <- betadisper(dist.mnw,mnw.rare$zone)
anova(bet.mnw) #not sig
permutest(bet.mnw, pairwise = FALSE, permutations = 99)
plot(bet.mnw) #not sig
adonis(mnw.seq ~ zone, data=mnw.rare, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone 1 0.11117 0.111170 2.3662 0.08646 0.023 *
# Residuals 25 1.17458 0.046983 0.91354
# Total 26 1.28575 1.00000
#Moorea SE
mse.rare <- subset(samdf.rare.mcmc,site=="MSE")
mse.seq <- counts[(rownames(counts) %in% mse.rare$Sample),]
dist.mse <- vegdist(mse.seq)
bet.mse <- betadisper(dist.mse,mse.rare$zone)
anova(bet.mse)
permutest(bet.mse, pairwise = FALSE, permutations = 99)
plot(bet.mse) #not sig
adonis(mse.seq ~ zone, data=mse.rare, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone 1 0.04445 0.04445 6.0477 0.17256 0.015 *
# Residuals 29 0.21315 0.00735 0.82744
# Total 30 0.25760 1.00000
#Tahiti
tnw.rare <- subset(samdf.rare,site=="TNW")
tnw.seq <- counts[(rownames(counts) %in% tnw.rare$Sample),]
dist.tnw <- vegdist(tnw.seq)
bet.tnw <- betadisper(dist.tnw,tnw.rare$zone)
anova(bet.tnw)
permutest(bet.tnw, pairwise = FALSE, permutations = 99)
plot(bet.tnw) #sig
adonis(tnw.seq ~ zone, data=tnw.rare, permutations=99)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone 1 0.22079 0.22079 2.7127 0.09789 0.05 *
# Residuals 25 2.03476 0.08139 0.90211
# Total 26 2.25554 1.00000
#log-normalized
df.seq <- as.data.frame(seqtab.no87)
all.log=logLin(data=df.seq)
all.dist=vegdist(all.log,method="manhattan")
all.pcoa=pcoa(all.dist)
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87)
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
facet_wrap(~site)
adonis(df.seq ~ zone, strata=samdf.no87$site, data=samdf.no87, permutations=999)
#0.012 *
#rarefied, but not clustered & trimmed
rarecurve(counts.rare,label=F) #yep def rarefied
adonis(counts.rare ~ zone, strata=samdf.rare$site, data=samdf.rare, permutations=999)
#0.01 **
#### stats plus size ####
library(vegan)
size <- read.csv("~/moorea_holobiont/mr_ITS2/mr_size.csv",header=TRUE,row.names=1)
row.names.remove <- c(109)
size2 <- size[!(row.names(size) %in% row.names.remove),]
row.names(size2) <- size2$coral_id
samdf.size <- merge(size2,samdf,by=0)
row.names(samdf.size) <- c(samdf.size$coral_id)
ord.mnw.df <- data.frame(ord.mnw[["vectors"]])
ord.mnw.df2 <- ord.mnw.df[,1:2]
ord.samdf.size.mnw <- merge(ord.mnw.df2,samdf.size,by=0)
plot(Axis.1~log(size),data=ord.samdf.size.mnw)
lm.size.mnw <- lm(Axis.1~size,data=ord.samdf.size.mnw)
summary(lm.size.mnw)
plot(Axis.2~log(size),data=ord.samdf.size.mnw)
lm.size.mnw2 <- lm(Axis.2~size,data=ord.samdf.size.mnw)
summary(lm.size.mnw2)
#skipping the above ord correlations
size.rows <- row.names(samdf.size)
size3 <- counts[(row.names(counts) %in% size.rows),]
size.rows2 <- c(row.names(size3))
samdf.size2 <- samdf.size[(row.names(samdf.size) %in% size.rows2),]
#samdf.size.sorted <- samdf.size2[nrow(samdf.size2):1, ]
#size3.sorted <- size3[nrow(size3):1, ]
#tahiti
samdf.size.tnw <- subset(samdf.size2,site.y=="TNW")
counts.size.tnw <- size3[(rownames(size3) %in% samdf.size.tnw$coral_id),]
row.names(samdf.size.tnw) == row.names(counts.size.tnw)
dist.tnw <- vegdist(counts.size.tnw)
bet.tnw <- betadisper(dist.tnw,samdf.size.tnw$zone.y)
anova(bet.tnw)
permutest(bet.tnw, pairwise = FALSE, permutations = 99)
plot(bet.tnw) #sig
adonis(counts.size.tnw ~ zone.y*size, data=samdf.size.tnw, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone.y 1 0.22079 0.220788 3.0991 0.09789 0.018 *
# size 1 0.28944 0.289440 4.0628 0.12832 0.047 *
# zone.y:size 1 0.10674 0.106743 1.4983 0.04732 0.194
# Residuals 23 1.63857 0.071242 0.72646
# Total 26 2.25554 1.00000
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#plot(log(counts.size.tnw$sq7)~log(samdf.size.tnw$size))
#moorea nw
samdf.size.mnw <- subset(samdf.size2,site.y=="MNW")
counts.size.mnw <- size3[(rownames(size3) %in% samdf.size.mnw$coral_id),]
row.names(samdf.size.mnw) == row.names(counts.size.mnw)
dist.mnw <- vegdist(counts.size.mnw)
bet.mnw <- betadisper(dist.mnw,samdf.size.mnw$zone.y)
anova(bet.mnw)
permutest(bet.mnw, pairwise = FALSE, permutations = 99)
plot(bet.mnw) #not sig
adonis(counts.size.mnw ~ zone.y*size, data=samdf.size.mnw, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone.y 1 0.11117 0.111170 2.20961 0.08646 0.038 *
# size 1 0.00525 0.005254 0.10443 0.00409 0.845
# zone.y:size 1 0.01215 0.012148 0.24145 0.00945 0.684
# Residuals 23 1.15718 0.050312 0.90000
# Total 26 1.28575 1.00000
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
plot(log(counts.size.mnw$sq7)~log(samdf.size.mnw$size))
#overall test across sites: use the size-matched count table and align metadata rows first
samdf.size2 <- samdf.size2[rownames(size3),]
adonis(size3~zone.y*size,data=samdf.size2)
#### bray-curtis ####
iDist <- distance(ps.rare, method="bray")
iMDS <- ordinate(ps.rare, "MDS", distance=iDist)
plot_ordination(ps.rare, iMDS, color="zone", shape="site")
#ugly
#by site
ps.mnw <- subset_samples(ps.rare,site=="MNW")
iDist <- distance(ps.mnw, method="bray")
iMDS <- ordinate(ps.mnw, "MDS", distance=iDist)
plot_ordination(ps.mnw, iMDS, color="zone")+
stat_ellipse()
#relative abundance
t.relabun <- scale(t(counts), center=F, scale=colSums(t(counts)))
relabun <- t(t.relabun)
all.dist=vegdist(relabun,method="bray")
all.pcoa=pcoa(all.dist)
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87)
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=site))+
geom_point()+
stat_ellipse()
#now by site
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
#### CCA ####
library(adegenet) # for transp()
pp0=capscale(seq.rare~1)
pp=capscale(seq.rare~zone%in%site,data=samdf.rare)
anova(pp, alpha=0.05)
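#proportion of variance per axis, as a rough guide (vegan::eigenvals on the capscale fit;
#non-Euclidean distances can add small negative eigenvalues, so treat as approximate)
round(eigenvals(pp)/sum(eigenvals(pp)), 3)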
axes2plot=c(1,2)
quartz()
cmd=pp #change to pp for CAP, pp0 for MDS
plot(cmd,choices=axes2plot) # choices - axes to display
points(cmd,choices=axes2plot)
ordihull(cmd,choices= axes2plot,groups=samdf.rare$site_zone,draw="polygon",label=F)
#ordispider(cmd,choices= axes2plot,groups=samdf.rare$site,col="grey80")
#ordiellipse(cmd,choices= axes2plot,groups=samdf.rare$zone,draw="polygon",label=T)
#### indicator species ####
#install.packages("indicspecies")
library(indicspecies)
library(vegan)
#first testing out k means clustering
mcmc.km <- kmeans(counts,centers=2)
groupskm = mcmc.km$cluster
groupskm
all.log=logLin(data=counts)
all.dist=vegdist(all.log,method="bray")
all.pcoa=pcoa(all.dist)
km.log <- kmeans(all.log,centers=3)
groupskm = km.log$cluster
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.mcmc)
grps <- as.data.frame(groupskm)
grps$Sample <- rownames(grps)
pcoa2 <- merge(grps,pcoa.all,by="Sample")
pcoa2$groupskm <- as.factor(pcoa2$groupskm)
ggplot(pcoa2,aes(x=Axis.1,y=Axis.2,color=site,shape=groupskm))+
geom_point()+
stat_ellipse()
#interesting - not sure what to do here
#anyway back to indicspecies
groups <- samdf.mcmc$site
groups <- samdf.mcmc$zone
indval <- multipatt(counts, groups, control = how(nperm=999))
summary(indval,alpha=1)
#doesn't really work with the clustered ASV
#unclustered
groups <- samdf.no87$zone
indval <- multipatt(seqtab.no87, groups, control = how(nperm=999))
summary(indval)
# Group Backreef #sps. 1
# stat p.value
# sq22 0.478 0.017 *
#
# Group Forereef #sps. 7
# stat p.value
# sq15 0.711 0.001 ***
# sq9 0.526 0.007 **
# sq35 0.410 0.027 *
# sq37 0.388 0.019 *
# sq19 0.380 0.019 *
# sq17 0.377 0.024 *
# sq45 0.354 0.036 *
#now rarefied
counts.rare <- read.csv(file="~/moorea_holobiont/mr_ITS2/seqtabno87.rare.csv",row.names=1)
groups <- samdf.rare$zone
groups <- samdf.rare$site_zone
indval <- multipatt(counts.rare, groups, control = how(nperm=999))
summary(indval)
# Group Backreef #sps. 4
# stat p.value
# sq10 0.620 0.004 **
# sq29 0.594 0.005 **
# sq22 0.556 0.003 **
# sq66 0.420 0.038 *
#
# Group Forereef #sps. 4
# stat p.value
# sq15 0.729 0.001 ***
# sq9 0.544 0.013 *
# sq35 0.436 0.019 *
# sq37 0.404 0.017 *
#^ the same ones as unrarefied, but lost some & gained some
#### mcmc.otu ####
library(MCMC.OTU)
setwd("~/moorea_holobiont/mr_ITS2")
dat <- read.csv(file="seqtab.rare_1994_rd2_mcmc_plussam.csv", sep=",", header=TRUE, row.names=1)
goods <- purgeOutliers(dat,count.columns=5:23,otu.cut=0.001,zero.cut=0.02) #rare
head(goods)
# what is the proportion of samples with data for these OTUs?
apply(goods[,5:length(goods[1,])],2,function(x){sum(x>0)/length(x)})
# what percentage of global total counts each OTU represents?
apply(goods[,5:length(goods[1,])],2,function(x){sum(x)/sum(goods[,5:length(goods[1,])])})
# stacking the data; adjust otu.columns and condition.columns values for your data
gss=otuStack(goods,count.columns=c(5:length(goods[1,])),condition.columns=c(1:4))
gss$count=gss$count+1
# fitting the model. Replace the formula specified in 'fixed' with yours, add random effects if present.
# See ?mcmc.otu for these and other options.
mm=mcmc.otu(
fixed="site+zone+site:zone",
data=gss,
nitt=55000,thin=50,burnin=5000 # a long MCMC chain to improve modeling of rare OTUs
)
plot(mm)
# selecting the OTUs that were modeled reliably
# (OTUs that are too rare for confident parameter estimates are discarded)
acpass=otuByAutocorr(mm,gss)
# # head(gss)
# # ac=autocorr(mm$Sol)
# # ac
# calculating differences and p-values between all pairs of factor combinations
smm0=OTUsummary(mm,gss,otus=acpass,summ.plot=FALSE)
# adjusting p-values for multiple comparisons:
smmA=padjustOTU(smm0)
# significant OTUs at FDR<0.05:
sigs=signifOTU(smmA)
sigs
# plotting the significant ones
smm1=OTUsummary(mm,gss,otus=sigs)
# now plotting them by species
quartz()
smm1=OTUsummary(mm,gss,otus=sigs,xgroup="zone")
smm1+
ggtitle("test")
# table of log10-fold changes and p-values: this one goes into supplementary info in the paper
smmA$otuWise[sigs]
#### archive ####
#### Pcoa - raw data ####
library(vegan)
#install.packages("ggforce")
#library(ggforce)
#not sure if I need this one^
library(ggpubr)
library(cowplot)
# creating a log-transformed normalized dataset for PCoA:
df.seq <- as.data.frame(seqtab.no87)
all.log=logLin(data=df.seq)
# computing Bray-Curtis distances on the log-transformed data and performing PCoA:
all.dist=vegdist(all.log,method="bray")
all.pcoa=pcoa(all.dist)
#32.7%, 23.2%
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87)
levels(pcoa.all$site) <- c("Moorea NW","Moorea SE","Tahiti NW")
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
xlab('Axis 1 (32.7%)')+
ylab('Axis 2 (23.2%)')+
stat_ellipse()+
facet_wrap(~site)+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))
#now by site - unnecessary actually thanks to facet_wrap
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
pcoa.mse <- subset(pcoa.all,site=="MSE")
gg.mse <- ggplot(pcoa.mse,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))
gg.mse
pcoa.tah <- subset(pcoa.all,site=="TNW")
gg.tah <- ggplot(pcoa.tah,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))
gg.tah
quartz()
ggarrange(gg.mnw,gg.mse,gg.tah,nrow=1,common.legend=TRUE,legend="right")
#relative abundance instead of absolute abundance
t.relabun <- scale(t(seqtab.no87), center=F, scale=colSums(t(seqtab.no87)))
relabun <- t(t.relabun)
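#quick sanity check on the transform above (assumes every sample has at least one read):
#each row of the relative-abundance matrix should sum to 1
stopifnot(all(abs(rowSums(relabun)-1) < 1e-8))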
all.dist=vegdist(relabun,method="manhattan")
all.pcoa=pcoa(all.dist)
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87,by="Sample")
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=site,shape=zone))+
geom_point()+
stat_ellipse()
#alt method
ps.rel <- transform_sample_counts(ps_no87, function(OTU) OTU/sum(OTU))
iDist <- distance(ps.rel, method="bray")
iMDS <- ordinate(ps.rel, "MDS", distance=iDist)
plot_ordination(ps.rel, iMDS, color="site")+
stat_ellipse()
# creating a log-transformed normalized dataset for PCoA:
df.seq <- as.data.frame(counts)
all.log=logLin(data=df.seq)
# computing Manhattan distances (sum of all log-fold-changes) and performing PCoA:
all.dist=vegdist(all.log,method="manhattan")
all.pcoa=pcoa(all.dist)
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.mcmc)
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
facet_wrap(~site)
#now by site
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
pcoa.mse <- subset(pcoa.all,site=="MSE")
gg.mse <- ggplot(pcoa.mse,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mse
pcoa.tah <- subset(pcoa.all,site=="TNW")
gg.tah <- ggplot(pcoa.tah,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.tah
ggarrange(gg.mnw,gg.mse,gg.tah,nrow=1,common.legend=TRUE,legend="right")
#Nicola's Moorea ITS2 analysis
#Almost entirely based on DADA2 ITS Pipeline 1.8 Walkthrough:
#https://benjjneb.github.io/dada2/ITS_workflow.html
#with edits by Carly D. Kenkel and modifications for my data by Nicola Kriefall
#7/23/19
#~########################~#
##### PRE-PROCESSING #######
#~########################~#
#fastq files should have R1 & R2 designations for PE reads
#Also - some pre-trimming. Retain only PE reads that match amplicon primer. Remove reads containing Illumina sequencing adapters
##in Terminal home directory:
##following instructions of installing BBtools from https://jgi.doe.gov/data-and-tools/bbtools/bb-tools-user-guide/installation-guide/
##1. download BBMap package, sftp to installation directory
##2. untar:
#tar -xvzf BBMap_(version).tar.gz
##3. test package:
#cd bbmap
#~/bin/bbmap/stats.sh in=~/bin/bbmap/resources/phix174_ill.ref.fa.gz
## my adaptors, which I saved as "adaptors.fasta"
# >forward
# AATGATACGGCGACCAC
# >forwardrc
# GTGGTCGCCGTATCATT
# >reverse
# CAAGCAGAAGACGGCATAC
# >reverserc
# GTATGCCGTCTTCTGCTTG
##Note: Illumina should have cut these out already, normal if you don't get any
##primers for ITS:
# >forward
# GTGAATTGCAGAACTCCGTG
# >reverse
# CCTCCGCTTACTTATATGCTT
##Still in terminal - making a sample list based on the first phrase before the underscore in the .fastq name
#ls *R1_001.fastq | cut -d '_' -f 1 > samples.list
##cuts off the extra words in the .fastq files
#for file in $(cat samples.list); do mv ${file}_*R1*.fastq ${file}_R1.fastq; mv ${file}_*R2*.fastq ${file}_R2.fastq; done
##gets rid of reads that still have the adaptor sequence, shouldn't be there, I didn't have any
#for file in $(cat samples.list); do ~/bin/bbmap/bbduk.sh in1=${file}_R1.fastq in2=${file}_R2.fastq ref=adaptors.fasta out1=${file}_R1_NoIll.fastq out2=${file}_R2_NoIll.fastq; done &>bbduk_NoIll.log
##only keeping reads that start with the primer
#for file in $(cat samples.list); do ~/bin/bbmap/bbduk.sh in1=${file}_R1_NoIll.fastq in2=${file}_R2_NoIll.fastq k=15 restrictleft=21 literal=GTGAATTGCAGAACTCCGTG,CCTCCGCTTACTTATATGCTT outm1=${file}_R1_NoIll_ITS.fastq outu1=${file}_R1_check.fastq outm2=${file}_R2_NoIll_ITS.fastq outu2=${file}_R2_check.fastq; done &>bbduk_ITS.log
##higher k = more reads removed, but can't surpass k=20 or 21
##using cutadapt to remove adapters & reads with Ns in them
#module load cutadapt
# for file in $(cat samples.list)
# do
# cutadapt -g GTGAATTGCAGAACTCCGTG -G CCTCCGCTTACTTATATGCTT -o ${file}_R1.fastq -p ${file}_R2.fastq --max-n 0 ${file}_R1_NoIll_ITS.fastq ${file}_R2_NoIll_ITS.fastq
# done &> clip.log
##-g regular 5' forward primer
##-G regular 5' reverse primer
##-o forward out
##-p reverse out
##-max-n 0 means 0 Ns allowed
##this overwrote my original renamed files
##did sftp of *_R1.fastq & *_R2.fastq files to the folder to be used in dada2
#~########################~#
##### DADA2 BEGINS #########
#~########################~#
#installing/loading packages:
#if (!requireNamespace("BiocManager", quietly = TRUE))
# install.packages("BiocManager")
#BiocManager::install("dada2", version = "3.8")
library(dada2)
#packageVersion("dada2")
#I have version 1.10.1 - tutorial says 1.8 but I think that's OK, can't find a version 1.10 walkthrough
library(ShortRead)
#packageVersion("ShortRead")
library(Biostrings)
#packageVersion("Biostrings")
path <- "/Volumes/TOSHIBA EXT/WorkLaptop_2020_03/Desktop/its2/files_rd3" # CHANGE ME to the directory containing the fastq files after unzipping.
fnFs <- sort(list.files(path, pattern = "_R1.fastq.gz", full.names = TRUE))
fnRs <- sort(list.files(path, pattern = "_R2.fastq.gz", full.names = TRUE))
get.sample.name <- function(fname) strsplit(basename(fname), "_")[[1]][1]
sample.names <- unname(sapply(fnFs, get.sample.name))
head(sample.names)
sample.names
#### check for primers ####
FWD <- "GTGAATTGCAGAACTCCGTG" ## CHANGE ME to your forward primer sequence
REV <- "CCTCCGCTTACTTATATGCTT" ## CHANGE ME...
allOrients <- function(primer) {
# Create all orientations of the input sequence
require(Biostrings)
dna <- DNAString(primer) # The Biostrings works w/ DNAString objects rather than character vectors
orients <- c(Forward = dna, Complement = complement(dna), Reverse = reverse(dna),
RevComp = reverseComplement(dna))
return(sapply(orients, toString)) # Convert back to character vector
}
FWD.orients <- allOrients(FWD)
REV.orients <- allOrients(REV)
FWD.orients
REV.orients
fnFs.filtN <- file.path(path, "filtN", basename(fnFs)) # Put N-filterd files in filtN/ subdirectory
fnRs.filtN <- file.path(path, "filtN", basename(fnRs))
filterAndTrim(fnFs, fnFs.filtN, fnRs, fnRs.filtN, maxN = 0, multithread = TRUE)
primerHits <- function(primer, fn) {
# Counts number of reads in which the primer is found
nhits <- vcountPattern(primer, sread(readFastq(fn)), fixed = FALSE)
return(sum(nhits > 0))
}
rbind(FWD.ForwardReads = sapply(FWD.orients, primerHits, fn = fnFs.filtN[[2]]),
FWD.ReverseReads = sapply(FWD.orients, primerHits, fn = fnRs.filtN[[2]]),
REV.ForwardReads = sapply(REV.orients, primerHits, fn = fnFs.filtN[[2]]),
REV.ReverseReads = sapply(REV.orients, primerHits, fn = fnRs.filtN[[2]]))
#no primers - amazing
###### Visualizing raw data
#First, lets look at quality profile of R1 reads
plotQualityProfile(fnFs[c(1,2,3,4)])
plotQualityProfile(fnFs[c(90,91,92,93)])
#Then look at quality profile of R2 reads
plotQualityProfile(fnRs[c(1,2,3,4)])
plotQualityProfile(fnRs[c(90,91,92,93)])
#quality starts to drop off around 220 bp for forward reads and 200 bp for reverse reads, so those are used as the truncation lengths in filterAndTrim below
# Make directory and filenames for the filtered fastqs
filt_path <- file.path(path, "trimmed")
if(!file_test("-d", filt_path)) dir.create(filt_path)
filtFs <- file.path(filt_path, paste0(sample.names, "_F_filt.fastq.gz"))
filtRs <- file.path(filt_path, paste0(sample.names, "_R_filt.fastq.gz"))
#changing a bit from default settings - maxEE=1 (1 max expected error, more conservative), truncating forward reads at 220 bp and reverse reads at 200 bp; trimLeft (20 forward / 21 reverse) is left commented out below because primers were already removed with cutadapt
out <- filterAndTrim(fnFs, filtFs, fnRs, filtRs,
truncLen=c(220,200), #leaves overlap
maxN=0, #DADA does not allow Ns
maxEE=c(1,1), #allow 1 expected errors, where EE = sum(10^(-Q/10)); more conservative, model converges
truncQ=2,
minLen = 50,
#trimLeft=c(20,21), #N nucleotides to remove from the start of each read
rm.phix=TRUE, #remove reads matching phiX genome
                     matchIDs=TRUE, #enforce matching between id-line sequence identifiers of F and R reads
compress=TRUE, multithread=TRUE) # On Windows set multithread=FALSE
head(out)
tail(out)
#~############################~#
##### Learn Error Rates ########
#~############################~#
#setDadaOpt(MAX_CONSIST=30) #if necessary, increase number of cycles to allow convergence
errF <- learnErrors(filtFs, multithread=TRUE)
errR <- learnErrors(filtRs, multithread=TRUE)
#sanity check: visualize estimated error rates
#error rates should decline with increasing qual score
#red line is based on definition of quality score alone
#black line is estimated error rate after convergence
#dots are observed error rate for each quality score
plotErrors(errF, nominalQ=TRUE)
plotErrors(errR, nominalQ=TRUE)
#~############################~#
##### Dereplicate reads ########
#~############################~#
#Dereplication combines all identical sequencing reads into “unique sequences” with a corresponding “abundance”: the number of reads with that unique sequence.
#Dereplication substantially reduces computation time by eliminating redundant comparisons.
#DADA2 retains a summary of the quality information associated with each unique sequence. The consensus quality profile of a unique sequence is the average of the positional qualities from the dereplicated reads. These quality profiles inform the error model of the subsequent denoising step, significantly increasing DADA2’s accuracy.
derepFs <- derepFastq(filtFs, verbose=TRUE)
derepRs <- derepFastq(filtRs, verbose=TRUE)
# Name the derep-class objects by the sample names
names(derepFs) <- sample.names
names(derepRs) <- sample.names
#~###############################~#
##### Infer Sequence Variants #####
#~###############################~#
dadaFs <- dada(derepFs, err=errF, multithread=TRUE)
dadaRs <- dada(derepRs, err=errR, multithread=TRUE)
#now, look at the dada class objects by sample
#will tell how many 'real' variants in unique input seqs
#By default, the dada function processes each sample independently, but pooled processing is available with pool=TRUE and that may give better results for low sampling depths at the cost of increased computation time. See our discussion about pooling samples for sample inference.
dadaFs[[1]]
dadaRs[[1]]
#~############################~#
##### Merge paired reads #######
#~############################~#
#To further cull spurious sequence variants
#Merge the denoised forward and reverse reads
#Paired reads that do not exactly overlap are removed
mergers <- mergePairs(dadaFs, derepFs, dadaRs, derepRs, verbose=TRUE)
# Inspect the merger data.frame from the first sample
head(mergers[[1]])
summary((mergers[[1]]))
#We now have a data.frame for each sample with the merged $sequence, its $abundance, and the indices of the merged $forward and $reverse denoised sequences. Paired reads that did not exactly overlap were removed by mergePairs.
#~##################################~#
##### Construct sequence table #######
#~##################################~#
#a higher-resolution version of the “OTU table” produced by classical methods
seqtab <- makeSequenceTable(mergers)
dim(seqtab)
rowSums(seqtab)
# Inspect distribution of sequence lengths
table(nchar(getSequences(seqtab)))
#mostly at 300 bp, which is just what was expected [271-303 bp]
plot(table(nchar(getSequences(seqtab))))
#The sequence table is a matrix with rows corresponding to (and named by) the samples, and
#columns corresponding to (and named by) the sequence variants.
#Sequences that are much longer or shorter than expected may be the result of non-specific priming, and may be worth removing
#if I wanted to remove some lengths - not recommended by dada2 for its2 data
#seqtab2 <- seqtab[,nchar(colnames(seqtab)) %in% seq(268,327)]
table(nchar(getSequences(seqtab)))
dim(seqtab)
plot(table(nchar(getSequences(seqtab))))
#~############################~#
##### Remove chimeras ##########
#~############################~#
#The core dada method removes substitution and indel errors, but chimeras remain.
#Fortunately, the accuracy of the sequences after denoising makes identifying chimeras easier
#than it is when dealing with fuzzy OTUs: all sequences which can be exactly reconstructed as
#a bimera (two-parent chimera) from more abundant sequences.
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE, verbose=TRUE)
dim(seqtab.nochim)
#Identified 38 bimeras out of 156 input sequences.
sum(seqtab.nochim)/sum(seqtab)
#0.9989
#The fraction of chimeras varies based on factors including experimental procedures and sample complexity,
#but can be substantial.
#~############################~#
##### Track Read Stats #########
#~############################~#
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(dadaFs, getN), sapply(mergers, getN), rowSums(seqtab), rowSums(seqtab.nochim)) #note: seqtab (not seqtab2), since the length-filtering step above was left commented out
colnames(track) <- c("input", "filtered", "denoised", "merged", "tabled", "nonchim")
rownames(track) <- sample.names
head(track)
tail(track)
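#overall retention per sample (quick sketch using the track table above):
#fraction of input reads that survive to the final chimera-free table
round(track[,"nonchim"]/track[,"input"],3)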
write.csv(track,file="its2_reads.csv",row.names=TRUE,quote=FALSE)
#plotting for no reason
#manually added raw counts from the raw .fastq file using the following loop in Terminal:
# for file in *R1_001.fastq
# do
# echo $file >> raw_r1_names
# grep @M0 $file | wc -l >> raw_r1_counts
# done
setwd("~/moorea_holobiont/mr_ITS2/")
reads <- read.csv("its2_reads_renamed.csv")
#counts1 = raw
#counts2 = input
#counts3 = filtered
#counts4 = denoised
#counts5 = merged
#counts6 = nonchim
reread <- reshape(reads, varying = c("counts1","counts2", "counts3", "counts4", "counts5", "counts6"), timevar = "day",idvar = "sample", direction = "long", sep = "")
reread$sample <- as.factor(reread$sample)
ggplot(reread,aes(x=day,y=counts,color=sample))+
geom_point()+
geom_path()
library(Rmisc)
reread.se <- summarySE(data=reread,measurevar="counts",groupvars=c("day"))
ggplot(reread.se,aes(x=day,y=counts))+
geom_point()+
geom_path()+
geom_errorbar(aes(ymin=counts-se,ymax=counts+se),width=0.3)
#~############################~#
##### Assign Taxonomy ##########
#~############################~#
taxa <- assignTaxonomy(seqtab.nochim, "~/Downloads/GeoSymbio_ITS2_LocalDatabase_verForPhyloseq.fasta",tryRC=TRUE,minBoot=70,verbose=TRUE)
unname(head(taxa))
#to come back to later
saveRDS(seqtab.nochim, file="~/Desktop/its2/mrits2_seqtab.nochim.rds")
saveRDS(taxa, file="~/Desktop/its2/mrits2_taxa.rds")
write.csv(seqtab.nochim, file="mrits2_seqtab.nochim.csv")
write.csv(taxa, file="~/Desktop/its2/mrits2_taxa.csv")
#### Reading in prior data files ####
setwd("~/moorea_holobiont/mr_ITS2")
seqtab.nochim <- readRDS("mrits2_seqtab.nochim.rds")
taxa <- readRDS("mrits2_taxa.rds")
#~############################~#
##### handoff 2 phyloseq #######
#~############################~#
#BiocManager::install("phyloseq")
library('phyloseq')
library('ggplot2')
library('Rmisc')
#import dataframe holding sample information
samdf<-read.csv("mrits_sampledata.csv")
head(samdf)
rownames(samdf) <- samdf$Sample
# Construct phyloseq object (straightforward from dada2 outputs)
# ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
# sample_data(samdf),
# tax_table(taxa))
#
# ps
#phyloseq object with shorter names - doing this one instead of one above
ids <- paste0("sq", seq(1, length(colnames(seqtab.nochim))))
#making output fasta file for lulu step & maybe other things, before giving new ids to sequences
#path='~/moorea_holobiont/mr_ITS2/mrits2.fasta'
#uniquesToFasta(seqtab.nochim, path, ids = ids, mode = "w", width = 20000)
colnames(seqtab.nochim)<-ids
taxa2 <- cbind(taxa, rownames(taxa)) #retaining raw sequence info before renaming
rownames(taxa2)<-ids
ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps
#removing sample 87 from ps object, no data
ps_no87 <- subset_samples(ps, Sample != "87")
ps_no87
#removing 87 from other things
samdf.no87 <- samdf[-92,]
seqtab.no87 <- seqtab.nochim[-92,]
#~##########################################~#
###### Apply LULU to cluster ASVs ############
#~##########################################~#
##Necessary pre-steps after producing ASV table and associated fasta file:
##Produce a match list using BLASTn
##IN TERMINAL#
##First produce a blastdatabase with the OTUs
#module load blast+
#makeblastdb -in mrits2.fasta -parse_seqids -dbtype nucl
##Then blast the OTUs against the database to produce the match list
#blastn -db mrits2.fasta -outfmt '6 qseqid sseqid pident' -out match_list.txt -qcov_hsp_perc 80 -perc_identity 84 -query mrits2.fasta
##HSP = high scoring pair
##perc_identity = percent of nucleotides in the highly similar pairings that match
##transfer match_list.txt to R working directory
##BACK IN R#
#first, read in ASV table
#install.packages("remotes")
library("remotes")
#install_github("https://github.com/tobiasgf/lulu.git")
library("lulu")
#rarefying
library(vegan)
#back to lulu sequence
ASVs <- as.data.frame(t(seqtab.no87))
#just made this in terminal
matchList <- read.table("match_list.txt")
#Now, run the LULU curation
curated_result <- lulu(ASVs, matchList,minimum_match=99,minimum_relative_cooccurence=0.99)
#default: minimum_relative_cooccurence = 0.95 default, changed to 0.70 to see what happens, nothing
#default: minimum_match = 84 default, only 1 OTU different between 97 & 84
summary(curated_result)
#19 otus with match of 99%
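#to see which child ASVs were merged into which parent, the lulu output keeps a map
#(optional sketch, commented out):
#head(curated_result$otu_map)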
#Pull out the curated OTU list, re-transpose
lulu.out <- data.frame(t(curated_result$curated_table))
#Continue on to your favorite analysis
#write.csv(lulu.out,"~/moorea_holobiont/mr_ITS2/lulu_output.csv")
lulu.out <- read.csv("~/moorea_holobiont/mr_ITS2/lulu_output.csv",row.names=1)
#ps object with lulu
ps.lulu <- phyloseq(otu_table(lulu.out, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps.lulu
#### rarefy ####
library(vegan)
rarecurve(lulu.out,step=100,label=FALSE) #after clustering
total <- rowSums(lulu.out)
total
subset(total, total <1994)
summary(total)
row.names.remove <- c("117","311","402","414","505","513","530","58","72","76")
lulu.out.rare <- lulu.out[!(row.names(lulu.out) %in% row.names.remove),]
samdf.rare <- samdf.no87[!(row.names(samdf.no87) %in% row.names.remove), ]
#85 samples left
seq.rare <- rrarefy(lulu.out.rare,sample=1994)
rarecurve(seq.rare,step=100,label=FALSE)
rarecurve(seqtab.no87,step=100,label=FALSE) #before trimming
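#note: rrarefy() subsamples reads at random, so the rarefied table will differ between runs unless a
#seed is fixed first; minimal reproducibility sketch (the seed value here is arbitrary, not the one used originally):
#set.seed(1)
#seq.rare <- rrarefy(lulu.out.rare,sample=1994)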
#save
#write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994.csv")
write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_rd2.csv")
#read back in
seq.rare <- read.csv("~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_rd2.csv",row.names=1,header=TRUE)
#phyloseq object
ps.rare <- phyloseq(otu_table(seq.rare, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps.rare
#### Alpha diversity #####
library(ggplot2)
#install.packages("extrafontdb")
library(extrafontdb)
library(extrafont)
library(Rmisc)
library(cowplot)
library(ggpubr)
#font_import(paths = "/Library/Fonts/")
loadfonts()
#Visualize alpha-diversity - ***Should be done on raw, untrimmed dataset***
#total species diversity in a landscape (gamma diversity) reflects both the mean diversity within sites (alpha diversity) and the differentiation among sites (beta diversity)
#Shannon: entropy-based index computed from the proportional abundances of all types; it equals ln(number of types) when all types are equally common and approaches zero as reads become concentrated in a single type
#Simpson: the probability that two reads drawn at random are the same type, so it is low in diverse samples; the inverse Simpson index (1/lambda) is reported here so that larger values mean higher diversity
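#optional cross-check of these two indices with vegan directly (a sketch; assumes ps.rare from above,
#samples as rows because taxa_are_rows=FALSE) - should closely match estimate_richness() below
rare.mat <- as(otu_table(ps.rare),"matrix")
head(diversity(rare.mat,index="shannon"))
head(diversity(rare.mat,index="invsimpson"))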
plot_richness(ps_no87, x="site", measures=c("Shannon", "Simpson"), color="zone") + theme_bw()
df.div <- estimate_richness(ps.rare, split=TRUE, measures =c("Shannon","InvSimpson","Observed"))
#df.div <- estimate_richness(ps.rare, split=TRUE, measures =c("Shannon","InvSimpson","Observed"))
df.div
df.div$Sample <- rownames(df.div)
df.div$Sample <- gsub("X","",df.div$Sample)
df.div <- merge(df.div,samdf,by="Sample") #add sample data
#write.csv(df.div,file="~/moorea_holobiont/mr_ITS2/mrits_div_lulu.rare.csv") #saving
df.div <- read.csv("~/moorea_holobiont/mr_ITS2/mrits_div_lulu.rare.csv") #reading back in
df.div$zone <- gsub("Forereef","FR",df.div$zone)
df.div$zone <- gsub("Backreef","BR",df.div$zone)
quartz()
gg.sh <- ggplot(df.div, aes(x=zone, y=Shannon,color=zone,shape=zone))+
#geom_errorbar(aes(ymin=InvSimpson-se,ymax=InvSimpson+se),position=position_dodge(0.5),lwd=0.4,width=0.4)+
#geom_point(aes(colour=zone, shape=zone),size=4,position=position_dodge(0.5))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Shannon diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)+
facet_wrap(~site)+
ylim(-0.01, 1.7)
gg.sh
gg.si <- ggplot(df.div, aes(x=zone, y=InvSimpson,color=zone,shape=zone))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Inv. Simpson diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)+
facet_wrap(~site)+
ylim(1,4.4)
gg.si
gg.ob <- ggplot(df.div, aes(x=zone, y=Observed,color=zone,shape=zone))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("ASV number")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(text=element_text(family="Times"),legend.position="none")+
geom_jitter(alpha=0.5)+
facet_wrap(~site)
gg.ob
quartz()
ggarrange(gg.sh,gg.si,legend="none")
#different version - with overall panel
gg.total <- ggplot(df.div, aes(x=zone, y=InvSimpson,color=zone,shape=zone))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Inv. Simpson diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)
quartz()
ggarrange(gg.total,gg.si,widths=c(0.45,1),labels=c("(b)","(c)"))
#stats
library(bestNormalize)
bestNormalize(df.div$InvSimpson)
obs.norm <- orderNorm(df.div$InvSimpson)
df.div$obs.norm <- obs.norm$x.t
shapiro.test(df.div$obs.norm)
#nothing worked
wilcox.test(Shannon~zone,data=df.div)
wilcox.test(InvSimpson~zone,data=df.div)
wilcox.test(Observed~zone,data=df.div)
mnw <- subset(df.div,site=="MNW")
mse <- subset(df.div,site=="MSE")
tah <- subset(df.div,site=="TNW")
wilcox.test(Shannon~zone,data=mnw)
#p = 0.05286 . rare
wilcox.test(InvSimpson~zone,data=mnw)
#p = 0.04687 * rare
wilcox.test(Observed~zone,data=mnw)
#no
#W = 103.5, p-value = 0.506
summary(aov(Shannon~zone,data=mse))
wilcox.test(Shannon~zone,data=mse)
#p = 0.0031 ** rare
wilcox.test(InvSimpson~zone,data=mse)
#p = 0.01 * rare
wilcox.test(Observed~zone,data=mse)
#p = 0.01 * rare
wilcox.test(Shannon~zone,data=tah)
#p = 0.49
wilcox.test(InvSimpson~zone,data=tah)
#p = 0.65
wilcox.test(Observed~zone,data=tah)
#p = 0.59
#install.packages("dunn.test")
library(dunn.test)
dunn.test(df.div$Shannon,df.div$site_zone,method="bh")
#### div stats with size ####
row.names(df.div) <- df.div$Sample
size <- read.csv("~/moorea_holobiont/mr_ITS2/mr_size.csv",header=TRUE,row.names=1)
row.names.remove <- c(109)
size2 <- size[!(row.names(size) %in% row.names.remove),]
df.div.size <- merge(df.div,size2,by=0)
lm.sh.size <- lm(size~Shannon,data=df.div.size)
summary(lm.sh.size)
row.names(df.div) <- df.div$Sample
df.div.size <- merge(df.div,size2,by=0)
lm.si.size <- lm(size~InvSimpson,data=df.div.size)
summary(lm.si.size)
row.names(df.div) <- df.div$Sample
df.div.size <- merge(df.div,size2,by=0)
lm.ob.size <- lm(size~Observed,data=df.div.size)
summary(lm.ob.size)
plot(size~Shannon,data=df.div.size)
#just cladocopium
ps.rare.c <- subset_taxa(ps.rare,Phylum==" Clade C")
df.div.c <- estimate_richness(ps.rare.c, split=TRUE, measures =c("Shannon","InvSimpson","Observed"))
df.div.c$Sample <- rownames(df.div.c)
df.div.c$Sample <- gsub("X","",df.div.c$Sample)
df.div.c <- merge(df.div.c,samdf,by="Sample") #add sample data
ggplot(df.div.c, aes(x=zone, y=Observed,color=zone,shape=zone))+
#geom_errorbar(aes(ymin=InvSimpson-se,ymax=InvSimpson+se),position=position_dodge(0.5),lwd=0.4,width=0.4)+
#geom_point(aes(colour=zone, shape=zone),size=4,position=position_dodge(0.5))+
geom_boxplot(outlier.shape=NA)+
xlab("Reef zone")+
ylab("Shannon diversity")+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_colour_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
#guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
theme(legend.position="none")+
geom_jitter(alpha=0.5)+
facet_wrap(~site)#+
ylim(-0.01, 1.7)
##checking out how diversity works with variables - nothing interesting
#df.size <- read.csv("mr_size.csv",header=TRUE)
#df.div$coral_id <- row.names(df.div)
#
# mergeddf <- merge(df.div, df.size, by="coral_id", sort=FALSE)
# plot(Shannon~size,data=mergeddf)
# shapiro.test(mergeddf$Shannon)
# shapiro.test(mergeddf$size)
# kruskal.test(Shannon~size,data=mergeddf)
## no significant relationship between coral size & diversity! interesting
## now by sample host heterozygosity
# df.het <- read.table("host_het.txt") #read in data
# df.het$het <- df.het$V3/(df.het$V2+df.het$V3) #heterozygosity calculations
# df.het$V1 <- sub("TO","TNWO",df.het$V1) #just renaming some sites
# df.het$V1 <- sub("TI","TNWI",df.het$V1) #just renaming some sites
# df.het$site <- substr(df.het$V1, 0, 4)
# df.het$site <- as.factor(df.het$site)
# df.het$site <- sub("I","-B", df.het$site)
# df.het$site <- sub("O","-F", df.het$site)
# str(df.het)
## just getting sample names on the same page
# df.het$V1 <- sub(".trim.bt2.bam.out.saf.idx.ml","",df.het$V1)
# df.het$coral_id <- substr(df.het$V1, 6, 8)
# df.div$coral_id <- row.names(df.div)
#
# mergeddf2 <- merge(df.div, df.het, by="coral_id", sort=TRUE)
# plot(Shannon~het,data=mergeddf2)
# kruskal.test(Shannon~het,data=mergeddf2)
## not significant!
#### MCMC.OTU to remove underrepresented ASVs ####
library(MCMC.OTU)
#added a column with a blank name in the beginning, with 1-95 in the column, mcmc.otu likes this
#also removed the X from the beginning of sample names
lulu.rare.mcmc <- read.csv("~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_rd2_mcmc.csv",header=TRUE)
#& reading back in things
goods <- purgeOutliers(lulu.rare.mcmc,count.columns=3:21,otu.cut=0.001,zero.cut=0.02) #rare
#otu.cut = 0.1% of reads represented by ASV
#zero.cut = present in more than 1 sample (2% of samples)
colnames(goods)
#sq 1, 2, 3, 6, 7, 12, 18, 24, 32 with min 99% matching in lulu
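#roughly the same thresholds written out by hand (a sketch based on the comments above,
#not a drop-in replacement for purgeOutliers): keep ASVs holding >0.1% of total reads and present in >2% of samples
asv.counts <- lulu.rare.mcmc[,3:21]
keep <- (colSums(asv.counts)/sum(asv.counts) > 0.001) & (colMeans(asv.counts > 0) > 0.02)
colnames(asv.counts)[keep]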
#not rare
lulu.mcmc <- read.csv("~/moorea_holobiont/mr_ITS2/lulu_output_mcmc.csv",header=TRUE)
goods <- purgeOutliers(lulu.mcmc,count.columns=3:21,otu.cut=0.001,zero.cut=0.02) #not rare
#otu.cut = 0.1% of reads represented by ASV
#zero.cut = present in more than 1 sample (2% of samples)
colnames(goods)
#sq 1, 2, 3, 6, 7, 12, 18, 24, 32 with min 99% matching in lulu
rownames(goods) <- goods$sample
counts <- goods[,3:11]
#mcmc.otu removed 3 undersequenced samples: "513" "530" "76", "87" bad to begin with
remove <- c("513","530","76","87")
samdf.mcmc <- samdf[!row.names(samdf)%in%remove,]
#write.csv(samdf.mcmc,"~/Desktop/mr_samples.csv")
write.csv(counts,file="seqtab_lulu.trimmed.csv")
write.csv(counts,file="seqtab_lulu.rare.trimmed.csv")
counts <- read.csv("seqtab_lulu.trimmed.csv",row.names=1,header=TRUE)
counts <- read.csv("seqtab_lulu.rare.trimmed.csv",row.names=1,header=TRUE)
ps.mcmc <- phyloseq(otu_table(counts, taxa_are_rows=FALSE),
sample_data(samdf.mcmc),
tax_table(taxa2))
ps.mcmc
ps.rare.mcmc <- phyloseq(otu_table(counts, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps.rare.mcmc #9 taxa
#### Bar plot - clustered, trimmed ####
#bar plot
ps_glom <- tax_glom(ps.rare.mcmc, "Class")
ps1 <- merge_samples(ps_glom, "site_zone")
ps2 <- transform_sample_counts(ps1, function(x) x / sum(x))
plot_bar(ps2, fill="Class",x="site_zone")
ps.mcmc.melt <- psmelt(ps.rare.mcmc)
ps.mcmc.melt$newclass <- paste(ps.mcmc.melt$Class,ps.mcmc.melt$OTU,sep="_")
#boxplot
ggplot(ps.mcmc.melt,aes(x=site_zone,y=Abundance,color=newclass))+
geom_boxplot()
#individually
sq3 <- subset(ps.mcmc.melt,newclass==" C3k_sq3")
ggplot(sq3,aes(x=site_zone,y=Abundance,color=Class))+
geom_boxplot()
library(dplyr)
ps.all <- transform_sample_counts(ps.rare.mcmc, function(OTU) OTU/sum(OTU))
pa <- psmelt(ps.all)
tb <- psmelt(ps.all)%>%
filter(!is.na(Abundance))%>%
group_by(site_zone,OTU)%>%
summarize_at("Abundance",mean)
#some more grouping variables
tb <- psmelt(ps.all)%>%
filter(!is.na(Abundance))%>%
group_by(site,zone,site_zone,OTU)%>%
summarize_at("Abundance",mean)
ggplot(tb,aes(x=site_zone,y=Abundance,fill=OTU))+
geom_bar(stat="identity", colour="black")+
theme_cowplot()
#renaming
#ps.all@tax_table
# Taxonomy Table: [9 taxa by 4 taxonomic ranks]:
# Kingdom Phylum Class
# sq1 "Symbiodinium" " Clade C" " C3k" - 1
# sq2 "Symbiodinium" " Clade C" " Cspc" - 1
# sq3 "Symbiodinium" " Clade C" " C3k" - 2
# sq6 "Symbiodinium" " Clade C" " C3k" - 3
# sq7 "Symbiodinium" " Clade A" " A1"
# sq12 "Symbiodinium" " Clade C" NA - 1
# sq18 "Symbiodinium" " Clade C" NA - 2
# sq24 "Symbiodinium" " Clade C" " C3k" - 4
# sq32 "Symbiodinium" " Clade C" " Cspc" - 2
tb$sym <- gsub("sq12","C. - 1",tb$OTU)
tb$sym <- gsub("sq18","C. - 2",tb$sym)
tb$sym <- gsub("sq1","C3k - 1",tb$sym)
tb$sym <- gsub("sq24","C3k - 4",tb$sym)
tb$sym <- gsub("sq2","Cspc - 1",tb$sym)
tb$sym <- gsub("sq32","Cspc - 2",tb$sym)
tb$sym <- gsub("sq3","C3k - 2",tb$sym)
tb$sym <- gsub("sq6","C3k - 3",tb$sym)
tb$sym <- gsub("sq7","A1",tb$sym)
tb$zone <- gsub("Forereef","FR",tb$zone)
tb$zone <- gsub("Backreef","BR",tb$zone)
tb$site <- gsub("MNW","Mo'orea NW",tb$site)
tb$site <- gsub("MSE","Mo'orea SE",tb$site)
tb$site <- gsub("TNW","Tahiti NW",tb$site)
tb$sym <- factor(tb$sym, levels=c("C3k - 1","C3k - 2","C3k - 3","C3k - 4","Cspc - 1", "Cspc - 2","C. - 1","C. - 2","A1"))
quartz()
gg.bp <- ggplot(tb,aes(x=zone,y=Abundance,fill=sym))+
geom_bar(stat="identity")+
theme_cowplot()+
#theme(text=element_text(family="Times"))+
xlab('Reef zone')+
# scale_fill_manual(name="Sym.",values=c("seagreen1","seagreen2","seagreen3","seagreen4","blue","darkblue","orange","yellow","purple"))
scale_fill_manual(name="Algal symbiont",values=c("#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff","#C70039", "#8F0C3F","#d4e21aff"))+
facet_wrap(~site)
gg.bp
#"#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff","#440154FF", "#48196bff","#d4e21aff"
quartz()
ggarrange(gg.bp,
ggarrange(gg.sh,gg.si,ncol=2,labels=c("(b)","(c)"),common.legend=T,legend="none"),nrow=2,labels="(a)")
#getting raw average relative abundances
ps.rel <- transform_sample_counts(ps_no87, function(x) x / sum(x))
plot_bar(ps.rel,fill="Class")
ps.glom <- tax_glom(ps.rel, "Class")
c3k <- subset_taxa(ps.glom,Class==" C3k")
c3k.otu <- as.data.frame(c3k@otu_table)
mean(c3k.otu$sq1)
#all the seqs - 9 total
cspc <- subset_taxa(ps.rel,Class==" Cspc")
#rel abundance
cspc <- subset_taxa(ps.glom,Class==" Cspc")
cspc.otu <- as.data.frame(cspc@otu_table)
mean(cspc.otu$sq2)
#background ones
back <- subset_taxa(ps.rel,is.na(Class))
all.otu <- ps2@otu_table
# Taxonomy Table: [6 taxa by 4 taxonomic ranks]:
# Kingdom Phylum Class
# sq1 "Symbiodinium" " Clade C" " C3k" NA
# sq2 "Symbiodinium" " Clade C" " Cspc" NA
# sq7 "Symbiodinium" " Clade A" " A1" NA
# sq33 "Symbiodinium" " Clade A" " A3" NA
# sq66 "Symbiodinium" " Clade B" " B1" NA
# sq81 "Symbiodinium" " Clade C" " C116" NA
all.otu2 <- as.data.frame(all.otu)
mean(all.otu2$sq1)
ps.rel <- transform_sample_counts(ps.mcmc, function(x) x / sum(x))
plot_bar(ps.rel,fill="Class")
ps.glom <- tax_glom(ps.rel, "Class")
#c3k.mcmc <- subset_taxa(ps.glom,Class==" C3k")
#c3k.otu <- as.data.frame(c3k.mcmc@otu_table)
#mean(c3k.otu$sq1)
#cs.mcmc <- subset_taxa(ps.glom,Class==" Cspc")
#cs.otu <- as.data.frame(cs.mcmc@otu_table)
#mean(cs.otu$sq2)
#range(cs.otu$sq2)
#### Bar plots by individual sqs ####
#adding sqs to taxa2
taxa3 <- data.frame(taxa2)
taxa3$sqs <- c(rownames(taxa3))
#renaming sqs
taxa3$sqs <- gsub("sq12","C. - 1",taxa3$sqs)
taxa3$sqs <- gsub("sq18","C. - 2",taxa3$sqs)
taxa3$sqs <- gsub("sq1","C3k - 1",taxa3$sqs)
taxa3$sqs <- gsub("sq24","C3k - 4",taxa3$sqs)
taxa3$sqs <- gsub("sq2","Cspc - 1",taxa3$sqs)
taxa3$sqs <- gsub("sq32","Cspc - 2",taxa3$sqs)
taxa3$sqs <- gsub("sq3","C3k - 2",taxa3$sqs)
taxa3$sqs <- gsub("sq6","C3k - 3",taxa3$sqs)
taxa3$sqs <- gsub("sq7","A1",taxa3$sqs)
taxa3 <- as.matrix(taxa3)
#renaming reef zones
samdf.new <- samdf
samdf.new$zone <- gsub("Forereef","FR",samdf.new$zone)
samdf.new$zone <- gsub("Backreef","BR",samdf.new$zone)
ps.rare.mcmc.newnames <- phyloseq(otu_table(counts, taxa_are_rows=FALSE),
sample_data(samdf.new),
tax_table(taxa3))
ps.rare.mcmc.newnames #9 taxa
ps.mnw <- subset_samples(ps.rare.mcmc.newnames,site=="MNW")
quartz()
bar.mnw <- plot_bar(ps.mnw,x="zone",y="Abundance",fill="sqs")+
facet_wrap(~sqs,scales="free",ncol=5)+
ggtitle("(a) Mo'orea NW")+
theme_cowplot()+
scale_fill_manual(name="Algal symbiont",values=c("#d4e21aff","#C70039","#8F0C3F","#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff"))+
xlab("")
ps.mse <- subset_samples(ps.rare.mcmc.newnames,site=="MSE")
bar.mse <- plot_bar(ps.mse,x="zone",y="Abundance",fill="sqs")+
facet_wrap(~sqs,scales="free",ncol=5)+
ggtitle("(b) Mo'orea SE")+
theme_cowplot()+
scale_fill_manual(name="Algal symbiont",values=c("#d4e21aff","#C70039","#8F0C3F","#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff"))+
xlab("")
bar.mse
ps.tnw <- subset_samples(ps.rare.mcmc.newnames,site=="TNW")
bar.tnw <- plot_bar(ps.tnw,x="zone",y="Abundance",fill="sqs")+
facet_wrap(~sqs,scales="free",ncol=5)+
ggtitle("(c) Tahiti NW")+
theme_cowplot()+
scale_fill_manual(name="Algal symbiont",values=c("#d4e21aff","#C70039","#8F0C3F","#1E9C89FF","#25AB82FF","#58C765FF","#7ED34FFF","#365d8dff","#287d8eff"))+
xlab("Reef zone")
quartz()
ggarrange(bar.mnw,bar.mse,bar.tnw,ncol=1,common.legend=TRUE,legend="none")
#presence absence
counts_pres <- counts
counts_pres[counts_pres>0] <- 1
ps.pres <- phyloseq(otu_table(counts_pres, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps.pres #9 taxa
ps.mnw.pres <- subset_samples(ps.pres,site=="MNW")
plot_bar(ps.mnw.pres,x="zone",y="Abundance",fill="Class",facet_grid=~sqs)
ps.mse.pres <- subset_samples(ps.pres,site=="MSE")
plot_bar(ps.mse.pres,x="zone",y="Abundance",fill="Class",facet_grid=~sqs)
ps.tnw.pres <- subset_samples(ps.pres,site=="TNW")
plot_bar(ps.tnw.pres,x="zone",y="Abundance",fill="Class",facet_grid=~sqs)
#### Pcoa - clustered & trimmed ####
library(vegan)
library(cowplot)
#install.packages("ggforce")
#library(ggforce)
#not sure if I need this one^
#all
#original overview
ps.rare.mcmc.noa <- subset_taxa(ps.rare.mcmc,Phylum==" Clade C")
plot_ordination(ps.rare.mcmc.noa, ordinate(ps.rare.mcmc.noa, "PCoA"), color = "zone") +
geom_point()+
stat_ellipse(level=0.8,aes(lty=zone),geom="polygon",alpha=0.1)
#not sure what this was
#p3 = plot_ordination(GP1, GP.ord, type="biplot", color="SampleType", shape="Phylum", title="biplot")
#site
gg.site.alone <- plot_ordination(ps.rare.mcmc, ordinate(ps.rare.mcmc, "PCoA"), color="site", shape="site")+
geom_point(size=2)+
stat_ellipse(level=0.8,aes(lty=site),geom="polygon",alpha=0.1)+
xlab('Axis 1 (57.1%)')+
ylab('Axis 2 (35.2%)')+
theme_cowplot()+
scale_linetype_manual(values=c("longdash","dotted","dotdash"),labels=c("MNW","MSE","TNW"))+
scale_color_manual(values=c("darkslategray3","darkslategray4","#000004"),labels=c("MNW","MSE","TNW"))+
scale_shape_manual(values=c(8,4,9),labels=c("MNW","MSE","TNW"))+
labs(shape="Site",color="Site",linetype="Site")
quartz()
gg.site.alone
gg.site.taxa <- plot_ordination(ps.rare.mcmc, ordinate(ps.rare.mcmc, "PCoA"), type="biplot", color="site", shape="Class")+
geom_point(size=2)+
#stat_ellipse(level=0.8,aes(lty=site),geom="polygon",alpha=0.1)+
xlab('Axis 1 (57.1%)')+
ylab('Axis 2 (35.2%)')+
theme_cowplot()
gg.site.taxa
#### BIPLOT - VERY COOL ####
plot_ordination(ps.rare.mcmc, ordinate(ps.rare.mcmc, "PCoA"), type="biplot", color="site", shape="Phylum", title="biplot")
#by site
ps.mnw <- subset_samples(ps.rare.mcmc,site=="MNW")
#ps.mnw <- subset_samples(ps.trim,site=="mnw")
ord.mnw <- ordinate(ps.mnw, "PCoA", "bray")
plot_ordination(ps.mnw, ord.mnw, type="biplot", color="zone", shape="Class", title="biplot")#+
theme(legend.position="none")
gg.mnw <- plot_ordination(ps.mnw, ord.mnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea NW")+
#annotate(geom="text", x=0.7, y=0.2, label="p < 0.01**",size=4)+ #rarefied
#annotate(geom="text", x=-0.25, y=0.6, label="p < 0.01**",size=4)+ #not rarefied
xlab("Axis 1 (69.4%)")+ #rarefied
ylab("Axis 2 (25.6%)")+ #rarefied
#xlab("Axis 1 (34.3%)")+#non-rarefied
#ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mnw
ps.mse <- subset_samples(ps.rare.mcmc,site=="MSE")
#ps.mse <- subset_samples(ps.trim,site=="mse")
ord.mse <- ordinate(ps.mse, "PCoA", "bray")
gg.mse <- plot_ordination(ps.mse, ord.mse,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea SE")+
#annotate(geom="text", x=-0.4, y=0.6, label="p < 0.01**",size=4)+ #rarefied
#annotate(geom="text", x=-0.25, y=0.6, label="p < 0.01**",size=4)+ #not rarefied
xlab("Axis 1 (89.9%)")+ #rarefied
ylab("Axis 2 (8.9%)")+ #rarefied
#xlab("Axis 1 (34.3%)")+#non-rarefied
#ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mse
ps.tnw <- subset_samples(ps.rare.mcmc,site=="TNW")
#ps.tnw <- subset_samples(ps.trim,site=="TNW")
ord.tnw <- ordinate(ps.tnw, "PCoA", "bray")
gg.tnw <- plot_ordination(ps.tnw, ord.tnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Tahiti NW")+
xlab("Axis 1 (62.9%)")+ #rarefied
ylab("Axis 2 (29.6%)")+ #rarefied
#xlab("Axis 1 (34.3%)")+#non-rarefied
#ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.tnw
quartz()
ggarrange(gg.mnw,gg.mse,gg.tnw,nrow=1,common.legend=TRUE,legend='right',labels=c("(a)","(b)","(c)"))
#### pcoa - rel abundance ####
ps.mcmc.rel <- transform_sample_counts(ps.mcmc, function(x) x / sum(x))
#by site
ps.mnw <- subset_samples(ps.mcmc.rel,site=="MNW")
#ps.mnw <- subset_samples(ps.trim,site=="mnw")
ord.mnw <- ordinate(ps.mnw, "PCoA", "bray")
gg.mnw <- plot_ordination(ps.mnw, ord.mnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea NW")+
xlab("Axis 1 (68.0%)")+#non-rarefied
ylab("Axis 2 (26.5%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mnw
ps.mse <- subset_samples(ps.mcmc.rel,site=="MSE")
ord.mse <- ordinate(ps.mse, "PCoA", "bray")
gg.mse <- plot_ordination(ps.mse, ord.mse,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Mo'orea SE")+
xlab("Axis 1 (87.7%)")+#non-rarefied
ylab("Axis 2 (9.7%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.mse
ps.tnw <- subset_samples(ps.mcmc.rel,site=="TNW")
ord.tnw <- ordinate(ps.tnw, "PCoA", "bray")
gg.tnw <- plot_ordination(ps.tnw, ord.tnw,color="zone", shape="zone")+
geom_point(size=2)+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("BR","FR"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("BR","FR"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
ggtitle("Tahiti NW")+
xlab("Axis 1 (60.3%)")+#non-rarefied
ylab("Axis 2 (31.9%)")+#non-rarefied
theme(axis.text=element_text(size=10))
gg.tnw
quartz()
ggarrange(gg.mnw,gg.mse,gg.tnw,nrow=1,common.legend=TRUE,legend='right',labels=c("(a)","(b)","(c)"))
# ps.tah <- subset_samples(ps.mcmc,site=="TNW")
# gg.tah <- plot_ordination(ps.tah, ordinate(ps.tah, "PCoA"), color = "zone")+
# geom_point()+
# stat_ellipse(level=0.95)
#### Deseq differentially abundant ####
library(DESeq2)
#checking if any significant
ps.mnw = subset_samples(ps.rare.mcmc, site=="MNW")
ds.mnw = phyloseq_to_deseq2(ps.mnw, ~ zone)
dds.mnw <- estimateSizeFactors(ds.mnw,type="poscounts")
stat.mnw = DESeq(dds.mnw, test="Wald", fitType="parametric")
res = results(stat.mnw, cooksCutoff = FALSE)
alpha = 0.05
sigtab.mnw = res[which(res$padj < alpha), ]
sigtab.mnw = cbind(as(sigtab.mnw, "data.frame"), as(tax_table(ps.mnw)[rownames(sigtab.mnw), ], "matrix"))
head(sigtab.mnw)
dim(sigtab.mnw)
#none
ps.mse = subset_samples(ps.rare.mcmc, site=="MSE")
ds.mse = phyloseq_to_deseq2(ps.mse, ~ zone)
dds.mse <- estimateSizeFactors(ds.mse,type="poscounts")
stat.mse = DESeq(dds.mse, test="Wald", fitType="parametric")
res = results(stat.mse, cooksCutoff = FALSE)
alpha = 0.05
sigtab.mse = res[which(res$padj < alpha), ]
sigtab.mse = cbind(as(sigtab.mse, "data.frame"), as(tax_table(ps.mse)[rownames(sigtab.mse), ], "matrix"))
head(sigtab.mse)
dim(sigtab.mse)
#sq 2 & 3
ps.t = subset_samples(ps.rare, site=="TNW")
ds.t = phyloseq_to_deseq2(ps.t, ~ zone)
dds.t <- estimateSizeFactors(ds.t,type="poscounts")
stat.t = DESeq(dds.t, test="Wald", fitType="parametric")
res = results(stat.t, cooksCutoff = FALSE)
alpha = 0.05
sigtab.t = res[which(res$padj < alpha), ]
sigtab.t = cbind(as(sigtab.t, "data.frame"), as(tax_table(ps.t)[rownames(sigtab.t), ], "matrix"))
dim(sigtab.t)
#sq 7
#### Heat map ####
#install.packages("pheatmap")
library(pheatmap)
library(dplyr)
#transform to relative abundance rather than absolute
counts$Sample <- rownames(counts)
newnames <- merge(samdf,counts, by="Sample")
sq1 <- newnames %>%
group_by(site_zone) %>%
summarise(sq1 = sum(sq1))
sq2 <- newnames %>%
group_by(site_zone) %>%
summarise(sq2 = sum(sq2))
sq3 <- newnames %>%
group_by(site_zone) %>%
summarise(sq3 = sum(sq3))
sq6 <- newnames %>%
group_by(site_zone) %>%
summarise(sq6 = sum(sq6))
sq7 <- newnames %>%
group_by(site_zone) %>%
summarise(sq7 = sum(sq7))
sq12 <- newnames %>%
group_by(site_zone) %>%
summarise(sq12 = sum(sq12))
sq18 <- newnames %>%
group_by(site_zone) %>%
summarise(sq18 = sum(sq18))
sq24 <- newnames %>%
group_by(site_zone) %>%
summarise(sq24 = sum(sq24))
sq32 <- newnames %>%
group_by(site_zone) %>%
summarise(sq32 = sum(sq32))
allsq <- newnames %>%
group_by(site_zone) %>%
summarise(all = sum(sq1, sq2, sq3, sq6, sq7, sq12, sq18, sq24, sq32))
df1 <- merge(sq1, sq2, by="site_zone")
df2 <- merge(df1,sq3,by="site_zone")
df3 <- merge(df2,sq6,by="site_zone")
df4 <- merge(df3,sq7,by="site_zone")
df5 <- merge(df4,sq12,by="site_zone")
df6 <- merge(df5,sq18,by="site_zone")
df7 <- merge(df6,sq24,by="site_zone")
df.all <- merge(df7,sq32,by="site_zone")
rownames(df.all) <- df.all$site_zone
df.counts <- df.all[,2:10]
df.counts.t <- t(df.counts)
#relative abundance
dat <- scale(df.counts.t, center=F, scale=colSums(df.counts.t))
quartz()
pheatmap(dat,colorRampPalette(c('white','chartreuse3','darkgreen'))(50),cluster_cols=F)
#without all the stuff I just did
counts.t <- t(counts)
dat2 <- scale(counts.t, center=F, scale=colSums(counts.t))
pheatmap(dat2,colorRampPalette(c('white','blue'))(50))
#### rarefy #####
library(vegan)
rarecurve(counts,step=100,label=FALSE) #after clustering & trimming
total <- rowSums(counts)
total
subset(total, total <1994)
summary(total)
row.names.remove <- c("117","311","402","414","505","58","72")
counts.rare <- counts[!(row.names(counts) %in% row.names.remove),]
samdf.rare <- samdf.mcmc[!(row.names(samdf.mcmc) %in% row.names.remove), ]
#85 samples left
seq.rare <- rrarefy(counts.rare,sample=1994)
rarecurve(seq.rare,step=100,label=FALSE)
rarecurve(seqtab.no87,step=100,label=FALSE)#before clustering & trimming
#save
#write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994.csv")
write.csv(seq.rare,"~/moorea_holobiont/mr_ITS2/seqtab.rare_1994_all.csv")
#read back in
seq.rare <- read.csv("~/moorea_holobiont/mr_ITS2/seqtab.rare_1994.csv",row.names=1)
#phyloseq object
ps.rare <- phyloseq(otu_table(seq.rare, taxa_are_rows=FALSE),
sample_data(samdf),
tax_table(taxa2))
ps_glom <- tax_glom(ps.rare, "Class")
ps0 <- transform_sample_counts(ps_glom, function(x) x / sum(x))
ps1 <- merge_samples(ps0, "site_zone")
ps2 <- transform_sample_counts(ps1, function(x) x / sum(x))
plot_bar(ps2, fill="Class")
plot_ordination(ps.rare, ordinate(ps.rare,method="PCoA"), color = "zone")+
geom_point()+
facet_wrap(~site)+
theme_cowplot()+
stat_ellipse()
#ugly
#### PCoA - rarefied, clustered, trimmed ####
df.seq <- as.data.frame(seq.rare)
all.log=logLin(data=df.seq)
# computing Manhattan distances (sum of all log-fold-changes) and performing PCoA:
all.dist=vegdist(all.log,method="manhattan")
all.pcoa=pcoa(all.dist)
write.csv(counts,"~/Desktop/mr_counts_rare.csv")
write.csv(samdf.rare,"~/Desktop/mr_samples_rare.csv")
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.rare)
pcoa.all$site <- gsub("MNW","Moorea NW",pcoa.all$site)
pcoa.all$site <- gsub("MSE","Moorea SE",pcoa.all$site)
pcoa.all$site <- gsub("TNW","Tahiti NW",pcoa.all$site)
quartz()
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
facet_wrap(~site)+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 (54.14%)")+
ylab("Axis 2 (33.58%)")
#looks the same as before rarefying?
#now by site
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
#3 outliers: 178, 116, 113
row.names.remove <- c("178","116","113")
pcoa.mnw.less <- pcoa.mnw[!(pcoa.mnw$Sample %in% row.names.remove),]
gg.mnw <- ggplot(pcoa.mnw.less,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
#### Stats ####
library(vegan)
#help on adonis here:
#https://thebiobucket.blogspot.com/2011/04/assumptions-for-permanova-with-adonis.html#more
#all
#dist.seqtab <- vegdist(seqtab.no87)
#anova(betadisper(dist.seqtab,samdf.no87$zone))
adonis(seqtab.no87 ~ zone, strata=samdf.no87$site, data=samdf.no87, permutations=999)
#0.01 **
#clustered but not trimmed
adonis(lulu.out ~ zone, strata=samdf.no87$site, data=samdf.no87, permutations=999)
#p 0.009
samdf.rare <- data.frame(sample_data(ps.rare))
#clustered, trimmed, rarefied
#stats by site
samdf.rare.mcmc <- data.frame(ps.rare.mcmc@sam_data)
rownames(samdf.rare.mcmc) == rownames(counts) #good - sample order matches
dist.rare <- vegdist(counts)
bet <- betadisper(dist.rare,samdf.rare.mcmc$site)
anova(bet)
permutest(bet, pairwise = FALSE, permutations = 99)
plot(bet)
adonis(counts ~ site, data=samdf.rare.mcmc, permutations=999)
#0.061 .
#install.packages("remotes")
#remotes::install_github("Jtrachsel/funfuns")
library("funfuns")
pairwise.adonis(counts, factors = samdf.rare.mcmc$site, permutations = 999)
# pairs F.Model R2 p.value p.adjusted
# 1 MNW vs MSE 1.545093 0.02685012 0.122 0.122
# 2 MNW vs TNW 5.737211 0.09936766 0.001 0.001
# 3 MSE vs TNW 13.668578 0.19619431 0.001 0.001
#relative abundance, does this by columns so must transform
t.relabun <- scale(t(counts), center=F, scale=colSums(t(counts)))
#un-transform
relabun <- t(t.relabun)
adonis(relabun ~ zone, strata=samdf.mcmc$site, data=samdf.mcmc, permutations=999)
#0.02 *
#rarefied, clustered, trimmed
dist.rare <- vegdist(seq.rare)
#dist.rare <- vegdist(counts.rare)
bet <- betadisper(dist.rare,samdf.rare$site)
#significant
anova(bet)
permutest(bet, pairwise = FALSE, permutations = 99)
plot(bet)
adonis(counts.rare ~ zone, strata=samdf.rare$site, data=samdf.rare, permutations=999)
adonis(seq.rare ~ zone, strata=samdf.rare$site, data=samdf.rare, permutations=999)
#0.07 . - 0.1
#Moorea NW
mnw.rare <- subset(samdf.rare.mcmc,site=="MNW")
mnw.seq <- counts[(rownames(counts) %in% mnw.rare$Sample),]
dist.mnw <- vegdist(mnw.seq)
bet.mnw <- betadisper(dist.mnw,mnw.rare$zone)
anova(bet.mnw) #not sig
permutest(bet.mnw, pairwise = FALSE, permutations = 99)
plot(bet.mnw) #not sig
adonis(mnw.seq ~ zone, data=mnw.rare, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone 1 0.11117 0.111170 2.3662 0.08646 0.023 *
# Residuals 25 1.17458 0.046983 0.91354
# Total 26 1.28575 1.00000
#Moorea SE
mse.rare <- subset(samdf.rare.mcmc,site=="MSE")
mse.seq <- counts[(rownames(counts) %in% mse.rare$Sample),]
dist.mse <- vegdist(mse.seq)
bet.mse <- betadisper(dist.mse,mse.rare$zone)
anova(bet.mse)
permutest(bet.mse, pairwise = FALSE, permutations = 99)
plot(bet.mse) #not sig
adonis(mse.seq ~ zone, data=mse.rare, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone 1 0.04445 0.04445 6.0477 0.17256 0.015 *
# Residuals 29 0.21315 0.00735 0.82744
# Total 30 0.25760 1.00000
#Tahiti
tnw.rare <- subset(samdf.rare,site=="TNW")
tnw.seq <- counts[(rownames(counts) %in% tnw.rare$Sample),]
dist.tnw <- vegdist(tnw.seq)
bet.tnw <- betadisper(dist.tnw,tnw.rare$zone)
anova(bet.tnw)
permutest(bet.tnw, pairwise = FALSE, permutations = 99)
plot(bet.tnw) #sig
adonis(tnw.seq ~ zone, data=tnw.rare, permutations=99)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone 1 0.22079 0.22079 2.7127 0.09789 0.05 *
# Residuals 25 2.03476 0.08139 0.90211
# Total 26 2.25554 1.00000
#log-normalized
df.seq <- as.data.frame(seqtab.no87)
all.log=logLin(data=df.seq)
all.dist=vegdist(all.log,method="manhattan")
all.pcoa=pcoa(all.dist)
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87)
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
facet_wrap(~site)
adonis(df.seq ~ zone, strata=samdf.no87$site, data=samdf.no87, permutations=999)
#0.012 *
#rarefied, but not clustered & trimmed
rarecurve(counts.rare,label=F) #yep def rarefied
adonis(counts.rare ~ zone, strata=samdf.rare$site, data=samdf.rare, permutations=999)
#0.01 **
#### stats plus size ####
library(vegan)
size <- read.csv("~/moorea_holobiont/mr_ITS2/mr_size.csv",header=TRUE,row.names=1)
row.names.remove <- c(109)
size2 <- size[!(row.names(size) %in% row.names.remove),]
row.names(size2) <- size2$coral_id
samdf.size <- merge(size2,samdf,by=0)
row.names(samdf.size) <- c(samdf.size$coral_id)
ord.mnw.df <- data.frame(ord.mnw[["vectors"]])
ord.mnw.df2 <- ord.mnw.df[,1:2]
ord.samdf.size.mnw <- merge(ord.mnw.df2,samdf.size,by=0)
plot(Axis.1~log(size),data=ord.samdf.size.mnw)
lm.size.mnw <- lm(Axis.1~size,data=ord.samdf.size.mnw)
summary(lm.size.mnw)
plot(Axis.2~log(size),data=ord.samdf.size.mnw)
lm.size.mnw2 <- lm(Axis.2~size,data=ord.samdf.size.mnw)
summary(lm.size.mnw2)
ord.mnw.df <- data.frame(ord.mnw[["vectors"]])
ord.mnw.df2 <- ord.mnw.df[,1:2]
ord.samdf.size.mnw <- merge(ord.mnw.df2,samdf.size,by=0)
plot(Axis.1~log(size),data=ord.samdf.size.mnw)
lm.size.mnw <- lm(Axis.1~size,data=ord.samdf.size.mnw)
summary(lm.size.mnw)
plot(Axis.2~log(size),data=ord.samdf.size.mnw)
lm.size.mnw2 <- lm(Axis.2~size,data=ord.samdf.size.mnw)
summary(lm.size.mnw2)
#skipping the above ord correlations
size.rows <- row.names(samdf.size)
size3 <- counts[(row.names(counts) %in% size.rows),]
size.rows2 <- c(row.names(size3))
samdf.size2 <- samdf.size[(row.names(samdf.size) %in% size.rows2),]
#samdf.size.sorted <- samdf.size2[nrow(samdf.size2):1, ]
#size3.sorted <- size3[nrow(size3):1, ]
#tahiti
samdf.size.tnw <- subset(samdf.size2,site.y=="TNW")
counts.size.tnw <- size3[(rownames(size3) %in% samdf.size.tnw$coral_id),]
row.names(samdf.size.tnw) == row.names(counts.size.tnw)
dist.tnw <- vegdist(counts.size.tnw)
bet.tnw <- betadisper(dist.tnw,samdf.size.tnw$zone.y)
anova(bet.tnw)
permutest(bet.tnw, pairwise = FALSE, permutations = 99)
plot(bet.tnw) #sig
adonis(counts.size.tnw ~ zone.y*size, data=samdf.size.tnw, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone.y 1 0.22079 0.220788 3.0991 0.09789 0.018 *
# size 1 0.28944 0.289440 4.0628 0.12832 0.047 *
# zone.y:size 1 0.10674 0.106743 1.4983 0.04732 0.194
# Residuals 23 1.63857 0.071242 0.72646
# Total 26 2.25554 1.00000
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
#plot(log(counts.size.tnw$sq7)~log(samdf.size.tnw$size))
#moorea nw
samdf.size.mnw <- subset(samdf.size2,site.y=="MNW")
counts.size.mnw <- size3[(rownames(size3) %in% samdf.size.mnw$coral_id),]
row.names(samdf.size.mnw) == row.names(counts.size.mnw)
dist.mnw <- vegdist(counts.size.mnw)
bet.mnw <- betadisper(dist.mnw,samdf.size.mnw$zone.y)
anova(bet.mnw)
permutest(bet.mnw, pairwise = FALSE, permutations = 99)
plot(bet.mnw) #not sig
adonis(counts.size.mnw ~ zone.y*size, data=samdf.size.mnw, permutations=999)
# Df SumsOfSqs MeanSqs F.Model R2 Pr(>F)
# zone.y 1 0.11117 0.111170 2.20961 0.08646 0.038 *
# size 1 0.00525 0.005254 0.10443 0.00409 0.845
# zone.y:size 1 0.01215 0.012148 0.24145 0.00945 0.684
# Residuals 23 1.15718 0.050312 0.90000
# Total 26 1.28575 1.00000
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
plot(log(counts.size.mnw$sq7)~log(samdf.size.mnw$size))
adonis(counts~zone*size,data=samdf.size2)
#### bray-curtis ####
iDist <- distance(ps.rare, method="bray")
iMDS <- ordinate(ps.rare, "MDS", distance=iDist)
plot_ordination(ps.rare, iMDS, color="zone", shape="site")
#ugly
#by site
ps.mnw <- subset_samples(ps.rare,site=="MNW")
iDist <- distance(ps.mnw, method="bray")
iMDS <- ordinate(ps.mnw, "MDS", distance=iDist)
plot_ordination(ps.mnw, iMDS, color="zone")+
stat_ellipse()
#relative abundance
t.relabun <- scale(t(counts), center=F, scale=colSums(t(counts)))
relabun <- t(t.relabun)
all.dist=vegdist(relabun,method="bray")
all.pcoa=pcoa(all.dist)
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87)
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=site))+
geom_point()+
stat_ellipse()
#now by site
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
#### CCA ####
library(adegenet) # for transp()
pp0=capscale(seq.rare~1)
pp=capscale(seq.rare~zone%in%site,data=samdf.rare)
anova(pp, alpha=0.05)
axes2plot=c(1,2)
quartz()
cmd=pp #change to pp for CAP, pp0 for MDS
plot(cmd,choices=axes2plot) # choices - axes to display
points(cmd,choices=axes2plot)
ordihull(cmd,choices= axes2plot,groups=samdf.rare$site_zone,draw="polygon",label=F)
#ordispider(cmd,choices= axes2plot,groups=samdf.rare$site,col="grey80")
#ordiellipse(cmd,choices= axes2plot,groups=samdf.rare$zone,draw="polygon",label=T)
#### indicator species ####
#install.packages("indicspecies")
library(indicspecies)
library(vegan)
#first testing out k means clustering
mcmc.km <- kmeans(counts,centers=2)
groupskm = mcmc.km$cluster
groupskm
all.log=logLin(data=counts)
all.dist=vegdist(all.log,method="bray")
all.pcoa=pcoa(all.dist)
km.log <- kmeans(all.log,centers=3)
groupskm = km.log$cluster
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.mcmc)
grps <- as.data.frame(groupskm)
grps$Sample <- rownames(grps)
pcoa2 <- merge(grps,pcoa.all,by="Sample")
pcoa2$groupskm <- as.factor(pcoa2$groupskm)
ggplot(pcoa2,aes(x=Axis.1,y=Axis.2,color=site,shape=groupskm))+
geom_point()+
stat_ellipse()
#interesting - not sure what to do here
#anyway back to indicspecies
groups <- samdf.mcmc$site
groups <- samdf.mcmc$zone
indval <- multipatt(counts, groups, control = how(nperm=999))
summary(indval,alpha=1)
#doesn't really work with the clustered ASV
#unclustered
groups <- samdf.no87$zone
indval <- multipatt(seqtab.no87, groups, control = how(nperm=999))
summary(indval)
# Group Backreef #sps. 1
# stat p.value
# sq22 0.478 0.017 *
#
# Group Forereef #sps. 7
# stat p.value
# sq15 0.711 0.001 ***
# sq9 0.526 0.007 **
# sq35 0.410 0.027 *
# sq37 0.388 0.019 *
# sq19 0.380 0.019 *
# sq17 0.377 0.024 *
# sq45 0.354 0.036 *
#now rarefied
counts.rare <- read.csv(file="~/moorea_holobiont/mr_ITS2/seqtabno87.rare.csv",row.names=1)
groups <- samdf.rare$zone
groups <- samdf.rare$site_zone
indval <- multipatt(counts.rare, groups, control = how(nperm=999))
summary(indval)
# Group Backreef #sps. 4
# stat p.value
# sq10 0.620 0.004 **
# sq29 0.594 0.005 **
# sq22 0.556 0.003 **
# sq66 0.420 0.038 *
#
# Group Forereef #sps. 4
# stat p.value
# sq15 0.729 0.001 ***
# sq9 0.544 0.013 *
# sq35 0.436 0.019 *
# sq37 0.404 0.017 *
#^ the same ones as unrarefied, but lost some & gained some
#### mcmc.otu ####
library(MCMC.OTU)
setwd("~/moorea_holobiont/mr_ITS2")
dat <- read.csv(file="seqtab.rare_1994_rd2_mcmc_plussam.csv", sep=",", header=TRUE, row.names=1)
goods <- purgeOutliers(dat,count.columns=5:23,otu.cut=0.001,zero.cut=0.02) #rare
head(goods)
# what is the proportion of samples with data for these OTUs?
apply(goods[,5:length(goods[1,])],2,function(x){sum(x>0)/length(x)})
# what percentage of global total counts each OTU represents?
apply(goods[,5:length(goods[1,])],2,function(x){sum(x)/sum(goods[,5:length(goods[1,])])})
# stacking the data; adjust otu.columns and condition.columns values for your data
gss=otuStack(goods,count.columns=c(5:length(goods[1,])),condition.columns=c(1:4))
gss$count=gss$count+1
# fitting the model. Replace the formula specified in 'fixed' with yours, add random effects if present.
# See ?mcmc.otu for these and other options.
mm=mcmc.otu(
fixed="site+zone+site:zone",
data=gss,
nitt=55000,thin=50,burnin=5000 # a long MCMC chain to improve modeling of rare OTUs
)
plot(mm)
# selecting the OTUs that were modeled reliably
# (OTUs that are too rare for confident parameter estimates are discarded)
acpass=otuByAutocorr(mm,gss)
# # head(gss)
# # ac=autocorr(mm$Sol)
# # ac
# calculating differences and p-values between all pairs of factor combinations
smm0=OTUsummary(mm,gss,otus=acpass,summ.plot=FALSE)
# adjusting p-values for multiple comparisons:
smmA=padjustOTU(smm0)
# significant OTUs at FDR<0.05:
sigs=signifOTU(smmA)
sigs
# plotting the significant ones
smm1=OTUsummary(mm,gss,otus=sigs)
# now plotting them by species
quartz()
smm1=OTUsummary(mm,gss,otus=sigs,xgroup="zone")
smm1+
ggtitle("test")
# table of log10-fold changes and p-values: this one goes into supplementary info in the paper
smmA$otuWise[sigs]
#### archive ####
#### Pcoa - raw data ####
library(vegan)
#install.packages("ggforce")
#library(ggforce)
#not sure if I need this one^
library(ggpubr)
library(cowplot)
# creating a log-transformed normalized dataset for PCoA:
df.seq <- as.data.frame(seqtab.no87)
all.log=logLin(data=df.seq)
# computing Manhattan distances (sum of all log-fold-changes) and performing PCoA:
all.dist=vegdist(all.log,method="bray")
all.pcoa=pcoa(all.dist)
#32.7%, 23.2%
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87)
levels(pcoa.all$site) <- c("Moorea NW","Moorea SE","Tahiti NW")
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
xlab('Axis 1 (32.7%)')+
ylab('Axis 2 (23.2%)')+
stat_ellipse()+
facet_wrap(~site)+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))
#now by site - unnecessary actually thanks to facet_wrap
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
pcoa.mse <- subset(pcoa.all,site=="MSE")
gg.mse <- ggplot(pcoa.mse,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))
gg.mse
pcoa.tah <- subset(pcoa.all,site=="TNW")
gg.tah <- ggplot(pcoa.tah,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))
gg.tah
quartz()
ggarrange(gg.mnw,gg.mse,gg.tah,nrow=1,common.legend=TRUE,legend="right")
#relative abundance instead of absolute abundance
t.relabun <- scale(t(seqtab.no87), center=F, scale=colSums(t(seqtab.no87)))
relabun <- t(t.relabun)
all.dist=vegdist(relabun,method="manhattan")
all.pcoa=pcoa(all.dist)
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.no87,by="Sample")
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=site,shape=zone))+
geom_point()+
stat_ellipse()
#alt method
ps.rel <- transform_sample_counts(ps_no87, function(OTU) OTU/sum(OTU))
iDist <- distance(ps.rel, method="bray")
iMDS <- ordinate(ps.rel, "MDS", distance=iDist)
plot_ordination(ps.rel, iMDS, color="site")+
stat_ellipse()
# creating a log-transformed normalized dataset for PCoA:
df.seq <- as.data.frame(counts)
all.log=logLin(data=df.seq)
# computing Manhattan distances (sum of all log-fold-changes) and performing PCoA:
all.dist=vegdist(all.log,method="manhattan")
all.pcoa=pcoa(all.dist)
# plotting:
scores=all.pcoa$vectors[,1:2]
scorez <- as.data.frame(scores)
scorez$Sample <- rownames(scorez)
pcoa.all <- merge(scorez,samdf.mcmc)
ggplot(pcoa.all,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
facet_wrap(~site)
#now by site
pcoa.mnw <- subset(pcoa.all,site=="MNW")
gg.mnw <- ggplot(pcoa.mnw,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mnw
pcoa.mse <- subset(pcoa.all,site=="MSE")
gg.mse <- ggplot(pcoa.mse,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.mse
pcoa.tah <- subset(pcoa.all,site=="TNW")
gg.tah <- ggplot(pcoa.tah,aes(x=Axis.1,y=Axis.2,color=zone,shape=zone))+
geom_point()+
stat_ellipse()+
theme_cowplot()+
scale_shape_manual(values=c(16,15),labels=c("Back reef","Fore reef"))+
scale_color_manual(values=c("#ED7953FF","#8405A7FF"),labels=c("Back reef","Fore reef"))+
guides(color=guide_legend(title="Reef zone"),shape=guide_legend(title="Reef zone"))+
xlab("Axis 1 ()")
gg.tah
ggarrange(gg.mnw,gg.mse,gg.tah,nrow=1,common.legend=TRUE,legend="right")
|
currcold = cf1$winter
mainlin = agedeathlinfun(main)
mainlinmod = sapply(anames, function(age){
wcausemod(age, mainlin, currcold)
}, simplify=FALSE)
tmp = confprint(mainlinmod$tot$all, vnames)
tmp
quit(save = "yes")
|
/wmain.R
|
no_license
|
mac-theobio/AnnualFlu
|
R
| false | false | 222 |
r
|
rankall <- function(outcome, num = "best") {
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv",na.strings = "Not Available")
## Check that outcome are valid
validOutcomes <- c("heart failure", "heart attack", "pneumonia")
if (sum(outcome == validOutcomes)==0) {
stop("invalid outcome")
}
## Get specific outcome data
if (outcome == "heart failure") {
mortality <- data$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure
} else if (outcome == "heart attack"){
mortality <- data$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack
} else if (outcome == "pneumonia"){
mortality <- data$Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia
}
## Simplify data frame
simpleData <- data.frame(data$State,data$Hospital.Name,mortality)
colnames(simpleData) <- c("State",'Hospital.Name','Mortality')
simpleData <- simpleData[order(simpleData[,1],simpleData[,2]),]
## For each state, find the hospital of the given rank
stateNames = levels(simpleData$State)
numStates = length(stateNames)
    hosp <- character(numStates)
for (i in 1:numStates) {
stateRows <- simpleData$State == stateNames[i]
tmp <- simpleData[stateRows,]
tmp <- tmp[order(tmp[,3],tmp[,2]),]
tmp$rank <- c(1:nrow(tmp))
if (is.character(num) && num == "best"){
loc <- which.min(tmp$Mortality)
hosp[i] <- as.character(tmp[loc,2])
} else if (is.character(num) && num == "worst") {
loc <- which.max(tmp$Mortality)
hosp[i] <- as.character(tmp[loc,2])
} else if (is.numeric(num) && num > nrow(tmp)) {
hosp[i] <- NA
} else if (is.numeric(num) && num <= nrow(tmp)){
loc <- which(tmp$rank == num)
hosp[i] <- as.character(tmp[loc,2])
}
}
## Create output data frame
    rankedHospitals <- data.frame(hospital = hosp, state = stateNames)
    rankedHospitals
}
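## Usage sketch (illustrative only; assumes "outcome-of-care-measures.csv"
## is present in the working directory, as read by the function above):
## res <- rankall("heart attack", 10)   # 10th-ranked hospital in each state
## head(res)
## rankall("pneumonia", "worst")        # worst-ranked hospital in each state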
|
/Course_2_R Programming Final/rankall.R
|
no_license
|
s-farr/datasciencecoursera
|
R
| false | false | 1,930 |
r
|
\name{neglogLik}
\alias{neglogLik}
\title{Negative Log-Likelihood}
\description{
Calculates the log-likelihood multiplied by negative one. It is in a format that can be used with the functions \code{\link[stats]{nlm}} and \code{\link[stats]{optim}}, providing an alternative to the \code{\link{BaumWelch}} algorithm for maximum likelihood parameter estimation.
}
\usage{
neglogLik(params, object, pmap)
}
\arguments{
\item{params}{a vector of revised parameter values.}
\item{object}{an object of class \code{"\link{dthmm}"}, \code{"\link{mmglm0}"}, or \code{"\link{mmpp}"}.}
\item{pmap}{a user provided function mapping the revised (or restricted) parameter values \code{p} into the appropriate locations in \code{object}.}
}
\details{
This function is in a format that can be used with the two functions \code{\link[stats]{nlm}} and \code{\link[stats]{optim}} (see Examples below). This provides alternative methods of estimating the maximum likelihood parameter values, to that of the EM algorithm provided by \code{\link{BaumWelch}}, including Newton type methods and grid searches. It can also be used to restrict estimation to a subset of parameters.
The EM algorithm is relatively stable when starting from poor initial values but convergence is very slow in close proximity to the solution. Newton type methods are very sensitive to initial conditions but converge much more quickly in close proximity to the solution. This suggests initially using the EM algorithm and then switching to Newton type methods (see Examples below).
The maximisation of the model likelihood function can be restricted to be over a subset of the model parameters. Other parameters will then be fixed at the values stored in the model \code{object}. Let \eqn{\Theta}{Theta} denote the model parameter space, and let \eqn{\Psi}{Psi} denote the parameter sub-space (\eqn{\Psi \subseteq \Theta}{Psi subseteq Theta}) over which the likelihood function is to be maximised. The argument \code{params} contains values in \eqn{\Psi}{Psi}, and \code{pmap} is assigned a function that maps these values into the model parameter space \eqn{\Theta}{Theta}. See \dQuote{Examples} below.
The mapping function assigned to \code{pmap} can also be made to impose restrictions on the domain of the parameter space \eqn{\Psi}{Psi} so that the minimiser cannot jump to values such that \eqn{\Psi \not\subseteq \Theta}{Psi notsubseteq Theta}. For example, if a particular parameter must be positive, one can work with a transformed parameter that can take any value on the real line, with the model parameter being the exponential of this transformed parameter. Similarly a modified logit like transform can be used to ensure that parameter values remain within a fixed interval with finite boundaries. Examples of these situations can be found in the \dQuote{Examples} below.
Some functions are provided in the topic \code{\link{Transform-Parameters}} that may provide useful components within the user provided function assigned to \code{pmap}.
}
\value{
Value of the log-likelihood times negative one.
}
\seealso{\code{\link[stats]{nlm}}, \code{\link[stats]{optim}}, \code{\link{Transform-Parameters}}, \code{\link{BaumWelch}}}
\examples{
# Example where the Markov chain is assumed to be stationary
Pi <- matrix(c(0.8, 0.1, 0.1,
0.1, 0.6, 0.3,
0.2, 0.3, 0.5),
byrow=TRUE, nrow=3)
# start simulation in state 2
delta <- c(0, 1, 0)
x <- dthmm(NULL, Pi, delta, "exp", list(rate=c(5, 2, 0.2)), nonstat=FALSE)
x <- simulate(x, nsim=5000, seed=5)
# Approximate solution using BaumWelch
x1 <- BaumWelch(x, control=bwcontrol(maxiter=10, tol=1e-5))
# Exact solution using nlm
allmap <- function(y, p){
# maps vector back to delta, Pi and rate
m <- sqrt(length(p))
y$Pi <- vector2Pi(p[1:(m*(m-1))])
y$pm$rate <- exp(p[(m^2-m+1):(m^2)])
y$delta <- compdelta(Pi)
return(y)
}
p <- c(Pi2vector(x$Pi), log(x$pm$rate))
# Increase iterlim below to achieve convergence
# Has been restricted to minimise time of package checks
z <- nlm(neglogLik, p, object=x, pmap=allmap,
print.level=2, gradtol=0.000001, iterlim=2)
x2 <- allmap(x, z$estimate)
# compare parameter estimates
print(summary(x))
print(summary(x1))
print(summary(x2))
#--------------------------------------------------------
# Estimate only the off diagonal elements in the matrix Pi
# Hold all others as in the simulation
# This function maps the changeable parameters into the
# dthmm object - done within the function neglogLik
# The logit-like transform removes boundaries
Pi <- matrix(c(0.8, 0.1, 0.1,
0.1, 0.6, 0.3,
0.2, 0.3, 0.5),
byrow=TRUE, nrow=3)
delta <- c(0, 1, 0)
x <- dthmm(NULL, Pi, delta, "exp", list(rate=c(5, 3, 1)))
x <- simulate(x, nsim=5000, seed=5)
offdiagmap <- function(y, p){
# rows must sum to one
invlogit <- function(eta)
exp(eta)/(1+exp(eta))
y$Pi[1,2] <- (1-y$Pi[1,1])*invlogit(p[1])
y$Pi[1,3] <- 1-y$Pi[1,1]-y$Pi[1,2]
y$Pi[2,1] <- (1-y$Pi[2,2])*invlogit(p[2])
y$Pi[2,3] <- 1-y$Pi[2,1]-y$Pi[2,2]
y$Pi[3,1] <- (1-y$Pi[3,3])*invlogit(p[3])
y$Pi[3,2] <- 1-y$Pi[3,1]-y$Pi[3,3]
return(y)
}
z <- nlm(neglogLik, c(0, 0, 0), object=x, pmap=offdiagmap,
print.level=2, gradtol=0.000001)
# x1 contains revised parameter estimates
x1 <- offdiagmap(x, z$estimate)
# print revised values of Pi
print(x1$Pi)
# print log-likelihood using original and revised values
print(logLik(x))
print(logLik(x1))
#--------------------------------------------------------
# Fully estimate both Q and lambda for an MMPP Process
Q <- matrix(c(-8, 5, 3,
1, -4, 3,
2, 5, -7),
byrow=TRUE, nrow=3)/25
lambda <- c(5, 3, 1)
delta <- c(0, 1, 0)
# simulate some data
x <- mmpp(NULL, Q, delta, lambda)
x <- simulate(x, nsim=5000, seed=5)
allmap <- function(y, p){
# maps vector back to Pi and rate
m <- sqrt(length(p))
y$Q <- vector2Q(p[1:(m*(m-1))])
y$lambda <- exp(p[(m^2-m+1):(m^2)])
return(y)
}
# Start by using the EM algorithm
x1 <- BaumWelch(x, control=bwcontrol(maxiter=10, tol=0.01))
# use above as initial values for the nlm function
# map parameters to a single vector, fixed delta
p <- c(Q2vector(x1$Q), log(x1$lambda))
# Complete estimation using nlm
# Increase iterlim below to achieve convergence
# Has been restricted to minimise time of package checks
z <- nlm(neglogLik, p, object=x, pmap=allmap,
print.level=2, gradtol=0.000001, iterlim=5)
# mmpp object with estimated parameter values from nlm
x2 <- allmap(x, z$estimate)
# compare log-likelihoods
print(logLik(x))
print(logLik(x1))
print(logLik(x2))
# print final parameter estimates
print(summary(x2))
}
\keyword{optimize}
|
/thirdParty/HiddenMarkov.mod/man/neglogLik.Rd
|
no_license
|
karl616/gNOMePeaks
|
R
| false | false | 6,869 |
rd
|
# noise for MODEL 1
library(MASS)
set.seed(109)
ages<-SMS.control@species.info[,1:2]
nop<-first.VPA-1
modelByAge<- c(-1,1,2)[2] # -1: no noise, 1: same noise for all ages, 2:age dependent noise
modelByAge<-rep(modelByAge,nop)
varAsParameter<-1 # 1=estimate variance as a parameter ; 0=use empirical variance
boundsOnVar<- c(0.01,1) # lower and upper bounds on variance (if estimated as parameter)
correlated<-TRUE # correlated noise between age groups of same species (it really does not matter in SMS)
correlation<-0.4
n<-20;
(s<-0.1*sqrt(n))
s<-rep(s,nop)
w_factor<-1; phase<-3 ;empi<-TRUE
lower<-0.5; upper<-1.5; #parameter bounds
## test
if (n>=2) {
a<-mvrnorm(n , mu = 1, Sigma = matrix(s^2,ncol=1,nrow=1), empirical = TRUE)
mean(a)
sd(a)
#empirical = FALSE
a<-mvrnorm(n , mu = 1, Sigma = matrix(s^2,ncol=1,nrow=1), empirical = FALSE)
mean(a)
sd(a)
## end test
}
fi<-file.path(data.path,"other_pred_n_noise.dat")
cat("# File, noise_other_pred.dat, with data for simulating uncertainties in abundance of other predators \n",file=fi)
cat(phase, " # phase for estimating noise\n",file=fi,append=T)
cat(lower, ' ', upper, " # Lower and upper bound for the parameters\n",file=fi,append=TRUE)
cat(" # Weighting factor for likelihood contributions for noise factor\n",file=fi,append=TRUE)
cat("# ",formatC(sp.names[1:nop],12),'\n',file=fi,append=TRUE)
cat(" ",formatC(rep(w_factor,nop),width=11),'\n',file=fi,append=TRUE)
cat(varAsParameter," # 1=estimate variance as a parameter ; 0=use empirical variance\n",file=fi,append=TRUE)
cat(boundsOnVar, " # lower and upper bounds on variance (if estimated as parameter)\n",file=fi,append=TRUE)
cat("1 # model type: 0=no noise, 1=from scaling factors with mean 1 and std of the mean at a level set at target, 2=from noise on observations\n",file=fi,append=TRUE)
cat(n,' # Number of "observations" used to fit uncertainty for each other predator. \n',file=fi,append=TRUE)
cat(" # Usage of age dependent noise factor. -1: no noise, 1: same noise for all ages, 2:age dependent noise \n",file=fi,append=TRUE)
cat("# ",formatC(sp.names[1:nop],12),'\n',file=fi,append=TRUE)
cat(" ",formatC(modelByAge,width=11),'\n',file=fi,append=TRUE)
for (sp in (1:nop)) {
set.seed(sp)
cat(sp.names[sp],'\n')
cat("######## ",sp.names[sp],'\n',file=fi,append=TRUE)
  if (modelByAge[sp]== -1) cat(rep(-1,n),'\n',file=fi,append=TRUE)
if (modelByAge[sp]== 1) {
b<-mvrnorm(n , mu = 1, Sigma = matrix(s[sp]^2,ncol=1,nrow=1), empirical = empi)
    cat('# "observations" used to mimic a factor with mean 1 and wanted std of the mean at ',s[sp],' \n',file=fi,append=TRUE)
cat(b,'\n',file=fi,append=TRUE)
}
if (modelByAge[sp]== 2) {
if (!correlated) for (a in (ages[sp,2]:ages[sp,1])) {
b<-mvrnorm(n , mu = 1, Sigma = matrix(s[sp]^2,ncol=1,nrow=1), empirical = empi)
      cat('# age ',a,' " observations" used to mimic a factor with mean 1 and wanted std of the mean at ',s[sp],' \n',file=fi,append=TRUE)
cat(b,'\n',file=fi,append=TRUE)
}
    if (correlated) {
      nOages<-ages[sp,1]-ages[sp,2]+1
      SE<-rep(s[sp],nOages)
      COR<-matrix(correlation,nrow=nOages,ncol=nOages)
      diag(COR)<-1.0
      COV<- diag(SE) %*% COR %*% diag(SE)
      # one draw of correlated "observations" across all ages of this predator
      b<-mvrnorm(n , mu = rep(1,nOages), Sigma = COV, empirical = empi)
      apply(b,2,mean)
      apply(b,2,sd)
      i<-0
      for (a in (ages[sp,2]:ages[sp,1])) {
        i<-i+1
        cat('# age ',a,' CORRELATED " observations" used to mimic a factor with mean 1 and wanted std of the mean at ',s[sp],' \n',file=fi,append=TRUE)
        cat(b[,i],'\n',file=fi,append=TRUE)
      }
    }
}
}
|
/SMS_r_prog/make_noise_other_pred.R
|
permissive
|
ices-eg/wg_WGSAM
|
R
| false | false | 3,700 |
r
|
library(predtoolsTS)
### Name: summary.prep
### Title: Generic function
### Aliases: summary.prep
### ** Examples
summary(prep(AirPassengers))
|
/data/genthat_extracted_code/predtoolsTS/examples/summary.prep.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 150 |
r
|
#!/usr/bin/env Rscript
suppressPackageStartupMessages(library("optparse"))
option_list <- list(
make_option(c("-c","--countFile"),
help="Count file as generated for example by featureCounts."
),
make_option(c("-p","--pValue"),
help="Pvalue threshold for a gene to be reported as differentially expressed",
default=0.1
),
make_option(c("-l","--logFC"),
help="Absolute Value of the log Fold change",
default=0
),
make_option(c("-n","--numberBack"),
help="Number of differentially expressed genes to return",
default=1000)
)
opt<- parse_args(OptionParser(option_list=option_list))
#define contrast matrix
getContrastDesign<-function(classes){
list<-combn(classes,2,simplify=F);
contrast<-vector();
for(i in 1:length(list)){
str=paste(list[[i]][1],list[[i]][2],sep="-");
contrast<-c(contrast,str);
str=paste(list[[i]][2],list[[i]][1],sep="-");
contrast<-c(contrast,str);
}
return(contrast);
}
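# Quick illustration (hypothetical class labels, not taken from any real count file):
# getContrastDesign(c("WT","KO")) returns c("WT-KO", "KO-WT")
# getContrastDesign(c("A","B","C")) returns all six pairwise contrasts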
library(edgeR)
library(limma)
library(stringr)
library(statmod)
#read data
datafile=read.table(opt$countFile,header=T,row.names=1)
#get name of categories
groupName=str_replace(colnames(datafile),"_.+","" )
classes<-unique(groupName)
#Do what do below but by comparing i,j
lC=length(classes)
lC_1<-lC-1
for(a in 1:lC_1){
c=a+1;
for(b in c:lC){
class<-c(classes[a],classes[b]);
columnsa<-grep(paste(classes[a],"_",sep=""),colnames(datafile))
columnsb<-grep(paste(classes[b],"_",sep=""),colnames(datafile))
columns<-c(columnsa,columnsb)
gName<-groupName[columns]
all<-DGEList(data.matrix(datafile[,columns]),group=gName)
isexpr<-rowSums(cpm(all)>1)>=length(gName)
all<-all[isexpr,,keep.lib.sizes=FALSE]
all<-calcNormFactors(all)
design<-model.matrix(~ 0+factor(match(gName,class)))
colnames(design)<-unique(gName)
v<-voom(all,design,plot=F)
fit <- lmFit(v, design)
#Contrast Design
contrasts<-getContrastDesign(class)
contrast.matrix<-makeContrasts(contrasts=contrasts,levels=design)
#Fit + predictions
fit2<-contrasts.fit(fit,contrast.matrix)
fit2<-eBayes(fit2)
#Now extract results
for( i in contrasts){
fra<-topTable(fit2,coef=i,number=Inf,adjust.method="fdr",lfc=opt$logFC,p.value=opt$pValue,sort.by="P")
write.table(head(fra[fra$logFC <= opt$logFC & fra$adj.P.Val < opt$pValue,],n=opt$numberBack),file=paste(i,"DOWN",sep="."),qmethod="double")
write.table(head(fra[fra$logFC >= opt$logFC & fra$adj.P.Val < opt$pValue,],n=opt$numberBack),file=paste(i,"UP",sep=".") ,qmethod="double")
}
}
}
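# Example invocation (file name is hypothetical; the count table's column names must
# follow the group_replicate convention, e.g. WT_1, WT_2, KO_1, so groups can be
# recovered by stripping everything after the first underscore):
# Rscript diff.R -c counts.txt -p 0.05 -l 1 -n 500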
|
/diff.R
|
no_license
|
wyim-pgl/htafer_genome_annoation_snakeMake
|
R
| false | false | 5,707 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gla_pal.R
\name{gla_pal}
\alias{gla_pal}
\title{gla_pal}
\usage{
gla_pal(
gla_theme = "default",
palette_type = "categorical",
palette_name = "core",
main_colours = NULL,
n = 6,
inc0 = FALSE,
remove_margin = NULL
)
}
\arguments{
\item{gla_theme}{One of "default" or "inverse". Default: 'default'}
\item{palette_type}{One of "categorical", "quantitative", "highlight" or "diverging", Default: 'categorical'}
\item{palette_name}{One of the strings "core", "light", "dark", "brand", Default: 'core'}
\item{main_colours}{One or more of "blue", "pink", "green", "red", "yellow", "orange", "purple" or "mayoral" as a string or list, Default: 'mayoral'}
\item{n}{Number of colours in the palette. If palette_type = "diverging", this is the number of colours on each side of the diverging scale. If palette_type = "highlight", gla_pal will return main_colours + (n - length(main_colours)) context colours. Default: 6}
\item{inc0}{boolean, If TRUE an additional colour representing zero will be added to quantitative or diverging palettes, Default: FALSE}
\item{remove_margin}{Remove the edges of the palette to get a more central palette. Either 'left', 'right', 'both' or NULL, Default: NULL}
}
\value{
Returns a character vector of length n giving colour hexs.
}
\description{
Generates palettes using the GLA colours
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
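\dontrun{
# Illustrative calls only (not from the package authors); argument values
# follow the options documented in the arguments above.
gla_pal(palette_type = "categorical", palette_name = "core", n = 4)
gla_pal(palette_type = "diverging", main_colours = c("blue", "red"), n = 5, inc0 = TRUE)
}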
}
\seealso{
\code{\link[chroma]{interp_scale}}
}
|
/man/gla_pal.Rd
|
no_license
|
Greater-London-Authority/gglaplot
|
R
| false | true | 1,518 |
rd
|
sharingButton <- function() {
url <- 'https://cpsievert.github.io/plotly_book/plot-ly-for-collaboration.html'
list(
name = "Collaborate",
icon = list(
width = 1000,
ascent = 500,
descent = -50,
path = sharingPath
),
click = htmlwidgets::JS(sprintf(
"function(gd) {
// is this being viewed in RStudio?
if (location.search == '?viewer_pane=1') {
alert('To learn about plotly for collaboration, visit:\\n %s');
} else {
window.open('%s', '_blank');
}
}", url, url))
)
}
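# Presumed usage, shown only as a hypothetical sketch (this file does not call it):
# the returned list would be passed to plotly's mode bar configuration, e.g.
# config(plot_ly(x = 1:3, y = 1:3), modeBarButtonsToAdd = list(sharingButton()))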
sharingPath <- 'M487 375c7-10 9-23 5-36l-79-259c-3-12-11-23-22-31-11-8-22-12-35-12l-263 0c-15 0-29 5-43 15-13 10-23 23-28 37-5 13-5 25-1 37 0 0 0 3 1 7 1 5 1 8 1 11 0 2 0 4-1 6 0 3-1 5-1 6 1 2 2 4 3 6 1 2 2 4 4 6 2 3 4 5 5 7 5 7 9 16 13 26 4 10 7 19 9 26 0 2 0 5 0 9-1 4-1 6 0 8 0 2 2 5 4 8 3 3 5 5 5 7 4 6 8 15 12 26 4 11 7 19 7 26 1 1 0 4 0 9-1 4-1 7 0 8 1 2 3 5 6 8 4 4 6 6 6 7 4 5 8 13 13 24 4 11 7 20 7 28 1 1 0 4 0 7-1 3-1 6-1 7 0 2 1 4 3 6 1 1 3 4 5 6 2 3 3 5 5 6 1 2 3 5 4 9 2 3 3 7 5 10 1 3 2 6 4 10 2 4 4 7 6 9 2 3 4 5 7 7 3 2 7 3 11 3 3 0 8 0 13-1l0-1c7 2 12 2 14 2l218 0c14 0 25-5 32-16 8-10 10-23 6-37l-79-259c-7-22-13-37-20-43-7-7-19-10-37-10l-248 0c-5 0-9-2-11-5-2-3-2-7 0-12 4-13 18-20 41-20l264 0c5 0 10 2 16 5 5 3 8 6 10 11l85 282c2 5 2 10 2 17 7-3 13-7 17-13z m-304 0c-1-3-1-5 0-7 1-1 3-2 6-2l174 0c2 0 4 1 7 2 2 2 4 4 5 7l6 18c0 3 0 5-1 7-1 1-3 2-6 2l-173 0c-3 0-5-1-8-2-2-2-4-4-4-7z m-24-73c-1-3-1-5 0-7 2-2 3-2 6-2l174 0c2 0 5 0 7 2 3 2 4 4 5 7l6 18c1 2 0 5-1 6-1 2-3 3-5 3l-174 0c-3 0-5-1-7-3-3-1-4-4-5-6z'
|
/output/sources/authors/2774/plotly/modeBarButtons.R
|
no_license
|
Irbis3/crantasticScrapper
|
R
| false | false | 1,609 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{plotspict.retro}
\alias{plotspict.retro}
\title{Plot results of retrospective analysis}
\usage{
plotspict.retro(rep, stamp = get.version())
}
\arguments{
\item{rep}{A valid result from fit.spict.}
\item{stamp}{Stamp plot with this character string.}
}
\value{
Nothing
}
|
/spict/man/plotspict.retro.Rd
|
no_license
|
alko989/spict
|
R
| false | true | 367 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutationCalling.R
\name{mutationCallsFromSingleBam}
\alias{mutationCallsFromSingleBam}
\title{Create a mutationCalls object from a list of single-cell BAM files}
\usage{
mutationCallsFromSingleBam(bam, tag = "XQ:C")
}
\arguments{
\item{bam}{Path to the bam file}
\item{tag}{Name of the bam file tag}
}
\value{
An object of class \code{\link{mutationCalls}}
}
\description{
More explanations of what happens
}
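\examples{
\dontrun{
# Illustrative only: "cells.bam" is a hypothetical file path; the tag value
# mirrors the default shown in the usage section above.
mc <- mutationCallsFromSingleBam(bam = "cells.bam", tag = "XQ:C")
}
}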
|
/man/mutationCallsFromSingleBam.Rd
|
no_license
|
benstory/mito_app
|
R
| false | true | 488 |
rd
|
context("MD5 hashes")
tenant <- Sys.getenv("AZ_TEST_TENANT_ID")
app <- Sys.getenv("AZ_TEST_APP_ID")
password <- Sys.getenv("AZ_TEST_PASSWORD")
subscription <- Sys.getenv("AZ_TEST_SUBSCRIPTION")
if(tenant == "" || app == "" || password == "" || subscription == "")
skip("Authentication tests skipped: ARM credentials not set")
rgname <- Sys.getenv("AZ_TEST_STORAGE_RG")
storname <- Sys.getenv("AZ_TEST_STORAGE_HNS")
if(rgname == "" || storname == "")
skip("MD5 tests skipped: resource names not set")
sub <- AzureRMR::az_rm$new(tenant=tenant, app=app, password=password)$get_subscription(subscription)
stor <- sub$get_resource_group(rgname)$get_storage_account(storname)
bl <- stor$get_blob_endpoint()
ad <- stor$get_adls_endpoint()
fl <- stor$get_file_endpoint()
opts <- options(azure_storage_progress_bar=FALSE)
test_that("Blob upload/download works with MD5 hash",
{
contname <- make_name()
cont <- create_blob_container(bl, contname)
expect_silent(upload_blob(cont, "../resources/iris.csv"))
lst <- list_blobs(cont, info="all")
expect_true(all(is.na(lst[["Content-MD5"]])))
expect_silent(upload_blob(cont, "../resources/iris.csv", put_md5=TRUE))
md5 <- encode_md5(file("../resources/iris.csv"))
lst <- list_blobs(cont, info="all")
expect_identical(lst[["Content-MD5"]], md5)
dl_file <- file.path(tempdir(), make_name())
expect_silent(download_blob(cont, "iris.csv", dl_file, check_md5=TRUE))
dl_md5 <- encode_md5(file(dl_file))
expect_identical(md5, dl_md5)
})
test_that("ADLS upload/download works with MD5 hash",
{
contname <- make_name()
fs <- create_adls_filesystem(ad, contname)
expect_silent(upload_adls_file(fs, "../resources/iris.csv"))
props <- get_storage_properties(fs, "iris.csv")
expect_null(props$`content-md5`)
expect_silent(upload_adls_file(fs, "../resources/iris.csv", put_md5=TRUE))
md5 <- encode_md5(file("../resources/iris.csv"))
props <- get_storage_properties(fs, "iris.csv")
expect_identical(props$`content-md5`, md5)
dl_file <- file.path(tempdir(), make_name())
expect_silent(download_adls_file(fs, "iris.csv", dl_file, check_md5=TRUE))
dl_md5 <- encode_md5(file(dl_file))
expect_identical(md5, dl_md5)
})
test_that("File upload/download works with MD5 hash",
{
contname <- make_name()
share <- create_file_share(fl, contname)
expect_silent(upload_azure_file(share, "../resources/iris.csv"))
props <- get_storage_properties(share, "iris.csv")
expect_null(props$`content-md5`)
expect_silent(upload_azure_file(share, "../resources/iris.csv", put_md5=TRUE))
md5 <- encode_md5(file("../resources/iris.csv"))
props <- get_storage_properties(share, "iris.csv")
expect_identical(props$`content-md5`, md5)
dl_file <- file.path(tempdir(), make_name())
expect_silent(download_azure_file(share, "iris.csv", dl_file, check_md5=TRUE))
dl_md5 <- encode_md5(file(dl_file))
expect_identical(md5, dl_md5)
})
teardown(
{
conts <- list_blob_containers(bl)
lapply(conts, delete_blob_container, confirm=FALSE)
fss <- list_adls_filesystems(ad)
lapply(fss, delete_adls_filesystem, confirm=FALSE)
fls <- list_file_shares(fl)
lapply(fls, delete_file_share, confirm=FALSE)
options(opts)
})
|
/tests/testthat/test13_md5.R
|
permissive
|
Azure/AzureStor
|
R
| false | false | 3,305 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var_f.R
\name{survw_integratef}
\alias{survw_integratef}
\title{Integrate function}
\usage{
survw_integratef(t, tau, ascale, bshape)
}
\arguments{
\item{t}{time}
\item{tau}{follow-up}
\item{ascale}{scale parameter for the Weibull distribution}
\item{bshape}{shape parameter for the Weibull distribution}
}
\value{
Variance computation
}
\description{
The function \code{survw_integratef} is used for the integrations required in the variance computation.
}
\author{
Marta Bofill Roig
}
\keyword{internal}
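% A usage sketch; the argument values below are illustrative assumptions, not taken from the package:
\examples{
\dontrun{
survw_integratef(t = 0.5, tau = 1, ascale = 1, bshape = 1.2)
}
}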
|
/man/survw_integratef.Rd
|
no_license
|
MartaBofillRoig/survmixer
|
R
| false | true | 543 |
rd
|
#' Create hex colour
#'
#' @param mat matrix of RGB colours (one row per colour, values 0-255) defining the palette
#'
#' @return character vector of hex colour strings
#'
#' @export
pal <- function(mat) {
rgb(mat[,1], mat[,2], mat[,3], maxColorValue=255)
}
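# Illustrative use of pal() (kept as a comment so nothing runs at package load):
# pal(rbind(c(255, 0, 0), c(0, 0, 255)))
# #> [1] "#FF0000" "#0000FF"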
#' Gretchen Albrecht Colour Palettes
#'
#'A collection of colour palettes inspired by the paintings of NZ abstract expressionist Gretchen Albrecht:
#' rocker
#' black_plain
#' flood_tide
#' last_rays
#' red_sky_golden_cloud
#' winged_spill
#' pink_cloud
#' winter_light
#'
#'@examples
#'
#' # Print out the palettes available
#' gretchenalbrecht_palettes
#'
#' # Make an x-y plot using the rocker palette
#' library(tidyverse)
#' data(diamonds)
#' diamonds_small <- diamonds %>% sample_n(1000)
#' ggplot(diamonds_small,
#' aes(x=carat, price, colour=cut, shape=cut)) +
#' geom_point(size=4, alpha=0.5) +
#' scale_colour_gretchenalbrecht(palette="rocker") +
#' theme_bw() + theme(aspect.ratio=1)
#'
#' # Make a histogram using the pink_cloud palette
#' ggplot(diamonds_small, aes(x=price, fill=cut)) + geom_histogram() +
#' scale_fill_gretchenalbrecht(palette="pink_cloud") + theme_bw()
#'
#' @export
gretchenalbrecht_palettes <- list(
# 1975 acrylic on canvas
rocker = pal(rbind(c(110,3,40), c(53,111,44), c(250,100,0), c(44,32,30), c(2,6,186), c(54,30,54), c(200,71,78))),
black_plain = pal(rbind(c(0,82,50), c(40,30,30), c(164,38,94), c(253,190,0), c(255,240,0), c(160,0,17))),
# 2016 watercolour
flood_tide = pal(rbind(c(45,56,86), c(77,101,186), c(236,229,221), c(250,208,169), c(253,236,130))),
last_rays = pal(rbind(c(60,60,60),c(120,150,180),c(230,230,220),c(250,170,114), c(190,92,85))),
red_sky_golden_cloud = pal(rbind(c(0,40,25), c(68,111,4), c(0,91,205), c(242,200,210), c(255,150,0), c(255,43,0), c(77,50,150))),
winged_spill = pal(rbind(c(90,60,75),c(232,112,75),c(115,100,133),c(253,238,98),c(98,133,138), c(188,68,82))),
pink_cloud = pal(rbind(c(27,27,27),c(130,110,96),c(240,164,0),c(220,220,220),c(179,77,103))),
winter_light = pal(rbind(c(218,212,60),c(242,238,195), c(146,38,31), c(68,33,86)))
)
|
/R/colours.R
|
permissive
|
heike/gretchenalbrecht
|
R
| false | false | 2,019 |
r
|
#' plots a simple panel for given codes and years
#'
#' Provides two controls - one to select the endpoint and the other
#' to select the year
#'
#' @param id the widget id
#' @param input shiny input
#' @param output shiny output
#' @param ui TRUE for the user interface, FALSE for server
#' @param cx a reactive list; the module reads its data from cx$data
#'
#' @import ggplot2
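#' @examples
#' \dontrun{
#' # Hypothetical wiring inside a shiny app (ids and objects are illustrative):
#' # UI:     simpleView("euMap", ui = TRUE)
#' # server: simpleView("euMap", input, output, ui = FALSE, cx = cx)
#' }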
#' @export
simpleView=function(id,input=NULL,output=NULL,ui=T,cx=NULL){
require(ggplot2)
ns=NS(id)
if (ui){
fluidPage(
fluidRow(
column(6,
uiOutput(ns("endpoint.choice"))),
column(3,
uiOutput(ns("gender.choice"))),
column(3,
uiOutput(ns("year.choice")))),
fluidRow(
column(12,
plotOutput(ns("map"))))
)
} else {
print("boo")
years=reactive({
selectedCode = input[[ns("endpoint.selection")]]
print(selectedCode)
if (is.null(selectedCode)){
sort(as.character(unique(cx$data$year)),
decreasing=T)
} else {
sort(as.character(unique(subset(cx$data,
code == selectedCode)$year)),
decreasing=T)
}
})
endpoints=reactive({
# need first to remove the sticky $geom slot
w=cx$data
w$geom=NULL
# and now return a named list
x=unique(w[,c("code","name")])
print(x)
out=x$code
names(out)=x$name
out
})
dataView0=reactive({
thisEndpoint=input[[ns("endpoint.selection")]]
thisYear=input[[ns("year.selection")]]
      if (is.null(thisEndpoint) || is.null(thisYear)){ # either input may still be NULL before the UI renders
NULL
} else {
subset(cx$data, year == thisYear & code == thisEndpoint)
}
})
genders=reactive({
a=dataView0()
if (!is.null(a)){
unique(a$sex)
} else {
NULL
}
})
dataView=reactive({
a=dataView0()
if (!is.null(a)){
subset(a,sex==input[[ns("gender.selection")]])
} else {
NULL
}
})
output[[ns("endpoint.choice")]]=renderUI({
print(endpoints())
selectInput(ns("endpoint.selection"),
label="measure",
choices=endpoints())
})
output[[ns("gender.choice")]]=renderUI({
print(genders())
selectInput(ns("gender.selection"),
label="gender",
choices=genders())
})
output[[ns("year.choice")]]=renderUI({
selectInput(ns("year.selection"),
label="year",
choices=years())
})
output[[ns("map")]]=renderPlot({
a=dataView()
if (is.null(a)){
plot(0,0,axes=F,xlab="",ylab="",type="n")
text(0,0,"wait...")
} else {
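        # Note: assumes an sf polygon object `world` (not defined in this file,
        # e.g. from a package such as rnaturalearth) is available in the enclosing environment.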
ggplot()+
scale_fill_distiller(palette = "Spectral") +
geom_sf(data=world,fill="white") +
geom_sf(data=a,aes(fill=value)) +
xlim(c(-10,50))+ ylim(c(35,70)) +
ggtitle(a[1,"full_name"],
subtitle=a[1,"year"])
}
})
}
}
|
/EuData/R/simpleView.r
|
no_license
|
willeda1/schoolGIS
|
R
| false | false | 3,231 |
r
|
testlist <- list(b = c(-32768L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result)
|
/mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613102550-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 180 |
r
|
= Obsolete features
The variable, method, and object names mentioned below are old names.
Using them will print a warning, or they may suddenly disappear one day.
== Obsolete methods
: String#~
: String#=~
~str was removed in 1.8 and later. In addition, str =~ str raises an exception.
: Object#id
Prints a warning in 1.8. Use Object#object_id instead.
        $ ruby-1.8.0 -we 'p Object.new.id'
        -e:1: warning: Object#id will be deprecated; use Object#object_id
        537752050
: Object#type
Prints a warning in 1.8. Use Object#class instead.
        $ ruby-1.8.0 -we 'p Object.new.type'
        -e:1: warning: Object#id will be deprecated; use Object#object_id
        Object
: Object#to_a
Object#to_a is scheduled to be removed. Please use Kernel#Array instead.
        $ ruby-1.8.0 -e 'p Object.new.to_a'
        -e:1: warning: default `to_a' will be obsolete
        [#<Object:0x401ae3e4>]
        $ ruby-1.8.0 -we 'p Array(Object.new)'
        [#<Object:0x401ae3d0>]
: FileTest.exists?
It goes against the naming convention of not appending the third-person singular s, so its use is discouraged.
Please use ((<FileTest/FileTest.exist?>)) instead.
→ ((<rubyist:1194>))
: indexes, indicies (((<Array>)), ((<Hash>)), ((<ENV>)))
In ((<version 1.7|ruby 1.7 feature>)), using them prints the warning
        warning: Array#indexes is deprecated; use Array#select
The stated reasons ((<ruby-dev:16084>)) are:
 * the plural of index can be either indexes or indices, which is a source of
   confusion (even though both are provided)
 * index (give a value and get its index) and indexes (give several indices and
   get the array of corresponding values) are the same word with opposite
   meanings, which is fatal
Discussions took place in ((<ruby-talk:10830>)), ((<ruby-talk:11066>)),
((<ruby-dev:16082>)) and elsewhere.
As the warning message says, select was the candidate replacement.
((<ruby 1.8 feature>)): later, ((<Array/values_at>)) was adopted
((<ruby-dev:20153>)).
: Array#filter
Replaced by Array#collect!.
Using this method prints a warning message. (In 1.8 this method has been removed.)
: Time.times
Moved to ((<Process.times|Process>)). Using (({Time.times})) in 1.8 prints a warning.
        $ ruby-1.8.0 -e 'p Time.times'
        -e:1: warning: obsolete method Time::times; use Process::times
        #<struct Struct::Tms utime=0.0, stime=0.0, cutime=0.0, cstime=0.0>
: iterator?
A block attached to a method does not necessarily iterate, so calling a method
with a block an iterator is inappropriate. Use block_given? from now on.
That said, the term iterator itself is still in use, and no warning is issued
when this function is used.
: ((<ObjectSpace>)).add_finalizer
: ((<ObjectSpace>)).remove_finalizer
: ((<ObjectSpace>)).call_finalizer
: ((<ObjectSpace>)).finalizers
Ruby 1.8 warns when these methods are used. They are no longer needed because
the methods formerly provided by the final library,
 * ObjectSpace.define_finalizer
 * ObjectSpace.undefine_finalizer
are now built in. Accordingly, the final library is also obsolete from now on.
== Obsolete classes
: NotImplementError
Former name of ((<NotImplementedError>)). Already removed in ((<version 1.8|ruby 1.8 feature>)).
: MatchingData
Former name of ((<MatchData>)).
== Obsolete built-in variables and constants
: (({$~})), (({$!})) and the like, in general
They are unlikely to ever go away, but the recent style is basically not to use
them; at the very least, no new ones will be added. There is no need to force
yourself to stop using them, but when an alternative notation exists, using it
often gives cleaner code, for example ((<Regexp>)).last_match,
((<Process>)).waitpid2 and rescue => var.
However, only (({$=})) (which decides whether string comparison ignores case)
has been explicitly declared obsolete (((<ruby-dev:12978>))). Ruby 1.8 prints a warning.
        $ ruby-1.8.0 -e '$= = false'
        -e:1: warning: modifying $= is deperecated
In Ruby 1.8, string hash values no longer depend on the value of $=.
((<ruby-bugs-ja:PR#61>))
  p "foobar".hash
  $= = true
  p "foobar".hash
  # result with Ruby 1.6.8
  594908901
  -24977883
  # result with Ruby 1.8.0
  594908901
  594908901
: $defout, $deferr
Since version 1.8, $stdout and $stderr are used instead. In version 1.8,
redirection via $stdout, $stderr and $stdin no longer has any effect.
($deferr is a variable that was defined in the version 1.8.0 preview.)
Assigning an object to $defout or $deferr prints a warning.
: TRUE, FALSE, NIL
Very old versions of Ruby had no true, false and nil, so these constants were
used instead, but nowadays they are unnecessary. Please migrate promptly.
: VERSION, RELEASE_DATE, PLATFORM
Removed in Ruby 1.9. Use RUBY_VERSION, RUBY_RELEASE_DATE and RUBY_PLATFORM,
each prefixed with "RUBY_", instead.
== Other obsolete items
: the //p option for regular expressions
Produces a warning in 1.6. Removed in 1.8.
The m option was introduced in its place, but its meaning is completely
different from the p option. p changed the meaning of the metacharacters ".",
"^" and "$", whereas m only makes "." also match newlines.
The p option was removed for the following reasons:
 * its definition is complicated
 * it can be expressed with //m together with the regular expressions \A and \Z
 * there are not enough flag bits to provide both p and m
See ((<ruby-list:22483>)) for details.
|
/target/rubydoc/refm/api/obsolete.rd
|
no_license
|
nacyot/omegat-rurima-ruby
|
R
| false | false | 6,515 |
rd
|
options(stringsAsFactors=FALSE)
#--- Command Line Parameters
gmt.file <- commandArgs(TRUE)[1]
cat.name <- commandArgs(TRUE)[2]
set.file <- commandArgs(TRUE)[3]
background.file <- commandArgs(TRUE)[4]
out.file <- commandArgs(TRUE)[5]
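#--- Example invocation (file names are illustrative):
#   Rscript run_enrichment_test.R pathways.gmt KEGG gene_set.txt background.txt enrichment_results.tsv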
#--- Parsing GMT File
flines <- readLines(gmt.file)
pathways <- lapply(flines, function(l){fields <- strsplit(l, "\t")[[1]]; temp=fields[3:length(fields)]; temp[temp!=""]})
names(pathways) <- sapply(flines, function(l){fields <- strsplit(l, "\t")[[1]]; fields[1]})
P <- unique(unlist(pathways))
#--- Background and Selection Files
U <- intersect(read.table(background.file, sep="\t")[,1], P)
S <- intersect(read.table(set.file, sep="\t")[,1], P)
#--- Testing for Enrichment
header <- c("Category", "Term", "Count", "%", "PValue", "Genes",
"List Total", "Pop Hits", "Pop Total", "Fold Enrichment", "Bonferroni", "Benjamini", "FDR")
rv <- data.frame(category=rep(cat.name, length(pathways)),
name=names(pathways),
count.list=rep(NA, length(pathways)),
fraction.list=rep(NA, length(pathways)),
pvalue=rep(NA, length(pathways)),
genes=rep(NA, length(pathways)),
total.list=rep(length(S), length(pathways)),
count.population=rep(NA, length(pathways)),
total.population=rep(length(U), length(pathways)),
fold=rep(NA, length(pathways)),
adj1=rep(NA, length(pathways)),
adj2=rep(NA, length(pathways)),
adj3=rep(NA, length(pathways)))
m <- length(S)
n <- length(U) - length(S)
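# Hypergeometric parameterisation used below:
#   q = overlap between the gene set S and pathway R ("white balls drawn")
#   m = size of S, n = size of the rest of the universe U
#   k = pathway genes present in the universe ("balls drawn")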
for (i in 1:length(pathways)) {
R <- pathways[[i]]
q <- length(intersect(S, R)) # number of white balls drawn
k <- length(intersect(U, R))
rv$count.list[i] <- q
rv$fraction.list[i] <- q/m
p1 <- phyper(q-1, m, n, k, lower.tail=FALSE, log.p=FALSE)
p2 <- phyper(q, m, n, k, lower.tail=TRUE, log.p=FALSE)
rv$pvalue[i] <- min(p1, p2)
rv$genes[i] <- paste(sort(intersect(S,R)), collapse=",")
rv$count.population[i] <- k
if(q > 0) {
rv$fold[i] <- (q/m)/(k/(m+n))
} else {
rv$fold[i] <- NA
}
}
rv$adj1 <-p.adjust(rv$pvalue, method="bonferroni")
rv$adj2 <-p.adjust(rv$pvalue, method="BY")
rv$adj3 <-p.adjust(rv$pvalue, method="fdr")
write.table(rv, out.file, sep="\t", quote=FALSE, col.names=header, row.names=FALSE)
|
/run_enrichment_test.R
|
no_license
|
retee/Replication-Timing
|
R
| false | false | 2,372 |
r
|
#' @title Plot a legend for a graduated lines map
#' @description This function plots a legend for graduated lines maps.
#'
#' @param pos position of the legend, one of "topleft", "top",
#' "topright", "right", "bottomright", "bottom", "bottomleft",
#' "left", "interactive" or a vector of two coordinates in map units
#' (c(x, y))
#' @param lwd lines widths
#' @param val break labels
#' @param col lines color
#' @param title title of the legend
#' @param title_cex size of the legend title
#' @param val_cex size of the values in the legend
#' @param val_rnd number of decimal places of the values in
#' the legend.
#' @param frame whether to add a frame to the legend (TRUE) or not (FALSE)
#' @param cex size of the legend; 2 means two times bigger
#' @param bg background of the legend
#' @param fg foreground of the legend
#' @keywords internal
#' @export
#' @import graphics
#' @return No return value, a legend is displayed.
#' @examples
#' plot.new()
#' plot.window(xlim = c(0, 1), ylim = c(0, 1), asp = 1)
#' mf_legend_gl(lwd = c(0.2, 2, 4, 5, 10), val = c(1, 2, 3, 4, 10.2, 15.2))
mf_legend_gl <- function(pos = "topleft", val,
col = "tomato4",
lwd,
title = "Legend Title",
title_cex = .8,
val_cex = .6,
val_rnd = 2,
frame = FALSE,
bg,
fg,
cex = 1) {
op <- par(mar = getOption("mapsf.mar"), no.readonly = TRUE)
on.exit(par(op))
# stop if the position is not valid
if (length(pos) == 1) {
if (!pos %in% .gmapsf$positions) {
return(invisible())
}
}
# default values
insetf <- strwidth("MM", units = "user", cex = 1)
inset <- insetf * cex
if (missing(bg)) bg <- getOption("mapsf.bg")
if (missing(fg)) fg <- getOption("mapsf.fg")
w <- inset
h <- inset / 1.5
val <- get_val_rnd(val = val, val_rnd = val_rnd)
val <- rev(val)
lwd <- rev(lwd)
n <- length(val) - 1
pal <- rev(col)
xy_leg <- NULL
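  # The loop below runs at most twice: the first pass measures the legend
  # geometry to derive its anchor position, the second pass recomputes the
  # geometry at that position and then breaks.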
while (TRUE) {
if (length(pos) == 2) {
xy_leg <- pos
}
xy_title <- get_xy_title(
x = xy_leg[1],
y = xy_leg[2],
title = title,
title_cex = title_cex
)
xy_box <- get_xy_box_c(
x = xy_title$x,
y = xy_title$y - inset / 2,
n = n,
w = w,
h = h
)
xy_box_lab <- get_xy_box_lab_c(
x = xy_box$xright[n] + inset / 4,
y = xy_box$ytop[1],
h = h,
val = val,
val_cex = val_cex
)
xy_rect <- get_xy_rect2(
xy_title = xy_title,
xy_box = xy_box,
xy_box_lab = xy_box_lab,
inset = inset,
w = w
)
if (!is.null(xy_leg)) {
break
}
xy_leg <- get_pos_leg(
pos = pos,
xy_rect = unlist(xy_rect),
inset = inset,
xy_title = xy_title,
frame = frame
)
}
# Display
if (frame) {
rect(
xleft = xy_rect[[1]] - insetf / 4,
ybottom = xy_rect[[2]] - insetf / 4,
xright = xy_rect[[3]] + insetf / 4,
ytop = xy_rect[[4]] + insetf / 4,
col = bg, border = fg, lwd = .7, xpd = TRUE
)
}
# title
text(xy_title$x,
y = xy_title$y, labels = title, cex = title_cex,
adj = c(0, 0), col = fg
)
# boxes
segments(
x0 = xy_box[[1]],
y0 = xy_box[[2]] + (xy_box[[4]] - xy_box[[2]]) / 2,
x1 = xy_box[[3]],
y1 = xy_box[[2]] + (xy_box[[4]] - xy_box[[2]]) / 2,
col = pal,
lwd = lwd, lend = 1
)
# labels
text(xy_box_lab$x,
y = xy_box_lab$y, labels = val, cex = val_cex,
adj = c(0, 0.5), col = fg
)
return(invisible(NULL))
}
|
/R/mf_leg_gl.R
|
no_license
|
riatelab/mapsf
|
R
| false | false | 3,687 |
r
|
##### Script to make stacked barcharts of Admixture output and save dataframe used as .rda file
### Clear workspace and past outputs, set working directory
rm(list = ls())
setwd('~/cmeecoursework/project/data/')
unlink(c("../results/admix*.pdf", "../data/analysis/*admix*"))
### Import packages
library(ggplot2) #need to install - tidyverse
library(RColorBrewer)
library(forcats)
library(tidyr)
library(dplyr,warn.conflicts=F)
library(grid)
library(gridExtra,warn.conflicts=F)
library(cowplot) #needed installing
library(qwraps2) #needed installing
################################### Setup ######################################
### Import and label Admixture output table
admix_output <- read.table('admixture/output/HGDP_1000g_regen_no_AT_CG_3pop_geno05_shapeit4_allchr_pruned.3.Q')
colnames(admix_output) <- c("Native", "African", "European")
### Import and label sample info table
sample_info <- read.table('sample_maps/ind_3pop_popgroup.txt')
colnames(sample_info) <- c("ID", "Subpop", "Pop")
### Combine tables, swapping pop and subpop columns
data <- cbind(sample_info[,c(1,3,2)], admix_output)
### Save dataframe for use by other scripts
saveRDS(data, paste0("../data/analysis/admixture_output_full.rds"))
### Sets ancestry colour palette for diagrams
anc_palette <- brewer.pal(3,"Set2")
########################## Creating stacked barplots ###########################
##### Creating a stacked barchart showing anc distribution of each subpop
#need to find average for each subpop of each anc, then pivot longer
### Wrangle data to find each subpop's average ancestry, and convert long format
stacked <- data %>%
group_by(Pop, Subpop) %>%
# Find averages of each ancstry for each subpop
summarize(.groups="keep", Native = mean(Native, na.rm=TRUE),
African = mean(African, na.rm=TRUE),
European = mean(European, na.rm=TRUE)) %>%
  # Arrange by each Pop and then by African ancestry, then pivot for plotting
arrange(desc(Pop), African) %>%
pivot_longer(!c(Subpop,Pop), names_to="Ancestry", values_to="Proportion")
### Plot chart
p <- ggplot(data=stacked, aes(fill=Ancestry, y=Proportion,
x=fct_inorder(Subpop))) +#OrderKept
geom_bar(position="fill", stat="identity", width=1) + theme_bw() + #stacks
theme(axis.text.x = element_text(angle=90, hjust=1, vjust=.5),
axis.title = element_text(face="bold"),
axis.title.x = element_text(vjust=5),
legend.title = element_text(size=10, face="bold.italic"),
legend.text = element_text(size=9, face="italic"),
legend.key.size = unit(.65,"line"),
legend.position = "top") +
labs(x="Population", y="Proportion of Ancestry") +
scale_y_continuous(expand = c(0,0), limits = c(-0.003,1.003)) + #no gaps
scale_fill_manual(values = anc_palette)
### Saves as pdf file
pdf("../results/admixture_subpop_barplot.pdf", 6, 5)
print(p); graphics.off()
##### Creating multiple stacked barcharts for each ADM subpop, showing ancestry proportion of each individual in said population
stackplot <- function(nSubpop){
### Function to create a stacked barplot from a subpop number (from stacked)
subpop <- stacked[3*nSubpop,2] # get subpop name
n <- dim(subset(data, Subpop == as.character(subpop)))[1] # get sample size
samples <- subset(data, Subpop == as.character(subpop)) %>% #subset
    ### Arrange by African ancestry, then pivot for plotting
arrange(African, European) %>%
pivot_longer(!c(Subpop,Pop,ID), names_to="Ancestry", values_to="Prop") %>%
### Plot chart
ggplot(aes(fill=Ancestry, y=Prop, x=fct_inorder(ID))) +
geom_bar(position="fill", stat="identity", width=1) +
theme_bw() + ggtitle(subpop) +
theme(axis.text.x = element_blank(),
axis.text.y = element_text(size=6, margin=margin(t=1, b=1)),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.ticks = element_line(colour = "black", size = .2),
axis.ticks.length = unit(1.5, "pt"), #length of tick marks
axis.ticks.x = element_blank(),
legend.position = "none",
plot.title = element_text(hjust=0.5, vjust=-1.8,
face="plain", size=8),
plot.margin = unit(c(0, 0, .8, 0), "pt")) +
labs(x="Population Individuals", y="Proportion of Ancestry") +
geom_text(label=paste0("n=",n), x=n*.5,y=.04, size=2, fontface="plain") +
scale_y_continuous(expand = c(0,0), limits = c(0,1)) +
scale_fill_manual(values = anc_palette)
  return(samples) # return the plot so it can be assembled into the multiplot below
}
### Create Legend
legend <- get_legend(
stackplot(28) +
guides(color = guide_legend(nrow = 1)) +
#scale_x_discrete(limits=c("2", "0.5", "1")) +
theme(legend.position = "top",
legend.key.size = unit(0.3, "cm"),
legend.title=element_text(size=7, face="bold.italic"),
plot.margin = margin(0, 0, 10, 0),
legend.text=element_text(size=6, face="italic")))
### Creates stacked bar multiplot
plot <- cowplot::plot_grid(
stackplot(28),
stackplot(29) + theme(axis.text.y=element_blank(),
axis.ticks=element_blank()),
stackplot(30) + theme(axis.text.y=element_blank(),
axis.ticks=element_blank()),
stackplot(31),
stackplot(32) + theme(axis.text.y=element_blank(),
axis.ticks=element_blank()),
stackplot(33) + theme(axis.text.y=element_blank(),
axis.ticks=element_blank()),
ncol=3,
labels = "AUTO",
label_size = 10,
axis=c("b"),
align = "hv",
label_x = .068,
label_y = .99)
### Common y and x labels
y.grob <- textGrob("Proportion of Ancestry",
gp=gpar(fontface="bold", col="black", fontsize=8), rot=90)
x.grob <- textGrob("Population Individuals",
gp=gpar(fontface="bold", col="black", fontsize=8))
### Combine plots, legend and axis labels, prints ou to pdf
pdf("../results/admixture_sample_barplots.pdf", 6, 4)
grid.arrange(legend, arrangeGrob(plot, left=y.grob, bottom=x.grob),
heights=c(.1, 2))
graphics.off()
####################### Creating comparative boxplots ##########################
anc_boxplot <- function(pop_num){
  ### Function to plot a jittered comparative boxplot by subpop for the given pop number (corresponding to the pops vector below)
q <- ggplot(adm, aes_string(x="fct_inorder(Subpop)", y=pops[pop_num])) +
labs(y=paste("Proportion",pops[pop_num])) + ylim(0, 1) +
geom_boxplot(outlier.shape=NA) + #avoid plotting outliers twice
geom_jitter(position=position_jitter(width=.2, height=0),
colour=anc_palette[pop_num], size=.5) +
stat_boxplot(geom ='errorbar') + theme_bw() +
    # top whisker goes to last value within 1.5x the interquartile range, and vice versa for the bottom whisker
theme(axis.title.x = element_blank(),
axis.title.y = element_text(face="bold", size=14),
axis.text = element_text(size=11))
for (i in 1:6) {
q <- q + geom_text(x=i, y=-0.026, label = table[i,pop_num+1], size=3.5, fontface="plain")}
return(q)
}
MeanSD3 <- function(vector, fun=round, num=2){
  ### Converts a vector into its mean ± SD, each rounded to num decimal places (default 2)
mean <- fun(mean(vector), num)
sd <- fun( sd(vector), num)
return(paste0(mean, " ± ", sd))
}
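# e.g. MeanSD3(c(0.1, 0.2, 0.3)) returns "0.2 ± 0.1"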
### Names the 3 pops for use in function above, and the 6 subpops
pops <- c("African", "European", "Native")
subpops <- c("ACB", "ASW","PUR", "CLM", "MXL", "PEL")
### Subsets and reorders data so subpops are plotted by African/Native ancestry
adm <- data.frame()
for (subpop in subpops){
adm <- rbind(adm, subset(data, Subpop==subpop))
}
### Table in the same layout as the boxplots, giving mean ± SD for each box
table <- adm %>%
group_by(Subpop) %>%
summarize(.groups="keep",
Native = MeanSD3(Native),
African = MeanSD3(African),
European = MeanSD3(European)) %>% as.data.frame()
table <- table[match(subpops, table$Subpop),] #reorders rows
table <- table[,c(1, 3:4, 2)] #reorders cols
### Lays out multiplot
plot <- cowplot::plot_grid(
anc_boxplot(1) + theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank()),
anc_boxplot(2) + theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank()),
anc_boxplot(3),
ncol=1,
labels = "AUTO",
label_size = 14,
axis=c("b"),
align = "hv",
label_x = .105,
label_y = 0.985)
### Common x label
x.grob <- textGrob("Admixed Population",
gp=gpar(fontface="bold", col="black", fontsize=14))
### Combine plot and axis label, prints out to pdf
pdf("../results/admixture_boxplots.pdf", 6, 15)
grid.arrange(arrangeGrob(plot, bottom=x.grob))
graphics.off()
################################ Wilcoxon plots ################################
### Function to format cell entries in scientific notation for plotmath parsing
expSup <- function(w, digits=0) { # was %d but did not work with 0s; %g does
sprintf(paste0("%.", digits, "f*x*10^%g"), w/10^floor(log10(abs(w))), floor(log10(abs(w))))
}
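# e.g. expSup(0.00023, digits=3) returns "2.300*x*10^-4", later parsed as plotmath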
### Function to plot the heatmap
pvalue_heatmap <- function(p_df, nudge_x, pop=NULL){
  old_opts <- options(warn = -1) # suppress warnings locally; restored on exit
  on.exit(options(old_opts), add = TRUE)
p <- ggplot(p_df, aes(fct_inorder(y), fct_inorder(x))) +
geom_tile(aes(fill=p)) + theme_bw() + #ggtitle(pop) +
geom_text(label=parse(text=expSup(p_df$p, digits=3)), size=3) + ##
geom_text(aes(label=replace(rep("<", nrow(p_df)), p_df[,1]>1e-8, "")), nudge_x=nudge_x, size=3) +
scale_fill_gradientn( name ="p-value",
colours=c(pal[10], pal[9], pal[7], pal[5], pal[3], pal[2], pal[1]),
values =c(0, 0.01, 0.05, 0.051, 0.1, 0.5, 1),
limits=c(0,1), breaks=c(0.01, 0.05, 0.25, 0.5, 0.75),
guide=guide_colourbar(nbin=100, draw.ulim=FALSE, draw.llim=TRUE)) +
theme(legend.key.width=unit(2.5, 'cm'), legend.position="bottom",
legend.text = element_text(angle = 45, vjust=1.3, hjust=1),
legend.title = element_text(vjust = .9, face="bold"),
axis.title = element_blank(),
axis.text = element_text(size=11),
plot.title = element_text(hjust = 0.5)) +
scale_y_discrete(position = "right")
if (length(pop) > 0){
p <- p + theme(legend.position = "none")
if (pop == "African Ancestry"){
p <- p + theme(plot.margin=unit(c(-1,0,3.9,0),"cm"))
}else if (pop == "European Ancestry"){
p <- p + theme(plot.margin=unit(c(-.6,0,3.2,0),"cm"))
}else{ p <- p + theme(plot.margin=unit(c(-.2,0,2.7,0),"cm"))}}
  return(p)
}
### Set palette for plotting
pal <- brewer.pal(n=11, name="RdYlBu") # formerly "RdYlGn", changed for colour-blind accessibility
### Create template df to fill with various data comparing subpopulations
combs <- combn(subpops, 2)
combs <- split(combs, rep(1:ncol(combs), each = nrow(combs)))
pvalues <- rep(0, length(combs))
template_p_df <- cbind.data.frame(pvalues, t(as.data.frame(combs)))
colnames(template_p_df) <- c("p", "x", "y")
### Generate legend
p_df <- template_p_df
p_df[,1] <- 1:15
p_legend <- get_legend(pvalue_heatmap(p_df, -0.22))
### Wilcoxon p-value heatmaps, one per ancestry
for (pop in pops){
p_df <- template_p_df
adm_subset <- cbind.data.frame(adm$Subpop, adm[[pop]])
for (comb in 1:length(combs)){
p_df[comb,1] <- signif(as.numeric(wilcox.test(
adm_subset[adm_subset[,1] == combs[[comb]][1],][,2],
adm_subset[adm_subset[,1] == combs[[comb]][2],][,2],
alternative = "two.sided")[3]) ,2) }
p_df[p_df < 1e-8] <- signif(1e-8,2)
assign(pop, pvalue_heatmap(p_df, -0.22, paste(pop, "Ancestry"))) }
### Lays out multiplot
plot <- cowplot::plot_grid( African, European, Native, ncol=1)
### Arrange plot, legend and axis titles, saves to png
ggsave(file="../results/ADMIXTURE_subpop_comp_by_anc_heatmap.png",
grid.arrange(arrangeGrob(p_legend, plot, heights=c(.2,2))),
width=6, height=17.5, units="in")
print("Finished plotting subpop by ADMIXTURE anc Wilcoxin heatmap, starting ancestry by subpop...")
|
/project/code/admixture_analysis.R
|
no_license
|
Bennouhan/cmeecoursework
|
R
| false | false | 12,428 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Territory_identity.R
\name{extractClusterMarkers}
\alias{extractClusterMarkers}
\title{extractClusterMarkers computes differential gene expression between clusters
and remaining barcodes}
\usage{
extractClusterMarkers(
cluster,
counts,
method = "wilcox",
logFC = 0.25,
pval = 0.05,
minPct = 0.05,
minBar = 10,
verbose = TRUE
)
}
\arguments{
\item{cluster}{a Seurat object containing clusters of an isolated territory}
\item{counts}{Seurat object containing counts. Alternatively, a matrix or
sparse matrix. Colnames as barcodes and rownames as genes.}
\item{method}{character describing the statistical test to use in order to
extract differential gene expression (currently only wilcox and t.test)}
\item{logFC}{numeric describing minimum log fold change value for
differential gene expression. Default set at 0.25.}
\item{pval}{numeric for pval threshold. Default set at 0.05}
\item{minPct}{numeric defining the minimum percentage of cells that should
contain any given gene.}
\item{minBar}{integer defining minimum number of barcodes in a territory.}
\item{verbose}{logical - progress message output}
}
\value{
A data.frame (tibble) containing differential gene expression as well
p.value,
logFC,
seedPct (percentage of cells containing gene in first group),
queryPct (percentage of cells containing gene in second group),
seedTerritory (territory used as group 1)
queryTerritory (territory used as group 2)
}
\description{
extractClusterMarkers computes differential gene expression between clusters
and remaining barcodes
}
\details{
extractClusterMarkers compares clusters to all remaining barcodes.
If a territory is isolated (see \code{extractTerritories}) for further
analysis, Seurat can provide a cluster analysis of the isolated territory.
Seurat will compared clusters between each other within the isolated
territory. However, in some cases, it can be useful to compared the barcodes
present in a Seurat cluster to all remaining barcodes. This provides overall
differences in expression within the cluster as opposed to differential
expression between isolated clusters.
}
\examples{
\dontrun{
data("Vesalius")
}
}
|
/man/extractClusterMarkers.Rd
|
no_license
|
neocaleb/Vesalius
|
R
| false | true | 2,226 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plottingmodelplots.R
\name{lift}
\alias{lift}
\title{Lift plot}
\usage{
lift(plot_input = eval_t_type)
}
\arguments{
\item{plot_input}{Dataframe.}
\item{targetcat}{String.}
}
\value{
Lift plot.
}
\description{
Lift plot
}
\examples{
\dontrun{
# Illustrative call mirroring the usage above (eval_t_type must already exist):
lift(plot_input = eval_t_type)
}
}
|
/man/lift.Rd
|
no_license
|
pbmarcus/modelplotr
|
R
| false | true | 336 |
rd
|
rm(list=ls())
### In the note below we perform the following calculation
### We derive bounds on the observable distribution under the IV model
require("rcdd")
p <- 2 ## dimension of Z
# X and Y are binary
vmat <- matrix(0,nrow=4*(2^p-1),ncol=4*p) ## Num rows = num of extreme points
## See this as number of ways to
## Partition X(z)'s into those getting 1 and
## getting 0, times values given to
## Y(X(z)), which are either 4 or 2
## depending on whether {X(z); z=1,..,p}
## contains 2 values or just one
## See Bonet for general expression
## Num cols = one for each p(x,y|z)
## Ordering of cols: p(x0,y0|z1) p(x0,y1|z1) p(x1,y0|z1) ... p(x1,y1 | zp)
## Ordering of rows:
## First two rows where X(z) takes 0 for all z, with Y(X(z))=0,1
## Then: two rows where X(z) takes 1 for all z, with Y(X(z))=0,1
## Then: 4*(2^p-2) rows where X(z) takes two different values,
## ordering of rows is given by Y(X(z)=0)=0, Y(X(z)=1)=0
## ordering of rows is given by Y(X(z)=0)=0, Y(X(z)=1)=1
## ordering of rows is given by Y(X(z)=0)=1, Y(X(z)=1)=0
## ordering of rows is given by Y(X(z)=0)=1, Y(X(z)=1)=1
vmat[1,] <- rep(c(1,0,0,0),p)
vmat[2,] <- rep(c(0,1,0,0),p)
vmat[3,] <- rep(c(0,0,1,0),p)
vmat[4,] <- rep(c(0,0,0,1),p)
## {1}=0 {2}=1
vmat[5,] <- c(c(1,0,0,0), c(0,0,1,0)) #(0,0)
vmat[6,] <- c(c(1,0,0,0), c(0,0,0,1)) #(0,1)
vmat[7,] <- c(c(0,1,0,0), c(0,0,1,0)) #(1,0)
vmat[8,] <- c(c(0,1,0,0), c(0,0,0,1)) #(1,1)
## Ordering of cols: p(x0,y0|z1) p(x0,y1|z1) p(x1,y0|z1) ... p(x1,y1 | zp)
## {1}=1 {2}=0
vmat[9,] <- c(c(0,0,1,0), c(1,0,0,0)) #(0,0)
vmat[10,] <- c(c(0,0,0,1), c(1,0,0,0))#(0,1)
vmat[11,] <- c(c(0,0,1,0), c(0,1,0,0))#(1,0)
vmat[12,] <- c(c(0,0,0,1), c(0,1,0,0))#(1,1)
## Now we add two columns
## to indicate
## the nature of the convex hull/cone
vmat <- cbind(rep(0,4*(2^p-1)), rep(1,4*(2^p-1)),vmat)
hmat <- scdd(vmat,representation="V")
# $output
# [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
# [4,] 0 0 1 0 0 0 0 0 0 0
# [10,] 0 0 0 1 0 0 0 0 0 0
# [11,] 0 0 0 0 1 0 0 0 0 0
# [12,] 0 0 0 0 0 1 0 0 0 0
# [1,] 0 0 0 0 0 0 1 0 0 0
# [9,] 0 0 0 0 0 0 0 1 0 0
# [7,] 0 0 0 0 0 0 0 0 1 0
# [8,] 0 0 0 0 0 0 0 0 0 1
# [2,] 0 0 0 0 0 -1 1 1 0 1
# [3,] 0 0 0 0 -1 0 1 1 1 0
# [5,] 0 0 0 -1 0 0 0 1 1 1
# [6,] 0 0 -1 0 0 0 1 0 1 1
## Note that there are only 4 additional constraints,
## not 8, as we might expect (on the basis of the conjecture
## that each observable gives rise to upper bounds)
## However, there is a simple explanation for this:
## The upper bounds on the cells in Z=1, are equivalent
## to the upper bounds on Z=0.
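## Optional sanity check (an added sketch, not part of the original note):
## assuming rcdd returns H-representation rows of the form c(l, b, a) with
## each inequality read as b + a.x >= 0, every vertex of the V-representation
## must satisfy all of the derived constraints.
H <- hmat$output
V <- vmat[, -(1:2)]                      # drop the two indicator columns
slack <- H[, 2] + H[, -(1:2)] %*% t(V)   # constraints (rows) x vertices (cols)
all(slack >= -1e-9)                      # expected to be TRUE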
|
/description-of-obs-model-z2.R
|
no_license
|
placeboo/instrumental-variable-ace-estimation-and-inference
|
R
| false | false | 3,104 |
r
|
#### Determine AAI within orthologous genes
Shows the legwork behind how AAI is calculated; requires all_prot.faa generated from processing_pangenome.
The final table with AAI among sulfatase genes is uploaded.
```{r echo=TRUE, message=FALSE, warning=FALSE}
AAI_outdir = file.path(dir,'gene_AAI')
dir.create(AAI_outdir)
get_ave_AAI <- function(gene){
#given a orthologous gene extracts faa of all seqs, performs alignment,
#writes matrix of AAI among seqs
GeneIDs <- pres_abs %>% filter(Gene==gene) %>% select(metadata$isolate) %>% as.character()
GeneIDs <- unlist(strsplit(GeneIDs, "[\t]"))
GeneIDs = GeneIDs[!is.na(GeneIDs)]
GeneID_seqs = all_prot[names(all_prot) %in% GeneIDs]
print(paste(gene,length(GeneID_seqs)))
if (length(GeneID_seqs)>1){
file = file.path(AAI_outdir,paste0(gene,'.faa'))
writeXStringSet(GeneID_seqs,file)
system(paste0('mafft --auto ',file,' > ',file,'.align'))
prot_align = read.alignment(file=paste0(file,'.align'),format='fasta')
dist = dist.alignment(prot_align,"identity")
dist = as.matrix(dist)
dist[lower.tri(dist,diag = TRUE)] <- NA
dist = 1-dist
dist_file = file.path(AAI_outdir,paste0(gene,'_dist.txt'))
write_tsv(as.data.frame(dist),file=dist_file)
ave_AAI = mean(dist,na.rm=TRUE)*100
return(c(gene,ave_AAI))}
else {
return(NA)}}
#get_ave_AAI(gene = "group_683")
get_AAI_from_dist <- function(gene){
#given gene reads dist matrix and
#return average amino acid identity
dist_file = file.path(AAI_outdir,paste0(gene,'_dist.txt'))
dist = as.matrix(read_tsv(file=dist_file),col_types=cols())
ave_AAI = mean(dist,na.rm=TRUE)*100
return(data.frame(gene,ave_AAI))
}
#get_AAI_from_dist(gene = "group_683")
#completed = list.files(AAI_outdir,pattern = '_dist.txt')
#completed = unique(sapply(str_split(completed,'_dist'), `[`, 1))
#sulfatase_to_do = setdiff(sulfatase_genes$Gene,completed)
#mclapply(sulfatase_to_do,get_ave_AAI,mc.cores=20)
#sulfatase_OG_AAI = lapply(sulfatase_genes$Gene,get_AAI_from_dist) %>% bind_rows()
#write_tsv(sulfatase_OG_AAI,file=file.path(sulfa_outdir,'sulfatase_OG_AAI.txt'))
sulfatase_OG_AAI = read_tsv(file=file.path(sulfa_outdir,'sulfatase_OG_AAI.txt'))
summary(sulfatase_OG_AAI$ave_AAI)
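## Quick look (an added sketch): rank the orthologous sulfatase genes from
## most to least conserved using the ave_AAI column of the table read above.
head(sulfatase_OG_AAI[order(-sulfatase_OG_AAI$ave_AAI), ])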
```
|
/scripts/old/AAI_within_orthologous_genes.R
|
permissive
|
ahnishida/Bxy_strains
|
R
| false | false | 2,234 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importTraj.R
\name{importTraj}
\alias{importTraj}
\title{Import pre-calculated HYSPLIT 96-hour back trajectories}
\usage{
importTraj(site = "london", year = 2009, local = NA)
}
\arguments{
\item{site}{Site code of the network site to import
e.g. "london". Only one site can be imported at a time. The
following sites are typically available from 2000-2012, although
some UK ozone sites go back to 1988 (code, location, lat, lon, year):
\tabular{llrrl}{
abudhabi \tab Abu Dhabi \tab 24.43000 \tab 54.408000 \tab 2012-2013\cr
ah \tab Aston Hill \tab 52.50385 \tab -3.041780 \tab 1988-2013\cr
auch \tab Auchencorth Moss \tab 55.79283 \tab -3.242568 \tab 2006-2013\cr
berlin \tab Berlin, Germany \tab 52.52000 \tab 13.400000 \tab 2000-2013\cr
birm \tab Birmingham Centre \tab 52.47972 \tab -1.908078 \tab 1990-2013\cr
boston \tab Boston, USA \tab 42.32900 \tab -71.083000 \tab 2008-2013\cr
bot \tab Bottesford \tab 52.93028 \tab -0.814722 \tab 1990-2013\cr
bukit \tab Bukit Kototabang, Indonesia \tab -0.19805 \tab 100.318000 \tab 1996-2013\cr
chittagong \tab Chittagong, Bangladesh \tab 22.37000 \tab 91.800000 \tab 2010-2013\cr
dhaka \tab Dhaka, Bangladesh \tab 23.70000 \tab 90.375000 \tab 2010-2013\cr
ed \tab Edinburgh \tab 55.95197 \tab -3.195775 \tab 1990-2013\cr
elche \tab Elche, Spain \tab 38.27000 \tab -0.690000 \tab 2004-2013\cr
esk \tab Eskdalemuir \tab 55.31530 \tab -3.206110 \tab 1998-2013\cr
gibraltar \tab Gibraltar \tab 36.13400 \tab -5.347000 \tab 2005-2010\cr
glaz \tab Glazebury \tab 53.46008 \tab -2.472056 \tab 1998-2013\cr
groningen \tab Groningen \tab 53.40000 \tab 6.350000 \tab 2007-2013\cr
har \tab Harwell \tab 51.57108 \tab -1.325283 \tab 1988-2013\cr
hk \tab Hong Kong \tab 22.29000 \tab 114.170000 \tab 1998-2013\cr
hm \tab High Muffles \tab 54.33500 \tab -0.808600 \tab 1988-2013\cr
kuwait \tab Kuwait City \tab 29.36700 \tab 47.967000 \tab 2008-2013\cr
lb \tab Ladybower \tab 53.40337 \tab -1.752006 \tab 1988-2013\cr
london \tab Central London \tab 51.50000 \tab -0.100000 \tab 1990-2013\cr
lh \tab Lullington Heath \tab 50.79370 \tab 0.181250 \tab 1988-2013\cr
ln \tab Lough Navar \tab 54.43951 \tab -7.900328 \tab 1988-2013\cr
mh \tab Mace Head \tab 53.33000 \tab -9.900000 \tab 1988-2013\cr
ny-alesund \tab Ny-Alesund, Norway \tab 78.91763 \tab 11.894653 \tab 2009-2013\cr
oslo \tab Oslo \tab 59.90000 \tab 10.750000 \tab 2010-2013\cr
paris \tab Paris, France \tab 48.86200 \tab 2.339000 \tab 2000-2013\cr
roch \tab Rochester Stoke \tab 51.45617 \tab 0.634889 \tab 1988-2013\cr
rotterdam \tab Rotterdam \tab 51.91660 \tab 4.475000 \tab 2010-2013\cr
saopaulo \tab Sao Paulo \tab -23.55000 \tab -46.640000 \tab 2000-2013\cr
sib \tab Sibton \tab 52.29440 \tab 1.463970 \tab 1988-2013\cr
sv \tab Strath Vaich \tab 57.73446 \tab -4.776583 \tab 1988-2013\cr
wuhan \tab Wuhan, China \tab 30.58300 \tab 114.280000 \tab 2008-2013\cr
yw \tab Yarner Wood \tab 50.59760 \tab -3.716510 \tab 1988-2013
}}
\item{year}{Year or years to import. To import a sequence of years from
1990 to 2000 use \code{year = 1990:2000}. To import several specific years
use \code{year = c(1990, 1995, 2000)} for example.}
\item{local}{File path to .RData trajectory files run by user and
not stored on the Ricardo web server. These files would have been
generated from the Hysplit trajectory code shown in the appendix
of the openair manual. An example would be \code{local =
'c:/users/david/TrajFiles/'}.}
}
\value{
Returns a data frame with pre-calculated back trajectories.
}
\description{
Function to import pre-calculated back trajectories using the NOAA
HYSPLIT model. The trajectories have been calculated for a select
range of locations which will expand in time. They cover the last
20 years or so and can be used together with other \code{openair}
functions.
}
\details{
This function imports pre-calculated back trajectories using the
HYSPLIT trajectory model (Hybrid Single Particle Lagrangian
Integrated Trajectory Model
\url{http://ready.arl.noaa.gov/HYSPLIT.php}). Back trajectories
provide some very useful information for air quality data
analysis. However, while they are commonly calculated by
researchers it is generally difficult for them to be calculated on
a routine basis and used easily. In addition, the availability of
back trajectories over several years can be very useful, but again
difficult to calculate.
Trajectories are run at 3-hour intervals and stored in yearly
files (see below). The trajectories are started at ground-level
(10m) and propagated backwards in time.
These trajectories have been calculated using the Global
NOAA-NCEP/NCAR reanalysis data archives. The global data are on a
latitude-longitude grid (2.5 degree). Note that there are many
different meteorological data sets that can be used to run HYSPLIT
e.g. including ECMWF data. However, in order to make it
practicable to run and store trajectories for many years and
sites, the NOAA-NCEP/NCAR reanalysis data is most useful. In
addition, these archives are available for use widely, which is
not the case for many other data sets e.g. ECMWF. HYSPLIT
calculated trajectories based on archive data may be distributed
without permission (see
\url{http://ready.arl.noaa.gov/HYSPLIT_agreement.php}). For those
wanting, for example, to consider higher resolution meteorological
data sets it may be better to run the trajectories separately.
We are extremely grateful to NOAA for making HYSPLIT available to
produce back trajectories in an open way. We ask that you cite
HYSPLIT if used in published work.
Users can supply their own trajectory files to plot in
openair. These files must have the following fields: date, lat,
lon and hour.inc (see details below).
The files consist of the following information:
\describe{ \item{date}{This is the arrival point time and is
repeated the number of times equal to the length of the back
trajectory --- typically 96 hours (except early on in the
file). The format is \code{POSIXct}. It is this field that should
be used to link with air quality data. See example below.}
\item{receptor}{Receptor number, currently only 1.}
\item{year}{The year} \item{month}{Month 1-12} \item{day}{Day of
the month 1-31} \item{hour}{Hour of the day 0-23 GMT}
\item{hour.inc}{Number of hours back in time e.g. 0 to -96.}
\item{lat}{Latitude in decimal format.} \item{lon}{Longitude in
decimal format.} \item{height}{Height of trajectory (m).}
\item{pressure}{Pressure of trajectory (kPa).} }
}
\note{
The trajectories were run using the February 2011 HYSPLIT model.
The function is primarily written to investigate a single
site at a time for a single year. The trajectory files are quite
large and care should be exercised when importing several years and/or sites.
}
\examples{
## import trajectory data for London in 2009
\dontrun{mytraj <- importTraj(site = "london", year = 2009)}
## combine with measurements
\dontrun{theData <- importAURN(site = "kc1", year = 2009)
mytraj <- merge(mytraj, theData, by = "date")}
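## a further sketch, not from the original help page: keep only the first
## 24 hours back along each trajectory and plot the result
\dontrun{traj24 <- subset(mytraj, hour.inc >= -24)
trajPlot(traj24, pollutant = "height")}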
}
\author{
David Carslaw
}
\seealso{
\code{\link{trajPlot}}, \code{\link{importAURN}},
\code{\link{importKCL}},\code{\link{importADMS}},
\code{\link{importSAQN}}
}
\keyword{methods}
|
/man/importTraj.Rd
|
no_license
|
VarrudaS/openair
|
R
| false | true | 8,194 |
rd
|
makeCacheMatrix <- function(m = matrix()) { # define 'constructor' with input m
invm <- NULL # Initialize output (inverse of m)
set <- function(y) { # Subfunction to store input in m. Why?
m <<- y # A 'makeCacheMatrix' object can be created in two ways:
invm <<- NULL # i) > cool_m <- makeCacheMatrix( some_matrix )
} # ii) > cool_m <- makeCacheMatrix()
# > cool_m$set( some_matrix)
get <- function() m # Subfunction to retrieve m
setinverse <- function(inversem) invm <<- inversem
getinverse <- function() invm
# Subfunctions to store/retrieve the inverse of m
list(set = set, get = get, # Create output list of methods
setinverse = setinverse,
getinverse = getinverse)
}
cacheSolve <- function(m, ...) { # Creates function that returns inverse of
# matrix in m (which is a 'makeCacheMatrix' object)
invm <- m$getinverse() # Try to get inverse of the matrix in m, if it's stored in m *
  if(!is.null(invm)) {              # If successful, just return it (with a message)
message("getting cached data")
return(invm)
}
data <- m$get() # Else, get the matrix in m and put it in data
invm <- solve(data, ...) # Call solve to find the inverse of m and put it in invm
m$setinverse(invm) # Store it in m, for later, if necessary
invm # Return invm
}
# *: There should be a check of m. If it's not a 'makeCacheMatrix' object,
# check if m is a square matrix and if so, create a 'makeCacheMatrix' object and follow on.
# Else, return a failure message and exit.
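## Usage sketch (the 2x2 example matrix below is made up purely for
## illustration):
cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(cm)   # computes the inverse and stores it in the cache
cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse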
|
/cachematrix.R
|
no_license
|
aquintar/ProgrammingAssignment2
|
R
| false | false | 2,271 |
r
|
library(wux)
### Name: cmip3_2100
### Title: Climate Change signals for CMIP3 ensemble
### Aliases: cmip3_2100
### Keywords: datasets
### ** Examples
require(wux)
data(cmip3_2100)
str(cmip3_2100)
summary(cmip3_2100)
## Not run:
##D plot(cmip3_2100, "perc.delta.precipitation_amount",
##D "delta.air_temperature", subreg.subset = "CORDEX.Africa",
##D boxplots = TRUE, xlim = c(-20,20), label.only.these.models = "",
##D ylim = c(0,5), xlab = "Precipitation Amount [%]",
##D ylab = "2-m Air Temperature [K]", draw.legend = FALSE,
##D draw.median.lines = FALSE,
##D main = "CMIP3 2-m Air Temp. and Precip. Amount")
## End(Not run)
|
/data/genthat_extracted_code/wux/examples/cmip3_2100.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 725 |
r
|
#' @useDynLib haven
#' @importFrom Rcpp sourceCpp
#' @importFrom tibble tibble
NULL
#' Read and write SAS files.
#'
#' Reading supports both sas7bdat files and the accompanying sas7bcat files
#' that SAS uses to record value labels. Writing value labels is not currently
#' supported.
#'
#' @param data_file,catalog_file Path to data and catalog files. The files are
#' processed with \code{\link[readr]{datasource}()}.
#' @param data Data frame to write.
#' @param path Path to file where the data will be written.
#' @param encoding The character encoding used for the file. This defaults to
#' the encoding specified in the file, or UTF-8. You can use this argument
#'   to override the value stored in the file if it is incorrect.
#' @return A tibble, data frame variant with nice defaults.
#'
#' Variable labels are stored in the "label" attribute of each variable.
#' It is not printed on the console, but the RStudio viewer will show it.
#' @export
#' @examples
#' path <- system.file("examples", "iris.sas7bdat", package = "haven")
#' read_sas(path)
read_sas <- function(data_file, catalog_file = NULL, encoding = NULL) {
if (is.null(encoding)) {
encoding <- ""
}
spec_data <- readr::datasource(data_file)
if (is.null(catalog_file)) {
spec_cat <- list()
} else {
spec_cat <- readr::datasource(catalog_file)
}
switch(class(spec_data)[1],
source_file = df_parse_sas_file(spec_data, spec_cat, encoding = encoding),
source_raw = df_parse_sas_raw(spec_data, spec_cat, encoding = encoding),
stop("This kind of input is not handled", call. = FALSE)
)
}
#' @export
#' @rdname read_sas
write_sas <- function(data, path) {
write_sas_(data, normalizePath(path, mustWork = FALSE))
}
#' Read and write SAS transport files
#'
#' The SAS transport format is an open format, as is required for submission
#' of the data to the FDA.
#'
#' @inherit read_spss
#' @export
#' @examples
#' tmp <- tempfile(fileext = ".xpt")
#' write_xpt(mtcars, tmp)
#' read_xpt(tmp)
read_xpt <- function(file) {
spec <- readr::datasource(file)
switch(class(spec)[1],
source_file = df_parse_xpt_file(spec),
source_raw = df_parse_xpt_raw(spec),
stop("This kind of input is not handled", call. = FALSE)
)
}
#' @export
#' @rdname read_xpt
#' @param version Version of transport file specification to use: either 5 or 8.
write_xpt <- function(data, path, version = 8) {
stopifnot(version %in% c(5, 8))
write_xpt_(data, normalizePath(path, mustWork = FALSE), version)
}
#' Read SPSS (SAV & POR) files. Write SAV files.
#'
#' Currently haven can read and write logical, integer, numeric, character
#' and factors. See \code{\link{labelled_spss}} for how labelled variables in
#' SPSS are handled in R. \code{read_spss} is an alias for \code{read_sav}.
#'
#' @inheritParams readr::datasource
#' @param path Path to a file where the data will be written.
#' @param data Data frame to write.
#' @return A tibble, data frame variant with nice defaults.
#'
#' Variable labels are stored in the "label" attribute of each variable.
#' It is not printed on the console, but the RStudio viewer will show it.
#' @name read_spss
#' @examples
#' path <- system.file("examples", "iris.sav", package = "haven")
#' read_sav(path)
#'
#' tmp <- tempfile(fileext = ".sav")
#' write_sav(mtcars, tmp)
#' read_sav(tmp)
NULL
#' @export
#' @rdname read_spss
read_sav <- function(file, user_na = FALSE) {
spec <- readr::datasource(file)
switch(class(spec)[1],
source_file = df_parse_sav_file(spec, user_na),
source_raw = df_parse_sav_raw(spec, user_na),
stop("This kind of input is not handled", call. = FALSE)
)
}
#' @export
#' @rdname read_spss
read_por <- function(file, user_na = FALSE) {
spec <- readr::datasource(file)
switch(class(spec)[1],
source_file = df_parse_por_file(spec, user_na),
source_raw = df_parse_por_raw(spec, user_na),
stop("This kind of input is not handled", call. = FALSE)
)
}
#' @export
#' @rdname read_spss
write_sav <- function(data, path) {
write_sav_(data, normalizePath(path, mustWork = FALSE))
}
#' @export
#' @rdname read_spss
#' @param user_na If \code{TRUE} variables with user defined missing will
#' be read into \code{\link{labelled_spss}} objects. If \code{FALSE}, the
#' default, user-defined missings will be converted to \code{NA}.
read_spss <- function(file, user_na = FALSE) {
ext <- tolower(tools::file_ext(file))
switch(ext,
sav = read_sav(file, user_na = user_na),
por = read_por(file, user_na = user_na),
stop("Unknown extension '.", ext, "'", call. = FALSE)
)
}
#' Read and write Stata DTA files.
#'
#' Currently haven can read and write logical, integer, numeric, character
#' and factors. See \code{\link{labelled}} for how labelled variables in
#' Stata are handled in R.
#'
#' @inheritParams readr::datasource
#' @inheritParams read_spss
#' @param encoding The character encoding used for the file. This defaults to
#' the encoding specified in the file, or UTF-8. But older versions of Stata
#' (13 and earlier) did not store the encoding used, and you'll need to
#' specify manually. A commonly used value is "Win 1252".
#' @return A tibble, data frame variant with nice defaults.
#'
#' Variable labels are stored in the "label" attribute of each variable.
#' It is not printed on the console, but the RStudio viewer will show it.
#' @export
#' @examples
#' path <- system.file("examples", "iris.dta", package = "haven")
#' read_dta(path)
#'
#' tmp <- tempfile(fileext = ".dta")
#' write_dta(mtcars, tmp)
#' read_dta(tmp)
#' read_stata(tmp)
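#'
#' # An added sketch (not in the upstream docs): lower `version` to produce
#' # a file readable by older Stata releases
#' tmp12 <- tempfile(fileext = ".dta")
#' write_dta(mtcars, tmp12, version = 12)
#' read_dta(tmp12)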
read_dta <- function(file, encoding = NULL) {
if (is.null(encoding)) {
encoding <- ""
}
spec <- readr::datasource(file)
switch(class(spec)[1],
source_file = df_parse_dta_file(spec, encoding),
source_raw = df_parse_dta_raw(spec, encoding),
stop("This kind of input is not handled", call. = FALSE)
)
}
#' @export
#' @rdname read_dta
read_stata <- function(file, encoding = NULL) {
read_dta(file, encoding)
}
#' @export
#' @rdname read_dta
#' @param version File version to use. Supports versions 8-14.
write_dta <- function(data, path, version = 14) {
validate_dta(data)
write_dta_(data,
normalizePath(path, mustWork = FALSE),
version = stata_file_format(version)
)
}
stata_file_format <- function(version) {
stopifnot(is.numeric(version), length(version) == 1)
version <- as.integer(version)
if (version == 14L) {
118
} else if (version == 13L) {
117
} else if (version == 12L) {
115
} else if (version %in% c(10L, 11L)) {
114
} else if (version %in% c(8L, 9L)) {
113
} else {
stop("Version ", version, " not currently supported", call. = FALSE)
}
}
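# Illustration (added sketch, kept as comments so the package file stays free
# of top-level side effects): the release-to-format mapping implemented above.
# stata_file_format(14)  # 118
# stata_file_format(12)  # 115
# stata_file_format(9)   # 113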
validate_dta <- function(data) {
# Check variable names
bad_names <- !grepl("^[A-Za-z_]{1}[A-Za-z0-9_]{0,31}$", names(data))
if (any(bad_names)) {
stop(
"The following variable names are not valid Stata variables: ",
var_names(data, bad_names),
call. = FALSE
)
}
# Check for labelled double vectors
is_labelled <- vapply(data, is.labelled, logical(1))
is_integer <- vapply(data, typeof, character(1)) == "integer"
  bad_labels <- is_labelled & !is_integer  # element-wise: one flag per column
if (any(bad_labels)) {
stop(
"Stata only supports labelled integers.\nProblems: ",
var_names(data, bad_labels),
call. = FALSE
)
}
# Check lengths of labels
lengths <- vapply(data, label_length, integer(1))
bad_lengths <- lengths > 32
if (any(bad_lengths)) {
stop(
"Stata only supports value labels up to 32 characters in length. \nProblems: ",
var_names(data, bad_lengths),
call. = FALSE
)
}
}
var_names <- function(data, i) {
x <- names(data)[i]
paste(encodeString(x, quote = "`"), collapse = ", ")
}
|
/R/haven.R
|
no_license
|
cimentadaj/haven
|
R
| false | false | 7,809 |
r
|
setwd("~/Documents/Data Science")
library(plyr)
# Getting and Cleaning Data Course Project
# The purpose of this project is to demonstrate your ability to collect, work
# with, and clean a data set. The goal is to prepare tidy data that can be used
# for later analysis. You will be graded by your peers on a series of yes/no
# questions related to the project. You will be required to submit:
#1) a tidy data set as described below,
#2) a link to a Github repository with your script for performing the analysis,
#3) a code book that describes the variables, the data, and any transformations or work that
# you performed to clean up the data called CodeBook.md. You should also include a README.md
# in the repo with your scripts. This repo explains how all of the scripts work and how they
# are connected.
#
# One of the most exciting areas in all of data science right now is wearable computing - see
# for example this article . Companies like Fitbit, Nike, and Jawbone Up are racing to develop the
# most advanced algorithms to attract new users. The data linked to from the course website represent data collected
# from the accelerometers from the Samsung Galaxy S smartphone. A full description is available at the site where the
# data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
#
# Here are the data for the project
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
destfile = "~/Documents/Data Science/wearabletech.zip", method = "curl")
# You should create one R script called run_analysis.R that does the following.
# Merges the training and the test sets to create one data set.
#Extract all data
unzip("wearabletech.zip")
# the raw UCI HAR files ship without header rows, so header = FALSE throughout
x_train <- read.table("~/Documents/Data Science/UCI HAR Dataset/train/X_train.txt", header = FALSE)
y_train <- read.table("~/Documents/Data Science/UCI HAR Dataset/train/y_train.txt", header = FALSE)
subject_train <- read.table("~/Documents/Data Science/UCI HAR Dataset/train/subject_train.txt", header = FALSE)
head(x_train)
head(y_train)
head(subject_train)
x_test <- read.table("~/Documents/Data Science/UCI HAR Dataset/test/X_test.txt", header = FALSE)
y_test <- read.table("~/Documents/Data Science/UCI HAR Dataset/test/y_test.txt", header = FALSE)
subject_test <- read.table("~/Documents/Data Science/UCI HAR Dataset/test/subject_test.txt", header = FALSE)
head(x_test)
str(y_test)
head(subject_test)
features <- read.table("~/Documents/Data Science/UCI HAR Dataset/features.txt", header = FALSE)
activity_labels <- read.table("~/Documents/Data Science/UCI HAR Dataset/activity_labels.txt", header = FALSE)
# Uses descriptive activity names to name the activities in the data set
#Labels
colnames(x_train) <- features[,2]
colnames(y_train) <-"activityId"
colnames(subject_train) <- "Subject"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityId"
colnames(subject_test) <- "Subject"
colnames(activity_labels) <- c('activityId','activityType')
# exploratory checks kept commented out: Data is only created further down
# (and the mean/std subset is later named std_mean_data), so running these
# here would error
# str(mean_std_data)
# colnames(Data)
# Data[,"activityId"]
x_merge <- rbind(x_test, x_train)
y_merge <- rbind(y_test, y_train)
head(y_test)
head(y_train)
subject <- rbind(subject_test,subject_train)
x_y_data <- cbind(x_merge,y_merge)
Data <- cbind(x_y_data,subject)
head(Data)
str(Data)
colnames(Data)
# Extracts only the measurements on the mean and standard deviation for each measurement.
mean_std_columns <- grep("std()|mean()",colnames(Data))
std_mean_data <- Data[mean_std_columns]
std_mean_data_names <- as.character(colnames(std_mean_data))
sData <- subset(Data, select = c(std_mean_data_names))
sData <- cbind(sData, Data$activityId, Data$Subject)
# Appropriately labels the data set with descriptive variable names.
colnames(sData)
# From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
activities <- read.table("UCI HAR Dataset/activity_labels.txt",header = FALSE)
colnames(sData)[names(sData) == 'Data$activityId'] <- 'activity'
colnames(sData)[names(sData) == 'Data$Subject'] <- 'subject'
colnames(activities) <- c("label", "activity")
colnames(sData)
colnames(sData)<-gsub("^t", "time", colnames(sData))
colnames(sData)<-gsub("^f", "frequency",colnames(sData))
colnames(sData)<-gsub("Acc", "accelerometer", colnames(sData))
colnames(sData)<-gsub("Gyro", "gyroscope", colnames(sData))
colnames(sData)<-gsub("Mag", "magnitude", colnames(sData))
colnames(sData)<-gsub("BodyBody", "body", colnames(sData))
colnames(sData)<-gsub("Body", "body", colnames(sData))
colnames(sData)
finalData <- aggregate(. ~subject + activity, sData, mean)
finalData <- finalData[order(finalData$subject, finalData$activity),]
write.table(finalData, file = "tidydata.txt", row.names=FALSE)
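# quick check (sketch): read the tidy data back in to confirm it round-trips
tidy_check <- read.table("tidydata.txt", header = TRUE)
str(tidy_check)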
# The Github repo contains the required scripts.
# GitHub contains a code book that modifies and updates the available codebooks with the data to indicate all the variables and summaries calculated, along with units, and any other relevant information.
# The README that explains the analysis files is clear and understandable.
|
/run_analysis.R
|
no_license
|
isavannahr/getting_cleaning_data
|
R
| false | false | 5,264 |
r
|
\name{g1se}
\alias{g1se}
\title{g1se statistic for standard error of skewness}
\description{A common statistic for standard error of skewness. Called by the function sktable}
\usage{g1se(x)
}
\arguments{
\item{x}{The variable of interest}}
\details{see Wright and Herrington (2011)}
\references{
Wright, D.B. & Herrington, J.A. (2011, in press at the time of writing).
Problematic standard errors and confidence intervals for skewness and
kurtosis. \emph{Behavior Research Methods}. www2.fiu.edu/~dwright/skewkurt
}
\author{Daniel B. Wright}
\note{While this can be called on its own, it was written to be used by sktable.}
\seealso{sktable}
\examples{
varx <- runif(20)^2
g1se(varx)
}
\keyword{skewness}
|
/man/g1se.Rd
|
no_license
|
cran/mrt
|
R
| false | false | 729 |
rd
|
context("fp for text - misc")
source("utils.R")
test_that("fp_text - print", {
fp <- fp_text(font.size = 10)
expect_output(print(fp))
})
test_that("fp_text - as.data.frame", {
fp <- fp_text(font.size = 10, color = "red", bold = TRUE, italic = TRUE, underlined = TRUE, font.family = "Arial", shading.color = "yellow", vertical.align = "superscript")
expect_is(as.data.frame(fp), class = "data.frame")
})
test_that("fp_text - update", {
fp <- fp_text(font.size = 10)
expect_equal(fp_sign( fp ), "b219bb0bdd7045575978f22781d0d77a" )
fp <- update(fp, font.size = 20)
expect_equal(fp$font.size, 20)
fp <- update(fp, color = "red")
expect_equal(fp$color, "red")
fp <- update(fp, font.family = "Time New Roman")
expect_equal(fp$font.family, "Time New Roman")
fp <- update(fp, vertical.align = "superscript")
expect_equal(fp$vertical.align, "superscript")
fp <- update(fp, shading.color = "yellow")
expect_equal(fp$shading.color, "yellow")
})
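test_that("fp_text - update bold", {
  # an added sketch: toggling bold via update() mirrors the checks above
  fp <- update(fp_text(), bold = TRUE)
  expect_true(fp$bold)
})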
|
/tests/testthat/test-fp-text-misc.R
|
no_license
|
plot-and-scatter/officer
|
R
| false | false | 977 |
r
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74846858576451e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615831465-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 2,048 |
r
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74846858576451e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
setwd("/groups/umcg-lld/scr01/dasha/MR/results/AA_T2D/")
source("/groups/umcg-lld/scr01/dasha/MR/results/run_MR.R")
out_filebase <- "MR_AA-lipids_results"
ao <- available_outcomes(access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
#lipids
t2d_ids <- c(ao[ao$subcategory == "Lipid" & ao$author == "Kettunen", "id"])
#intrinsic factors
#Creatinine, Glucose, HbA1c, Total cholesterol, HDL-C, LDL-C, TG, Citrullin, Haemoglobin, Insulin, Lymph%
#Leptin, adiponectin
#t2d_ids <- c("309", "850", "UKB-a:333", "1099", "1103", "1104", "1105", "18",
# "418", "422", "756", "757", "772", "773", "776", "777", "859",
# "758",
# "933", "301", "782",
# "299", "780",
# "300", "781",
# "934", "302", "783",
# "356",
# "270", "271", "272",
# "761", "762", "768", "774", "775", "778", "779", "767",
# "119", "125", "134", "139", "140",
# "1002", "1003",
# "1")
# Amino acids from Kettunen et al
#aa_ids <- c("840","850","860","866","873","897","919","938","939","940")
# all BCAA
aa_ids <- ao[ao$trait %in% c("Leucine", "Isoleucine", "Valine"), "id"]
res_table_forw <- data.frame()
res_table_rev <- data.frame()
for (aa_id in aa_ids){
for (t2d_id in t2d_ids){
# AA -> T2D
    exp_dat <- NULL; out_dat <- NULL # reset so a failed extraction is not silently backfilled by the previous iteration's data
    tryCatch({
exp_dat <- extract_instruments(outcomes = aa_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
out_dat <- extract_outcome_data(snps = exp_dat$SNP, outcomes = t2d_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
}, error=function(e) NULL)
if (!is.null(out_dat) & !is.null(exp_dat)){
res <- run_mr(exp_dat, out_dat)
if (!is.null(res)){
res_table_forw <- rbind(res_table_forw, res)
}
}
}
}
res_table_forw$BH_qval = NA
flt <- which(res_table_forw$nsnp > 2 & res_table_forw$egger_intercept_pval > 0.05 & res_table_forw$heterogeneity_Q_pval > 0.05)
res_table_forw[flt, "BH_qval"] <- p.adjust(res_table_forw[flt, "pval"], method = "BH")
write.table(res_table_forw, file = paste0(out_filebase, ".forward.txt") , sep = "\t", quote = F, col.names = NA)
for (aa_id in aa_ids){
for (t2d_id in t2d_ids){
# T2D -> AA
    exp_dat <- NULL; out_dat <- NULL # reset so a failed extraction is not silently backfilled by the previous iteration's data
    tryCatch({
exp_dat <- extract_instruments(outcomes = t2d_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
out_dat <- extract_outcome_data(snps = exp_dat$SNP, outcomes = aa_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
}, error=function(e) NULL)
if (!is.null(out_dat) & !is.null(exp_dat)){
res <- run_mr(exp_dat, out_dat)
if (!is.null(res)){
res_table_rev <- rbind(res_table_rev, res)
}
}
}
}
res_table_rev$BH_qval = NA
flt <- which(res_table_rev$nsnp > 2 & res_table_rev$egger_intercept_pval > 0.05 & res_table_rev$heterogeneity_Q_pval > 0.05)
res_table_rev[flt, "BH_qval"] <- p.adjust(res_table_rev[flt, "pval"], method = "BH")
write.table(res_table_rev, file = paste0(out_filebase, ".reverse.txt") , sep = "\t", quote = F, col.names = NA)
|
/MR/BCAA/run_MR_AA-pheno-AA.R
|
no_license
|
DashaZhernakova/umcg_scripts
|
R
| false | false | 2,989 |
r
|
setwd("/groups/umcg-lld/scr01/dasha/MR/results/AA_T2D/")
source("/groups/umcg-lld/scr01/dasha/MR/results/run_MR.R")
out_filebase <- "MR_AA-lipids_results"
ao <- available_outcomes(access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
#lipids
t2d_ids <- c(ao[ao$subcategory == "Lipid" & ao$author == "Kettunen", "id"])
#intrinsic factors
#Creatinine, Glucose, HbA1c, Total cholesterol, HDL-C, LDL-C, TG, Citrullin, Haemoglobin, Insulin, Lymph%
#Leptin, adiponectin
#t2d_ids <- c("309", "850", "UKB-a:333", "1099", "1103", "1104", "1105", "18",
# "418", "422", "756", "757", "772", "773", "776", "777", "859",
# "758",
# "933", "301", "782",
# "299", "780",
# "300", "781",
# "934", "302", "783",
# "356",
# "270", "271", "272",
# "761", "762", "768", "774", "775", "778", "779", "767",
# "119", "125", "134", "139", "140",
# "1002", "1003",
# "1")
# Amino acids from Kettunen et al
#aa_ids <- c("840","850","860","866","873","897","919","938","939","940")
# all BCAA
aa_ids <- ao[ao$trait %in% c("Leucine", "Isoleucine", "Valine"), "id"]
res_table_forw <- data.frame()
res_table_rev <- data.frame()
for (aa_id in aa_ids){
for (t2d_id in t2d_ids){
# AA -> T2D
tryCatch({
exp_dat <- extract_instruments(outcomes = aa_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
out_dat <- extract_outcome_data(snps = exp_dat$SNP, outcomes = t2d_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
}, error=function(e) NULL)
if (!is.null(out_dat) & !is.null(exp_dat)){
res <- run_mr(exp_dat, out_dat)
if (!is.null(res)){
res_table_forw <- rbind(res_table_forw, res)
}
}
}
}
res_table_forw$BH_qval = NA
flt <- which(res_table_forw$nsnp > 2 & res_table_forw$egger_intercept_pval > 0.05 & res_table_forw$heterogeneity_Q_pval > 0.05)
res_table_forw[flt, "BH_qval"] <- p.adjust(res_table_forw[flt, "pval"], method = "BH")
write.table(res_table_forw, file = paste0(out_filebase, ".forward.txt") , sep = "\t", quote = F, col.names = NA)
for (aa_id in aa_ids){
for (t2d_id in t2d_ids){
# T2D -> AA
tryCatch({
exp_dat <- extract_instruments(outcomes = t2d_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
out_dat <- extract_outcome_data(snps = exp_dat$SNP, outcomes = aa_id, access_token = "/groups/umcg-lld/scr02/dasha/MR/results/v2/mrbase.oauth")
}, error=function(e) NULL)
if (!is.null(out_dat) & !is.null(exp_dat)){
res <- run_mr(exp_dat, out_dat)
if (!is.null(res)){
res_table_rev <- rbind(res_table_rev, res)
}
}
}
}
res_table_rev$BH_qval = NA
flt <- which(res_table_rev$nsnp > 2 & res_table_rev$egger_intercept_pval > 0.05 & res_table_rev$heterogeneity_Q_pval > 0.05)
res_table_rev[flt, "BH_qval"] <- p.adjust(res_table_rev[flt, "pval"], method = "BH")
write.table(res_table_rev, file = paste0(out_filebase, ".reverse.txt") , sep = "\t", quote = F, col.names = NA)
|
#' Model-based Genomically Informed High-dimensional Predictor
#' of Microbial Community Metabolite Profiles
#'
#' Predict metabolites from new microbiome samples.
#'
#' @param metag Microbial sequence features' relative abundances (matrix)
#' for which prediction is desired. The sequence features' abundances are expected to
#' be normalized (i.e. proportional data ranging from 0 to 1.0).
#' @param weight.matrix The weight matrix to be used for prediction (optional).
#' If not provided, by default, a pre-trained weight matrix based on UniRef90 gene
#' families from the original MelonnPan paper (Mallick et al, 2019) will be used.
#' @param train.metag Quality-controlled training metagenomes against which
#' similarity is desired (optional). The sequence features' abundances are expected
#' to be normalized (i.e. proportional data ranging from 0.0 to 1.0).
#' If not provided, a pre-processed UniRef90 gene family training table from
#' the original MelonnPan paper (Mallick et al. 2019) will be used.
#' @param criticalpoint A numeric value corresponding to the significance level
#' to find the top PCs. If the significance level is 0.05, 0.01, 0.005, or 0.001,
#' the criticalpoint should be set to be 0.9793, 2.0234, 2.4224, or 3.2724,
#' accordingly. The default is 0.9793 (i.e. 0.05 significance level).
#' @param corr.method Method to correlate new metagenomes and training PCs.
#' Default is 'pearson'.
#' @param output Path prefix (e.g. an output directory ending in "/") used when writing the result files.
#' @keywords metabolite prediction, microbiome, metagenomics, elastic net, metabolomics
#' @export
melonnpan.predict<-function(
metag,
weight.matrix = NULL,
train.metag = NULL,
criticalpoint = 0.9793,
corr.method = 'pearson',
output){
#################################################
# Read in the input data (i.e. new metagenomes) #
#################################################
# if a character string then this is a file name, else it
# is a data frame
if (is.character(metag)){
test.metag <-data.frame(readTable(metag))
} else{
test.metag<-metag
}
# Sanity check for proportionality
if(any(test.metag<0)||any(test.metag>1))
stop("All measurements should be normalized to proportions.")
##################
# Calculate RTSI #
##################
# Load training metagenomes
if (is.null(train.metag)) train.metag<-melonnpan::melonnpan.training.data
# Subset to common IDs
commonID<-intersect(colnames(train.metag), colnames(test.metag))
# Throw error if no common IDs between training and test data
if(length(commonID)<1) stop('No common IDs found between training and test data. Execution halted!')
# Common features across datasets
train<-as.data.frame(train.metag[, commonID])
test<-as.data.frame(test.metag[, commonID])
# Remove binary features
ID<-which(colSums(test!=0)>1)
train<-train[, ID]
test<-test[,ID]
rowTrain<-rownames(train)
rowTest<-rownames(test)
# RIN transformation
train<-apply(train, 2, GenABEL::rntransform)
test<-apply(test, 2, GenABEL::rntransform)
rownames(train)<-rowTrain
rownames(test)<-rowTest
# Extract PCs
PCA <- prcomp(train)
# Select top PCs based on TW statistic
DD<-AssocTests::tw(eigenvalues = PCA$sdev, eigenL = length(PCA$sdev), criticalpoint = criticalpoint)
Loadings <- as.data.frame(PCA$rotation[,1:DD$SigntEigenL])
# Calculate pairwise correlation between PCs and samples
RTSI<-matrix(nrow=nrow(test), ncol=ncol(Loadings))
for (i in 1:nrow(test)){
y<-as.numeric(test[i,])
for (j in 1:ncol(Loadings)){
r<-as.numeric(Loadings[,j])
      RTSI[i, j] <- cor(r, y, method = corr.method)
}
}
# Add structure to the output
rownames(RTSI)<-rowTest
RTSI_Score<-as.data.frame(apply(RTSI, 1, max))
colnames(RTSI_Score)<-'RTSI'
RTSI_Score<-tibble::rownames_to_column(RTSI_Score, 'ID')
write.table(RTSI_Score,
file = paste(output, 'MelonnPan_RTSI.txt', sep = ''),
row.names=F, col.names=T, quote=F, sep="\t")
#######################
# Predict metabolites #
#######################
# Remove binary features (so that RIN transformation is valid)
retainIDs<-which(colSums(test.metag!=0)>1)
new.metag<-test.metag[, retainIDs]
# Load weight matrix
if (is.null(weight.matrix)) weight.matrix<-melonnpan::melonnpan.trained.model
train.weight<-as.data.frame(t(weight.matrix))
# Subset by overlapping sequence features
# i.e. common in both weight matrix and input data (sequence features)
X<-new.metag[, intersect(colnames(train.weight), colnames(new.metag))]
X<-X[,which((nrow(X)-colSums(X==0))>1)]
intercept<-train.weight$Intercept
test.weight<-train.weight[, intersect(colnames(train.weight), colnames(X))]
new.weight<-cbind.data.frame('Intercept' = intercept, test.weight)
compound_names<-rownames(new.weight)
# Apply Rank-based Inverse Normal (RIN) transformation
transf.X<-apply(X, 2, GenABEL::rntransform)
new.X<-cbind('Intercept'=rep(1, nrow(transf.X)), transf.X)
# Carry out prediction in new samples and back-transform
new.X<-as.matrix(new.X)
new.weight<-apply(as.matrix.noquote(new.weight),2,as.numeric)
pred<-new.X%*%t(new.weight)
pred<-apply(pred, 2, SqSin)
# Add structure to the output
rownames(pred)<-rownames(new.metag)
colnames(pred)<-compound_names
pred<-as.data.frame(pred)
pred<-tibble::rownames_to_column(pred, 'ID')
write.table(pred,
file = paste(output, 'MelonnPan_Predicted_Metabolites.txt', sep = ''),
row.names=F, col.names=T, quote=F, sep="\t")
# Return
return(list(pred = pred, RTSI = RTSI_Score))
}
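# --- Hedged usage sketch (not part of the original package documentation) ---
# Illustrative only: the input file name and output prefix below are assumptions;
# leaving weight.matrix and train.metag NULL falls back to the bundled training
# data and trained model, as implemented above.
# res <- melonnpan.predict(metag = "new_metagenomes.tsv", output = "results/")
# head(res$pred) # predicted metabolite table
# head(res$RTSI) # per-sample RTSI scores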
|
/R/melonnpan_predict.R
|
permissive
|
hshiroma/melonnpan
|
R
| false | false | 5,738 |
r
|
#' Model-based Genomically Informed High-dimensional Predictor
#' of Microbial Community Metabolite Profiles
#'
#' Predict metabolites from new microbiome samples.
#'
#' @param metag Microbial sequence features' relative abundances (matrix)
#' for which prediction is desired. The sequence features' abundances are expected to
#' be normalized (i.e. proportional data ranging from 0 to 1.0).
#' @param weight.matrix The weight matrix to be used for prediction (optional).
#' If not provided, by default, a pre-trained weight matrix based on UniRef90 gene
#' families from the original MelonnPan paper (Mallick et al, 2019) will be used.
#' @param train.metag Quality-controlled training metagenomes against which
#' similarity is desired (optional). The sequence features' abundances are expected
#' to be normalized (i.e. proportional data ranging from 0.0 to 1.0).
#' If not provided, a pre-processed UniRef90 gene family training table from
#' the original MelonnPan paper (Mallick et al. 2019) will be used.
#' @param criticalpoint A numeric value corresponding to the significance level
#' to find the top PCs. If the significance level is 0.05, 0.01, 0.005, or 0.001,
#' the criticalpoint should be set to be 0.9793, 2.0234, 2.4224, or 3.2724,
#' accordingly. The default is 0.9793 (i.e. 0.05 significance level).
#' @param corr.method Method to correlate new metagenomes and training PCs.
#' Default is 'pearson'.
#' @param output Path prefix (e.g. an output directory ending in "/") used when writing the result files.
#' @keywords metabolite prediction, microbiome, metagenomics, elastic net, metabolomics
#' @export
melonnpan.predict<-function(
metag,
weight.matrix = NULL,
train.metag = NULL,
criticalpoint = 0.9793,
corr.method = 'pearson',
output){
#################################################
# Read in the input data (i.e. new metagenomes) #
#################################################
# if a character string then this is a file name, else it
# is a data frame
if (is.character(metag)){
test.metag <-data.frame(readTable(metag))
} else{
test.metag<-metag
}
# Sanity check for proportionality
if(any(test.metag<0)||any(test.metag>1))
stop("All measurements should be normalized to proportions.")
##################
# Calculate RTSI #
##################
# Load training metagenomes
if (is.null(train.metag)) train.metag<-melonnpan::melonnpan.training.data
# Subset to common IDs
commonID<-intersect(colnames(train.metag), colnames(test.metag))
# Throw error if no common IDs between training and test data
if(length(commonID)<1) stop('No common IDs found between training and test data. Execution halted!')
# Common features across datasets
train<-as.data.frame(train.metag[, commonID])
test<-as.data.frame(test.metag[, commonID])
# Remove binary features
ID<-which(colSums(test!=0)>1)
train<-train[, ID]
test<-test[,ID]
rowTrain<-rownames(train)
rowTest<-rownames(test)
# RIN transformation
train<-apply(train, 2, GenABEL::rntransform)
test<-apply(test, 2, GenABEL::rntransform)
rownames(train)<-rowTrain
rownames(test)<-rowTest
# Extract PCs
PCA <- prcomp(train)
# Select top PCs based on TW statistic
DD<-AssocTests::tw(eigenvalues = PCA$sdev, eigenL = length(PCA$sdev), criticalpoint = criticalpoint)
Loadings <- as.data.frame(PCA$rotation[,1:DD$SigntEigenL])
# Calculate pairwise correlation between PCs and samples
RTSI<-matrix(nrow=nrow(test), ncol=ncol(Loadings))
for (i in 1:nrow(test)){
y<-as.numeric(test[i,])
for (j in 1:ncol(Loadings)){
r<-as.numeric(Loadings[,j])
      RTSI[i, j] <- cor(r, y, method = corr.method)
}
}
# Add structure to the output
rownames(RTSI)<-rowTest
RTSI_Score<-as.data.frame(apply(RTSI, 1, max))
colnames(RTSI_Score)<-'RTSI'
RTSI_Score<-tibble::rownames_to_column(RTSI_Score, 'ID')
write.table(RTSI_Score,
file = paste(output, 'MelonnPan_RTSI.txt', sep = ''),
row.names=F, col.names=T, quote=F, sep="\t")
#######################
# Predict metabolites #
#######################
# Remove binary features (so that RIN transformation is valid)
retainIDs<-which(colSums(test.metag!=0)>1)
new.metag<-test.metag[, retainIDs]
# Load weight matrix
if (is.null(weight.matrix)) weight.matrix<-melonnpan::melonnpan.trained.model
train.weight<-as.data.frame(t(weight.matrix))
# Subset by overlapping sequence features
# i.e. common in both weight matrix and input data (sequence features)
X<-new.metag[, intersect(colnames(train.weight), colnames(new.metag))]
X<-X[,which((nrow(X)-colSums(X==0))>1)]
intercept<-train.weight$Intercept
test.weight<-train.weight[, intersect(colnames(train.weight), colnames(X))]
new.weight<-cbind.data.frame('Intercept' = intercept, test.weight)
compound_names<-rownames(new.weight)
# Apply Rank-based Inverse Normal (RIN) transformation
transf.X<-apply(X, 2, GenABEL::rntransform)
new.X<-cbind('Intercept'=rep(1, nrow(transf.X)), transf.X)
# Carry out prediction in new samples and back-transform
new.X<-as.matrix(new.X)
new.weight<-apply(as.matrix.noquote(new.weight),2,as.numeric)
pred<-new.X%*%t(new.weight)
pred<-apply(pred, 2, SqSin)
# Add structure to the output
rownames(pred)<-rownames(new.metag)
colnames(pred)<-compound_names
pred<-as.data.frame(pred)
pred<-tibble::rownames_to_column(pred, 'ID')
write.table(pred,
file = paste(output, 'MelonnPan_Predicted_Metabolites.txt', sep = ''),
row.names=F, col.names=T, quote=F, sep="\t")
# Return
return(list(pred = pred, RTSI = RTSI_Score))
}
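# --- Hedged usage sketch (not part of the original package documentation) ---
# Illustrative only: the input file name and output prefix below are assumptions;
# leaving weight.matrix and train.metag NULL falls back to the bundled training
# data and trained model, as implemented above.
# res <- melonnpan.predict(metag = "new_metagenomes.tsv", output = "results/")
# head(res$pred) # predicted metabolite table
# head(res$RTSI) # per-sample RTSI scores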
|
library(GCalignR)
### Name: simple_chroma
### Title: Simulate simple chromatograms
### Aliases: simple_chroma
### ** Examples
## create a chromatogram
x <- simple_chroma(peaks = c(5,10,15), N = 1, min = 0, max = 30, Names = "MyChroma")
## plot chromatogram
with(x, plot(x,y, xlab = "time", ylab = "intensity"))
|
/data/genthat_extracted_code/GCalignR/examples/simple_chroma.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 319 |
r
|
library(GCalignR)
### Name: simple_chroma
### Title: Simulate simple chromatograms
### Aliases: simple_chroma
### ** Examples
## create a chromatogram
x <- simple_chroma(peaks = c(5,10,15), N = 1, min = 0, max = 30, Names = "MyChroma")
## plot chromatogram
with(x, plot(x,y, xlab = "time", ylab = "intensity"))
|
## ... your simulation code
##the founder's haplotypes
#import the haplotypes generated by cosi
haplotype <- read.table("out_100k_10k_1kb.hap-1", header=F)
colnames(haplotype) <- c("HAP", "CHROM", paste("SNP", 1:(ncol(haplotype)-2), sep=""))
snp <-read.table("out_100k_10k_1kb.pos-1", header=T)
#make allele 1 the minor allele and allele 2 the common allele
temp.idx <- snp$FREQ1 > snp$FREQ2
temp.freq <- snp$FREQ2
snp$FREQ2[temp.idx] <- snp$FREQ1[temp.idx]
snp$FREQ1[temp.idx] <- temp.freq[temp.idx]
#also change the genotype file
haplotype[,which(temp.idx==T)+2] <- 3 - haplotype[,which(temp.idx==T)+2]
#allele frequency
nrow(snp) #total number of snp
sum(snp$FREQ1 < 0.05) # number of snp with f < 0.05
sum(snp$FREQ1 < 0.01) # number of snp with f < 0.01
sum(snp$FREQ1 == 0.0001) # number of singletons
##assign risk variants and the corresponding effect size (proportional to allele frequency)
# null <- FALSE
n_haplo <- 10000
n_snp <- ncol(haplotype)-2
prevalence <- p_dis
b0_sqrt <- sqrt(prevalence) #baseline
#set up causal SNPs
#generate risk haplotypes
# risk.variant.id <- c(3, 8,19,21,23,27,44,47,49,50)
risk.variant.id <- c(risk.variant) #2 for common, 7 for rare, 39 for super rare
risk.haplo.id <- which(apply(2-as.matrix(haplotype[, risk.variant.id+2]), 1, sum)>0)
(risk.haplo.f <- mean(apply(2-as.matrix(haplotype[, risk.variant.id+2]), 1, sum)>0)) #carrier haplotype frequency
haplotype.risk <- rep(1, length=nrow(haplotype))
#assign the mean relative risk and calculate the haplotype variants p(A|h)
haplotype.risk[risk.haplo.id] <- r
mean(haplotype.risk[risk.haplo.id]) #mean relative risk
haplotype.risk <<- haplotype.risk*b0_sqrt
##gene drop simulation for two generations
family_strct.2g3c <- data.frame(family=c(1,1,1,1,1), person=c(1,2,3,4,5), father=c(0,0,1,1,1),
mother=c(0,0,2,2,2), sex=c(1,2,1,1,1), affect=c(1,2,2,2,2)) #1=male, 2=female, 1=unaffected, 2=affected
rep.idx <<- 1
sim_result <- replicate(n_rep, {
print(rep.idx)
rep.idx <<- rep.idx + 1
family_generated_2g3c <<- gene_family(family_strct=family_strct.2g3c, n=n_family, haplotype.risk=haplotype.risk)
#remove the founder from the data
family_generated_3c <- family_generated_2g3c
family_generated_founder <- list()
temp.idx <- which(family_generated_2g3c$data_family$person %in% 1:2) #take out founders
family_generated_founder$data_family <- family_generated_2g3c$data_family[temp.idx, ]
family_generated_founder$tran_vec <- family_generated_2g3c$tran_vec[temp.idx, ]
family_generated_3c$data_family <- family_generated_2g3c$data_family[-temp.idx, ]
family_generated_3c$tran_vec <- family_generated_2g3c$tran_vec[-temp.idx, ]
data <- family_generated_3c
f <- risk.variant
n_family <- max(data$data_family$family)
n_family_member <- table(data$data_family$family)
  #check if the founder's haplotype carries any variants with f < 0.1
  if(length(f)==1 & f[1] <1) { #support allele frequency or list of snps
    snp2look.idx <- which(snp$FREQ1 < f) # snp to look for
  } else(snp2look.idx <- f)
  #distinguish those siblings with four founder haplotypes
  data.founder <- family_generated_2g3c
  n_family_member.w.founder <- n_family_member + 2
  #calculate the allele frequency
  founder.list <- list() #to store whether a haplotype carrying a risk variant appears in multiple affected
carrier.count.2 <- 0
chromosome.count.2 <- 0
carrier.count.2.mis <- 0
chromosome.count.2.mis <- 0
carrier.count.2.obs <- 0
chromosome.count.2.obs <- 0
carrier.count.3 <- 0
chromosome.count.3 <- 0
carrier.count.3.mis <- 0
chromosome.count.3.mis <- 0
carrier.count.3.obs <- 0
chromosome.count.3.obs <- 0
carrier.count.4 <- 0
chromosome.count.4 <- 0
haplo.unique.list <- list()
for(family.idx in 1:n_family) {
current_row=sum(n_family_member.w.founder[1:family.idx]) - n_family_member.w.founder[family.idx]
    #assign a suitable transmission; not trivial to code up the algorithm, so assume it is known for now
tran_vec <- data.founder$tran_vec[(current_row+1):(current_row+n_family_member.w.founder[family.idx]),]
##tally the (un)ambiguous carrier haplotype
h1 <- data.founder$data_family[(current_row+1):(current_row+n_family_member.w.founder[family.idx]),7:(6+n_snp)] #the first haplotype
h2 <- data.founder$data_family[(current_row+1):(current_row+n_family_member.w.founder[family.idx]),-c(1:(6+n_snp))] #the second haplotype
#observed allele count for each individual
carrier.founder <- c(0,0,0,0)
carrier.founder.mis <- c(0,0,0,0)
carrier.founder.obs <- c(0,0,0,0)
carrier.offspring <- c(0,0,0,0)
for(a in 3:n_family_member.w.founder[family.idx]) { #offsprings
idx.h1 <- match(tran_vec[a,"h1"], c("A", "B", "C", "D"))
if(h1[a, snp2look.idx]==1) carrier.offspring[idx.h1] <- carrier.offspring[idx.h1] + 1
idx.h2 <- match(tran_vec[a,"h2"], c("A", "B", "C", "D"))
if(h2[a, snp2look.idx]==1) carrier.offspring[idx.h2] <- carrier.offspring[idx.h2] + 1
}
    #the number of times each unique haplotype occurred among the affected offspring
haplo.unique <- unique(as.vector(as.matrix(tran_vec[3:n_family_member.w.founder[family.idx] ,c("h1","h2")])))
haplo.unique.count <- length(haplo.unique)
haplo.unique.list[[family.idx]] <- length(haplo.unique)
haplo.mis <- which(is.na(match(c("A", "B", "C", "D"), haplo.unique)))
haplo.obs <- which(!is.na(match(c("A", "B", "C", "D"), haplo.unique)))
for(a in 1:2) { #founder's
idx.h1 <- match(tran_vec[a,"h1"], c("A", "B", "C", "D"))
if(h1[a, snp2look.idx]==1) carrier.founder[idx.h1] <- carrier.founder[idx.h1] + 1
idx.h2 <- match(tran_vec[a,"h2"], c("A", "B", "C", "D"))
if(h2[a, snp2look.idx]==1) carrier.founder[idx.h2] <- carrier.founder[idx.h2] + 1
if(idx.h1 %in% haplo.mis & h1[a, snp2look.idx]==1) carrier.founder.mis[idx.h1] <- carrier.founder.mis[idx.h1] + 1
if(idx.h2 %in% haplo.mis & h2[a, snp2look.idx]==1) carrier.founder.mis[idx.h2] <- carrier.founder.mis[idx.h2] + 1
if(idx.h1 %in% haplo.obs & h1[a, snp2look.idx]==1) carrier.founder.obs[idx.h1] <- carrier.founder.obs[idx.h1] + 1
if(idx.h2 %in% haplo.obs & h2[a, snp2look.idx]==1) carrier.founder.obs[idx.h2] <- carrier.founder.obs[idx.h2] + 1
}
    #observed founder carrier based on offspring
    founder.list[[family.idx]] <- c(observed=sum(carrier.offspring), carrier=sum(carrier.offspring>0), haplo.unique=haplo.unique)
    #steps to calculate the true founder allele frequency by the number of observed founder chromosomes
if(haplo.unique.count==2) {
carrier.count.2 <- carrier.count.2 + sum(carrier.founder>=1)
chromosome.count.2 <- chromosome.count.2 + 4
carrier.count.2.mis <- carrier.count.2.mis + sum(carrier.founder.mis>=1)
chromosome.count.2.mis <- chromosome.count.2.mis + 2
carrier.count.2.obs <- carrier.count.2.obs + sum(carrier.founder.obs>=1)
chromosome.count.2.obs <- chromosome.count.2.obs + 2
}
if(haplo.unique.count==3) {
carrier.count.3 <- carrier.count.3 + sum(carrier.founder>=1)
chromosome.count.3 <- chromosome.count.3 + 4
carrier.count.3.mis <- carrier.count.3.mis + sum(carrier.founder.mis>=1)
chromosome.count.3.mis <- chromosome.count.3.mis + 1
carrier.count.3.obs <- carrier.count.3.obs + sum(carrier.founder.obs>=1)
chromosome.count.3.obs <- chromosome.count.3.obs + 3 }
if(haplo.unique.count==4) {
carrier.count.4 <- carrier.count.4 + sum(carrier.founder>=1)
chromosome.count.4 <- chromosome.count.4 + 4
}
}
#the allele frequency
(founder.freq.offspring.2 <- carrier.count.2/chromosome.count.2)
(founder.freq.offspring.2.mis <- carrier.count.2.mis/chromosome.count.2.mis)
(founder.freq.offspring.2.obs <- carrier.count.2.obs/chromosome.count.2.obs)
(founder.freq.offspring.3 <- carrier.count.3/chromosome.count.3)
(founder.freq.offspring.3.mis <- carrier.count.3.mis/chromosome.count.3.mis)
(founder.freq.offspring.3.obs <- carrier.count.3.obs/chromosome.count.3.obs)
(founder.freq.offspring.4 <- carrier.count.4/chromosome.count.4)
#only report p.value
c(founder.freq.offspring.2, founder.freq.offspring.2.mis, founder.freq.offspring.2.obs,
founder.freq.offspring.3, founder.freq.offspring.3.mis, founder.freq.offspring.3.obs,
founder.freq.offspring.4)
})
## Write out your results to a csv file
result.df <- as.data.frame(t(sim_result))
colnames(result.df) <- c("founder.freq.offspring.2", "founder.freq.offspring.2.mis", "founder.freq.offspring.2.obs",
"founder.freq.offspring.3", "founder.freq.offspring.3.mis", "founder.freq.offspring.3.obs",
"founder.freq.offspring.4")
result.df <- cbind(seed,r,p_dis,risk.variant,risk.haplo.f,n_family,result.df)
write.csv(result.df, paste("res_",r,"_",n_family,"_",seed,".csv",sep=""), row.names=FALSE)
## R.miSniam
## End(Not run)
## Not run:
##commands to run jobs in parallel
##passed in from the command prompt.
parseCommandArgs()
## Will display the default values on the first run,
##initialize
seed=1000
#number of replications
n_rep=34
#number of family
n_family=1000
#prevalence
p_dis=0.3
# print(null) #null=FALSE
#family structure
# print(family_strct) #family_strct='family_strct.2g3c'
print(getwd())
##command line
parallel <- function(...) {
names.args <- names(list(...))
value.args <- as.vector(list(...))
##commands to run
for(i in 1:3) {
rfile="mainSim.R"
cmd <- paste("R --vanilla --args seed ", seed, " ", paste(names.args, value.args, collapse=" "),
" < ", rfile, " > ", "mainSim_", paste(value.args, collapse="_"), ".Rout", seed, " 2>&1", sep="")
print(cmd)
writeLines(cmd, fileConn) #write jobs to text
#add seed number
seed<<-seed+1
}
}
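# Hedged illustration (not output captured from a real run): with the default
# values above, each generated command is roughly of the form
# R --vanilla --args seed 1000 n_rep 34 r 1 n_family 1000 p_dis 0.3 risk.variant 2 < mainSim.R > mainSim_34_1_1000_0.3_2.Rout1000 2>&1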
##clean-up & initialization
system("rm *.csv")
system("rm *.Rout*")
system("rm *.out*")
fileConn<-file("jobs.txt", "w")
##create your jobs here
for(i in seq(1,2, length.out = 5)) {
parallel(n_rep=n_rep, r=i, n_family=n_family, p_dis=p_dis, risk.variant=2) #common
}
for(i in seq(1,2, length.out = 5)) {
parallel(n_rep=n_rep, r=i, n_family=n_family, p_dis=p_dis, risk.variant=7) #rare
}
for(i in seq(1,2, length.out = 5)) {
parallel(n_rep=n_rep, r=i, n_family=n_family, p_dis=p_dis, risk.variant=39) #super rare
}
###################################################
#check power using true, imputed founder carrier, minimum offspring carrier
#three versions -- common and rare and super rare #2 for common, 7 for rare, 39 for super rare
result <- read.csv("2015-06-15 imputation framework for TRAP to fix common variant see allele freqency on mis and obs.csv", header=T)
result <- result %>% melt(id.vars=c("seed", "r", "p_dis","risk.variant","risk.haplo.f","n_family"), variable.name="method", value.name="p.value")
result <- mutate(result, transmit=ifelse(grepl("obs", method), T, F))
result <- mutate(result, n_transmit=ifelse(grepl("2", method), "2","3"))
result.plot <- result %>% group_by(risk.variant, r, method, transmit, n_transmit) %>%
summarise(n=n(), maf=mean(p.value))
#TRAP
pd <- position_dodge(0.0)
filter(result.plot, risk.variant==39 & grepl("mis|obs", method)) %>%
ggplot(aes(x=r, y=maf, group=method, col=n_transmit, lty=transmit)) +
# geom_point(size=3, alpha=1) +
geom_line(size=1.2, alpha=0.7, position=pd) +
geom_point(size=1.2, position=pd) +
# ggtitle("f=0.202, consider effect size of risk haplotypes, TRAP") +
# ggtitle("f=0.0178, consider effect size of risk haplotypes, TRAP") +
ggtitle("f=0.0039, consider effect size of risk haplotypes, TRAP") +
labs(x="relative risk r") +
scale_y_continuous(limits=c(0,.1)) +
theme_gray(base_size = 20)
|
/2015-06-15 imputation framework for TRAP to fix common variant see allele freqency on mis and obs.r
|
no_license
|
gtlntw/p3_TRAP
|
R
| false | false | 11,961 |
r
|
## ... your simulation code
##the founder's haplotypes
#import the haplotypes generated by cosi
haplotype <- read.table("out_100k_10k_1kb.hap-1", header=F)
colnames(haplotype) <- c("HAP", "CHROM", paste("SNP", 1:(ncol(haplotype)-2), sep=""))
snp <-read.table("out_100k_10k_1kb.pos-1", header=T)
#make allele 1 the minor allele and allele 2 the common allele
temp.idx <- snp$FREQ1 > snp$FREQ2
temp.freq <- snp$FREQ2
snp$FREQ2[temp.idx] <- snp$FREQ1[temp.idx]
snp$FREQ1[temp.idx] <- temp.freq[temp.idx]
#also change the genotype file
haplotype[,which(temp.idx==T)+2] <- 3 - haplotype[,which(temp.idx==T)+2]
#allele frequency
nrow(snp) #total number of snp
sum(snp$FREQ1 < 0.05) # number of snp with f < 0.05
sum(snp$FREQ1 < 0.01) # number of snp with f < 0.01
sum(snp$FREQ1 == 0.0001) # number of singletons
##assign risk variants and the corresponding effect size (proportional to allele frequency)
# null <- FALSE
n_haplo <- 10000
n_snp <- ncol(haplotype)-2
prevalence <- p_dis
b0_sqrt <- sqrt(prevalence) #baseline
#set up causal SNPs
#generate risk haplotypes
# risk.variant.id <- c(3, 8,19,21,23,27,44,47,49,50)
risk.variant.id <- c(risk.variant) #2 for common, 7 for rare, 39 for super rare
risk.haplo.id <- which(apply(2-as.matrix(haplotype[, risk.variant.id+2]), 1, sum)>0)
(risk.haplo.f <- mean(apply(2-as.matrix(haplotype[, risk.variant.id+2]), 1, sum)>0)) #carrier haplotype frequency
haplotype.risk <- rep(1, length=nrow(haplotype))
#assign the mean relative risk and calculate the haplotype variants p(A|h)
haplotype.risk[risk.haplo.id] <- r
mean(haplotype.risk[risk.haplo.id]) #mean relative risk
haplotype.risk <<- haplotype.risk*b0_sqrt
##gene drop simulation for two generations
family_strct.2g3c <- data.frame(family=c(1,1,1,1,1), person=c(1,2,3,4,5), father=c(0,0,1,1,1),
mother=c(0,0,2,2,2), sex=c(1,2,1,1,1), affect=c(1,2,2,2,2)) #1=male, 2=female, 1=unaffected, 2=affected
rep.idx <<- 1
sim_result <- replicate(n_rep, {
print(rep.idx)
rep.idx <<- rep.idx + 1
family_generated_2g3c <<- gene_family(family_strct=family_strct.2g3c, n=n_family, haplotype.risk=haplotype.risk)
#remove the founder from the data
family_generated_3c <- family_generated_2g3c
family_generated_founder <- list()
temp.idx <- which(family_generated_2g3c$data_family$person %in% 1:2) #take out founders
family_generated_founder$data_family <- family_generated_2g3c$data_family[temp.idx, ]
family_generated_founder$tran_vec <- family_generated_2g3c$tran_vec[temp.idx, ]
family_generated_3c$data_family <- family_generated_2g3c$data_family[-temp.idx, ]
family_generated_3c$tran_vec <- family_generated_2g3c$tran_vec[-temp.idx, ]
data <- family_generated_3c
f <- risk.variant
n_family <- max(data$data_family$family)
n_family_member <- table(data$data_family$family)
  #check if the founder's haplotype carries any variants with f < 0.1
  if(length(f)==1 & f[1] <1) { #support allele frequency or list of snps
    snp2look.idx <- which(snp$FREQ1 < f) # snp to look for
  } else(snp2look.idx <- f)
  #distinguish those siblings with four founder haplotypes
  data.founder <- family_generated_2g3c
  n_family_member.w.founder <- n_family_member + 2
  #calculate the allele frequency
  founder.list <- list() #to store whether a haplotype carrying a risk variant appears in multiple affected
carrier.count.2 <- 0
chromosome.count.2 <- 0
carrier.count.2.mis <- 0
chromosome.count.2.mis <- 0
carrier.count.2.obs <- 0
chromosome.count.2.obs <- 0
carrier.count.3 <- 0
chromosome.count.3 <- 0
carrier.count.3.mis <- 0
chromosome.count.3.mis <- 0
carrier.count.3.obs <- 0
chromosome.count.3.obs <- 0
carrier.count.4 <- 0
chromosome.count.4 <- 0
haplo.unique.list <- list()
for(family.idx in 1:n_family) {
current_row=sum(n_family_member.w.founder[1:family.idx]) - n_family_member.w.founder[family.idx]
    #assign a suitable transmission; not trivial to code up the algorithm, so assume it is known for now
tran_vec <- data.founder$tran_vec[(current_row+1):(current_row+n_family_member.w.founder[family.idx]),]
##tally the (un)ambiguous carrier haplotype
h1 <- data.founder$data_family[(current_row+1):(current_row+n_family_member.w.founder[family.idx]),7:(6+n_snp)] #the first haplotype
h2 <- data.founder$data_family[(current_row+1):(current_row+n_family_member.w.founder[family.idx]),-c(1:(6+n_snp))] #the second haplotype
#observed allele count for each individual
carrier.founder <- c(0,0,0,0)
carrier.founder.mis <- c(0,0,0,0)
carrier.founder.obs <- c(0,0,0,0)
carrier.offspring <- c(0,0,0,0)
for(a in 3:n_family_member.w.founder[family.idx]) { #offsprings
idx.h1 <- match(tran_vec[a,"h1"], c("A", "B", "C", "D"))
if(h1[a, snp2look.idx]==1) carrier.offspring[idx.h1] <- carrier.offspring[idx.h1] + 1
idx.h2 <- match(tran_vec[a,"h2"], c("A", "B", "C", "D"))
if(h2[a, snp2look.idx]==1) carrier.offspring[idx.h2] <- carrier.offspring[idx.h2] + 1
}
    #the number of times each unique haplotype occurred among the affected offspring
haplo.unique <- unique(as.vector(as.matrix(tran_vec[3:n_family_member.w.founder[family.idx] ,c("h1","h2")])))
haplo.unique.count <- length(haplo.unique)
haplo.unique.list[[family.idx]] <- length(haplo.unique)
haplo.mis <- which(is.na(match(c("A", "B", "C", "D"), haplo.unique)))
haplo.obs <- which(!is.na(match(c("A", "B", "C", "D"), haplo.unique)))
for(a in 1:2) { #founder's
idx.h1 <- match(tran_vec[a,"h1"], c("A", "B", "C", "D"))
if(h1[a, snp2look.idx]==1) carrier.founder[idx.h1] <- carrier.founder[idx.h1] + 1
idx.h2 <- match(tran_vec[a,"h2"], c("A", "B", "C", "D"))
if(h2[a, snp2look.idx]==1) carrier.founder[idx.h2] <- carrier.founder[idx.h2] + 1
if(idx.h1 %in% haplo.mis & h1[a, snp2look.idx]==1) carrier.founder.mis[idx.h1] <- carrier.founder.mis[idx.h1] + 1
if(idx.h2 %in% haplo.mis & h2[a, snp2look.idx]==1) carrier.founder.mis[idx.h2] <- carrier.founder.mis[idx.h2] + 1
if(idx.h1 %in% haplo.obs & h1[a, snp2look.idx]==1) carrier.founder.obs[idx.h1] <- carrier.founder.obs[idx.h1] + 1
if(idx.h2 %in% haplo.obs & h2[a, snp2look.idx]==1) carrier.founder.obs[idx.h2] <- carrier.founder.obs[idx.h2] + 1
}
    #observed founder carrier based on offspring
    founder.list[[family.idx]] <- c(observed=sum(carrier.offspring), carrier=sum(carrier.offspring>0), haplo.unique=haplo.unique)
    #steps to calculate the true founder allele frequency by the number of observed founder chromosomes
if(haplo.unique.count==2) {
carrier.count.2 <- carrier.count.2 + sum(carrier.founder>=1)
chromosome.count.2 <- chromosome.count.2 + 4
carrier.count.2.mis <- carrier.count.2.mis + sum(carrier.founder.mis>=1)
chromosome.count.2.mis <- chromosome.count.2.mis + 2
carrier.count.2.obs <- carrier.count.2.obs + sum(carrier.founder.obs>=1)
chromosome.count.2.obs <- chromosome.count.2.obs + 2
}
if(haplo.unique.count==3) {
carrier.count.3 <- carrier.count.3 + sum(carrier.founder>=1)
chromosome.count.3 <- chromosome.count.3 + 4
carrier.count.3.mis <- carrier.count.3.mis + sum(carrier.founder.mis>=1)
chromosome.count.3.mis <- chromosome.count.3.mis + 1
carrier.count.3.obs <- carrier.count.3.obs + sum(carrier.founder.obs>=1)
chromosome.count.3.obs <- chromosome.count.3.obs + 3 }
if(haplo.unique.count==4) {
carrier.count.4 <- carrier.count.4 + sum(carrier.founder>=1)
chromosome.count.4 <- chromosome.count.4 + 4
}
}
#the allele frequency
(founder.freq.offspring.2 <- carrier.count.2/chromosome.count.2)
(founder.freq.offspring.2.mis <- carrier.count.2.mis/chromosome.count.2.mis)
(founder.freq.offspring.2.obs <- carrier.count.2.obs/chromosome.count.2.obs)
(founder.freq.offspring.3 <- carrier.count.3/chromosome.count.3)
(founder.freq.offspring.3.mis <- carrier.count.3.mis/chromosome.count.3.mis)
(founder.freq.offspring.3.obs <- carrier.count.3.obs/chromosome.count.3.obs)
(founder.freq.offspring.4 <- carrier.count.4/chromosome.count.4)
#only report p.value
c(founder.freq.offspring.2, founder.freq.offspring.2.mis, founder.freq.offspring.2.obs,
founder.freq.offspring.3, founder.freq.offspring.3.mis, founder.freq.offspring.3.obs,
founder.freq.offspring.4)
})
## Write out your results to a csv file
result.df <- as.data.frame(t(sim_result))
colnames(result.df) <- c("founder.freq.offspring.2", "founder.freq.offspring.2.mis", "founder.freq.offspring.2.obs",
"founder.freq.offspring.3", "founder.freq.offspring.3.mis", "founder.freq.offspring.3.obs",
"founder.freq.offspring.4")
result.df <- cbind(seed,r,p_dis,risk.variant,risk.haplo.f,n_family,result.df)
write.csv(result.df, paste("res_",r,"_",n_family,"_",seed,".csv",sep=""), row.names=FALSE)
## R.miSniam
## End(Not run)
## Not run:
##commands to run jobs in parallel
##passed in from the command prompt.
parseCommandArgs()
## Will display the default values on the first run,
##initialize
seed=1000
#number of replications
n_rep=34
#number of family
n_family=1000
#prevalence
p_dis=0.3
# print(null) #null=FALSE
#family structure
# print(family_strct) #family_strct='family_strct.2g3c'
print(getwd())
##command line
parallel <- function(...) {
names.args <- names(list(...))
value.args <- as.vector(list(...))
##commands to run
for(i in 1:3) {
rfile="mainSim.R"
cmd <- paste("R --vanilla --args seed ", seed, " ", paste(names.args, value.args, collapse=" "),
" < ", rfile, " > ", "mainSim_", paste(value.args, collapse="_"), ".Rout", seed, " 2>&1", sep="")
print(cmd)
writeLines(cmd, fileConn) #write jobs to text
#add seed number
seed<<-seed+1
}
}
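# Hedged illustration (not output captured from a real run): with the default
# values above, each generated command is roughly of the form
# R --vanilla --args seed 1000 n_rep 34 r 1 n_family 1000 p_dis 0.3 risk.variant 2 < mainSim.R > mainSim_34_1_1000_0.3_2.Rout1000 2>&1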
##clean-up & initialization
system("rm *.csv")
system("rm *.Rout*")
system("rm *.out*")
fileConn<-file("jobs.txt", "w")
##create your jobs here
for(i in seq(1,2, length.out = 5)) {
parallel(n_rep=n_rep, r=i, n_family=n_family, p_dis=p_dis, risk.variant=2) #common
}
for(i in seq(1,2, length.out = 5)) {
parallel(n_rep=n_rep, r=i, n_family=n_family, p_dis=p_dis, risk.variant=7) #rare
}
for(i in seq(1,2, length.out = 5)) {
parallel(n_rep=n_rep, r=i, n_family=n_family, p_dis=p_dis, risk.variant=39) #super rare
}
###################################################
#check power using true, imputed founder carrier, minimum offspring carrier
#three versions -- common and rare and super rare #2 for common, 7 for rare, 39 for super rare
result <- read.csv("2015-06-15 imputation framework for TRAP to fix common variant see allele freqency on mis and obs.csv", header=T)
result <- result %>% melt(id.vars=c("seed", "r", "p_dis","risk.variant","risk.haplo.f","n_family"), variable.name="method", value.name="p.value")
result <- mutate(result, transmit=ifelse(grepl("obs", method), T, F))
result <- mutate(result, n_transmit=ifelse(grepl("2", method), "2","3"))
result.plot <- result %>% group_by(risk.variant, r, method, transmit, n_transmit) %>%
summarise(n=n(), maf=mean(p.value))
#TRAP
pd <- position_dodge(0.0)
filter(result.plot, risk.variant==39 & grepl("mis|obs", method)) %>%
ggplot(aes(x=r, y=maf, group=method, col=n_transmit, lty=transmit)) +
# geom_point(size=3, alpha=1) +
geom_line(size=1.2, alpha=0.7, position=pd) +
geom_point(size=1.2, position=pd) +
# ggtitle("f=0.202, consider effect size of risk haplotypes, TRAP") +
# ggtitle("f=0.0178, consider effect size of risk haplotypes, TRAP") +
ggtitle("f=0.0039, consider effect size of risk haplotypes, TRAP") +
labs(x="relative risk r") +
scale_y_continuous(limits=c(0,.1)) +
theme_gray(base_size = 20)
|
############################################
#title: "Match Market Selection - YTP JP Q319"
#author: "Kenneth Koh & Hui Xiang Chua"
#date: "June 17, 2019"
#--------------------------------------------
#Objective: To maximize comparability in Match Market selection, a standardized framework in which we evaluate causal effects using a geographically segregated control holdout
#Workflow Documentation:
#Market Selection:
#1. From Time-series Data and a single KPI, we select candidates for control holdout based on the following priorities
#- Top 30 percentile lowest dtw distance scores across all geos considered
#- Correlation of >90%
#- Correlation with Population >50%
#2. Within these constraints, we select 2 or more exposed markets with the highest number of shared control holdouts possible (recommended 5-20 control regions, but minimally 3 control markets in countries where control regions may be limited)
#Control Market Validation:
#1. To safeguard against false positive from control selection error - Segment the pre-period data into a 2 : 1 ratio and run Causal Impact at 90% significance - there should be no significant lift
#Inputs requirements:
#1. Daily time-series data for KPI variable/conversions (segmented by geo)
#- e.g. Day, Region, Signups
#2. Minimum 90 day period for pre-period data
#3. Post-period analysed should minimally follow a 1:2 ratio with pre-period data input
#Variables that require user inputs have been commented with ###user input. Ctrl+F to see.
###########################################
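# --- Hedged sketch of the control-screening step described above ---
# Illustrative only and not part of the original workflow code: 'kpi_wide'
# (a data frame with one daily-KPI column per geo) and the exposed-market name
# are assumed inputs.
# screen_controls <- function(kpi_wide, exposed = "Tokyo") {
#   candidates <- setdiff(colnames(kpi_wide), exposed)
#   scores <- as.data.frame(t(sapply(candidates, function(g) {
#     c(dtw = dtw::dtw(kpi_wide[[exposed]], kpi_wide[[g]])$distance,  # DTW distance
#       cor = cor(kpi_wide[[exposed]], kpi_wide[[g]]))                # correlation
#   })))
#   # keep the lowest 30th percentile of DTW distances with correlation > 0.9
#   subset(scores, dtw <= quantile(dtw, 0.3) & cor > 0.9)
# }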
###user input
setwd("C:/Users/charles.tan/Documents/GitHub/Essence_stuff/Q120 YTP") #set working directory
###
#import libaries
libs <- c("zoo",
"dtw",
"MarketMatching",
"Rcpp",
"CausalImpact",
"lubridate",
"tidyverse",
"ggplot2",
"reshape2",
"plyr",
"pivottabler",
"dplyr",
"GeoexperimentsResearch")
for (lib in libs) {
if (!require(lib, character.only = TRUE)) {
install.packages(lib)
require(lib, character.only = TRUE)
}
}
#if installing GeoexperimentsResearch fails, run the code below
#install.packages("githubinstall")
#library(githubinstall)
#githubinstall("GeoexperimentsResearch")
#library(GeoexperimentsResearch)
############################################
############################################
#We are going to compare Time-based regression and CausalImpact for back-testing below.
#User needs to input the pre-test and test dates in test_period and
#the regions assigned to exposed and control in geoassign below.
#Both are done at 95% confidence level.
#In this section, we will evaluate if the methods (correctly) predict a lift or not
#when there should/ shouldn't be.
############################################
###user input
test_period = c("2018-12-01","2019-04-01","2019-05-31")
geoassign<-data.frame("geo"=c('Tokyo', 'Kanagawa Prefecture', 'Osaka Prefecture',
'Miyagi Prefecture', 'Ibaraki Prefecture', 'Hokkaido Prefecture', 'Shizuoka Prefecture',
'Chiba Prefecture', 'Hiroshima Prefecture', 'Kyoto Prefecture', 'Fukuoka Prefecture',
'Saitama Prefecture'),
"geo.group"=c(1,1,1,2,3,4,5,6,7,8,9,10)) #set exposed and control markets
# "geo.group"=c(1,1,1,2,2,2,2,2,2,2,2,2)) #set exposed and control markets
###
#Back-testing with Time-based Regression
data<-read.csv('signups.csv', header=T)
colnames(data)<-c("date","geo","Signups")
data$date <- as.Date(data$date, format="%Y-%m-%d")
head(data)
data2<-data[data$date>=test_period[1] & data$date<=test_period[3],]
head(data2)
obj.gts<-GeoTimeseries(data2,metrics=c("Signups"))
head(obj.gts)
aggregate(obj.gts,by='.weekindex')
plot(obj.gts)
obj.per<-ExperimentPeriods(test_period)
obj.per
geoassign
obj.ga<-GeoAssignment(geoassign)
obj.ga
obj.gts2 <- obj.gts[obj.gts$geo %in% geoassign$geo ,]
head(obj.gts2)
obj<-GeoExperimentData(obj.gts2, periods=obj.per, geo.assignment=obj.ga)
head(obj)
aggregate(obj,by=c('period','geo.group'))
obj.tbr<-DoTBRAnalysis(obj,response="Signups",model='tbr1',
pretest.period=0,
intervention.period=1,
cooldown.period=NULL,
control.group=2,
treatment.group=1)
summary(obj.tbr)
head(obj.tbr)
plot(obj.tbr)
#obj.tbr[obj.tbr$period==1,]
#Back-testing with CausalImpact
obj_ci<-aggregate(obj,by=c('date','geo.group'))
obj_ci
pre.period <- as.Date(c(test_period[1],toString(as.Date(test_period[2])-1)))
pre.period
post.period <- as.Date(c(test_period[2],test_period[3]))
post.period
time.points <- seq.Date(as.Date(test_period[1]), to=as.Date(test_period[3]), by = 1)
max(time.points)
ci_data <- zoo(cbind(obj_ci[obj_ci$geo.group==1,]$Signups,
obj_ci[obj_ci$geo.group==2,]$Signups,
obj_ci[obj_ci$geo.group==3,]$Signups,
obj_ci[obj_ci$geo.group==4,]$Signups,
obj_ci[obj_ci$geo.group==5,]$Signups,
obj_ci[obj_ci$geo.group==6,]$Signups,
obj_ci[obj_ci$geo.group==7,]$Signups,
obj_ci[obj_ci$geo.group==8,]$Signups,
obj_ci[obj_ci$geo.group==9,]$Signups,
obj_ci[obj_ci$geo.group==10,]$Signups), time.points)
# ci_data <- zoo(cbind(obj_ci[obj_ci$geo.group==1,]$Signups,
#                      obj_ci[obj_ci$geo.group==2,]$Signups), time.points)
ci_data
impact <- CausalImpact(ci_data, pre.period, post.period, alpha=0.05, model.args=list(niter=5000))
plot(impact)
summary(impact, "report")
plot(impact$model$bsts.model, "coefficients")
ggplot(data=obj_ci,aes(x=date,y=Signups,group=as.factor(geo.group), colour=as.factor(geo.group)))+
geom_line()+
geom_point()
|
/Q120 YTP/Match Market Procedure Backtest v0 - Standardized.R
|
no_license
|
charles-tan-essence/essence_stuff
|
R
| false | false | 5,881 |
r
|
############################################
#title: "Match Market Selection - YTP JP Q319"
#author: "Kenneth Koh & Hui Xiang Chua"
#date: "June 17, 2019"
#--------------------------------------------
#Objective: To maximize comparability in Match Market selection, a standardized framework in which we evaluate causal effects using a geographically segregated control holdout
#Workflow Documentation:
#Market Selection:
#1. From Time-series Data and a single KPI, we select candidates for control holdout based on the following priorities
#- Top 30 percentile lowest dtw distance scores across all geos considered
#- Correlation of >90%
#- Correlation with Population >50%
#2. Within these constraints, we select 2 or more exposed markets with the highest number of shared control holdouts possible (recommended 5-20 control regions, but minimally 3 control markets in countries where control regions may be limited)
#Control Market Validation:
#1. To safeguard against false positive from control selection error - Segment the pre-period data into a 2 : 1 ratio and run Causal Impact at 90% significance - there should be no significant lift
#Inputs requirements:
#1. Daily time-series data for KPI variable/conversions (segmented by geo)
#- e.g. Day, Region, Signups
#2. Minimum 90 day period for pre-period data
#3. Post-period analysed should minimally follow a 1:2 ratio with pre-period data input
#Variables that require user inputs have been commented with ###user input. Ctrl+F to see.
###########################################
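# --- Hedged sketch of the control-screening step described above ---
# Illustrative only and not part of the original workflow code: 'kpi_wide'
# (a data frame with one daily-KPI column per geo) and the exposed-market name
# are assumed inputs.
# screen_controls <- function(kpi_wide, exposed = "Tokyo") {
#   candidates <- setdiff(colnames(kpi_wide), exposed)
#   scores <- as.data.frame(t(sapply(candidates, function(g) {
#     c(dtw = dtw::dtw(kpi_wide[[exposed]], kpi_wide[[g]])$distance,  # DTW distance
#       cor = cor(kpi_wide[[exposed]], kpi_wide[[g]]))                # correlation
#   })))
#   # keep the lowest 30th percentile of DTW distances with correlation > 0.9
#   subset(scores, dtw <= quantile(dtw, 0.3) & cor > 0.9)
# }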
###user input
setwd("C:/Users/charles.tan/Documents/GitHub/Essence_stuff/Q120 YTP") #set working directory
###
#import libaries
libs <- c("zoo",
"dtw",
"MarketMatching",
"Rcpp",
"CausalImpact",
"lubridate",
"tidyverse",
"ggplot2",
"reshape2",
"plyr",
"pivottabler",
"dplyr",
"GeoexperimentsResearch")
for (lib in libs) {
if (!require(lib, character.only = TRUE)) {
install.packages(lib)
require(lib, character.only = TRUE)
}
}
#if installing GeoexperimentsResearch fails, run the code below
#install.packages("githubinstall")
#library(githubinstall)
#githubinstall("GeoexperimentsResearch")
#library(GeoexperimentsResearch)
############################################
############################################
#We are going to compare Time-based regression and CausalImpact for back-testing below.
#User needs to input the pre-test and test dates in test_period and
#the regions assigned to exposed and control in geoassign below.
#Both are done at 95% confidence level.
#In this section, we will evaluate if the methods (correctly) predict a lift or not
#when there should/ shouldn't be.
############################################
###user input
test_period = c("2018-12-01","2019-04-01","2019-05-31")
geoassign<-data.frame("geo"=c('Tokyo', 'Kanagawa Prefecture', 'Osaka Prefecture',
'Miyagi Prefecture', 'Ibaraki Prefecture', 'Hokkaido Prefecture', 'Shizuoka Prefecture',
'Chiba Prefecture', 'Hiroshima Prefecture', 'Kyoto Prefecture', 'Fukuoka Prefecture',
'Saitama Prefecture'),
"geo.group"=c(1,1,1,2,3,4,5,6,7,8,9,10)) #set exposed and control markets
# "geo.group"=c(1,1,1,2,2,2,2,2,2,2,2,2)) #set exposed and control markets
###
#Back-testing with Time-based Regression
data<-read.csv('signups.csv', header=T)
colnames(data)<-c("date","geo","Signups")
data$date <- as.Date(data$date, format="%Y-%m-%d")
head(data)
data2<-data[data$date>=test_period[1] & data$date<=test_period[3],]
head(data2)
obj.gts<-GeoTimeseries(data2,metrics=c("Signups"))
head(obj.gts)
aggregate(obj.gts,by='.weekindex')
plot(obj.gts)
obj.per<-ExperimentPeriods(test_period)
obj.per
geoassign
obj.ga<-GeoAssignment(geoassign)
obj.ga
obj.gts2 <- obj.gts[obj.gts$geo %in% geoassign$geo ,]
head(obj.gts2)
obj<-GeoExperimentData(obj.gts2, periods=obj.per, geo.assignment=obj.ga)
head(obj)
aggregate(obj,by=c('period','geo.group'))
obj.tbr<-DoTBRAnalysis(obj,response="Signups",model='tbr1',
pretest.period=0,
intervention.period=1,
cooldown.period=NULL,
control.group=2,
treatment.group=1)
summary(obj.tbr)
head(obj.tbr)
plot(obj.tbr)
#obj.tbr[obj.tbr$period==1,]
#Back-testing with CausalImpact
obj_ci<-aggregate(obj,by=c('date','geo.group'))
obj_ci
pre.period <- as.Date(c(test_period[1],toString(as.Date(test_period[2])-1)))
pre.period
post.period <- as.Date(c(test_period[2],test_period[3]))
post.period
time.points <- seq.Date(as.Date(test_period[1]), to=as.Date(test_period[3]), by = 1)
max(time.points)
ci_data <- zoo(cbind(obj_ci[obj_ci$geo.group==1,]$Signups,
obj_ci[obj_ci$geo.group==2,]$Signups,
obj_ci[obj_ci$geo.group==3,]$Signups,
obj_ci[obj_ci$geo.group==4,]$Signups,
obj_ci[obj_ci$geo.group==5,]$Signups,
obj_ci[obj_ci$geo.group==6,]$Signups,
obj_ci[obj_ci$geo.group==7,]$Signups,
obj_ci[obj_ci$geo.group==8,]$Signups,
obj_ci[obj_ci$geo.group==9,]$Signups,
obj_ci[obj_ci$geo.group==10,]$Signups), time.points)
# ci_data <- zoo(cbind(obj_ci[obj_ci$geo.group==1,]$Signups,
#                      obj_ci[obj_ci$geo.group==2,]$Signups), time.points)
ci_data
impact <- CausalImpact(ci_data, pre.period, post.period, alpha=0.05, model.args=list(niter=5000))
plot(impact)
summary(impact, "report")
plot(impact$model$bsts.model, "coefficients")
ggplot(data=obj_ci,aes(x=date,y=Signups,group=as.factor(geo.group), colour=as.factor(geo.group)))+
geom_line()+
geom_point()
|
## read data
header<-readLines("household_power_consumption.txt", n=1)
header<-strsplit(header,";")
data<-read.table("household_power_consumption.txt", header=F, na.strings="?", skip=66636, nrows=2880, sep=";", col.names=header[[1]])
## add columm of class Date/Time
data$DateTime<-strptime(paste(data$Date, " ", data$Time), format="%d/%m/%Y %H:%M:%S")
## plot
png("./plot4.png", width=480, height=480)
par(mfrow=c(2,2))
## plot 1
plot(data$DateTime, data$Global_active_power, type="l", ann=F)
title(xlab=NULL, ylab="Global Active Power")
## plot 2
plot(data$DateTime, data$Voltage, type="l", ann=FALSE)
title(xlab="datetime", ylab="Voltage")
## plot 3
plot(data$DateTime, data$Sub_metering_1, type="n", ann=FALSE)
title(ylab="Energy sub metering")
lines(data$DateTime, data$Sub_metering_1, col="black")
lines(data$DateTime, data$Sub_metering_2, col="blue")
lines(data$DateTime, data$Sub_metering_3, col="red")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1, 1, 1), lwd=c(2.5, 2.5, 2.5), col=c("black", "blue", "red"))
## plot 4
plot(data$DateTime, data$Global_reactive_power, type="l", ann=FALSE)
title(xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
liurong79/ExData_Plotting1
|
R
| false | false | 1,202 |
r
|
## read data
header<-readLines("household_power_consumption.txt", n=1)
header<-strsplit(header,";")
data<-read.table("household_power_consumption.txt", header=F, na.strings="?", skip=66636, nrows=2880, sep=";", col.names=header[[1]])
## add columm of class Date/Time
data$DateTime<-strptime(paste(data$Date, " ", data$Time), format="%d/%m/%Y %H:%M:%S")
## plot
png("./plot4.png", width=480, height=480)
par(mfrow=c(2,2))
## plot 1
plot(data$DateTime, data$Global_active_power, type="l", ann=F)
title(xlab=NULL, ylab="Global Active Power")
## plot 2
plot(data$DateTime, data$Voltage, type="l", ann=FALSE)
title(xlab="datetime", ylab="Voltage")
## plot 3
plot(data$DateTime, data$Sub_metering_1, type="n", ann=FALSE)
title(ylab="Energy sub metering")
lines(data$DateTime, data$Sub_metering_1, col="black")
lines(data$DateTime, data$Sub_metering_2, col="blue")
lines(data$DateTime, data$Sub_metering_3, col="red")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1, 1, 1), lwd=c(2.5, 2.5, 2.5), col=c("black", "blue", "red"))
## plot 4
plot(data$DateTime, data$Global_reactive_power, type="l", ann=FALSE)
title(xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
#These functions help in a scenario where the same large result needs to be
#used repeatedly, e.g. inside a loop. Rather than recomputing it every time,
#it is better to cache the value once and reuse the previously computed
#result.
#makeCacheMatrix:
#Has four main functions that aid in:
#setting an input value within the parent environment, using the set function
#making the input available to the parent frame when the cacheSolve function is
#called, using the get function
#setting the computed value on the specified object, using the set_inverse function
#assigning the inverse (via "<<-") to the cached value so that it is available
#the next time it is requested, using the get_inverse function
#note:
#Given: For this assignment, assume that the matrix supplied is always invertible.
makeCacheMatrix <- function(x = matrix()) { #Input: Square Matrix
inverse <- NULL
set <- function(y) { #setting input matrix:
x <<- y
inverse <<- NULL
}
get <- function() x
set_inverse <- function(a) inverse <<- a
get_inverse <- function() inverse
list(set = set, get = get,
set_inverse = set_inverse,
get_inverse = get_inverse)
}
#Cache data if it is computed previously:
cacheSolve <- function(x, ...) {
inverse <- x$get_inverse()
if(!is.null(inverse)) { # If solve function already executed, then cache data.
message("Getting cached data")
return(inverse)
}
  data <- x$get() #Retrieve the data from the parent environment (makeCacheMatrix)
  inverse <- solve(data, ...)
  x$set_inverse(inverse) #call set_inverse() to cache the output of solve()
inverse
}
#Testing the above functions:
#Inverse of matrix:
c=rbind(c(4, 7), c(2, 6))
v <- makeCacheMatrix(c)
cacheSolve(v)
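#A second call should hit the cache (prints "Getting cached data") instead of
#recomputing the inverse:
cacheSolve(v)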
|
/cachematrix.R
|
no_license
|
Vamshi-dhar/ProgrammingAssignment2
|
R
| false | false | 1,861 |
r
|
#These functions help in a scenario where the same large result needs to be
#used repeatedly, e.g. inside a loop. Rather than recomputing it every time,
#it is better to cache the value once and reuse the previously computed
#result.
#makeCacheMatrix:
#Has four main function that aids in:
#seting a input value within the parent environment, using set function
#makeing input available to parent frame when cacheSolve function called, using
#get function
#seting computed value to specified object, using set_inverse function.
#assigning inverse symbol to computed output using "<<-" so that it can be availabel
#to next function when called, using get_inverse function
#note:
#Given: For this assignment, assume that the matrix supplied is always invertible.
makeCacheMatrix <- function(x = matrix()) { #Input: Square Matrix
inverse <- NULL
set <- function(y) { #setting input matrix:
x <<- y
inverse <<- NULL
}
get <- function() x
set_inverse <- function(a) inverse <<- a
get_inverse <- function() inverse
list(set = set, get = get,
set_inverse = set_inverse,
get_inverse = get_inverse)
}
#Cache data if it is computed previously:
cacheSolve <- function(x, ...) {
inverse <- x$get_inverse()
if(!is.null(inverse)) { # If solve function already executed, then cache data.
message("Getting cached data")
return(inverse)
}
  data <- x$get() #Retrieve data from the parent environment, the makeCacheMatrix function
inverse <- solve(data, ...)
x$set_inverse(inverse) #calling set_inverse(), to assign inverse symbol to output of solve()
inverse
}
#Testing the above functions:
#Inverse of matrix:
c=rbind(c(4, 7), c(2, 6))
v <- makeCacheMatrix(c)
cacheSolve(v)
|
# Exercise 4: external data sets: Gates Foundation Educational Grants
# Use the `read.csv()` function to read the data from the `data/gates_money.csv`
# file into a variable called `grants`
# Be sure to set your working directory in RStudio, and do NOT treat strings as
# factors!
grants <- read.csv('data/gates_money.csv', stringsAsFactors=FALSE)
# Use the View function to look at the loaded data
View(grants)
# Create a variable `organization` that contains the `organization` column of
# the dataset
organization <- grants$organization
# Confirm that the "organization" column is a vector using the `is.vector()`
# function.
# This is a useful debugging tip if you hit errors later!
is.vector(organization)
## Now you can ask some interesting questions about the dataset
# What was the mean grant value?
mean_spending <- mean(grants$total_amount)
# What was the dollar amount of the largest grant?
highest_amount <- max(grants$total_amount)
# What was the dollar amount of the smallest grant?
lowest_amount <- min(grants$total_amount)
# Which organization received the largest grant?
largest_recipient <- organization[grants$total_amount == highest_amount]
# Which organization received the smallest grant?
smallest_recipient <- organization[grants$total_amount == lowest_amount]
# How many grants were awarded in 2010?
length(grants$total_amount[grants$start_year == 2010])
|
/exercise-4/exercise.R
|
permissive
|
jenli36/ch9-data-frames
|
R
| false | false | 1,417 |
r
|
# Exercise 4: external data sets: Gates Foundation Educational Grants
# Use the `read.csv()` function to read the data from the `data/gates_money.csv`
# file into a variable called `grants`
# Be sure to set your working directory in RStudio, and do NOT treat strings as
# factors!
grants <- read.csv('data/gates_money.csv', stringsAsFactors=FALSE)
# Use the View function to look at the loaded data
View(grants)
# Create a variable `organization` that contains the `organization` column of
# the dataset
organization <- grants$organization
# Confirm that the "organization" column is a vector using the `is.vector()`
# function.
# This is a useful debugging tip if you hit errors later!
is.vector(organization)
## Now you can ask some interesting questions about the dataset
# What was the mean grant value?
mean_spending <- mean(grants$total_amount)
# What was the dollar amount of the largest grant?
highest_amount <- max(grants$total_amount)
# What was the dollar amount of the smallest grant?
lowest_amount <- min(grants$total_amount)
# Which organization received the largest grant?
largest_recipient <- organization[grants$total_amount == highest_amount]
# Which organization received the smallest grant?
smallest_recipient <- organization[grants$total_amount == lowest_amount]
# How many grants were awarded in 2010?
length(grants$total_amount[grants$start_year == 2010])
|
library(powerAnalysis)
### Name: ES.proportions
### Title: Compute effect size for a difference in proportions
### Aliases: ES.proportions
### ** Examples
ES.proportions(0.65,0.45)
ES.proportions(0.25,0.05)
|
/data/genthat_extracted_code/powerAnalysis/examples/ES.proportions.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 215 |
r
|
library(powerAnalysis)
### Name: ES.proportions
### Title: Compute effect size for a difference in proportions
### Aliases: ES.proportions
### ** Examples
ES.proportions(0.65,0.45)
ES.proportions(0.25,0.05)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcbdPlan.R
\name{rcbdPlan}
\alias{rcbdPlan}
\title{Randomized Complete Block Design (RBD)}
\usage{
rcbdPlan(treat, blocks, seed)
}
\arguments{
\item{treat}{\code{\link[base]{numeric}} or complex vector
containing treatments levels.}
\item{blocks}{\code{\link[base]{numeric}} or complex vector
containing blocks levels.}
\item{seed}{A single \code{\link[base]{numeric}} value, interpreted
as an integer, that specifies the starting value of the random
number generator.}
}
\description{
Levels of treatment are randomly assigned to the
experimental units within blocks. We assume that blocks have a
systematic effect on the statistical comparisons among
treatments. Through randomization, every experimental unit within
a block has the same probability of receiving any treatment. The word
'complete' indicates that each block (group) contains all
treatments.
}
\examples{
## 3 treatments and 4 blocks
rcbdPlan(treat = 3, blocks = 4)
## Running the shiny app
treat <- LETTERS[seq( from = 1, to = 10 )]
design <- rcbdPlan(treat = treat, block = 6)
\dontrun{
buildShiny(design)
}
## Priori for treatment means
blocks <- paste("Block ", seq(1, 6, 1))
treat <- LETTERS[seq( from = 1, to = 6 )]
design2 <- rcbdPlan(treat = treat, blocks = blocks)
\dontrun{
buildShiny(design2)
}
}
\author{
Thiago de Paula Oliveira,
\email{thiago.paula.oliveira@usp.br}
}
|
/man/rcbdPlan.Rd
|
no_license
|
Prof-ThiagoOliveira/planExp
|
R
| false | true | 1,444 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcbdPlan.R
\name{rcbdPlan}
\alias{rcbdPlan}
\title{Randomized Complete Block Design (RBD)}
\usage{
rcbdPlan(treat, blocks, seed)
}
\arguments{
\item{treat}{\code{\link[base]{numeric}} or complex vector
containing treatments levels.}
\item{blocks}{\code{\link[base]{numeric}} or complex vector
containing blocks levels.}
\item{seed}{A single \code{\link[base]{numeric}} value, interpreted
as an integer, that specifies the starting value of the random
number generator.}
}
\description{
Levels of treatment are randomly assigned to the
experimental units within blocks. We assume that blocks have a
systematic effect on the statistical comparisons among
treatments. Through randomization, every experimental unit within
a block has the same probability of receiving any treatment. The word
'complete' indicates that each block (group) contains all
treatments.
}
\examples{
## 3 treatments and 4 blocks
rcbdPlan(treat = 3, blocks = 4)
## Running the shiny app
treat <- LETTERS[seq( from = 1, to = 10 )]
design <- rcbdPlan(treat = treat, block = 6)
\dontrun{
buildShiny(design)
}
## Priori for treatment means
blocks <- paste("Block ", seq(1, 6, 1))
treat <- LETTERS[seq( from = 1, to = 6 )]
design2 <- rcbdPlan(treat = treat, blocks = blocks)
\dontrun{
buildShiny(design2)
}
}
\author{
Thiago de Paula Oliveira,
\email{thiago.paula.oliveira@usp.br}
}
|
age <- c(25, 35, 50)
salary <- c(200000, 1200000, 2000000)
df <- data.frame( "Age" = age, "Salary" = salary, stringsAsFactors = FALSE)
df
plot(df[c("Age", "Salary")])
#Min-max normalization
#Drawback: it is sensitive to outliers and
#tends to squeeze the data towards the mean.
#If outliers should keep their weight, z-score standardization is better
normalize <- function(x) {
return ((x - min(x)) / (max(x) - min(x)))
}
new_normalize <- function(x, new_max = 1,new_min = 0) { # see how we define the max min values
a = ( ((x-min(x)) * (new_max-new_min)) / (max(x)-min(x)) ) + new_min
return(a)
}
myzScore = function(x) {
return((x - mean(x)) / sd(x))
}
?lapply
#Apply the function to each and every feature column in the data frame
dfNorm <- as.data.frame(lapply(df, normalize))
dfNorm
#Can also specify only selected columns
dfNorm <- as.data.frame(lapply(df[1:2], normalize))
dfNorm
#can specify even a single column
dfNormSalary <- as.data.frame(lapply(df[2], normalize))
dfNormSalary
#can apply the function to a specific column by its column name
dfNormSalary <- as.data.frame(lapply(df["Salary"], normalize))
dfNormSalary
dfNorm1 <- as.data.frame(lapply(df[1:2], new_normalize))
dfNorm1
# Z-score standardization
dfNormZ <- as.data.frame( scale(df[1:2] ))
dfNormZ
plot(dfNormZ[c("Age", "Salary")])
plot(vehicles[c("Sc.Var.Maxis", "Sc.Var.maxis")]) # requires the 'vehicles' data set, which is not defined in this script
dfNorm4 <- as.data.frame(lapply(df, myzScore))
dfNorm4
|
/R/normalization.R
|
no_license
|
semika/IIT-DMML
|
R
| false | false | 1,415 |
r
|
age <- c(25, 35, 50)
salary <- c(200000, 1200000, 2000000)
df <- data.frame( "Age" = age, "Salary" = salary, stringsAsFactors = FALSE)
df
plot(df[c("Age", "Salary")])
#Min-max normalization
#Drawback: it is sensitive to outliers and
#tends to squeeze the data towards the mean.
#If outliers should keep their weight, z-score standardization is better
normalize <- function(x) {
return ((x - min(x)) / (max(x) - min(x)))
}
new_normalize <- function(x, new_max = 1,new_min = 0) { # see how we define the max min values
a = ( ((x-min(x)) * (new_max-new_min)) / (max(x)-min(x)) ) + new_min
return(a)
}
myzScore = function(x) {
return((x - mean(x)) / sd(x))
}
?lapply
#Apply the function to each and every feature column in the data frame
dfNorm <- as.data.frame(lapply(df, normalize))
dfNorm
#Can also specify only selected columns
dfNorm <- as.data.frame(lapply(df[1:2], normalize))
dfNorm
#can specify even a single column
dfNormSalary <- as.data.frame(lapply(df[2], normalize))
dfNormSalary
#can apply the function to a specific column by its column name
dfNormSalary <- as.data.frame(lapply(df["Salary"], normalize))
dfNormSalary
dfNorm1 <- as.data.frame(lapply(df[1:2], new_normalize))
dfNorm1
# Z-score standardization
dfNormZ <- as.data.frame( scale(df[1:2] ))
dfNormZ
plot(dfNormZ[c("Age", "Salary")])
plot(vehicles[c("Sc.Var.Maxis", "Sc.Var.maxis")]) # requires the 'vehicles' data set, which is not defined in this script
dfNorm4 <- as.data.frame(lapply(df, myzScore))
dfNorm4
|
" Note: other implementation of num. derivative is possible (e.g. backward, symmetric or some predefined function
in R could be used)"
# f - user defined function
# a - start of an interval
# b - end of an interval
# eps - required precision for the root
# n - maximal number of iterations
#Creating a table format for roots
A <- data.frame(n=as.numeric(),
x=as.numeric(),
alpha_Minus_x=as.numeric(),
stringsAsFactors=FALSE)
#Secant Method to find root in a given interval follows
SecantMethod <- function(f, a, b, eps, n) {
x0 <- a # setting start value to the interval lower bound
fa <- f(a) # check if a or b are the root of f(x)
if (fa == 0.0) {
return(a)
}
x1 <- b
fb <- f(b)
if (fb == 0.0) {
return(b)
}
for (k in 1:n) {
print("Printing the table with the iterations")
    A <- data.frame(rbind(A, c(k, x0, alpha - x0))) # 'alpha' (the known true root) must be defined in the calling environment
print(A)
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
#The below command can be used to write the data of the table in the required location
#Semicolon has been used as a delimiter
# write.table(df, file = "F:\\HPC\\NM\\secant.txt",row.names = FALSE, dec = ".", col.names = c("n", "x", "alpha_Minus_x","log_alpha_Minus_x"), sep = ";")
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
f1 <- f(x1)
f0 <- f(x0)
x2 = x1 - (f1)*((x1-x0)/(f1-f0))
f2 <- f(x2)
x0 <- x1
x1 <- x2
if (abs(x1 - x0) < eps) { # check if required precision reached
print('The found root on the interval [a,b] is:')
return(x1)
}
}
print('Maximal number of iterations reached and solution not yet found.')
}
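# Illustrative usage (added sketch, not part of the original script):
# find the root of f(x) = x^2 - 2 on [1, 2]. Note that SecantMethod()
# reads a global 'alpha' (the known true root) for its error table,
# so it has to be defined before the call.
f_test <- function(x) x^2 - 2
alpha <- sqrt(2)
SecantMethod(f_test, a = 1, b = 2, eps = 1e-8, n = 25)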
|
/NumericalMethods_R_RootFindingMethods/SecantMethod.R
|
no_license
|
bhatnags/HighPerformanceComputing
|
R
| false | false | 1,757 |
r
|
" Note: other implementation of num. derivative is possible (e.g. backward, symmetric or some predefined function
in R could be used)"
# f - user defined function
# a - start of an interval
# b - end of an interval
# eps - required precision for the root
# n - maximal number of iterations
#Creating a table format for roots
A <- data.frame(n=as.numeric(),
x=as.numeric(),
alpha_Minus_x=as.numeric(),
stringsAsFactors=FALSE)
#Secant Method to find root in a given interval follows
SecantMethod <- function(f, a, b, eps, n) {
x0 <- a # setting start value to the interval lower bound
fa <- f(a) # check if a or b are the root of f(x)
if (fa == 0.0) {
return(a)
}
x1 <- b
fb <- f(b)
if (fb == 0.0) {
return(b)
}
for (k in 1:n) {
print("Printing the table with the iterations")
    A <- data.frame(rbind(A, c(k, x0, alpha - x0))) # 'alpha' (the known true root) must be defined in the calling environment
print(A)
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
#The below command can be used to write the data of the table in the required location
#Semicolon has been used as a delimiter
# write.table(df, file = "F:\\HPC\\NM\\secant.txt",row.names = FALSE, dec = ".", col.names = c("n", "x", "alpha_Minus_x","log_alpha_Minus_x"), sep = ";")
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
f1 <- f(x1)
f0 <- f(x0)
x2 = x1 - (f1)*((x1-x0)/(f1-f0))
f2 <- f(x2)
x0 <- x1
x1 <- x2
if (abs(x1 - x0) < eps) { # check if required precision reached
print('The found root on the interval [a,b] is:')
return(x1)
}
}
print('Maximal number of iterations reached and solution not yet found.')
}
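# Illustrative usage (added sketch, not part of the original script):
# find the root of f(x) = x^2 - 2 on [1, 2]. Note that SecantMethod()
# reads a global 'alpha' (the known true root) for its error table,
# so it has to be defined before the call.
f_test <- function(x) x^2 - 2
alpha <- sqrt(2)
SecantMethod(f_test, a = 1, b = 2, eps = 1e-8, n = 25)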
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary-methods.R
\name{summary-methods}
\alias{summary-methods}
\alias{summary.complmrob}
\alias{summary.bccomplmrob}
\alias{summary.bclmrob}
\title{Get summary information}
\usage{
\method{summary}{complmrob}(object, conf.level = 0.95, ...)
\method{summary}{bccomplmrob}(object, conf.level = 0.95,
conf.type = "perc", ...)
\method{summary}{bclmrob}(object, conf.level = 0.95,
conf.type = "perc", ...)
}
\arguments{
\item{object}{the object for which the summary information should be returned.}
\item{conf.level}{the level of the returned confidence intervals.}
\item{...}{ignored.}
\item{conf.type}{the type of the returned confidence interval (see \code{\link[boot]{boot.ci}} for the
meaning of this parameter).}
}
\description{
List the estimates, standard errors, p-values and confidence intervals for the coefficients of
robust linear regression models with compositional data as returned by \code{\link{complmrob}} or
\code{\link{bootcoefs}}
}
|
/man/summary-methods.Rd
|
no_license
|
dakep/complmrob
|
R
| false | true | 1,039 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary-methods.R
\name{summary-methods}
\alias{summary-methods}
\alias{summary.complmrob}
\alias{summary.bccomplmrob}
\alias{summary.bclmrob}
\title{Get summary information}
\usage{
\method{summary}{complmrob}(object, conf.level = 0.95, ...)
\method{summary}{bccomplmrob}(object, conf.level = 0.95,
conf.type = "perc", ...)
\method{summary}{bclmrob}(object, conf.level = 0.95,
conf.type = "perc", ...)
}
\arguments{
\item{object}{the object for which the summary information should be returned.}
\item{conf.level}{the level of the returned confidence intervals.}
\item{...}{ignored.}
\item{conf.type}{the type of the returned confidence interval (see \code{\link[boot]{boot.ci}} for the
meaning of this parameter).}
}
\description{
List the estimates, standard errors, p-values and confidence intervals for the coefficients of
robust linear regression models with compositional data as returned by \code{\link{complmrob}} or
\code{\link{bootcoefs}}
}
|
# jamenrich-communities-to-nodegroups.R
#' Convert communities object to nodegroups list format
#'
#' Convert communities object to nodegroups list format
#'
#' Note that this function is "lossy", in that the output `list`
#' does not contain all the information necessary to reconstitute
#' the input `communities` object in detail. However, the output
#' `list` can be converted to a `communities` object that will
#' be accepted by most `igraph` related functions that require
#' that object type as an input value.
#'
#'
#' @family jam igraph functions
#'
#' @return `list` of `character` vectors, where each vector contains
#' names of `igraph` nodes. When `algorithm` is defined in the
#' input object, it is included as an attribute of the output `list`,
#' accessible with `attr(out, "algorithm")`.
#'
#' When optional value `"cluster_names"` is present in the `communities`
#' object, they are used to define the output `list` names.
#'
#' @param wc `communities` object as returned by `igraph` functions
#' such as `cluster_optimal()`, `cluster_walktrap()`, or
#' `cluster_leading_eigen()`.
#' @param ... additional arguments are ignored.
#'
#' @export
communities2nodegroups <- function
(wc,
...)
{
#
if (length(wc$names) == 0) {
wc$names <- seq_along(wc$membership);
}
nodegroups <- split(
wc$names,
wc$membership)
if ("cluster_names" %in% names(wc)) {
names(nodegroups) <- wc$cluster_names
}
if ("algorithm" %in% names(wc)) {
attr(nodegroups, "algorithm") <- wc$algorithm;
} else {
attr(nodegroups, "algorithm") <- "nodegroups";
}
return(nodegroups)
}
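# Illustrative usage (added sketch, not part of the original package code);
# assumes the igraph package is available:
#   g <- igraph::make_ring(6)
#   igraph::V(g)$name <- letters[1:6]
#   wc <- igraph::cluster_walktrap(g)
#   communities2nodegroups(wc)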
#' Convert nodegroups list to communities object
#'
#' Convert nodegroups list to communities object
#'
#' Note that this function is "lossy", in that the output `communities`
#' object will not contain any supporting data specific to the
#' community detection algorithm originally used.
#' However, the output `communities` object will be accepted
#' by most `igraph` related functions that require
#' that object type as an input value.
#'
#' The `names(nodegroups)` are used to define a new element in the
#' output `communities` object `"cluster_names"`, so the names
#' will be maintained in the data. Default `igraph` functions
#' do not use these names, but they are used by `multienrichjam`
#' for example by function `make_point_hull()` which uses these
#' names to label each cluster during plotting.
#'
#' @family jam igraph functions
#'
#' @return `community` object, which is essentially a `list` with
#' specific required elements:
#' * `"membership"` - `integer` assignment of nodes to clusters
#' * `"names"` - `character` list of node names
#' * `"vcount"` - `integer` number of nodes
#' * `"algorithm"` - `character` string with the name of the community
#' detection method used.
#' * `"cluster_names"` - `character` labels associated with `membership`
#' index values. These names are not generated by `igraph` community
#' detection, and are therefore optional for use in most `igraph`
#' workflows. However, they are used in some `multienrichjam` functions,
#' specifically `make_point_hull()` which optionally displays a
#' label beside each node cluster during plotting.
#'
#' @param wc `communities` object as returned by `igraph` functions
#' such as `cluster_optimal()`, `cluster_walktrap()`, or
#' `cluster_leading_eigen()`.
#' @param algorithm `character` or `NULL`, indicating the name of the
#' community detection algorithm used.
#' * When `algorithm` is defined, it is used instead of
#' `attr(nodegroups, "algorithm")`.
#' * When `algorithm` is `NULL`, `attr(nodegroups, "algorithm")` is
#' used if defined, otherwise `algorithm="nodegroups"`.
#' @param ... additional arguments are ignored.
#'
#' @export
nodegroups2communities <- function
(nodegroups,
algorithm=NULL,
...)
{
#
if (length(nodegroups) == 0) {
stop("Input nodegroups was empty.")
}
if (length(names(nodegroups)) == 0) {
names(nodegroups) <- seq_along(nodegroups)
}
if (length(algorithm) == 0) {
if ("algorithm" %in% names(attributes(nodegroups))) {
algorithm <- attr(nodegroups, "algorithm");
} else {
algorithm <- "nodegroups";
}
}
nodegroup_factors <- factor(names(nodegroups),
levels=names(nodegroups));
nodegroup_df <- data.frame(check.names=FALSE,
stringsAsFactors=FALSE,
name=unlist(unname(nodegroups)),
nodegroup=rep(nodegroup_factors, lengths(nodegroups)),
membership=as.integer(rep(nodegroup_factors, lengths(nodegroups))))
if (is.numeric(nodegroup_df$name)) {
nodegroup_df <- jamba::mixedSortDF(nodegroup_df,
byCols=c("name"));
}
wc <- list(
membership=nodegroup_df$membership,
names=nodegroup_df$name,
vcount=nrow(nodegroup_df),
algorithm=algorithm,
cluster_names=levels(nodegroup_factors))
class(wc) <- "communities";
return(wc)
}
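# Illustrative usage (added sketch, not part of the original package code):
# build a communities object from a plain named list of node names.
#   nodegroups <- list(clusterA = c("a", "b", "c"), clusterB = c("d", "e"))
#   wc <- nodegroups2communities(nodegroups, algorithm = "manual")
#   wc$membership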
#' Assign labels to igraph communities
#'
#' Assign labels to igraph communities
#'
#' @family jam igraph functions
#'
#' @return `communities` or `list` format matching the input `wc` format.
#' * When `communities` is input, additional value `cluster_names`
#' will contain a `character` vector of names corresponding to each
#' integer index in `wc$membership`.
#' * When `nodegroups` is input, the `list` names will be a `character`
#' vector of cluster labels.
#'
#' @param wc `communities` object, or `list` in form of nodegroups,
#' which is a `list` of `character` vectors that contain `igraph`
#' node names.
#' @param labels `character` vector of optional labels to assign directly
#' to community clusters. When not defined, the auto-detection method
#' is used.
#' @param add_catchwords `character` of optional words to include as
#' catchwords, to be excluded from use in the final label.
#' @param num_keep_terms `integer` maximum number of terms to be included
#' in the final output label, when auto-detection is used.
#' @param keep_terms_sep `character` string used as a delimiter to separate
#' each term when multiple terms are concatenated together to form
#' the cluster label.
#' @param ... additional arguments are ignored.
#'
#' @export
label_communities <- function
(wc,
labels=NULL,
add_catchwords=NULL,
num_keep_terms=3,
keep_terms_sep=",\n",
...)
{
# define catchwords
catchwords <- unique(c(
add_catchwords,
"the", "an", "a", "of", "in", "between", "to", "and",
"peptide", "peptides",
"protein", "proteins",
"gene", "genes",
"system", "systems",
"role", "roles",
"base", "bases", "based", "basing", "basic",
"acid", "acids", "acidic",
"cells", "cell", "cellular",
"space", "spaces", "spaced", "spacing",
"positive", "positives", "positively",
"negative", "negatives", "negatively",
"pathway", "pathways",
"set", "sets",
"position", "positions", "positioned", "positioning",
"function", "functions", "functioning", "functioned",
"signaling", "signal", "signaling", "signals", "signaled",
"activity", "activation", "activate", "activates", "activated", "activating",
"involve", "involved", "involves", "involving",
"response", "responses", "respond", "responds", "responded", "responding",
"transcript", "transcripts", "transcribe", "transcribed", "transcribes",
"organization", "organize", "organizes", "organized", "organizing",
"formation", "form", "forms", "formed", "forming",
"enhanced", "enhance", "enhances", "enhancing",
"mediated", "mediate", "mediates", "mediating",
"expression", "express", "expresses", "expressed", "expressing",
"compound", "compounds", "compounding", "compounded",
"process", "processes", "processed", "processing",
"regulation", "regulate", "regulates", "regulated", "regulating",
# "up-regulation", "up-regulate", "up-regulates", "up-regulated", "up-regulating",
# "down-regulation", "down-regulate", "down-regulates", "down-regulated", "down-regulating",
"the"))
hyphen_pattern <- paste0(
"-(",
paste(catchwords, collapse="|"),
")$")
input_type <- NULL;
if ("communities" %in% class(wc) ||
(is.list(wc) && all(c("membership", "names") %in% names(wc)))) {
# define list
input_type <- "communities";
nodegroups_wc <- communities2nodegroups(wc);
} else if (is.list(wc)) {
input_type <- "nodegroups";
nodegroups_wc <- wc;
} else {
stop("Input wc must be 'communities' or 'nodegroups' list object.");
}
if (length(labels) > 0) {
if (length(labels) != length(nodegroups_wc)) {
stop("length(labels) must equal the number of clusters in wc")
}
}
# assign most common terms as a cluster label
nodegroup_labels <- lapply(seq_along(nodegroups_wc), function(inum){
i <- nodegroups_wc[[inum]];
# split on whitespace, tab, or newline
j <- tolower(unlist(strsplit(i, "[\t\r\n ]+")));
# remove catchword from the second word of hyphenated phrases
j <- gsub(hyphen_pattern, "", j);
# remove non-alphanumeric characters
j <- gsub("[():;,]+", "", j)
# keep words which are not catchwords, and with two or more characters
j_keep <- (!j %in% catchwords & nchar(j) > 1);
j <- j[j_keep];
if (length(j) == 0) {
# if no words remain, name the cluster by number
return(inum)
}
names(head(tcount(j), num_keep_terms))
})
names(nodegroups_wc) <- nodegroup_labels;
if ("nodegroups" %in% input_type) {
return(nodegroups_wc)
}
# assign cluster_names to communities object
wc$cluster_names <- nodegroup_labels;
return(wc);
}
#' Sync igraph nodes and communities
#'
#' Sync igraph nodes and communities
#'
#' This function ensures that `igraph` nodes and corresponding
#' community clusters are synchronized for proper downstream use.
#' In particular, when using a subgraph, or when communities only
#' assign a subset of nodes to clusters, this function ensures the
#' two objects are in sync, the same order, and with the same nodes.
#'
#' @return `list` with two elements:
#' * `"g"` - the `igraph` object after subsetting to match node names
#' shared with `wc`, as necessary.
#' * `"wc'` - the `communities` object after subsetting to match
#' node names shared with `g`, as necessary. When input `wc` is
#' in `list` nodegroups format, that same format is returned.
#'
#' @family jam igraph functions
#'
#' @param g `igraph` object
#' @param wc `communities` object, or `list` in form of nodegroups,
#' which is a `list` of `character` vectors that contain `igraph`
#' node names.
#' @param verbose `logical` indicating whether to print verbose output.
#' @param ... additional arguments are passed to `nodegroups2communities()`
#' only when input `wc` is supplied in `list` nodegroups format.
#'
#' @export
sync_igraph_communities <- function
(g,
wc,
verbose=TRUE,
...)
{
# validate input
input_type <- NULL;
if ("communities" %in% class(wc) ||
(is.list(wc) && all(c("membership", "names") %in% names(wc)))) {
# define list
input_type <- "communities";
} else if (is.list(wc)) {
input_type <- "nodegroups";
nodegroups_wc <- wc;
wc <- nodegroups2communities(nodegroups_wc,
...)
} else {
stop("Input wc must be 'communities' or 'nodegroups' list object.");
}
g_nodes <- igraph::V(g)$name;
wc_nodes <- wc$names;
observed_nodes <- intersect(g_nodes, wc_nodes)
# subset igraph
if (!all(g_nodes %in% observed_nodes)) {
g_keep <- which(g_nodes %in% observed_nodes)
g <- igraph::subgraph(g, v=g_keep)
# if layout is defined, subset in place
if ("layout" %in% igraph::graph_attr_names(g)) {
if (verbose) {
jamba::printDebug("sync_igraph_communities(): ",
"igraph layout was subset to match the remaining nodes.")
}
g_layout <- igraph::graph_attr(g, "layout");
igraph::graph_attr(g, "layout") <- g_layout[g_keep, , drop=FALSE];
}
}
# subset (and order) communities
wc_keep <- match(igraph::V(g)$name, wc_nodes)
wc$membership <- wc$membership[wc_keep]
wc$names <- wc$names[wc_keep]
if (length(wc$modularity) > 1) {
wc$modularity <- wc$modularity[wc_keep]
}
if (length(wc$memberships) > 1) {
wc$memberships <- wc$memberships[wc_keep, , drop=FALSE]
}
if (length(wc$merges) > 0) {
if (verbose) {
jamba::printDebug("sync_igraph_communities(): ",
"community merges were removed.")
}
wc$merges <- NULL;
}
wc$vcount <- igraph::vcount(g);
class(wc) <- "communities";
if ("cluster_names" %in% names(wc)) {
cluster_names <- wc$cluster_names;
names(cluster_names) <- seq_along(cluster_names);
if (!all(names(cluster_names) %in% as.character(wc$membership))) {
if (verbose) {
jamba::printDebug("sync_igraph_communities(): ",
"cluster_names were reduced due to match the remaining nodes.")
}
cn_keep <- (names(cluster_names) %in% as.character(wc$membership));
new_cluster_names <- unname(cluster_names[cn_keep]);
new_membership <- as.numeric(as.factor(wc$membership));
wc$cluster_names <- new_cluster_names;
wc$membership <- new_membership
}
}
if ("nodegroups" %in% input_type) {
wc <- communities2nodegroups(wc);
}
return(list(
g=g,
wc=wc));
}
|
/R/jamenrich-communities-nodegroups.R
|
no_license
|
jmw86069/multienrichjam
|
R
| false | false | 13,732 |
r
|
# jamenrich-communities-to-nodegroups.R
#' Convert communities object to nodegroups list format
#'
#' Convert communities object to nodegroups list format
#'
#' Note that this function is "lossy", in that the output `list`
#' does not contain all the information necessary to reconstitute
#' the input `communities` object in detail. However, the output
#' `list` can be converted to a `communities` object that will
#' be accepted by most `igraph` related functions that require
#' that object type as an input value.
#'
#'
#' @family jam igraph functions
#'
#' @return `list` of `character` vectors, where each vector contains
#' names of `igraph` nodes. When `algorithm` is defined in the
#' input object, it is included as an attribute of the output `list`,
#' accessible with `attr(out, "algorithm")`.
#'
#' When optional value `"cluster_names"` is present in the `communities`
#' object, they are used to define the output `list` names.
#'
#' @param wc `communities` object as returned by `igraph` functions
#' such as `cluster_optimal()`, `cluster_walktrap()`, or
#' `cluster_leading_eigen()`.
#' @param ... additional arguments are ignored.
#'
#' @export
communities2nodegroups <- function
(wc,
...)
{
#
if (length(wc$names) == 0) {
wc$names <- seq_along(wc$membership);
}
nodegroups <- split(
wc$names,
wc$membership)
if ("cluster_names" %in% names(wc)) {
names(nodegroups) <- wc$cluster_names
}
if ("algorithm" %in% names(wc)) {
attr(nodegroups, "algorithm") <- wc$algorithm;
} else {
attr(nodegroups, "algorithm") <- "nodegroups";
}
return(nodegroups)
}
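# Illustrative usage (added sketch, not part of the original package code);
# assumes the igraph package is available:
#   g <- igraph::make_ring(6)
#   igraph::V(g)$name <- letters[1:6]
#   wc <- igraph::cluster_walktrap(g)
#   communities2nodegroups(wc)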
#' Convert nodegroups list to communities object
#'
#' Convert nodegroups list to communities object
#'
#' Note that this function is "lossy", in that the output `communities`
#' object will not contain any supporting data specific to the
#' community detection algorithm originally used.
#' However, the output `communities` object will be accepted
#' by most `igraph` related functions that require
#' that object type as an input value.
#'
#' The `names(nodegroups)` are used to define a new element in the
#' output `communities` object `"cluster_names"`, so the names
#' will be maintained in the data. Default `igraph` functions
#' do not use these names, but they are used by `multienrichjam`
#' for example by function `make_point_hull()` which uses these
#' names to label each cluster during plotting.
#'
#' @family jam igraph functions
#'
#' @return `community` object, which is essentially a `list` with
#' specific required elements:
#' * `"membership"` - `integer` assignment of nodes to clusters
#' * `"names"` - `character` list of node names
#' * `"vcount"` - `integer` number of nodes
#' * `"algorithm"` - `character` string with the name of the community
#' detection method used.
#' * `"cluster_names"` - `character` labels associated with `membership`
#' index values. These names are not generated by `igraph` community
#' detection, and are therefore optional for use in most `igraph`
#' workflows. However, they are used in some `multienrichjam` functions,
#' specifically `make_point_hull()` which optionally displays a
#' label beside each node cluster during plotting.
#'
#' @param wc `communities` object as returned by `igraph` functions
#' such as `cluster_optimal()`, `cluster_walktrap()`, or
#' `cluster_leading_eigen()`.
#' @param algorithm `character` or `NULL`, indicating the name of the
#' community detection algorithm used.
#' * When `algorithm` is defined, it is used instead of
#' `attr(nodegroups, "algorithm")`.
#' * When `algorithm` is `NULL`, `attr(nodegroups, "algorithm")` is
#' used if defined, otherwise `algorithm="nodegroups"`.
#' @param ... additional arguments are ignored.
#'
#' @export
nodegroups2communities <- function
(nodegroups,
algorithm=NULL,
...)
{
#
if (length(nodegroups) == 0) {
stop("Input nodegroups was empty.")
}
if (length(names(nodegroups)) == 0) {
names(nodegroups) <- seq_along(nodegroups)
}
if (length(algorithm) == 0) {
if ("algorithm" %in% names(attributes(nodegroups))) {
algorithm <- attr(nodegroups, "algorithm");
} else {
algorithm <- "nodegroups";
}
}
nodegroup_factors <- factor(names(nodegroups),
levels=names(nodegroups));
nodegroup_df <- data.frame(check.names=FALSE,
stringsAsFactors=FALSE,
name=unlist(unname(nodegroups)),
nodegroup=rep(nodegroup_factors, lengths(nodegroups)),
membership=as.integer(rep(nodegroup_factors, lengths(nodegroups))))
if (is.numeric(nodegroup_df$name)) {
nodegroup_df <- jamba::mixedSortDF(nodegroup_df,
byCols=c("name"));
}
wc <- list(
membership=nodegroup_df$membership,
names=nodegroup_df$name,
vcount=nrow(nodegroup_df),
algorithm=algorithm,
cluster_names=levels(nodegroup_factors))
class(wc) <- "communities";
return(wc)
}
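# Illustrative usage (added sketch, not part of the original package code):
# build a communities object from a plain named list of node names.
#   nodegroups <- list(clusterA = c("a", "b", "c"), clusterB = c("d", "e"))
#   wc <- nodegroups2communities(nodegroups, algorithm = "manual")
#   wc$membership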
#' Assign labels to igraph communities
#'
#' Assign labels to igraph communities
#'
#' @family jam igraph functions
#'
#' @return `communities` or `list` format matching the input `wc` format.
#' * When `communities` is input, additional value `cluster_names`
#' will contain a `character` vector of names corresponding to each
#' integer index in `wc$membership`.
#' * When `nodegroups` is input, the `list` names will be a `character`
#' vector of cluster labels.
#'
#' @param wc `communities` object, or `list` in form of nodegroups,
#' which is a `list` of `character` vectors that contain `igraph`
#' node names.
#' @param labels `character` vector of optional labels to assign directly
#' to community clusters. When not defined, the auto-detection method
#' is used.
#' @param add_catchwords `character` of optional words to include as
#' catchwords, to be excluded from use in the final label.
#' @param num_keep_terms `integer` maximum number of terms to be included
#' in the final output label, when auto-detection is used.
#' @param keep_terms_sep `character` string used as a delimiter to separate
#' each term when multiple terms are concatenated together to form
#' the cluster label.
#' @param ... additional arguments are ignored.
#'
#' @export
label_communities <- function
(wc,
labels=NULL,
add_catchwords=NULL,
num_keep_terms=3,
keep_terms_sep=",\n",
...)
{
# define catchwords
catchwords <- unique(c(
add_catchwords,
"the", "an", "a", "of", "in", "between", "to", "and",
"peptide", "peptides",
"protein", "proteins",
"gene", "genes",
"system", "systems",
"role", "roles",
"base", "bases", "based", "basing", "basic",
"acid", "acids", "acidic",
"cells", "cell", "cellular",
"space", "spaces", "spaced", "spacing",
"positive", "positives", "positively",
"negative", "negatives", "negatively",
"pathway", "pathways",
"set", "sets",
"position", "positions", "positioned", "positioning",
"function", "functions", "functioning", "functioned",
"signaling", "signal", "signaling", "signals", "signaled",
"activity", "activation", "activate", "activates", "activated", "activating",
"involve", "involved", "involves", "involving",
"response", "responses", "respond", "responds", "responded", "responding",
"transcript", "transcripts", "transcribe", "transcribed", "transcribes",
"organization", "organize", "organizes", "organized", "organizing",
"formation", "form", "forms", "formed", "forming",
"enhanced", "enhance", "enhances", "enhancing",
"mediated", "mediate", "mediates", "mediating",
"expression", "express", "expresses", "expressed", "expressing",
"compound", "compounds", "compounding", "compounded",
"process", "processes", "processed", "processing",
"regulation", "regulate", "regulates", "regulated", "regulating",
# "up-regulation", "up-regulate", "up-regulates", "up-regulated", "up-regulating",
# "down-regulation", "down-regulate", "down-regulates", "down-regulated", "down-regulating",
"the"))
hyphen_pattern <- paste0(
"-(",
paste(catchwords, collapse="|"),
")$")
input_type <- NULL;
if ("communities" %in% class(wc) ||
(is.list(wc) && all(c("membership", "names") %in% names(wc)))) {
# define list
input_type <- "communities";
nodegroups_wc <- communities2nodegroups(wc);
} else if (is.list(wc)) {
input_type <- "nodegroups";
nodegroups_wc <- wc;
} else {
stop("Input wc must be 'communities' or 'nodegroups' list object.");
}
if (length(labels) > 0) {
if (length(labels) != length(nodegroups_wc)) {
stop("length(labels) must equal the number of clusters in wc")
}
}
# assign most common terms as a cluster label
nodegroup_labels <- lapply(seq_along(nodegroups_wc), function(inum){
i <- nodegroups_wc[[inum]];
# split on whitespace, tab, or newline
j <- tolower(unlist(strsplit(i, "[\t\r\n ]+")));
# remove catchword from the second word of hyphenated phrases
j <- gsub(hyphen_pattern, "", j);
# remove non-alphanumeric characters
j <- gsub("[():;,]+", "", j)
# keep words which are not catchwords, and with two or more characters
j_keep <- (!j %in% catchwords & nchar(j) > 1);
j <- j[j_keep];
if (length(j) == 0) {
# if no words remain, name the cluster by number
return(inum)
}
names(head(tcount(j), num_keep_terms))
})
names(nodegroups_wc) <- nodegroup_labels;
if ("nodegroups" %in% input_type) {
return(nodegroups_wc)
}
# assign cluster_names to communities object
wc$cluster_names <- nodegroup_labels;
return(wc);
}
#' Sync igraph nodes and communities
#'
#' Sync igraph nodes and communities
#'
#' This function ensures that `igraph` nodes and corresponding
#' community clusters are synchronized for proper downstream use.
#' In particular, when using a subgraph, or when communities only
#' assign a subset of nodes to clusters, this function ensures the
#' two objects are in sync, the same order, and with the same nodes.
#'
#' @return `list` with two elements:
#' * `"g"` - the `igraph` object after subsetting to match node names
#' shared with `wc`, as necessary.
#' * `"wc'` - the `communities` object after subsetting to match
#' node names shared with `g`, as necessary. When input `wc` is
#' in `list` nodegroups format, that same format is returned.
#'
#' @family jam igraph functions
#'
#' @param g `igraph` object
#' @param wc `communities` object, or `list` in form of nodegroups,
#' which is a `list` of `character` vectors that contain `igraph`
#' node names.
#' @param verbose `logical` indicating whether to print verbose output.
#' @param ... additional arguments are passed to `nodegroups2communities()`
#' only when input `wc` is supplied in `list` nodegroups format.
#'
#' @export
sync_igraph_communities <- function
(g,
wc,
verbose=TRUE,
...)
{
# validate input
input_type <- NULL;
if ("communities" %in% class(wc) ||
(is.list(wc) && all(c("membership", "names") %in% names(wc)))) {
# define list
input_type <- "communities";
} else if (is.list(wc)) {
input_type <- "nodegroups";
nodegroups_wc <- wc;
wc <- nodegroups2communities(nodegroups_wc,
...)
} else {
stop("Input wc must be 'communities' or 'nodegroups' list object.");
}
g_nodes <- igraph::V(g)$name;
wc_nodes <- wc$names;
observed_nodes <- intersect(g_nodes, wc_nodes)
# subset igraph
if (!all(g_nodes %in% observed_nodes)) {
g_keep <- which(g_nodes %in% observed_nodes)
g <- igraph::subgraph(g, v=g_keep)
# if layout is defined, subset in place
if ("layout" %in% igraph::graph_attr_names(g)) {
if (verbose) {
jamba::printDebug("sync_igraph_communities(): ",
"igraph layout was subset to match the remaining nodes.")
}
g_layout <- igraph::graph_attr(g, "layout");
igraph::graph_attr(g, "layout") <- g_layout[g_keep, , drop=FALSE];
}
}
# subset (and order) communities
wc_keep <- match(igraph::V(g)$name, wc_nodes)
wc$membership <- wc$membership[wc_keep]
wc$names <- wc$names[wc_keep]
if (length(wc$modularity) > 1) {
wc$modularity <- wc$modularity[wc_keep]
}
if (length(wc$memberships) > 1) {
wc$memberships <- wc$memberships[wc_keep, , drop=FALSE]
}
if (length(wc$merges) > 0) {
if (verbose) {
jamba::printDebug("sync_igraph_communities(): ",
"community merges were removed.")
}
wc$merges <- NULL;
}
wc$vcount <- igraph::vcount(g);
class(wc) <- "communities";
if ("cluster_names" %in% names(wc)) {
cluster_names <- wc$cluster_names;
names(cluster_names) <- seq_along(cluster_names);
if (!all(names(cluster_names) %in% as.character(wc$membership))) {
if (verbose) {
jamba::printDebug("sync_igraph_communities(): ",
"cluster_names were reduced due to match the remaining nodes.")
}
cn_keep <- (names(cluster_names) %in% as.character(wc$membership));
new_cluster_names <- unname(cluster_names[cn_keep]);
new_membership <- as.numeric(as.factor(wc$membership));
wc$cluster_names <- new_cluster_names;
wc$membership <- new_membership
}
}
if ("nodegroups" %in% input_type) {
wc <- communities2nodegroups(wc);
}
return(list(
g=g,
wc=wc));
}
|
library(tidyverse)
library(rvest)
library(rebus)
# Build the URL
url <- "https://juvenalcampos.com"
# Extract the HTML code of the page of interest
html = read_html(url)
# Links
enlaces = html %>%
html_nodes("article") %>%
html_nodes("a") %>%
html_attr("href")
class(enlaces)
enlaces = str_c(url, enlaces)
# Article names
articulos = html %>%
html_nodes("article") %>%
html_nodes("a") %>%
html_text()
# Extract the dates
fechas = html %>%
html_nodes("article") %>%
html_nodes("span") %>%
html_text() %>%
str_remove_all(pattern = "\n" %R% one_or_more(SPC)) %>%
as.Date(format = "%Y-%m-%d")
class(fechas)
# Final step: arrange the data in a table
datos <- tibble(articulos,
enlaces,
fechas)
|
/Sesión 10 - Introducción al Web Scraping/pagina_juve.R
|
no_license
|
JuveCampos/periodismoDeDatos2021
|
R
| false | false | 774 |
r
|
library(tidyverse)
library(rvest)
library(rebus)
# Build the URL
url <- "https://juvenalcampos.com"
# Extract the HTML code of the page of interest
html = read_html(url)
# Links
enlaces = html %>%
html_nodes("article") %>%
html_nodes("a") %>%
html_attr("href")
class(enlaces)
enlaces = str_c(url, enlaces)
# Article names
articulos = html %>%
html_nodes("article") %>%
html_nodes("a") %>%
html_text()
# Extract the dates
fechas = html %>%
html_nodes("article") %>%
html_nodes("span") %>%
html_text() %>%
str_remove_all(pattern = "\n" %R% one_or_more(SPC)) %>%
as.Date(format = "%Y-%m-%d")
class(fechas)
# Final step: arrange the data in a table
datos <- tibble(articulos,
enlaces,
fechas)
|
#' translation weights
library(devtools)
load_all(".")
load("test_patterns.rda")
w1 <- translation_weights(x1)
w1c <- translation_weights(x1, TRUE)
w2 <- translation_weights(x2)
w2c <- translation_weights(x2, TRUE)
par(mfrow=c(2,1))
plot(w1-w1c, type="l")
plot(w2-w2c, type="l")
|
/test/3-translation-weights.R
|
no_license
|
antiphon/SGCS
|
R
| false | false | 284 |
r
|
#' translation weights
library(devtools)
load_all(".")
load("test_patterns.rda")
w1 <- translation_weights(x1)
w1c <- translation_weights(x1, TRUE)
w2 <- translation_weights(x2)
w2c <- translation_weights(x2, TRUE)
par(mfrow=c(2,1))
plot(w1-w1c, type="l")
plot(w2-w2c, type="l")
|
# day02_10_factor.R
# factor (category): a data structure that distinguishes how many
# levels (categories) a variable consists of
# A factor looks like character data, but it is stored as numbers.
# A factor can only store (modify) predefined values
# known as levels.
# A factor may or may not have an order (ranking).
c('male', 'female', 'male', 'female')
gender <- factor(c('male', 'female', 'male', 'female'))
gender
class(gender)
# count the number of levels
nlevels(gender)
# look up the levels
levels(gender)
## for data whose factor levels need an order
dust <- factor(c('low','medium','high'))
dust
# create a factor with the level order specified
dust <- factor(c('low','medium','high'), levels = c('low','medium','high'), ordered = TRUE)
dust
max(dust)
min(dust)
# Only predefined values can be stored (modified) in a factor.
# add 'female' as the 5th element of gender
gender[5] <- 'female'
gender
# add 'mele' (a typo) as the 6th element of gender
# gender[6] <- 'mele' fails: R warns "invalid factor level" and stores NA.
|
/day02/day02_10_factor.R
|
no_license
|
Kyungpyo-Kang/R_Bigdata
|
R
| false | false | 1,176 |
r
|
# day02_10_factor.R
# factor (category): a data structure that distinguishes how many
# levels (categories) a variable consists of
# A factor looks like character data, but it is stored as numbers.
# A factor can only store (modify) predefined values
# known as levels.
# A factor may or may not have an order (ranking).
c('male', 'female', 'male', 'female')
gender <- factor(c('male', 'female', 'male', 'female'))
gender
class(gender)
# count the number of levels
nlevels(gender)
# look up the levels
levels(gender)
## for data whose factor levels need an order
dust <- factor(c('low','medium','high'))
dust
# create a factor with the level order specified
dust <- factor(c('low','medium','high'), levels = c('low','medium','high'), ordered = TRUE)
dust
max(dust)
min(dust)
# Only predefined values can be stored (modified) in a factor.
# add 'female' as the 5th element of gender
gender[5] <- 'female'
gender
# add 'mele' (a typo) as the 6th element of gender
# gender[6] <- 'mele' fails: R warns "invalid factor level" and stores NA.
|
## The makeCacheMatrix and cacheSolve functions are used together to store a matrix
## inputted by the user, and then to solve for the inverse of the matrix and store the
## computed inverse in memory.
## The makeCacheMatrix function takes a supplied matrix as input and creates a list of
## 3 functions: get, setinv, and getinv, which will be used, respectively, to print
## the matrix from memory, store the inverse, and print the inverse from memory.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(inv) i <<- inv
getinv <- function() i
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## The cacheSolve function solves for the inverse of the matrix supplied to the makeCacheMatrix
## function. First, it checks the 'getinv' variable defined above to determine if the computed
## inverse is already stored in memory. If so, the inverse is printed from the cache. If the
## inverse has not been computed, cacheSolve will calculate the inverse of the matrix and supply
## the result to 'setinv', which stores the inverse in cache memory.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinv(i)
i
}
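## Example usage (added for illustration; not part of the original assignment file):
## cache the inverse of a 2x2 matrix and fetch it from the cache on the second call.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)   # computes the inverse and stores it
cacheSolve(m)   # prints "getting cached data" and returns the stored inverse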
|
/cachematrix.R
|
no_license
|
lmm22/ProgrammingAssignment2
|
R
| false | false | 1,424 |
r
|
## The makeCacheMatrix and cacheSolve functions are used together to store a matrix
## inputted by the user, and then to solve for the inverse of the matrix and store the
## computed inverse in memory.
## The makeCacheMatrix function takes a supplied matrix as input and creates a list of
## 3 functions: get, setinv, and getinv, which will be used, respectively, to print
## the matrix from memory, store the inverse, and print the inverse from memory.
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(inv) i <<- inv
getinv <- function() i
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## The cacheSolve function solves for the inverse of the matrix supplied to the makeCacheMatrix
## function. First, it checks the 'getinv' variable defined above to determine if the computed
## inverse is already stored in memory. If so, the inverse is printed from the cache. If the
## inverse has not been computed, cacheSolve will calculate the inverse of the matrix and supply
## the result to 'setinv', which stores the inverse in cache memory.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinv(i)
i
}
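## Example usage (added for illustration; not part of the original assignment file):
## cache the inverse of a 2x2 matrix and fetch it from the cache on the second call.
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)   # computes the inverse and stores it
cacheSolve(m)   # prints "getting cached data" and returns the stored inverse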
|
#This program calculates the song complexity using Note Variability Index (NVI) (Sawant S., Arvind C., Joshi V., Robin V. V., 2021)
#This program uses warbleR to calculate Spectrogram Cross-Correlation
#Input files: Raven selection table for notes with sound files in same directory
#The notes selection table should include- Begin Time (s), End Time (s), Low Frequency (Hz), High Frequency (Hz),
# Begin File, File Offset (s), Song No..
#-------------------------------------------------------------------------------------------------------------------------------
#This part generates cross-correlation table from the Raven Notes selection table
library(Rraven)
#set working directory
#setwd('PATH')
#import raven note selection table with sound files in same directory
Notes_for_Corr <- imp_raven(warbler.format = TRUE)
#set window length
wl <- 512
#set time window overlap
ovlp <- 50
#frequency limits based on the low and high frequencies in the note selection table
bp <- c(min(Notes_for_Corr$bottom.freq)-0.1, max(Notes_for_Corr$top.freq)+0.1)
#run batch correlator on all the selections in note selection table
#set any correlation method- pearson, spearman or kendall
batch_corr_output <- xcorr(Notes_for_Corr, bp = bp, cor.method = "pearson" )
#convert negative correlation to no correlation
nonneg_batch_corr_output <- pmax(batch_corr_output,0)
BatchCorrOutput <- nonneg_batch_corr_output
#-------------------------------------------------------------------------------------------------------------------------------
#This part creates Raven Songs selection table from the Notes Selections
library("readr")
#Import raven selection table for notes in the txt form
#with an annotation column- 'Song No.'
#for allotting each note to a song
Notes_Selections <-as.data.frame(read_tsv("BRTH_notes_selections.txt", col_names = T))
#Extract Unique song IDs
Unique_Songs <- as.data.frame(unique(Notes_Selections$`Song No.`))
#No. of Songs
Total_songs <- length(unique(Notes_Selections$`Song No.`))
#Create data frame for songs selection table
Song_Selections <- data.frame(matrix(nrow = Total_songs, ncol = ncol(Notes_Selections)))
colnames(Song_Selections) <- names(Notes_Selections)
colnames(Song_Selections)[ncol(Song_Selections)] <- "Note Count"
#Create selection table for songs
Song_Selections[,1] <- c(1:Total_songs)
Song_Selections[,2] <- "Spectrogram 1"
Song_Selections[,3] <- 1
Begin_Time <- aggregate(Notes_Selections$`Begin Time (s)` , by = list(Category = Notes_Selections$`Song No.`), FUN = min)
Song_Selections[,4] <- Begin_Time$x
End_Time <- aggregate(Notes_Selections$`End Time (s)` , by = list(Category = Notes_Selections$`Song No.`), FUN = max)
Song_Selections[,5] <- End_Time$x
Min_Freq <- aggregate(Notes_Selections$`Low Freq (Hz)` , by = list(Category = Notes_Selections$`Song No.`), FUN = min)
Song_Selections[,6] <- Min_Freq$x
Max_Freq <- aggregate(Notes_Selections$`High Freq (Hz)` , by = list(Category = Notes_Selections$`Song No.`), FUN = max)
Song_Selections[,7] <- Max_Freq$x
Note_Count <- aggregate(Notes_Selections$Channel , by = list(Category = Notes_Selections$`Song No.`), FUN = sum )
Song_Selections[,ncol(Song_Selections)] <- Note_Count$x
#-------------------------------------------------------------------------------------------------------------------------------
#This part calculates the song complexity values using the Note Variability Index (NVI)
######### NVI calculation from Batch Correlator Output #########
### ## ## ## ####
#### ## ## ## ##
## ## ## ## ## ##
## #### #### ##
## ### ## ####
#extract song lengths from the songs selection table
SongLength <- as.data.frame(Song_Selections$`Note Count`)
#Create datasheet for the start and end of each song
start_end <- data.frame(matrix(nrow = nrow(SongLength), ncol = 2))
start_end <- cbind(SongLength,start_end)
colnames(start_end) <- c("Note_Count","Start_note", "end_note")
for (i in 2:nrow(start_end)){
start_end[1,2] <- 1
start_end[1,3] <- start_end[1,1]
start_end[i,2] <- 1 + start_end[i-1,3]
start_end[i,3] <- start_end[i,1] + start_end[i-1,3]
}
#Extract total number of notes to be correlated from the individual note count of songs
Total_notes <- start_end[nrow(start_end),3]
#Extract note count, start note and end note for each song
StartNote <- start_end$Start_note
EndNote <- start_end$end_note
NoteCount <- start_end$Note_Count
#create output data frame for NVI values
NVI_output <- data.frame(matrix(nrow = nrow(SongLength), ncol = 3))
colnames(NVI_output) <- c("No. of notes","NVI_non_norm", "NVI")
#Calculate the NVI values based on the song bounds provided
for (x in 1:nrow(SongLength)){
i = StartNote[c(x)]
j = EndNote[c(x)]
k = NoteCount[c(x)]
NVI_non_norm <-sum((1-BatchCorrOutput[c(i:j),c(i:j)]))
NVI <-sum((1-BatchCorrOutput[c(i:j),c(i:j)]))/(k*(k-1))
NVI_output[x, 1] <- k
NVI_output[x, 2] <- NVI_non_norm
NVI_output[x, 3] <- NVI
}
#add NVI values to raven selection table
NVI <- NVI_output$NVI
Song_Selections_Output <- cbind(Song_Selections, NVI)
#remove objects
rm("SongLength","BatchCorrOutput","StartNote","EndNote","NoteCount","start_end",
"Song_Selections", "i","j","k","x", "NVI", "NVI_non_norm", "bp", "ovlp", "wl",
"Begin_Time","End_Time","Max_Freq","Min_Freq","Note_Count","Unique_Songs",
"batch_corr_output","nonneg_batch_corr_output")
#Write Raven selection table for songs with added NVI values in txt format
write.table(Song_Selections_Output, file = "Song_Selections_Output.txt", sep = "\t", quote = F,row.names = FALSE)
#Export NVI output as a csv file
#write.csv(NVI_output, "NVI_Output.csv")
#_______________________________________________________________________________________________________________________________
|
/NVI_calculation_warbleR_1.0.R
|
no_license
|
suyash-sawant/Birdsong_Complexity
|
R
| false | false | 6,060 |
r
|
#This program calculates the song complexity using Note Variability Index (NVI) (Sawant S., Arvind C., Joshi V., Robin V. V., 2021)
#This program uses warbleR to calculate Spectrogram Cross-Correlation
#Input files: Raven selection table for notes with sound files in same directory
#The notes selection table should include- Begin Time (s), End Time (s), Low Frequency (Hz), High Frequency (Hz),
# Begin File, File Offset (s), Song No..
#-------------------------------------------------------------------------------------------------------------------------------
#This part generates cross-correlation table from the Raven Notes selection table
library(Rraven)
#set working directory
#setwd('PATH')
#import raven note selection table with sound files in same directory
Notes_for_Corr <- imp_raven(warbler.format = TRUE)
#set window length
wl <- 512
#set time window overlap
ovlp <- 50
#frequency limits based on the low and high frequencies in the note selection table
bp <- c(min(Notes_for_Corr$bottom.freq)-0.1, max(Notes_for_Corr$top.freq)+0.1)
#run batch correlator on all the selections in note selection table
#set any correlation method- pearson, spearman or kendall
batch_corr_output <- xcorr(Notes_for_Corr, bp = bp, cor.method = "pearson" )
#convert negative correlation to no correlation
nonneg_batch_corr_output <- pmax(batch_corr_output,0)
BatchCorrOutput <- nonneg_batch_corr_output
#-------------------------------------------------------------------------------------------------------------------------------
#This part creates Raven Songs selection table from the Notes Selections
library("readr")
#Import raven selection table for notes in the txt form
#with an annotation column- 'Song No.'
#for allotting each note to a song
Notes_Selections <-as.data.frame(read_tsv("BRTH_notes_selections.txt", col_names = T))
#Extract Unique song IDs
Unique_Songs <- as.data.frame(unique(Notes_Selections$`Song No.`))
#No. of Songs
Total_songs <- length(unique(Notes_Selections$`Song No.`))
#Create data frame for songs selection table
Song_Selections <- data.frame(matrix(nrow = Total_songs, ncol = ncol(Notes_Selections)))
colnames(Song_Selections) <- names(Notes_Selections)
colnames(Song_Selections)[ncol(Song_Selections)] <- "Note Count"
#Create selection table for songs
Song_Selections[,1] <- c(1:Total_songs)
Song_Selections[,2] <- "Spectrogram 1"
Song_Selections[,3] <- 1
Begin_Time <- aggregate(Notes_Selections$`Begin Time (s)` , by = list(Category = Notes_Selections$`Song No.`), FUN = min)
Song_Selections[,4] <- Begin_Time$x
End_Time <- aggregate(Notes_Selections$`End Time (s)` , by = list(Category = Notes_Selections$`Song No.`), FUN = max)
Song_Selections[,5] <- End_Time$x
Min_Freq <- aggregate(Notes_Selections$`Low Freq (Hz)` , by = list(Category = Notes_Selections$`Song No.`), FUN = min)
Song_Selections[,6] <- Min_Freq$x
Max_Freq <- aggregate(Notes_Selections$`High Freq (Hz)` , by = list(Category = Notes_Selections$`Song No.`), FUN = max)
Song_Selections[,7] <- Max_Freq$x
Note_Count <- aggregate(Notes_Selections$Channel , by = list(Category = Notes_Selections$`Song No.`), FUN = sum )
Song_Selections[,ncol(Song_Selections)] <- Note_Count$x
#-------------------------------------------------------------------------------------------------------------------------------
#This part calculates the song complexity values using the Note Variability Index (NVI)
######### NVI calculation from Batch Correlator Output #########
### ## ## ## ####
#### ## ## ## ##
## ## ## ## ## ##
## #### #### ##
## ### ## ####
#extract song lengths from the songs selection table
SongLength <- as.data.frame(Song_Selections$`Note Count`)
#Create datasheet for the start and end of each song
start_end <- data.frame(matrix(nrow = nrow(SongLength), ncol = 2))
start_end <- cbind(SongLength,start_end)
colnames(start_end) <- c("Note_Count","Start_note", "end_note")
start_end[1,2] <- 1
start_end[1,3] <- start_end[1,1]
for (i in 2:nrow(start_end)){
  start_end[i,2] <- 1 + start_end[i-1,3]
  start_end[i,3] <- start_end[i,1] + start_end[i-1,3]
}
#Extract total number of notes to be correlated from the individual note count of songs
Total_notes <- start_end[nrow(start_end),3]
#Extract note count, start note and end note for each song
StartNote <- start_end$Start_note
EndNote <- start_end$end_note
NoteCount <- start_end$Note_Count
#create output dataframe for NVI values
NVI_output <- data.frame(matrix(nrow = nrow(SongLength), ncol = 3))
colnames(NVI_output) <- c("No. of notes","NVI_non_norm", "NVI")
#Calculate the NVI values based on the song bounds provided
for (x in 1:nrow(SongLength)){
i = StartNote[c(x)]
j = EndNote[c(x)]
k = NoteCount[c(x)]
NVI_non_norm <-sum((1-BatchCorrOutput[c(i:j),c(i:j)]))
NVI <-sum((1-BatchCorrOutput[c(i:j),c(i:j)]))/(k*(k-1))
NVI_output[x, 1] <- k
NVI_output[x, 2] <- NVI_non_norm
NVI_output[x, 3] <- NVI
}
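#Worked illustration of the NVI formula on a toy 3-note correlation matrix
#(illustrative sketch only, not part of the original workflow; 'toy_corr' is a made-up object):
#toy_corr <- matrix(c(1, 0.8, 0.2,
#                     0.8, 1, 0.4,
#                     0.2, 0.4, 1), nrow = 3)
#k <- 3
#sum(1 - toy_corr) / (k * (k - 1))   #NVI for this toy song: 3.2 / 6 = 0.533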
#add NVI values to raven selection table
NVI <- NVI_output$NVI
Song_Selections_Output <- cbind(Song_Selections, NVI)
#remove objects
rm("SongLength","BatchCorrOutput","StartNote","EndNote","NoteCount","start_end",
"Song_Selections", "i","j","k","x", "NVI", "NVI_non_norm", "bp", "ovlp", "wl",
"Begin_Time","End_Time","Max_Freq","Min_Freq","Note_Count","Unique_Songs",
"batch_corr_output","nonneg_batch_corr_output")
#Write Raven selection table for songs with added NVI values in txt format
write.table(Song_Selections_Output, file = "Song_Selections_Output.txt", sep = "\t", quote = F,row.names = FALSE)
#Export NVI output as a csv file
#write.csv(NVI_output, "NVI_Output.csv")
#_______________________________________________________________________________________________________________________________
|
# --------------------------------------------------- #
# Takes as input an esttable - object and returns a validation of
# which multiphase-methods performed BEST in comparison to the onephase
# (baseline) std-error.
#
# We define the "gain" as the reduction ("+") or possibly also the increase ("-")
# of the best multiphase-standard-error compared to the onephase-std-error
#
# We also give the percentage of the best multiphase-standard-error
# compared to the onephase std-error AND the relative efficiency
#
# --------------------------------------------------- #
#' mphase.gain
#'
#' \code{mphase.gain} takes as input an object created by the \code{\link{estTable}} function
#' and returns a validation of which multiphase method and estimator performed best in comparison
#' to the onephase estimation (baseline) in terms of estimation precision.
#'
#'
#' @param esttable.obj an object of class \code{esttable} created by the \code{\link{estTable}} function
#'
#' @param pref.vartype preferred type of multiphase variance that should be compared to the \code{onephase} variance,
#'                     if more than one variance type has been calculated in the multiphase estimation object(s) stored in
#' \code{esttable}. Valid input values are \code{"g_variance"} (default) and \code{"ext_variance"}.
#'
#' @param exclude.synth \code{logical}. If set to \code{TRUE} (default), synthetic estimations are not considered in the validation.
#'
#'
#' @return \code{mphase.gain} returns a \code{data.frame} containing the following components:
#'
#' \itemize{
#' \item \code{area:} in case of small area estimation: the name of the small area
#'    \item \code{var_onephase:} variance of the \code{\link{onephase}} estimation
#' \item \code{var_multiphase:} smallest variance among the (set of) multiphase estimations stored in \code{esttable.obj}
#' \item \code{estimator:} multiphase estimator with the smallest variance
#'    \item \code{method:} estimation method of the multiphase estimator with the smallest variance
#' \item \code{gain:} the \emph{gain} is the reduction (if value is positive) or possibly also the increase (if value is negative)
#' in variance when applying the multiphase as alternative to the onephase estimation
#' \item \code{rel.eff:} the \emph{relative efficiency} defined as the ratio between the onephase variance and the multiphase variance
#' %\item \code{perc.of.onephase:} ratio between the smallest multiphase standard error and the onephase standard error
#' }
#'
#' @note
#'
#' The \emph{gain} can be interpreted as: "The multiphase estimation procedure leads to a \code{gain} \% reduction in variance compared to the
#' onephase procedure".
#'
#' The \emph{relative efficiency} can be interpreted as: "Using the onephase estimation procedure, the terrestrial sample size would have to be \code{rel.eff} times larger in order to achieve the same precision (in terms of variance) as the multiphase estimation procedure".
#'
#'
#' % @example examples/example_mphasegain.R
#'
#' @import methods
#' @importFrom plyr ddply
#' @export
# -----------------------------------------------------------------------------#
# FUNCTION STARTS HERE:
mphase.gain<- function(esttable.obj, pref.vartype = "g_variance", exclude.synth = TRUE){
# check input:
if(!is(esttable.obj, "esttable")){stop("'mphase.gain()' expects an 'esttable' object created by 'estTable()'")}
if(is(esttable.obj, "global")){
etype<- "global"
}
if(is(esttable.obj, "smallarea")){
etype<- "smallarea"
}
# convert esttable-object into data.frame:
esttable.obj<- as.data.frame(esttable.obj)
# -------- #
# closure:
prec.gain<- function(est.tab){
ind.1ph<- est.tab$vartype %in% "variance"
if(exclude.synth){
ind.not.1ph<- est.tab$vartype %in% c(pref.vartype) & !(est.tab$estimator %in% c("psynth", "synth"))
} else {
ind.not.1ph<- est.tab$vartype %in% c(pref.vartype)
}
var.1ph<- est.tab[ind.1ph, "variance"]
est.tab.not1ph<- est.tab[ind.not.1ph,]
ind.best.multiph<- which.min(est.tab.not1ph[["variance"]])[1]
best.mphase<- est.tab.not1ph[ind.best.multiph, c("estimator", "method", "variance", "vartype")]
if(all(!ind.1ph)){ # if no onephase is there
d<- data.frame(var_onephase = NA, var_multiphase = best.mphase$variance,
estimator = best.mphase$estimator, method = best.mphase$method,
gain = NA, rel.eff = NA)
} else {
# gain:
red.to.1ph<- round(100* (1 - (best.mphase[["variance"]] / var.1ph)), digits = 1)
perc.of.1phvar<- round(100*(best.mphase[["variance"]] / var.1ph), digits = 1)
rel.eff<- var.1ph / best.mphase[["variance"]]
d<-
data.frame(var_onephase = var.1ph, var_multiphase = best.mphase$variance,
method = best.mphase$method, estimator = best.mphase$estimator,
gain = red.to.1ph, rel.eff = rel.eff)
}
return(d)
}
# -------- #
if(etype == "global"){
# apply closure:
return(prec.gain(esttable.obj))
}
if(etype == "smallarea"){
# apply closure:
return(ddply(esttable.obj,"area", prec.gain))
}
} # end of function
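# ---------------------------------------------------------------------------- #
# Hypothetical usage sketch (not run). The estimation calls and the 'est.list'
# argument name below are assumptions for illustration only; consult estTable()
# and the onephase()/twophase() documentation for the actual interfaces.
# op <- onephase(...)                       # baseline (onephase) estimation
# tp <- twophase(...)                       # a multiphase estimation
# et <- estTable(est.list = list(op, tp))   # combine into an 'esttable' object
# mphase.gain(et, pref.vartype = "g_variance", exclude.synth = TRUE)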
|
/R/mphasegain.R
|
no_license
|
cran/forestinventory
|
R
| false | false | 5,380 |
r
|
\name{thermo.depth}
\alias{thermo.depth}
\title{
Calculate depth of the thermocline from a temperature profile.
}
\description{
This function calculates the location of the thermocline from a temperature profile.
It uses a special technique to estimate where the thermocline lies even between two temperature measurement depths,
giving a potentially finer-scale estimate than usual techniques.
}
\usage{
thermo.depth(wtr, depths, Smin = 0.1, seasonal=TRUE, index=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{wtr}{
Numeric vector of water temperature in degrees Celsius
}
\item{depths}{
Numeric vector of depths. Must be the same length as the wtr parameter
}
\item{Smin}{
	Optional parameter defining the minimum density gradient for the thermocline
}
\item{seasonal}{
a logical value indicating whether the seasonal thermocline should be returned. The seasonal thermocline
is defined as the deepest density gradient found in the profile. If FALSE, the depth of the maximum density gradient is returned.
}
\item{index}{
	Boolean value indicating whether the index of the thermocline depth, instead of the depth value, should be returned.
}
}
\value{
Depth of thermocline. If no thermocline found, value is max(depths).
}
\author{
Luke Winslow
}
\seealso{
\code{water.density}
}
\examples{
# A vector of water temperatures
wtr = c(22.51, 22.42, 22.4, 22.4, 22.4, 22.36, 22.3, 22.21, 22.11, 21.23, 16.42,
15.15, 14.24, 13.35, 10.94, 10.43, 10.36, 9.94, 9.45, 9.1, 8.91, 8.58, 8.43)
#A vector defining the depths
depths = c(0, 0.5, 1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20)
t.d = thermo.depth(wtr, depths, seasonal=FALSE)
cat('The thermocline depth is:', t.d)
}
\keyword{manip}
|
/man/thermo.depth.Rd
|
no_license
|
snortheim/rLakeAnalyzer
|
R
| false | false | 1,840 |
rd
|
rm(list=ls())
source("inst/coxprocess/repeat_gibbsflow_sis.R")
source("inst/coxprocess/repeat_gibbsflow_ais_rmhmc.R")
source("inst/coxprocess/repeat_ais_rmhmc.R")
source("inst/coxprocess/repeat_gibbsflow_sis_vi.R")
source("inst/coxprocess/repeat_gibbsflow_ais_vi.R")
source("inst/coxprocess/repeat_ais_vi.R")
source("inst/coxprocess/repeat_gibbsflow_sis_ep.R")
source("inst/coxprocess/repeat_gibbsflow_ais_ep.R")
source("inst/coxprocess/repeat_ais_ep.R")
|
/inst/coxprocess/repeats.R
|
no_license
|
jeremyhengjm/GibbsFlow
|
R
| false | false | 460 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multiSummaryImp.R
\name{multiSummaryImp}
\alias{multiSummaryImp}
\title{Summarize the output from multiple regression models with imputed data}
\usage{
multiSummaryImp(..., Stars = F, Output = "markdown")
}
\arguments{
\item{...}{up to 8 linear models, produced by lm}
\item{Stars}{adds significance stars for the last model, if requested}
\item{Output}{specifies if the function returns a dataframe or a markdown file}
}
\value{
The regression table
}
\description{
This function creates a summary table for a linear model with imputed data and, by default, outputs it to the
viewer in HTML format to facilitate copying and pasting. By default it outputs
standardized betas for regression coefficients, and it can accept
up to 8 models as arguments, which makes it ideal for displaying models with multiple
steps.
}
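\examples{
\dontrun{
# Illustrative sketch only: 'fit1', 'fit2' and 'imputed_data' are placeholder
# objects assumed for this example; they are not shipped with the package.
fit1 <- lm(y ~ x1, data = imputed_data)
fit2 <- lm(y ~ x1 + x2, data = imputed_data)
multiSummaryImp(fit1, fit2, Stars = TRUE, Output = "markdown")
}
}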
|
/man/multiSummaryImp.Rd
|
no_license
|
michaelasher/asherR
|
R
| false | true | 896 |
rd
|
# ******************************************************************
#
# Functions to get futures and options current data from www.moex.com
#
# *****************************************************************
# Sys.setenv(http_proxy="http://10.1.144.50:3128")
# +-----------------------------------------------------+
# |  Returns vector of available base futures for options
# +-----------------------------------------------------+
futuresList = function(){
library(rvest)
library(dplyr)
url = 'http://moex.com/ru/derivatives/optionsdesk.aspx'
urlpage = read_html(url)
futures = html_nodes(urlpage, 'option') %>% html_text()
return(unique(futures))
}
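# Example (illustrative; assumes www.moex.com is reachable from the current R session)
# futs = futuresList()
# head(futs)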
# +---------------------------+
# | Download board web page
# +---------------------------+
boardDownload = function(fut){
library(rvest)
url = paste0('http://moex.com/ru/derivatives/optionsdesk.aspx?sby=116&sub=on&code=', fut, '&c1=on&c2=on&c6=on&c3=on&c5=on&c4=on&c7=on&sid=1')
hh = read_html(url)
return(hh)
}
# +--------------------------------------------+
# | Returns data time as POSIXct
# +--------------------------------------------+
datetimeCurrentInfo = function(x){
library(dplyr)
library(rvest)
#x = boardDownload('RTS-12.15')
hh = x
curtime = html_nodes(hh, xpath = '//table/tr[1]/td[2]/table/tr[2]/td/table/tr/td/table/tr[2]/td[2]/div[2]/span') %>%
html_text() %>%
substr(., nchar(.)-15, nchar(.)) %>%
as.POSIXct(., format='%d.%m.%Y %H:%M')
return(curtime)
}
# boardDownload("RTS-12.15") %>% datetimeCurrentInfo()
# +--------------------------------------------+
# |  Returns list of selected future parameters
# +--------------------------------------------+
futureCurrentInfo = function(x){
library(dplyr)
library(rvest)
hh = x
futData = html_nodes(hh, xpath = '//table/tr[1]/td[2]/table/tr[2]/td/table/tr/td/table/tr[2]/td[2]/table[2]') %>%
html_table(fill=T) %>%
data.frame(.) %>%
select(c(1:12)) %>%
slice(4) %>%
gsub(rawToChar(as.raw(194)), '', ., useBytes=T) %>%
gsub(rawToChar(as.raw(160)), '', ., useBytes=T) %>%
gsub(',', '.', .) %>% as.list(.)
if(futData[[2]]=='')
futData[4:12] = futData[3:11]
futData=lapply(futData, function(x){
suppressWarnings(ifelse(is.na(as.numeric(x)), x, as.numeric(x)))
})
names(futData) = c('code', 'last', 'lastdate', 'roc', 'bid', 'ask', 'maxpr', 'minpr', 'volrub', 'volfuts', 'trades', 'OI')
return(futData)
}
# Example
# boardDownload("MXI-12.15") %>% futureCurrentInfo()
# boardDownload("RTS-12.15") %>% futureCurrentInfo()
# +----------------------------------------------+
# | Returns list of option boards for the future
# +----------------------------------------------+
optionsCurrentInfo = function(x){
library(dplyr)
library(rvest)
hh = x
optboardList = list()
optExps = vector()
for(n in 0:2){
try({
# Read expiration date
optseriesExp = html_nodes(hh, xpath = paste0('//table/tr[1]/td[2]/table/tr[2]/td/table/tr/td/table/tr[2]/td[2]/table[3]/tr/td/table[', 1+n*3 ,']') ) %>%
html_table(fill=T) %>%
data.frame(.) %>% .[3,2] %>% strsplit(.,' ') %>% .[[1]] %>% .[1]
# Read all options data
optseriesData = html_nodes(hh, xpath = paste0('//table/tr[1]/td[2]/table/tr[2]/td/table/tr/td/table/tr[2]/td[2]/table[3]/tr/td/table[', 2+n*3 ,']') ) %>%
html_table(fill=T) %>% data.frame(.) %>% .[-c(1:3), ] %>% slice(1:(nrow(.)-3))
# Clear call data
calls = optseriesData[1:16]
names(calls) = c('code','volrub','volopts','trades','OI','maxpr','minpr','last','lastdate','roc','bid','ask','cprice','tprice','strike', 'iv')
for (i in 1:ncol(calls)){
if( names(calls[i]) %in% c('code','roc','lastdate') ) next
calls[,i] = calls[,i] %>%
gsub(rawToChar(as.raw(194)), '', ., useBytes=T) %>%
gsub(rawToChar(as.raw(160)), '', ., useBytes=T) %>%
gsub(',', '.', .) %>%
gsub('-', '0', ., fixed=T ) %>%
as.numeric
}
calls = calls[, c('code','strike','tprice','ask','bid','iv','OI','volrub','volopts','trades','last','lastdate','roc','cprice','maxpr','minpr')]
      # Clean and convert puts data to numeric
puts = optseriesData[15:30]
names(puts) = c('strike', 'iv', 'tprice','cprice', 'bid','ask','last','lastdate','roc','maxpr','minpr','trades','OI','volrub','volopts', 'code')
for (i in 1:ncol(puts)){
if( names(puts[i]) %in% c('code','roc','lastdate') ) next
puts[,i] = puts[,i] %>%
gsub(rawToChar(as.raw(194)), '', ., useBytes=T) %>%
gsub(rawToChar(as.raw(160)), '', ., useBytes=T) %>%
gsub(',', '.', .) %>%
gsub('-', '0', ., fixed=T ) %>%
as.numeric
}
puts = puts[, c('code','strike','tprice','ask','bid','iv','OI','volrub','volopts','trades','last','lastdate','roc','cprice','maxpr','minpr')]
# Single expiration calls and puts to list
optboardList[[n+1]] = list(calls=calls, puts=puts)
optExps = c(optExps, as.character(optseriesExp))
}, silent=T)
}
# Name boards
names(optboardList) = optExps
return(optboardList)
}
# ((boardDownload("RTS-12.15") %>% optionsCurrentInfo())[[2]][['calls']])[, c('strike', 'iv')] %>% as.data.frame %>% qplot(data=., x=strike, y=iv)
# +----------------------------------------------+
# | Add greeks
# +----------------------------------------------+
moexGreeks = function(x, expdate){
require(fOptions)
#expdate = '15.03.2016'
#x = boardDownload("RTS-3.16")
brdwgreeks = list()
curtime = x %>% datetimeCurrentInfo() %>% as.Date
t = as.numeric(as.Date(expdate, '%d.%m.%Y') - curtime)/365
S = futureCurrentInfo(x)$last
if(S == '')
S = (futureCurrentInfo(x)$bid + futureCurrentInfo(x)$ask) / 2
#xtype = 'call'
for(xtype in c('calls', 'puts')){
brd = (x %>% optionsCurrentInfo())[[expdate]][[xtype]]
#options('scipen' = 100, digits = 4)
greeks = sapply(c('delta', 'gamma', 'vega', 'theta'), function(x){
param = x
sapply(c(1:nrow(brd)),
function(x){
GBSGreeks(param, substr(xtype,1,1), S, brd[x, 'strike'], t, 0, 0, brd[x, 'iv']/100) }
)
}, USE.NAMES=T
) %>% as.data.frame
greeks$theta = greeks$theta/365
greeks$vega = greeks$vega/100
    brdwgreeks[[xtype]] = cbind(brd, greeks)
}
return(brdwgreeks)
}
# fut="Si-12.15"
# (boardDownload("MXI-12.15") %>% moexGreeks(., '17.12.2015'))$calls %>% View
|
/R/moex_scraping.R
|
no_license
|
davydovpv/OptionsStaff
|
R
| false | false | 6,836 |
r
|
library(glmnet)
mydata = read.table("./TrainingSet/Correlation/upper_aerodigestive_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.8,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_082.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
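# Optional follow-up (illustrative only): inspect the coefficients at the
# cross-validated lambda that minimises mean-squared error.
# coef(glm, s = "lambda.min")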
|
/Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_082.R
|
no_license
|
leon1003/QSMART
|
R
| false | false | 417 |
r
|
#' Illustration of Feature Validation
#'
#' This function takes a feature list, a dataset and a significance value as input and produces a list of relevant features validated by an ANOVA test.
#'
#' @param featurelist a list of selected features
#' @param data provided as dataframe
#' @param significance a significance threshold for the ANOVA test
#' @return a list of selected and validated features
#' @export
#' @examples
#' featurelist = c("Percent.Peptide", "Amino.Acid.Cut.position","predictions")
#' dir = getwd()
#' filepath = paste0(dir,'/data-raw/sample.csv')
#' data = read.csv(filepath)
#' features = featurevalidation(featurelist, data, 0.01)
featurevalidation = function(featurelist, data, significance = 0.05){
formuli = featureformula(featurelist)
model = lm(as.formula(formuli), data)
ap = anova(model)[5]
p = ap$`Pr(>F)`
f = row.names(ap)
p = p[-length(p)]
f = f[-length(f)]
relevantfeatures = c()
featuresignificance = c()
for (i in 1:length(p)) {
if (p[i] < significance) {
relevantfeatures[length(relevantfeatures) + 1] = f[i]
featuresignificance[length(featuresignificance) + 1] = p[i]
}
}
validatedfeaturelist = data.frame(relevantfeatures,featuresignificance)
return(validatedfeaturelist)
}
|
/CRISPRpred/crisprpred/R_src/featurevalidation/featurevalidation.R
|
no_license
|
khaled-rahman/CRISPRpred
|
R
| false | false | 1,245 |
r
|
install.packages("rcompanion")
x <- matrix(c(76, 22, 45, 30, 11, 45, 40, 6, 25, 36, 10, 40), byrow=TRUE, nrow=4, ncol=3);
fisher.test(x);
Input =("
Guidelines None_declared No_mention Declared_any
1 76 22 45
2 30 11 45
3 40 6 25
4 36 10 40
")
Matriz=as.matrix(read.table(textConnection(Input),
header=TRUE,
row.names=1))
fisher.test(Matriz, workspace = 4000000,
            alternative = "two.sided")
Input2=("
Reference None_declared No_mention Declared_any
5 43 17 34
6 28 2 44
7 13 4 31
8 40 16 26
9 58 10 20
")
reference=as.matrix(read.table(textConnection(Input2),
header=TRUE,
row.names=1))
fisher.test(reference, simulate.p.value = TRUE,
            alternative = "two.sided")
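# Optional (illustrative only): store a result object to access the p-value directly.
# res <- fisher.test(Matriz, workspace = 4000000, alternative = "two.sided")
# res$p.value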
|
/R/25- Fisher exact.R
|
permissive
|
marissasmith8/Citation-Network-Analysis
|
R
| false | false | 1,124 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DSC_BIRCH.R
\name{BIRCH-get_microclusters}
\alias{BIRCH-get_microclusters}
\title{Centroids of micro clusters}
\description{
This function returns all micro clusters of a given CF-Tree.
}
|
/man/BIRCH-get_microclusters.Rd
|
no_license
|
Dennis1989/stream
|
R
| false | true | 267 |
rd
|