| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0-6.46M | large_string, lengths 3-331 | large_string, 2 classes | large_string, lengths 5-125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4-6.46M | large_string, 75 classes | string, lengths 0-6.46M |
# Creates a simple random forest benchmark
library(randomForest)
library(readr)
set.seed(0)
numTrain <- 10000
numTrees <- 25
train <- read_csv("../input/train.csv")
test <- read_csv("../input/test.csv")
rows <- sample(1:nrow(train), numTrain)
labels <- as.factor(train[[1]][rows]) # extract the label column as a vector (read_csv() returns a tibble, so train[rows, 1] would stay a data frame)
train <- train[rows,-1]
rf <- randomForest(train, labels, xtest=test, ntree=numTrees)
predictions <- data.frame(ImageId=1:nrow(test), Label=levels(labels)[rf$test$predicted])
head(predictions)
write_csv(predictions, "rf_benchmark.csv")
|
/digits/rf_benchmark.R
|
no_license
|
bhimmetoglu/kaggle_101
|
R
| false | false | 523 |
r
|
# Install TMB
# Must be installed from: https://github.com/kaskr/adcomp
# Install INLA
# Must be installed from: http://www.r-inla.org/download
# Install geostatistical delta-GLMM package (if not already installed)
if(!"VAST" %in% installed.packages()[,1]) devtools::install_github("james-thorson/VAST")
if(!"ThorsonUtilities" %in% installed.packages()[,1]) devtools::install_github("james-thorson/utilities")
# Load libraries
library(TMB)
library(ThorsonUtilities)
library(VAST)
library(INLA)
#INLA:::inla.dynload.workaround()
run <- 'MINIMUM'
# This is where all runs will be located
DateFile <- file.path('..','results',paste(Sys.Date(),'_',run,'/', sep = ""))
dir.create(DateFile)
###############
# Settings
###############
#########################
### VAST CPP version ###
Version = "VAST_v2_4_0"
########################
## Spatial settings ###
########################
Method = c("Grid", "Mesh")[2]
#grid_size_km = 20
n_x = c(10, 50, 100, 250, 500, 1000, 2000)[1] # Number of stations
Kmeans_Config = list( "randomseed"=1, "nstart"=100, "iter.max"=1e3 ) # Samples: Do K-means on trawl locs; Domain: Do K-means on extrapolation grid
strata.limits <- data.frame('STRATA'="All_areas") # Decide on strata for use when calculating indices
Region = "Celtic_Sea"# Determine region
Catch_units <- 'Kg'
max_dist <- 50
########################
#### Model settings ####
########################
FieldConfig = c("Omega1"= 4, "Epsilon1"= 4, "Omega2"= 4, "Epsilon2"= 4) # 1=Presence-absence; 2=Density given presence; #Epsilon=Spatio-temporal; #Omega=Spatial
RhoConfig = c("Beta1"=0, "Beta2"=0, "Epsilon1"=0, "Epsilon2"=0) # Structure for beta or epsilon over time: 0=None (default); 1=WhiteNoise; 2=RandomWalk; 3=Constant
ObsModel = c(2,0) # 0=normal (log-link); 1=lognormal; 2=gamma; 4=ZANB; 5=ZINB; 11=lognormal-mixture; 12=gamma-mixture
OverdispersionConfig = c("eta1" = 0,"eta2" = 0) # 0 - number of factors
Options = c(SD_site_density = 1, SD_site_logdensity = 1, Calculate_Range = 1, Calculate_evenness = 1, Calculate_effective_area = 1, Calculate_Cov_SE = 1, Calculate_Synchrony = 0, Calculate_Coherence = 0)
BiasCorr = FALSE
#######################
##### Save options ####
#######################
# Save options for future records
Record = ThorsonUtilities::bundlelist( c("Version","Method","n_x","FieldConfig","RhoConfig", "ObsModel", "OverdispersionConfig", "Kmeans_Config","Catch_units","BiasCorr","Region","strata.limits") )
capture.output( Record, file=paste0(DateFile,"Record.txt"))
save(Record, file=paste0(DateFile,"Record.RData"))
diag.plots <- FALSE ## Do you want to plot diagnostics ??
######################
#### Prepare data ####
######################
# Read or simulate trawl data
load(file.path('..','data', 'Cleaned','CelticSurveyFormattedSize.RData')) ## EVHOE and IE-IGFS
load(file.path('..','data', 'Cleaned','CelticSurvey2FormattedSize.RData')) ## Various Cefas surveys
# Combine the survey data
DF2 <- DF
ac <- as.character
DF <- data.frame(Survey = c(DF2$Ship, ac(FSS$fldSeriesName)),
Year = c(DF2$Year, ac(FSS$Year)),
Station = c(DF2$StNo, FSS$fldCruiseStationNumber),
Lat = c(DF2$HaulLatMid, FSS$HaulLatMid),
Lon = c(DF2$HaulLonMid, FSS$HaulLonMid),
AreaSwept_km2 = c(DF2$SweptArea, FSS$SweptArea),
spp = c(DF2$SpeciesName, ac(FSS$Species)),
Kg = c(DF2$Kg, FSS$Kg))
table(DF$Survey, DF$Year)
## Subset years to best data - based on data exp. doc
DF <- DF[DF$Year %in% c(2000:2015),]
species <- sort(unique(DF$spp))
DF <- DF[DF$spp %in% species[13:16],] # Plaice only
DF$SpeciesName <- factor(DF$spp) # drop empty factors
DF$Ship <- factor(DF$Survey)
DF$Year <- factor(DF$Year)
an <- as.numeric
Data_Geostat = data.frame("spp"=DF[,"SpeciesName"],
"Year"=DF[,"Year"],
"Catch_KG"=DF[,"Kg"],
"AreaSwept_km2"=DF[,'AreaSwept_km2'],
"Vessel"= DF[,'Ship'] ,
"Lat"=DF[,"Lat"],
"Lon"=DF[,"Lon"] )
## Prepare the fixed vessel covariates, Q_ik
## Vessel and species concatenated
Vess_Cov <- vector_to_design_matrix(paste(Data_Geostat[,'Vessel'],Data_Geostat[,'spp'], sep = '_'))
# Drop set of vessel-species combos
Vess_Cov <- Vess_Cov[,-grep('CEXP', colnames(Vess_Cov))] ## All spp relative to the Celtic Explorer
##############################
##### Extrapolation grid #####
##############################
# Get extrapolation data
Extrapolation_List = SpatialDeltaGLMM::Prepare_Extrapolation_Data_Fn(Region=Region, strata.limits=strata.limits, observations_LL=Data_Geostat[,c('Lat','Lon')], maximum_distance_from_sample = max_dist)
# Calculate spatial information for SPDE mesh, strata areas, and AR1 process
Spatial_List = SpatialDeltaGLMM::Spatial_Information_Fn(n_x=n_x, Method=Method, Lon=Data_Geostat[,'Lon'], Lat=Data_Geostat[,'Lat'], Extrapolation_List=Extrapolation_List, randomseed=Kmeans_Config[["randomseed"]], nstart=Kmeans_Config[["nstart"]], iter.max=Kmeans_Config[["iter.max"]], DirPath=DateFile )
#### Prep data
Data_Geostat = cbind(Data_Geostat, Spatial_List$loc_i, "knot_i"=Spatial_List$knot_i)
################################
#### Make and Run TMB model ####
################################
# Make TMB data list
## End here
save.image(file = 'MinExampleHess.RData')
|
/code/MinimumRepPart1.R
|
no_license
|
pdolder/JointProduction_study
|
R
| false | false | 5,420 |
r
|
context("metalCriteria")
hardness <- c(25, 125, 225, 325, 400)
tol <- 1e-5
test_that("Cadmium criteria correct", {
expect_equal(metalCriteria(hardness, 'cadmium', toxicity = 'acute'),
c(0.821101892407,5.044698529249,9.789998014042,14.822626126205,18.734598642374))
expect_equal(metalCriteria(hardness, 'cadmium', toxicity = 'chronic'),
c(0.0936968237,0.2872405824,0.4318577787,0.5571341111,0.6432217364))
})
test_that("Chromium criteria correct", {
expect_equal(metalCriteria(hardness, 'chromium', toxicity = 'acute'),
c(183.0659069317,684.0122901081,1106.9604047590,1495.9844422769,1773.2980532507))
expect_equal(metalCriteria(hardness, 'chromium', toxicity = 'chronic'),
c(23.8131133690,88.9759457843,143.9928059534,194.5968406638,230.6696439922))
})
test_that("Copper criteria correct", {
expect_equal(metalCriteria(hardness, 'copper', toxicity = 'acute'),
c(4.801002749255,21.872654492311,38.055659013185,53.813269771962,65.441583928317))
expect_equal(metalCriteria(hardness, 'copper', toxicity = 'chronic'),
c(3.616582041387,14.307646126444,23.642780254564,32.371506870310,38.656172142951))
})
test_that("Lead criteria correct", {
expect_equal(metalCriteria(hardness, 'lead', toxicity = 'acute'),
c(13.8821727935,82.2705689542,154.2302894611,226.6880806307,280.8464812000))
expect_equal(metalCriteria(hardness, 'lead', toxicity = 'chronic'),
c(0.5409683439,3.2059659610,6.0101329607,8.8337090591,10.9441841772))
})
test_that("Nickel criteria correct", {
expect_equal(metalCriteria(hardness, 'nickel', toxicity = 'acute'),
c(144.9178376852,565.5232558349,929.8459596043,1269.1645160390,1512.8899943659))
expect_equal(metalCriteria(hardness, 'nickel', toxicity = 'chronic'),
c(16.0958977086,62.8121742856,103.2771789150,140.9649947302,168.0353708192))
})
test_that("Silver criteria correct", {
expect_equal(metalCriteria(hardness, 'silver', toxicity = 'acute'),
c(0.2963978881,4.7217556900,12.9769377435,24.4262991070,34.9109345676))
})
test_that("Zinc criteria correct", {
expect_equal(metalCriteria(hardness, 'zinc', toxicity = 'acute'),
c(36.2017651055,141.5686304956,232.9482350791,318.1075292888,379.2980477944))
expect_equal(metalCriteria(hardness, 'zinc', toxicity = 'chronic'),
c(36.4978940634,142.7266561029,234.8537421145,320.7096358679,382.4006903121))
})
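# Editor's note (illustrative sketch): `tol` above is defined but never passed to any
# expectation. If a relaxed numeric comparison is intended, expect_equal() accepts a
# `tolerance` argument, e.g. (expected values copied from the zinc/acute case above):
# expect_equal(metalCriteria(hardness, 'zinc', toxicity = 'acute'),
#              c(36.2017651055,141.5686304956,232.9482350791,318.1075292888,379.2980477944),
#              tolerance = tol)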
|
/tests/testthat/test-metalCriteria.R
|
no_license
|
jasonelaw/ORDEQWaterQualityCriteria
|
R
| false | false | 2,504 |
r
|
context("metalCriteria")
hardness <- c(25, 125, 225, 325, 400)
tol <- 1e-5
test_that("Cadmium criteria correct", {
expect_equal(metalCriteria(hardness, 'cadmium', toxicity = 'acute'),
c(0.821101892407,5.044698529249,9.789998014042,14.822626126205,18.734598642374))
expect_equal(metalCriteria(hardness, 'cadmium', toxicity = 'chronic'),
c(0.0936968237,0.2872405824,0.4318577787,0.5571341111,0.6432217364))
})
test_that("Chromium criteria correct", {
expect_equal(metalCriteria(hardness, 'chromium', toxicity = 'acute'),
c(183.0659069317,684.0122901081,1106.9604047590,1495.9844422769,1773.2980532507))
expect_equal(metalCriteria(hardness, 'chromium', toxicity = 'chronic'),
c(23.8131133690,88.9759457843,143.9928059534,194.5968406638,230.6696439922))
})
test_that("Copper criteria correct", {
expect_equal(metalCriteria(hardness, 'copper', toxicity = 'acute'),
c(4.801002749255,21.872654492311,38.055659013185,53.813269771962,65.441583928317))
expect_equal(metalCriteria(hardness, 'copper', toxicity = 'chronic'),
c(3.616582041387,14.307646126444,23.642780254564,32.371506870310,38.656172142951))
})
test_that("Lead criteria correct", {
expect_equal(metalCriteria(hardness, 'lead', toxicity = 'acute'),
c(13.8821727935,82.2705689542,154.2302894611,226.6880806307,280.8464812000))
expect_equal(metalCriteria(hardness, 'lead', toxicity = 'chronic'),
c(0.5409683439,3.2059659610,6.0101329607,8.8337090591,10.9441841772))
})
test_that("Nickel criteria correct", {
expect_equal(metalCriteria(hardness, 'nickel', toxicity = 'acute'),
c(144.9178376852,565.5232558349,929.8459596043,1269.1645160390,1512.8899943659))
expect_equal(metalCriteria(hardness, 'nickel', toxicity = 'chronic'),
c(16.0958977086,62.8121742856,103.2771789150,140.9649947302,168.0353708192))
})
test_that("Silver criteria correct", {
expect_equal(metalCriteria(hardness, 'silver', toxicity = 'acute'),
c(0.2963978881,4.7217556900,12.9769377435,24.4262991070,34.9109345676))
})
test_that("Zinc criteria correct", {
expect_equal(metalCriteria(hardness, 'zinc', toxicity = 'acute'),
c(36.2017651055,141.5686304956,232.9482350791,318.1075292888,379.2980477944))
expect_equal(metalCriteria(hardness, 'zinc', toxicity = 'chronic'),
c(36.4978940634,142.7266561029,234.8537421145,320.7096358679,382.4006903121))
})
|
# Author: Cruz Davalos Diana Ivette
# Version: 2.0
# Date: March 22, 2015
##****************************************************************************************
## Matrix inversion is usually a costly computation and there may be some benefit
## to caching the inverse of a matrix rather than compute it repeatedly.
## -----------------------------------------------------------------------------------
## Functions reported here:
## -----------------------------------------------------------------------------------
## makeCacheMatrix: This function creates a special "matrix" object that can cache
## its inverse.
## ..................................................................................
## cacheSolve: This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the matrix
## has not changed), then the cachesolve should retrieve the inverse from the cache.
## ------------------------------------------------------------------------------------
## Both functions follow the structure provided by the examples in Assignment 2.
##****************************************************************************************
# Starts makeCacheMatrix() taking a matrix class variable as argument
# and returning the 'special' matrix
makeCacheMatrix <- function(x = matrix()) {
# Initialize the variable that will hold the inverse matrix
mat <- NULL
# Function that assigns the value of the matrix,
# even when 'x' comes from another environment.
# It is also important to reset 'mat', because calling
# this function means that the value of the original matrix
# has changed.
setmatrix <- function(y) {
x <<- y
mat <<- NULL
}
# Gets the original matrix 'x'
getmatrix <- function() x
# Assign the inverse matrix to 'mat' for later use
setInverse <- function(solve) mat <<- solve
# We can retrieve the value of 'mat' because of the '<<-' assignment operator
# used before, so we do not have to calculate the inverse matrix again,
# we just have to call it.
getInverse <- function() mat
# That's it! Return all that you need, the set functions to assign
# values if necessary, and the get functions to call the cached values.
list(setmatrix = setmatrix, getmatrix = getmatrix,
setInverse = setInverse,
getInverse = getInverse)
}
# Starts cacheSolve() function taking the 'special' matrix as argument
# and returning the inverse matrix 'm'
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
#This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
# Verifies if the inverse has already been calculated
# if so, it returns the cached inverse
m <- x$getInverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# If the inverse matrix hasn't been calculated,
# it gets the inverse with the 'solve()' function
# and caches it by calling the 'setInverse()' function with
# the inverse matrix 'm' as argument.
data <- x$getmatrix()
m <- solve(data, ...)
x$setInverse(m)
m
}
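# Example usage (editor's illustrative sketch, not part of the original submission;
# the matrix values are arbitrary toy data):
# demo_cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(demo_cm)  # first call computes the inverse via solve() and caches it
# cacheSolve(demo_cm)  # second call prints "getting cached data" and returns the cache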
|
/cachematrix.R
|
no_license
|
dianaicd/ProgrammingAssignment2
|
R
| false | false | 3,312 |
r
|
#' `print` method for `qntmap` class object
#'
#' @param x
#' A `qntmap` class object returned by [`quantify()`] or [`qntmap()`].
#' @param ...
#' Discarded.
#'
#' @export
print.qntmap <- function(x, ...) {
message(
"Summary of",
paste(dim(x[[c(1L, 1L)]]), collapse = " * "),
" mass concentration map\n",
sep = " "
)
print(summary(x))
message(
"",
"This is a list object",
"x$CaO$wt returns CaO mass concentration map, and",
"x$CaO$se returns CaO standard error map",
"",
"The data are also accessible as csv files",
'in "qntmap" directory below your mapping data directory',
"e.g., example/.map/1/qntmap/CaO_wt.csv",
sep = "\n"
)
invisible(x)
}
|
/R/print.R
|
permissive
|
atusy/qntmap
|
R
| false | false | 714 |
r
|
## Matrix inversion is usually a costly computation and there may be
## some benefit to caching the inverse of a matrix rather than compute
## it repeatedly.
## The two functions in the code cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setInverse <- function(inverse) inv <<- inverse
getInverse <- function() inv
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)  # compute the inverse of the stored matrix
x$setInverse(inv)
inv
}
|
/cachematrix.R
|
no_license
|
Hamanjila/ProgrammingAssignment2
|
R
| false | false | 1,182 |
r
|
library(sf)
library(sp)
library(raster)
library(rgeos)
library(rgdal)
library(maptools)
library(spdep)
library(plyr)
library(rlist)
#Set working directory
setwd("$PATH")
#Read in shapefile for precincts
precinct_2016_near_final <- readOGR(dsn="georgia_precincts_2016",layer="VTD2016-Shape_step_5")
# precinct_2016_near_final <- readOGR(dsn="VTD2016-Shape",layer="VTD2016-Shape_step_5")
#Read in csv for the list of lists for precincts
dat = read.csv("georgia_precincts_2016/VTD2016-Shape_step_5_1.csv", header = TRUE)
# dat = read.csv("VTD2016-Shape/VTD2016-Shape_step_5_1.csv", header = TRUE)
dat <- dat[2:4]
dat <- as(dat,"data.frame")
#Set new shapefile to which we will merge the data from the csv.
precinct_2016_near_final@data <- data.frame(precinct_2016_near_final@data,dat[match(precinct_2016_near_final@data[,"ID_3"],dat[,"ID_3"]),])
#Set the projection.
precinct_2016_near_final_3 <- precinct_2016_near_final
# precinct_2016_near_final_3 <- spTransform(precinct_2016_near_final_2,CRS("+proj=longlat +ellps=GRS80 +no_defs"))
#Delete some unnecessary columns.
drops <- c('COUNTY_NAM', 'PRECINCT_I', 'PRECINCT_N','ID_3.1')
precinct_2016_near_final_3 <- precinct_2016_near_final_3[,!(names(precinct_2016_near_final_3) %in% drops)]
names(precinct_2016_near_final_3)[names(precinct_2016_near_final_3) == 'VAPPOP3'] <- 'VAPPOP'
names(precinct_2016_near_final_3)[names(precinct_2016_near_final_3) == 'INDEX_1'] <- 'IDX'
#Find the neighborhood of each precinct (polygon) in the shapefile. These neighborhoods
#will help build the neighborhood column for the final shapefile (a toy illustration of
#this adjacency step appears after the script).
nbs <- poly2nb(as(precinct_2016_near_final_3, "SpatialPolygons"), queen = FALSE)
#Create a matrix where each row and column is a precinct in the shapefile.
#There is a 1 if they are adjacent and 0 otherwise.
mat <- nb2mat(nbs, style="B")
colnames(mat) <- rownames(mat)
#This is the final check for holes. If there are any holes, handle them on a case-by-case basis.
poly_index_for_holes_2 <- which(rowSums(mat)==1)
#Create a list of the indices that will be removed since they are on the border of the state.
remove <- c(poly_index_for_holes_2[which(precinct_2016_near_final_3[poly_index_for_holes_2[],]$ST_BORDER==1)])
# precinct_2016_no_multiparts_2[remove[2],]$ID_3
# plot(precinct_2016_no_multiparts[remove,])
# We remove all of the indices of the "holes" that are actually precincts on the border of the state.
poly_index_for_holes_2 <- poly_index_for_holes_2[! poly_index_for_holes_2 %in% remove]
poly_index_for_holes_2
#Write the final precinct file with all of the necessary information.
writeOGR(obj = precinct_2016_near_final_3, dsn="georgia_precincts_2016", layer = "VTD2016-Shape_final", driver = "ESRI Shapefile")
write.csv(precinct_2016_near_final_3@data, file = 'georgia_precincts_2016/VTD2016-Shape_final_dataframe.csv', row.names = FALSE)
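# Toy illustration (editor's sketch, not part of the original pipeline): two adjacent
# unit squares showing what the poly2nb()/nb2mat() adjacency step above produces.
toy_sq1 <- Polygons(list(Polygon(cbind(c(0, 1, 1, 0, 0), c(0, 0, 1, 1, 0)))), ID = "a")
toy_sq2 <- Polygons(list(Polygon(cbind(c(1, 2, 2, 1, 1), c(0, 0, 1, 1, 0)))), ID = "b")
toy_polys <- SpatialPolygons(list(toy_sq1, toy_sq2))
toy_nbs <- poly2nb(toy_polys, queen = FALSE)  # rook contiguity, as used above
nb2mat(toy_nbs, style = "B")                  # binary adjacency: 1 = shared border, 0 = not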
|
/preprocessing/step6_merge_data.R
|
no_license
|
jsasplun/MCMC_redistricting_john
|
R
| false | false | 2,930 |
r
|
library(devtools)
create("pkg")
|
/initial_setup.R
|
no_license
|
ellisp/election2011
|
R
| false | false | 31 |
r
|
## plot 1
##
## Get data if you haven't got it
##
file = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(file, destfile = "household_power_consumption.zip", method = "curl")
unzip("household_power_consumption.zip")
##
## Cleaning up data
##
pc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
stringsAsFactors = FALSE)
pc <- transform(pc, Date = as.Date(Date, format = "%d/%m/%Y"))
# keep only desired dates
pc <- pc[pc$Date >= "2007-02-01" & pc$Date <= "2007-02-02", ]
pc <- transform(pc, Time = strptime(Time, format = "%H:%M:%S"))
pc <- transform(pc, Global_active_power = as.numeric(Global_active_power))
## open png device
png(filename = "plot1.png", width = 480, height = 480, units = "px")
## scale font down (set after opening the device so it applies to the png output)
par(cex = .7)
## generate histogram
hist(pc$Global_active_power, xlab="Global Active Power (kilowatts)", col = "red", main = "Global Active Power")
## close device
dev.off()
|
/plot1.R
|
no_license
|
quantile99/ExData_Plotting1
|
R
| false | false | 1,008 |
r
|
# The purpose of this project is to explore how best to display 2014 data on the
# world's busiest ports using graphs and maps
# IMPORT PACKAGES
library(tidyverse)
library(sf)
library(ggspatial)
library(rnaturalearth)
library(tidygeocoder)
library(maps)
#=============
# GET THE DATA
#=============
url.world_ports <- url("https://vrzkj25a871bpq7t1ugcgmn9-wpengine.netdna-ssl.com/wp-content/datasets/world_ports.RData")
load(url.world_ports)
glimpse(df.world_ports)
#=========================================
# CREATE THEMES
# We'll create two themes:
#
# 1. theme.porttheme
# - this will be a general theme that
# we'll apply to most of our charts
# to format the text, titles, etc
#
# 2. theme.smallmult
# - we'll apply this exclusively to
# "small multiple" charts
# (AKA, trellis charts). We need this
# because the axis text needs to be
# smaller for the small multiples
#=========================================
#----------------------------------------
# GENERAL THEME
# - we'll use this for most of our charts
# and build on it when we need to
#----------------------------------------
theme.porttheme <-
theme(text = element_text(family = "Gill Sans", color = "#444444")) +
theme(plot.title = element_text(size = 24)) +
theme(plot.subtitle = element_text(size = 18)) +
theme(axis.title = element_text(size = 14)) +
theme(axis.title.y = element_text(angle = 0, vjust = .5, margin = margin(r = 15))) +
theme(axis.text = element_text(size = 10)) +
theme(axis.title.x = element_text(margin = margin(t = 20))) +
theme(legend.title = element_blank())
#------------------------------------
# THEME FOR 'WIDE' BAR CHARTS
# - there are several bar charts that
# are very wide, and need some
# special formatting
#------------------------------------
theme.widebar <-
theme.porttheme +
theme(plot.title = element_text(size = 30)) +
theme(plot.subtitle = element_text(size = 20)) +
theme(legend.title = element_blank(), legend.background = element_blank()) +
theme(legend.text = element_text(size = 12)) +
theme(legend.position = c(.9,.55)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .4))
#------------------------------------
# THEME FOR SMALL MULTIPLE CHARTS
# - we'll use this for small multiple
# charts. these also have some
# special formatting requirements
#------------------------------------
theme.smallmult <-
theme.porttheme +
theme(axis.text = element_text(size = 6)) +
theme(axis.text.x = element_text(angle = 90))
#----------------------------------------------------
# BAR CHART: Port vs Volume (2014)
# - this is the "long" form of the bar chart.
# - it's harder to read, but we can fit more data
# - it also shows the uneven distribution of shipping
# volume
#----------------------------------------------------
df.world_ports %>%
filter(year == 2014) %>%
ggplot(aes(x = reorder(port_label, desc(volume)), y = volume)) +
geom_bar(stat = "identity", fill = "dark red") +
labs(title = "Busiest container ports in the world") +
labs(subtitle = '2014, in order of shipping volume') +
labs(x = "Port", y = "Shipping\nVolume") +
scale_y_continuous(labels = scales::comma_format()) +
theme.widebar
#----------------------------------------
# FLIPPED BAR CHART: Top 25 busiest ports
# - this is useful because it makes the
# chart more readable when we flip
# the axes
# - use top 25 so you can read names
#----------------------------------------
df.world_ports %>%
filter(year == 2014, rank <= 25) %>%
ggplot(aes(x = reorder(port, volume), y = volume)) +
geom_bar(stat = "identity", fill = "dark red") +
geom_text(aes(label = volume), hjust = 1.1, color = "#FFFFFF") +
scale_y_continuous(labels = scales::comma_format()) +
coord_flip() +
labs(title = "Shanghai, Singapore had much higher volume\nthan other high-volume ports in 2014") +
labs(x = "Port", y = "Shipping Volume\n(1000 TEUs)") +
theme.porttheme
#==========================
# BAR CHART: Ports in China
# = use mutate and ifelse() to divide data into China and not China
#==========================
df.world_ports %>%
mutate(china_flag = ifelse(economy == "China","China","Not China")) %>%
filter(year == 2014) %>%
ggplot(aes(x = reorder(port_label, desc(volume)), y = volume)) +
geom_bar(stat = "identity", aes(fill = china_flag)) +
scale_y_continuous(labels = scales::comma_format()) +
scale_fill_manual(values = c("dark red","#999999")) +
labs(title = "Roughly 20% of busiest ports were\nin China in 2014") +
labs(x = "Port", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.widebar
#==========================
# BAR CHART: Ports in Asia
# = use mutate and ifelse() to divide into Asia and non-Asia
#==========================
df.world_ports %>%
mutate(asia_flag = ifelse(continent == "Asia","Asia","Other")) %>%
filter(year == 2014) %>%
ggplot(aes(x = reorder(port_label, desc(volume)), y = volume)) +
geom_bar(stat = "identity", aes(fill = asia_flag)) +
scale_fill_manual(values = c("dark red","#999999")) +
labs(title = "More than half of the busiest ports were in Asia in 2014") +
labs(x = "Port", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.widebar
#========================================================
# SMALL MULTIPLE, LINE: All ports, shipping vol over time
# - This is useful for getting an overview of the
# data
#========================================================
df.world_ports %>%
ggplot(aes(x = year, y = volume, group = port_label)) +
geom_line(color = "dark red", size = 1, na.rm = T) +
facet_wrap(~ port_label) +
labs(title = "Strong growth in Shanghai, Singapore,\nShenzhen, Guangzhou") +
labs(subtitle = "2004 to 2014") +
labs(x = "Port", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.smallmult
#================================================
# LINE CHART: Shanghai, Volume change over time
# - Shanghai volume has increased substantially
# so we want to show it visually
#================================================
df.world_ports %>%
mutate(port_highlight = ifelse(port == "Shanghai","Shanghai","Other")) %>%
ggplot(aes(x = year, y = volume, group = port)) +
geom_line(aes(color = port_highlight, alpha = port_highlight), size = 1.5, na.rm = T) +
scale_color_manual(values = c("#999999","dark red")) +
scale_alpha_manual(values = c(.3,1)) +
labs(title = "Shanghai's shipping volume increased\nsubstantially from 2004 to 2014") +
labs(x = "Year", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.porttheme
#===============
# PLOT SINGAPORE
#===============
df.world_ports %>%
filter(port == "Singapore") %>%
ggplot(aes(x = year, y = volume, group = 1)) +
geom_line(color = "dark red", size = 2) +
labs(title = "Singapore volume also increased\nsubstantially from 2004 to 2014") +
labs(x = "Year", y = "Shipping\nVolume\n(1000 TEUs)") +
scale_y_continuous(limits = c(0,NA)) +
theme.porttheme
#===================================
# SMALL MULTIPLE: Rank over time
# - We'll use this to show
# the rank changes of all of the
# ports
# - Given the large number of ports
# the data will be much easier to
# read in a small multiple
#===================================
df.world_ports %>%
ggplot(aes(x = year, y = rank, group = port_label)) +
geom_line(size = 1, color = "dark red", na.rm = T) +
scale_y_reverse() +
facet_wrap(~ port_label) +
labs(title = "Ranking over time of world's busiest ports") +
labs(subtitle = "2004 to 2014") +
labs(x = "Year", y = "Rank") +
theme.smallmult
#============================
# BUMP CHART: CHINA
# here, we'll highlight China by
# creating a variable called china_labels. china_labels
# will enable us to individually color each line for the different Chinese ports
# (we do this in conjunction with scale_color_manual()).
# We're also going to modify the transparency of the lines.
# We'll set the Chinese lines to almost fully opaque,
# and set the non-Chinese lines to be highly transparent.
#============================
param.rank_n = 15
df.world_ports %>%
filter(rank <= param.rank_n) %>%
mutate(china_flag = ifelse(economy == "China", T,F)) %>%
mutate(china_labels = ifelse(china_flag == T, port,"other")) %>%
ggplot(aes(x = year, y = rank, group = port_label)) +
geom_line(aes(color = china_labels, alpha = china_flag), size = 2) +
geom_point(aes(color = china_labels, alpha = china_flag), size = 2.3) +
geom_point(color = "#FFFFFF", alpha = .8, size = .3) +
geom_text(data = df.world_ports %>% filter(year == "2014", rank <= param.rank_n), aes(label = port_label, x = '2014') , hjust = -.05, color = "#888888", size = 4) +
geom_text(data = df.world_ports %>% filter(year == "2004", rank <= param.rank_n), aes(label = port_label, x = '2004') , hjust = 1.05, color = "#888888", size = 4) +
scale_x_discrete(expand = c(.3, .3)) +
scale_y_reverse(breaks = c(1,5,10,15)) +
scale_alpha_discrete(range = c(.4,.9)) +
labs(title = "Top Chinese ports increased rank\nsubstantially from 2004 to 2014") +
labs(subtitle = "(Port ranks, by volume)") +
labs(x = "Year", y = "Rank") +
theme.porttheme +
theme(panel.grid.major.x = element_line(color = "#F3F3F3")) +
theme(panel.grid.major.y = element_blank()) +
theme(panel.grid.minor = element_blank()) +
theme(legend.position = "none") +
scale_color_manual(values = c("#4e79a5","#f18f3b","#af7a0a","#e0585b","#5aa155","#edc958","#77b7b2","#BBBBBB"))
#=============
# GET MAP DATA
#=============
map.world_polygon <- map_data("world")
head(map.world_polygon)
#=====================================
# SIMPLE DOT DISTRIBUTION MAP
# - This will be useful just to see
# the data
# - It also serves as a good test
# for the more complex chart we're
# going to make next
#=====================================
df.world_ports %>%
filter(year == "2014") %>%
ggplot(aes(x = lon, y = lat)) +
geom_polygon(data = map.world_polygon, aes(x = long, y = lat, group = group)) +
geom_point(color = "red")
#=========================
# BUBBLE DISTRIBUTION MAP
#=========================
# CREATE THEME
theme.maptheeme <-
theme(text = element_text(family = "Gill Sans", color = "#444444")) +
theme(plot.title = element_text(size = 30)) +
theme(plot.subtitle = element_text(size = 18)) +
theme(panel.background = element_rect(fill = "#FCFCFF")) +
theme(panel.grid = element_blank()) +
theme(axis.text = element_blank()) +
theme(axis.ticks = element_blank()) +
theme(axis.title = element_blank()) +
theme(legend.position = c(.17,.35)) +
theme(legend.background = element_blank()) +
theme(legend.key = element_blank()) +
theme(legend.title = element_text(size = 16)) +
theme(legend.text = element_text(size = 10))
#==============================================
# GEOSPATIAL BUBBLE
# - This will give us a sense
# of the density of shipping in a particular
# geographic region
#
#==============================================
df.world_ports %>%
filter(year == "2014") %>%
ggplot(aes(x = lon, y = lat)) +
geom_polygon(data = map.world_polygon, aes(x = long, y = lat, group = group),fill = "#AAAAAA",colour = "#818181", size = .15) +
geom_point(aes(size = volume), color = "#DD0000", alpha = .15) +
geom_point(aes(size = volume), color = "#DD0000", alpha = .7, shape = 1) +
scale_size_continuous(range = c(.2,10), breaks = c(5000, 10000, 30000), name = "Shipping Volume\n(1000 TEUs)") +
#coord_proj("+proj=robin +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs") + # use robinson projection
labs(title = "High volume ports were highly clustered in\nChina and Asia in 2014") +
theme.maptheeme
|
/most_important_ports_globally.R
|
no_license
|
TanyaReeves-Unicorn/Busiest_ports_global_charts_maps
|
R
| false | false | 12,059 |
r
|
# The purpose of this project is to look at 2014 data on ports in terms of
# the best way to display the busiest ports in terms of graphs and maps
# IMPORT PACKAGES
library(tidyverse)
library(sf)
library(ggspatial)
library(rnaturalearth)
library(tidygeocoder)
library(maps)
#=============
# GET THE DATA
#=============
url.world_ports <- url("https://vrzkj25a871bpq7t1ugcgmn9-wpengine.netdna-ssl.com/wp-content/datasets/world_ports.RData")
load(url.world_ports)
glimpse(df.world_ports)
#=========================================
# CREATE THEMES
# We'll create two themes:
#
# 1. theme.porttheme
# - this will be a general theme that
# we'll apply to most of our charts
# to format the text, titles, etc
#
# 2. theme.smallmult
# - we'll apply this exclusively to
# "small multiple" charts
# (AKA, trellis charts). We need this
# because the axis text needs to be
# smaller for the small multiples
#=========================================
#----------------------------------------
# GENERAL THEME
# - we'll use this for most of our charts
# and build on it when we need to
#----------------------------------------
theme.porttheme <-
theme(text = element_text(family = "Gill Sans", color = "#444444")) +
theme(plot.title = element_text(size = 24)) +
theme(plot.subtitle = element_text(size = 18)) +
theme(axis.title = element_text(size = 14)) +
theme(axis.title.y = element_text(angle = 0, vjust = .5, margin = margin(r = 15))) +
theme(axis.text = element_text(size = 10)) +
theme(axis.title.x = element_text(margin = margin(t = 20))) +
theme(legend.title = element_blank())
#------------------------------------
# THEME FOR 'WIDE' BAR CHARTS
# - there are several bar charts that
# are very wide, and need some
# special formatting
#------------------------------------
theme.widebar <-
theme.porttheme +
theme(plot.title = element_text(size = 30)) +
theme(plot.subtitle = element_text(size = 20)) +
theme(legend.title = element_blank(), legend.background = element_blank()) +
theme(legend.text = element_text(size = 12)) +
theme(legend.position = c(.9,.55)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = .4))
#------------------------------------
# THEME FOR 'WIDE' BAR CHARTS
# - we'll use this for small multiple
# charts. these also have some
# special formatting requirements
#------------------------------------
theme.smallmult <-
theme.porttheme +
theme(axis.text = element_text(size = 6)) +
theme(axis.text.x = element_text(angle = 90))
#----------------------------------------------------
# BAR CHART: Port vs Volume (2014)
# - this is the "long" form of the bar chart.
# - it's harder to read, but we can fit more data
# - it also shows the uneven distribution of shipping
# volume
#----------------------------------------------------
df.world_ports %>%
filter(year == 2014) %>%
ggplot(aes(x = reorder(port_label, desc(volume)), y = volume)) +
geom_bar(stat = "identity", fill = "dark red") +
labs(title = "Busiest container ports in the world") +
labs(subtitle = '2014, in order of shipping volume') +
labs(x = "Port", y = "Shipping\nVolume") +
scale_y_continuous(labels = scales::comma_format()) +
theme.widebar
#----------------------------------------
# FLIPPED BAR CHART: Top 25 busiest ports
# - this is useful because it makes the
# chart more readable when we flip
# the axes
# - use top 25 so you can read names
#----------------------------------------
df.world_ports %>%
filter(year == 2014, rank <= 25) %>%
ggplot(aes(x = reorder(port, volume), y = volume)) +
geom_bar(stat = "identity", fill = "dark red") +
geom_text(aes(label = volume), hjust = 1.1, color = "#FFFFFF") +
scale_y_continuous(labels = scales::comma_format()) +
coord_flip() +
labs(title = "Shanghai, Singapore had much higher volume\nthan other high-volume ports in 2014") +
labs(x = "Port", y = "Shipping Volume\n(1000 TEUs)") +
theme.porttheme
#==========================
# BAR CHART: Ports in China
# = use mutate and ifelse() to divide data into China and not China
#==========================
df.world_ports %>%
mutate(china_flag = ifelse(economy == "China","China","Not China")) %>%
filter(year == 2014) %>%
ggplot(aes(x = reorder(port_label, desc(volume)), y = volume)) +
geom_bar(stat = "identity", aes(fill = china_flag)) +
scale_y_continuous(labels = scales::comma_format()) +
scale_fill_manual(values = c("dark red","#999999")) +
labs(title = "Roughly 20% of busiest ports were\nin China in 2014") +
labs(x = "Port", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.widebar
#==========================
# BAR CHART: Ports in Asia
# = use mutate and ifelse() to divide into Aisa and non-Asia
#==========================
df.world_ports %>%
mutate(asia_flag = ifelse(continent == "Asia","Asia","Other")) %>%
filter(year == 2014) %>%
ggplot(aes(x = reorder(port_label, desc(volume)), y = volume)) +
geom_bar(stat = "identity", aes(fill = asia_flag)) +
scale_fill_manual(values = c("dark red","#999999")) +
labs(title = "More than half of the busiest ports were in Asia in 2014") +
labs(x = "Port", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.widebar
#========================================================
# SMALL MULTIPLE, LINE: All ports, shipping vol over time
# - This is useful for getting a new overview of the
# data
#========================================================
df.world_ports %>%
ggplot(aes(x = year, y = volume, group = port_label)) +
geom_line(color = "dark red", size = 1, na.rm = T) +
facet_wrap(~ port_label) +
labs(title = "Strong growth in Shanghai, Singapore,\nShenzhen, Guangzhou") +
labs(subtitle = "2004 to 2014") +
labs(x = "Port", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.smallmult
#================================================
# LINE CHART: Shanghai, Volume change over time
# - Shanghai volume has increased substantially
# so we want to show it visually
#================================================
df.world_ports %>%
mutate(port_highlight = ifelse(port == "Shanghai","Shanghai","Other")) %>%
ggplot(aes(x = year, y = volume, group = port)) +
geom_line(aes(color = port_highlight, alpha = port_highlight), size = 1.5, na.rm = T) +
scale_color_manual(values = c("#999999","dark red")) +
scale_alpha_manual(values = c(.3,1)) +
labs(title = "Shanghai's shipping volume increased\nsubstantially from 2004 to 2014") +
labs(x = "Year", y = "Shipping\nVolume\n(1000 TEUs)") +
theme.porttheme
#===============
# PLOT SINGAPORE
#===============
df.world_ports %>%
filter(port == "Singapore") %>%
ggplot(aes(x = year, y = volume, group = 1)) +
geom_line(color = "dark red", size = 2) +
labs(title = "Singapore volume also increased\nsubstantially from 2004 to 2014") +
labs(x = "Year", y = "Shipping\nVolume\n(1000 TEUs)") +
scale_y_continuous(limits = c(0,NA)) +
theme.porttheme
#===================================
# SMALL MULTIPLE: Rank over time
# - We'll use this to show
# the rank changes of all of the
# ports
# - Given the large number of ports
# the data will be much easier to
# read in a small multiple
#===================================
df.world_ports %>%
ggplot(aes(x = year, y = rank, group = port_label)) +
geom_line(size = 1, color = "dark red", na.rm = T) +
scale_y_reverse() +
facet_wrap(~ port_label) +
labs(title = "Ranking over time of world's busiest ports") +
labs(subtitle = "2004 to 2014") +
labs(x = "Year", y = "Rank") +
theme.smallmult
#============================
# BUMP CHART: CHINA
# here, we'll highlight China
# creating a variable called china_labels. china_labels
# will enable us to individually color each line for the different Chinese ports
# (we do this in conjunction with scale_color_manual()).
# We're also going to modify the transparency of the lines.
# We'll set the Chinese lines to almost fully opaque,
# and set the non-Chinese lines to be highly transparent. T
#============================
param.rank_n = 15
df.world_ports %>%
filter(rank <= param.rank_n) %>%
mutate(china_flag = ifelse(economy == "China", T,F)) %>%
mutate(china_labels = ifelse(china_flag == T, port,"other")) %>%
ggplot(aes(x = year, y = rank, group = port_label)) +
geom_line(aes(color = china_labels, alpha = china_flag), size = 2) +
geom_point(aes(color = china_labels, alpha = china_flag), size = 2.3) +
geom_point(color = "#FFFFFF", alpha = .8, size = .3) +
geom_text(data = df.world_ports %>% filter(year == "2014", rank <= param.rank_n), aes(label = port_label, x = '2014') , hjust = -.05, color = "#888888", size = 4) +
geom_text(data = df.world_ports %>% filter(year == "2004", rank <= param.rank_n), aes(label = port_label, x = '2004') , hjust = 1.05, color = "#888888", size = 4) +
scale_x_discrete(expand = c(.3, .3)) +
scale_y_reverse(breaks = c(1,5,10,15)) +
scale_alpha_discrete(range = c(.4,.9)) +
labs(title = "Top Chinese ports increased rank\nsubstantially from 2004 to 2014") +
labs(subtitle = "(Port ranks, by volume)") +
labs(x = "Year", y = "Rank") +
theme.porttheme +
theme(panel.grid.major.x = element_line(color = "#F3F3F3")) +
theme(panel.grid.major.y = element_blank()) +
theme(panel.grid.minor = element_blank()) +
theme(legend.position = "none") +
scale_color_manual(values = c("#4e79a5","#f18f3b","#af7a0a","#e0585b","#5aa155","#edc958","#77b7b2","#BBBBBB"))
#=============
# GET MAP DATA
#=============
map.world_polygon <- map_data("world")
head(map.world_polygon)
#=====================================
# SIMPLE DOT DISTRIBUTION MAP
# - This will be useful just to see
# the data
# - It also serves as a good test
# for the more complex chart we're
# going to make next
#=====================================
df.world_ports %>%
filter(year == "2014") %>%
ggplot(aes(x = lon, y = lat)) +
geom_polygon(data = map.world_polygon, aes(x = long, y = lat, group = group)) +
geom_point(color = "red")
#=========================
# BUBBLE DISTRIBUTION MAP
#=========================
# CREATE THEME
theme.maptheeme <-
theme(text = element_text(family = "Gill Sans", color = "#444444")) +
theme(plot.title = element_text(size = 30)) +
theme(plot.subtitle = element_text(size = 18)) +
theme(panel.background = element_rect(fill = "#FCFCFF")) +
theme(panel.grid = element_blank()) +
theme(axis.text = element_blank()) +
theme(axis.ticks = element_blank()) +
theme(axis.title = element_blank()) +
theme(legend.position = c(.17,.35)) +
theme(legend.background = element_blank()) +
theme(legend.key = element_blank()) +
theme(legend.title = element_text(size = 16)) +
theme(legend.text = element_text(size = 10))
#==============================================
# GEOSPATIAL BUBBLE
# - This will give us a sense
# of the density of shipping in a particular
# geographic region
#
#==============================================
df.world_ports %>%
filter(year == "2014") %>%
ggplot(aes(x = lon, y = lat)) +
geom_polygon(data = map.world_polygon, aes(x = long, y = lat, group = group),fill = "#AAAAAA",colour = "#818181", size = .15) +
geom_point(aes(size = volume), color = "#DD0000", alpha = .15) +
geom_point(aes(size = volume), color = "#DD0000", alpha = .7, shape = 1) +
scale_size_continuous(range = c(.2,10), breaks = c(5000, 10000, 30000), name = "Shipping Volume\n(1000 TEUs)") +
#coord_proj("+proj=robin +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs") + # use robinson projection
labs(title = "High volume ports were highly clustered in\nChina and Asia in 2014") +
  theme.maptheme
|
##################################################
##INSTRUCTIONS FOR readme PACKAGE USE ############
##################################################
#Set directory to the readme-software folder.
setwd("~/Downloads/readme-software")
#Install package
install.packages("./readme.tar.gz", lib = "./", repos = NULL, type ="source",INSTALL_opts = c('--no-lock'))
#Load in package to environment
library(readme, lib.loc = "./")
#For further instructions on use, see ?readme and ?undergrad, as well as readme.pdf
|
/package_instructions.R
|
no_license
|
leiqi/readme-software
|
R
| false | false | 518 |
r
|
# Read data
dat = read.table('household_power_consumption.txt',header = T, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
da = subset(dat, dat$Date == '1/2/2007' | dat$Date == '2/2/2007')
da$Date <- as.Date(da$Date, format = "%d/%m/%Y")
da$DateTime = paste(da$Date, da$Time)
str(da$DateTime)
da$DateTime = strptime(da$DateTime, format = "%Y-%m-%d %H:%M:%S")
# Plot 1
png("plot1.png", width=480, height=480)
plot1 = hist(da$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
# Plot 2
png("plot2.png", width=480, height=480)
with(da,plot(DateTime, Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
# Plot 3
png("plot3.png", width=480, height=480)
with(da,plot(DateTime, da$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
with(da,lines(DateTime, Sub_metering_2, type="l", col="red"))
with(da,lines(DateTime, Sub_metering_3, type="l", col="blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off()
# Plot 4
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(da$DateTime, da$Global_active_power, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(da$DateTime, da$Voltage, type="l", xlab="datetime", ylab="Voltage")
with(da,plot(DateTime, da$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering"))
with(da,lines(DateTime, Sub_metering_2, type="l", col="red"))
with(da,lines(DateTime, Sub_metering_3, type="l", col="blue"))
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
plot(da$DateTime, da$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/code.R
|
no_license
|
shimizuhsu/Electric-power-consumption
|
R
| false | false | 1,885 |
r
|
\name{monvardiff}
\alias{monvardiff}
\title{
Estimating Monotone Variance Functions Using Pseudo-Residuals
}
\description{
monvardiff provides a strictly monotone estimator of the variance
function based on the nonparametric regression model.
}
\usage{
monvardiff(x,y,a=min(x),b=max(x),N=length(x),t=length(x),r=2,hr,Kr="epanech",
hd,Kd="epanech",degree=1,inverse=0,monotonie="isoton")
}
\arguments{
\item{x}{vector containing the x-values (design points) of a sample}
\item{y}{vector containing the y-values (response) of a sample}
\item{a}{lower bound of the support of the design points density
function, or smallest fixed design point}
\item{b}{upper bound of the support of the design points density
function, or largest fixed design point}
\item{N}{number or vector of evaluation points of the unconstrained
nonparametric variance estimator (e.g. Nadaraya-Watson estimator)}
\item{t}{number or vector of points where the monotone estimation is
computed}
\item{r}{order of the difference scheme, i.e. weights \eqn{d_0,...,d_r} to calculate the pseudo-residuals}
\item{hr}{bandwidth of the kernel \eqn{K_r} used in the variance estimation step}
\item{Kr}{Kernel for the variance estimation step (unconstrained estimation).
'epanech' for Epanechnikov, 'rectangle' for rectangle, 'biweight' for biweight,
'triweight' for triweight, 'triangle' for triangle, 'cosine' for cosine kernel}
\item{hd}{bandwidth of the kernel \eqn{K_d} used in the density estimation step}
\item{Kd}{Kernel for the density estimation step (monotonization step).
'epanech' for Epanechnikov, 'rectangle' for rectangle, 'biweight' for biweight,
'triweight' for triweight, 'triangle' for triangle, 'cosine' for cosine kernel}
\item{degree}{determines the method for the unconstrained variance estimation.
'0' for the classical Nadaraya-Watson estimate, '1' for the local linear estimate.
As well \code{degree} can be the vector of the unconditional estimator provided by the
user for the design points given in the vector \code{N}}
\item{inverse}{for '0' the original variance function is estimated, for '1'
the inverse of the variance function is estimated.}
\item{monotonie}{determines the type of monotonicity. 'isoton' if the variance
function is assumed to be isotone, 'antiton' if the variance function is assumed to be antitone.}
}
\details{
Nonparametric regression models are of the form \eqn{Y_i = m(X_i) + \sigma(X_i) \cdot \varepsilon_i},
where \eqn{m} is the regression function and \eqn{\sigma} the variance function.
\code{monvardiff} performs a monotone estimate of the unknown variance function
\eqn{s=\sigma^2}. \code{monvardiff} first estimates \eqn{s} by an unconstrained nonparametric
method, the classical Nadaraya-Watson estimate or the local-linear estimate
(unless the user decides to pass his or her own estimate). This estimation contains the usage of the
Pseudo-Residuals. In a second step the inverse of the (monotone) variance function is calculated
by monotonizing the unconstrained estimate from the first step. With the above notation and
\eqn{\hat s} for the unconstrained estimate, the second step writes as follows,
\deqn{\hat s_I^{-1} = \frac{1}{Nh_d} \sum\limits_{i=1}^N \int\limits_{-\infty}^t K_d \Bigl( \frac{\hat s (\frac{i}{N} ) - u}{h_d} \Bigr) \; du.}
Finally, the monotone estimate is achieved by inversion of \eqn{\hat s_I^{-1}}.}
\value{
\code{monvardiff} returns a list of values
\item{xs}{the input values x, standardized on the interval \eqn{[0,1]}}
\item{y}{input variable y}
\item{z}{the points, for which the unconstrained function is estimated}
\item{t}{the points, for which the monotone variance function will be estimated}
\item{length.x}{length of the vector x}
\item{length.z}{length of the vector z}
\item{length.t}{length of the vector t}
\item{r}{order of the difference scheme, i.e. number of weights to calculate the pseudo-residuals}
\item{hr}{bandwidth used with the Kernel \eqn{K_r}}
\item{hd}{bandwidth used with the Kernel \eqn{K_d}}
\item{Kr}{kernel used for the unconstrained variance estimate}
\item{Kd}{kernel used for the monotonization step}
\item{degree}{method, which was used for the unconstrained variance estimate}
\item{ldeg.vektor}{ length of the vector degree. If ldeg.vektor is not equal to 1 the user provided the vector of the unconditional variance
estimator for the design points given in the vector N}
\item{inverse}{indicates, if the origin variance function or its inverse has been estimated}
\item{estimation}{the monotone estimate at the design points \eqn{t}}
}
\author{
This R Package was developed by Kay Pilz and Stefanie Titoff. Earlier developments of the estimator were made by Holger Dette and Kay Pilz.
}
\seealso{
\code{monreg} for monotone regression function estimation and \code{monvarresid} for monotone variance function estimation by nonparametric residuals.
}
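\examples{
## Illustrative sketch added for clarity (not from the package authors):
## simulated data with an increasing variance function; the bandwidths
## hr and hd below are ad-hoc choices, not recommendations.
\dontrun{
n <- 200
x <- sort(runif(n))
y <- sin(2 * pi * x) + sqrt(0.1 + x) * rnorm(n)
fit <- monvardiff(x, y, hr = 0.2, hd = 0.2)
str(fit$estimation)
}
}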
\keyword{nonparametric}
\keyword{smooth}
\keyword{regression}
|
/man/monvardiff.rd
|
no_license
|
cran/monreg
|
R
| false | false | 4,945 |
rd
|
\alias{gtkButtonEnter}
\name{gtkButtonEnter}
\title{gtkButtonEnter}
\description{Emits a \code{\link{gtkButtonEnter}} signal to the given \code{\link{GtkButton}}.}
\usage{gtkButtonEnter(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkButton}}] The \code{\link{GtkButton}} you want to send the signal to.}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkButtonEnter.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false | false | 386 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/weibull-random.R
\name{rtailw}
\alias{rtailw}
\title{TailW Random Sample Generation}
\usage{
rtailw(n, threshold, scale, shape)
}
\arguments{
\item{n}{Sample size.}
\item{threshold}{Minimum value of the tail.}
\item{scale}{Scale parameter.}
\item{shape}{Shape parameter.}
}
\value{
Gives random deviates of the TailW. The length of the result is determined by n.
}
\description{
This function generates random deviates for the tailW distribution.
}
\examples{
x <- rtailw(1000, 1, 2, 3)
hist(x, breaks = "FD")
}
\keyword{TailW}
|
/man/rtailw.Rd
|
no_license
|
cran/distTails
|
R
| false | true | 637 |
rd
|
## Augment the .Rprofile for a project -- if it doesn't exist, just copy
## from packrat; if it does, check it and add if necessary
augmentRprofile <- function(project = NULL) {
project <- getProjectDir(project)
path <- file.path(project, ".Rprofile")
if (!file.exists(path)) {
file.copy(
instInitRprofileFilePath(),
path
)
} else {
editRprofileAutoloader(project, "update")
}
}
# edit the .Rprofile for this project
editRprofileAutoloader <- function(project, action = c("update", "remove")) {
# resolve action argument
action <- match.arg(action)
# if the .Rprofile doesn't exist, create it
if (!file.exists(file.path(project, ".Rprofile")))
file.create(file.path(project, ".Rprofile"))
## Read the .Rprofile in and see if it's been packified
path <- file.path(project, ".Rprofile")
.Rprofile <- readLines(path)
packifyStart <- grep("#### -- Packrat Autoloader", .Rprofile, fixed = TRUE)
packifyEnd <- grep("#### -- End Packrat Autoloader -- ####", .Rprofile, fixed = TRUE)
if (length(packifyStart) && length(packifyEnd))
.Rprofile <- .Rprofile[-c(packifyStart:packifyEnd)]
## Append init.R to the .Rprofile if needed
if (identical(action, "update"))
.Rprofile <- c(.Rprofile, readLines(instInitRprofileFilePath()))
## if the .Rprofile is now empty, delete it
if (identical(gsub("[[:space:]]", "", unique(.Rprofile)), "") ||
!length(.Rprofile))
file.remove(file.path(project, ".Rprofile"))
else
cat(.Rprofile, file = path, sep = "\n")
invisible()
}
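## Illustrative usage sketch (not part of the original source). Both helpers rely
## on packrat internals such as getProjectDir() and instInitRprofileFilePath(),
## so the calls below assume they are evaluated from within the packrat package:
# augmentRprofile("~/my-project")                   # add or refresh the autoloader
# editRprofileAutoloader("~/my-project", "remove")  # strip the autoloader again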
|
/packrat/src/packrat/packrat/R/augment-rprofile.R
|
permissive
|
rachjone/iapsr
|
R
| false | false | 1,554 |
r
|
#written by Vineet W. Singh - 04-12-2017
#submissions for various parts of assignment of Week 4 - Part 4 of the
#Exploratory Data Analysis module of the data science course of coursera
#this script checks to see if the data files are present in the current
#directory, if the files are present, it will open it and load the data
#into data frames.
#If the file is not present it will try to download the main zip file
#from the url provided and will try to unzip the data files into the current
#directory and load the data into the required data frames
#The data will be processed by subseting into appropriate sub frames where
#required and will extract the necessary data and process it as required.
#The script will then produce the required plot and save it as a png file.
#
#This script addresses Part 2 of the assignment:
#Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
#(fips == "24510") from 1999 to 2008?
#Use the base plotting system to make a plot answering this question.
#check if package curl is installed
if(is.element("curl", installed.packages()[,1])){ #check if curl is installed
require("curl") #load curl if it is installed
} else{ #curl is not installed - stop
stop("missing package: curl, please install it first")
}
#check if package ggplot2 is installed
if(is.element("ggplot2", installed.packages()[,1])){
#check if ggplot2 is installed
require("ggplot2") #load ggplot2 if it is installed
} else{ #ggplot2 is not installed - stop
stop("missing package: ggplot2, please install it first")
}
#check if package sqldf is installed
if(is.element("sqldf", installed.packages()[,1])){ #check if sqldf is installed
require("sqldf") #load sqldf if it is installed
} else{ #sqldf is not installed - stop
stop("missing package: sqldf, please install it first")
}
#check to see if input data exists or download it and then read it
if ((file.exists("summarySCC_PM25.rds")
& file.exists("Source_Classification_Code.rds"))){
message("loading emissions (NEI) data")
NEI <- readRDS("summarySCC_PM25.rds")
message("loading Source classification code (SCC) data")
SCC <- readRDS("Source_Classification_Code.rds")
} else {
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url,destfile="./datazip.zip",method="curl")
unzip('./datazip.zip',exdir='./')
message("loading emissions (NEI) data")
NEI <- readRDS("summarySCC_PM25.rds")
message("loading Source classification code (SCC) data")
SCC <- readRDS("Source_Classification_Code.rds")
}
message("generating the plot")
# get the number of years out as a factor so that they can be used to plot on
# the x axis
x<-levels(as.factor(NEI$year))
#try out the sqldf package. make a sql query to extract all rows where
#the county is baltimore
query<-"select * from NEI where fips = '24510'"
#dispatch the query and get the results
baltimore<-sqldf(query)
#apply the sum function to all emissions grouped by the years and read it into
#a vector for plotting
a2s<-tapply(baltimore$Emissions,baltimore$year,sum)
#ans2
#open the graphics device i.e. png file
png("plot2.png",res=150,width=20,height=20,units="cm")
#make the plot of points of total emissions in the baltimore
plot(x=x,y=a2s,type="p",pch=1,ylab="Total PM2.5 Emission",
main="Total PM2.5 Emissions in Baltimore City in 1999, 2002, 2005 & 2008",
xaxt='n', xlab="Year")
#add the lines connecting the points to make the plot a bit more informative
lines(x=x,y=a2s,type="l",lty=1,lwd=2)
#add years info/title on the x axis
axis(1,at=c(1999,2002,2005,2008),labels = c("1999","2002","2005","2008"))
#save the file
dev.off()
rm(NEI)
rm(SCC)
|
/script2.R
|
no_license
|
Vulcan-Logic/DSC4W4
|
R
| false | false | 3,742 |
r
|
# This wrapper takes Population5 indicators from DemoData and Standardizes/harmonizes
# by age group
# output data frame includes two series:
# "abridged" contains standard abridged age groups 0, 1-4, 0-4, 5-9, 10-14 ..... up to the open age group
# "complete from abridged" contains the single-year, total, open and unknown age records extracted from the abridged series
DDharmonize_Pop5 <- function (indata) {
# split input data by indicator (abridged or single)
pop_abridged <- indata
# Initialize sex specific outputs
abr_sex <- NULL
cpl_from_abr_sex <-NULL
sexes <- unique(pop_abridged$SexID)
for (sex in sexes) { # loop through sex ids, 1=males, 2=females, 3= both
print(paste("SexID = ", sex))
abr <- pop_abridged %>%
dplyr::filter(SexID == sex & !is.na(DataValue)) %>%
select(-SexID) %>%
distinct()
if (nrow(abr[abr$AgeSpan == 5,]) > 0) { # only process those that have at least one abridged age group
# if "Final" data status is available, keep only the final series
if ("Final" %in% unique(abr$DataStatusName)) {
abr <- abr %>%
dplyr::filter(DataStatusName == "Final")
}
# check for multiple series ids
ids_series <- unique(abr$SeriesID)
n_series <- length(ids_series)
# for each unique series,
abr_out <- NULL
for (i in 1:n_series) {
df <- abr %>% dplyr::filter(SeriesID == ids_series[i])
# populate any missing abridged records based on any data by single year of age
sngl <- df %>% dplyr::filter(AgeSpan == 1)
if (nrow(sngl) > 1) {
sngl2abr <- sngl %>% dd_single2abridged %>%
select(-AgeSort) %>%
mutate(DataSourceYear = sngl$DataSourceYear[1])
df <- df %>%
bind_rows(sngl2abr %>% dplyr::filter(!(AgeLabel %in% df$AgeLabel)))
}
# check whether it is a full series with all age groups represented and an open age greater than 60
df_abr_std <- df[(df$AgeStart == 0 & df$AgeSpan == 1 ) |
df$AgeSpan %in% c(-1, -2, 5),]
if (nrow(df_abr_std) > 13) {
df$check_full <- dd_series_isfull(df_abr_std, abridged = TRUE)
} else { df$check_full <- FALSE }
abr_out <- rbind(abr_out, df)
}
abr <- abr_out
rm(abr_out)
# if there is more than one series ...
if (n_series > 1) {
latest_source_year <- max(abr$DataSourceYear)
check_latest_full <- unique(abr$check_full[abr$DataSourceYear == latest_source_year])
# ... and latest series is full then keep only that one
if (check_latest_full) {
abr <- abr[abr$DataSourceYear == latest_source_year,]
} else {
# ... and latest series is not full, then keep the latest data source record for each age
abr <- abr %>% dd_latest_source_year
}
}
# tidy up the data frame
abr <- abr %>%
select(DataSourceYear, AgeStart, AgeEnd, AgeLabel, AgeSpan, DataValue) %>%
distinct()
# if there are still duplicate age groups (e.g., Eswatini 2017 DYB)
# keep the last one in current sort order
abr <- abr %>%
mutate(sorting = 1:nrow(abr)) %>%
group_by(AgeLabel) %>%
mutate(keeping = max(sorting)) %>%
ungroup() %>%
dplyr::filter(sorting == keeping) %>%
select(-sorting, -keeping)
# if no record for unknown age, set data value to zero
if (!("Unknown" %in% abr$AgeLabel)) {
abr <- abr %>%
bind_rows(data.frame(AgeStart = -2,
AgeEnd = -2,
AgeSpan = -2,
AgeLabel = "Unknown",
DataSourceYear = NA,
DataValue = 0))
}
# sometimes there are single year ages (not 0) on the abridged series (often for children)
# extract these for use on single series
cpl_from_abr <- abr %>% dd_extract_single %>%
bind_rows(abr[abr$AgeSpan < 0,])
cpl_from_abr <- dd_age_standard(cpl_from_abr, abridged = FALSE) %>%
dplyr::filter(!is.na(DataValue)) %>%
mutate(note = NA)
# remove single year records for age> 0 from abridged
abr <- abr %>%
dplyr::filter(!(AgeSpan == 1 & AgeStart != 0)) %>%
arrange(AgeStart)
# reconcile first age groups
abr <- abr %>% dd_firstages_compute
# check whether there are multiple open age groups
oag_multi <- abr %>% dd_oag_multiple
# compute closed age groups from multiple open age groups and add to data if missing
if (oag_multi) {
add <- abr %>% dd_oag2closed %>%
dplyr::filter(!(AgeLabel %in% abr$AgeLabel[!is.na(abr$DataValue)]))
      if (nrow(add) > 0) {
abr <- abr %>%
bind_rows(add) %>%
arrange(AgeStart)
}
}
# identify the start age of the open age group needed to close the series
oag_start <- abr %>% dd_oag_agestart
# flag whether this open age group exists in the series
oag_check <- paste0(oag_start,"+") %in% abr$AgeLabel
# drop records for open age groups that do not close the series
abr <- abr %>%
dplyr::filter(!(AgeStart > 0 & AgeSpan == -1 & AgeStart != oag_start))
# check that there are no missing age groups on the abridged series
if (nrow(abr[abr$AgeStart >= 5,]) > 0) {
check_abr <- is_abridged(abr$AgeStart[abr$AgeStart >=5])
} else {
check_abr <- FALSE
}
if (check_abr==TRUE) {
# compute all possible open age groups given available input
abr_oag <- dd_oag_compute(abr, age_span = 5)
# append the oag that completes the abridged series
abr <- abr %>%
bind_rows(abr_oag[!(abr_oag$AgeLabel %in% abr$AgeLabel) &
abr_oag$AgeStart == oag_start,]) %>%
arrange(AgeSort)
}
# check again whether any open age group exists
oag_check <- paste0(oag_start,"+") %in% abr$AgeLabel
# if total is missing and series is otherwise complete, compute total
if (!("Total" %in% abr$AgeLabel) & "0-4" %in% abr$AgeLabel & oag_check == TRUE) {
abr <- abr %>%
bind_rows(data.frame(AgeStart = 0,
AgeEnd = -1,
AgeLabel = "Total",
AgeSpan = -1,
AgeSort = 184,
DataSourceYear = NA,
DataValue = sum(abr$DataValue[abr$AgeSpan == 5]) +
abr$DataValue[abr$AgeSpan == -1 & abr$AgeStart == oag_start] +
abr$DataValue[abr$AgeLabel == "Unknown"]))
}
# write a note to alert about missing data
abr$note <- NA
if (check_abr == FALSE | oag_check == FALSE) {
abr$note <- "The abridged series is missing data for one or more age groups."
}
if (!("0" %in% abr$AgeLabel & "1-4" %in% abr$AgeLabel & "0-4" %in% abr$AgeLabel)) {
abr$note <- "The abridged series is missing data for one or more age groups."
}
abr$SexID <- sex
cpl_from_abr$SexID <- sex
# now compile these for each sex
abr_sex <- rbind(abr_sex, abr)
cpl_from_abr_sex <- rbind(cpl_from_abr_sex, cpl_from_abr)
  } else { # close for if (nrow(abr[abr$AgeSpan == 5,]) > 0)
# sometimes there are no 5-year age groups but there are 1-year. We reserve those for complete
if (nrow(abr[abr$AgeSpan == 1,]) > 0) {
abr <- abr %>% dd_latest_source_year
cpl_from_abr <- abr %>% dd_extract_single %>%
bind_rows(abr[abr$AgeSpan < 0,]) %>%
select(DataSourceYear, AgeStart, AgeEnd, AgeLabel, AgeSpan, DataValue)
cpl_from_abr <- dd_age_standard(cpl_from_abr, abridged = FALSE) %>%
dplyr::filter(!is.na(DataValue)) %>%
select(DataSourceYear, AgeStart, AgeEnd, AgeLabel, AgeSpan, AgeSort, DataValue) %>%
mutate(note = NA,
note = as.character(note),
SexID = sex)
abr <- NULL
# now compile these for each sex
abr_sex <- rbind(abr_sex, abr)
cpl_from_abr_sex <- rbind(cpl_from_abr_sex, cpl_from_abr)
} else { # if no 5 or 1 year age groups, then just keep total, open and unknown
abr <- abr %>% dd_latest_source_year %>%
select(DataSourceYear, AgeStart, AgeEnd, AgeLabel, AgeSpan, DataValue)
abr <- dd_age_standard(abr, abridged = TRUE) %>%
dplyr::filter(!is.na(DataValue)) %>%
select(DataSourceYear, AgeStart, AgeEnd, AgeLabel, AgeSpan, AgeSort, DataValue) %>%
mutate(note = "The abridged series is missing data for one or more age groups.",
SexID = sex)
cpl_from_abr <- NULL
# now compile these for each sex
abr_sex <- rbind(abr_sex, abr)
cpl_from_abr_sex <- rbind(cpl_from_abr_sex, cpl_from_abr)
}
}
# clean up the environment before beginning next loop
  suppressWarnings(rm(abr, abr_oag, cpl_from_abr, check_abr, oag_multi, oag_start, oag_check))
} # close loop for sex
# add series field to data
if (!is.null(abr_sex)) {
abr_sex <- abr_sex %>%
mutate(abridged = TRUE,
complete = FALSE,
series = "abridged") %>%
dplyr::filter(AgeSpan %in% c(-2, -1, 1, 4, 5))
}
if (!is.null(cpl_from_abr_sex)) {
cpl_from_abr_sex <- cpl_from_abr_sex %>%
mutate(abridged = FALSE,
complete = TRUE,
series = "complete from abridged") %>%
dplyr::filter(AgeSpan %in% c(-2, -1, 1))
}
outdata <- rbind(abr_sex, cpl_from_abr_sex)
return(outdata)
}
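## Illustrative usage sketch (not part of the original source). It assumes a
## DemoData extract with the columns used above (SexID, SeriesID, DataSourceYear,
## DataStatusName, AgeStart, AgeEnd, AgeLabel, AgeSpan, DataValue) and that the
## dd_* helper functions called in the body are loaded:
# pop5_harmonized <- DDharmonize_Pop5(indata = pop5_extract)
# abr_only <- subset(pop5_harmonized, series == "abridged")
# cpl_only <- subset(pop5_harmonized, series == "complete from abridged")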
|
/DDharmonize_Pop5.R
|
no_license
|
Shelmith-Kariuki/ddharmony
|
R
| false | false | 10,678 |
r
|
## 16 March ##
## Correlation and regression ##
### Part 1: Pearson's coefficient vs Spearman's coefficient
x <- c(1, 2, 6, 8, 9, 7, 7.5, 10, 3, 4, 5.5)
y <- c(2, 4, 11, 15, 19, 16, 14, 23, 7, 6, 11)
plot(x, y)
# Pearson's coefficient
cor.test(x, y)
# Spearman's coefficient
cor.test(x, y, method = 'spearman')
x <- c(1, 2, 6, 8, 9, 7, 7.5, 10, 3, 4, 5.5, 150)
y <- c(2, 4, 11, 15, 19, 16, 14, 23, 7, 6, 11, 10)
plot(x, y)
cor.test(x, y)
cor.test(x, y, method = 'spearman')
cor.test(x, y, method = 'kendall')
### Part 2: real data
educ <- read.csv("https://raw.githubusercontent.com/LingData2019/LingData/master/data/education.csv")
library(tidyverse)
library(GGally)
scores <- educ %>% select(read, write, math, science, socst)
pairs(scores)
ggpairs(scores)
ggplot(data = scores, aes(x = math, y = science)) +
geom_point() +
labs(x = "Math score",
y = "Science score",
title = "Students' scores")
cor.test(scores$math, scores$science)
model1 <- lm(data = scores, science ~ math)
summary(model1)
ggplot(data = scores, aes(x = math, y = science)) +
geom_point() +
labs(x = "Math score",
y = "Science score",
title = "Students' scores") +
geom_smooth(method=lm)
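### Illustrative addition (not part of the original seminar script):
### use the fitted linear model to predict science scores for a few new math scores
new_scores <- data.frame(math = c(40, 60, 80))
predict(model1, newdata = new_scores, interval = "confidence")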
|
/seminars/2019-03-16/corr-regression.R
|
no_license
|
LingData2019/LingData
|
R
| false | false | 1,213 |
r
|
# This code is essentially to read in all non-gdx related info such as plotting colors, technology types and other such things
# Copying heavily from Yinong's code
library(gdxrrw)
gms.dir <- gams.directory
GAMSVersions<-c("24.4","24.7","24.6","24.5","24.3","24.2","24.1", "25.1")
for (version in GAMSVersions){
# FOLLOWING ASSUMES GAMS FILE LOCATED in program files (x86), may be elsewhere
if (dir.exists(file.path(gms.dir, version))){
Selected_GAMSVersion<-version
break
}
}
gams.folder <- file.path(gms.dir,Selected_GAMSVersion)
start_gams <- function(dir = paste0("/../../../Users/tbowen/AppData/Local/Programs/GAMS/",Selected_GAMSVersion)) {
# Try to load package
out <- require(gdxrrw)
if (!out) {
print("Error: gdxrrw package not installed")
print(" Go to ReEDS-R Readme file for installation instructions")
} else {
out2 <- igdx(dir)
if(!out2) {
print("Error: gdxrrw package not properly loaded")
print(" Use start_gams(dir), where 'dir' is your GAMS installation directory")
}
}
}
start_gams(dir = gams.folder)
|
/functions/start_gdxr.R
|
no_license
|
MasonBowen/ReEDS-Data-Visualizer
|
R
| false | false | 1,089 |
r
|
########################################
#' @title cansee
#' @name cansee
#' @description Check if point2 (xy2) is visible from point1 (xy1) given
#' a certain DEM (r)
#'
#' @export
#'
#' @param r A DEM raster
#' @param xy1 A vector/matrix with X and Y coordinates for Point 1
#' @param xy2 A vector/matrix with X and Y coordinates for Point 2
#' @param h1 A numeric giving the extra height offset of Point 1
#' @param h2 A numeric giving the extra height offset of Point 2
#'
#' @return A boolean value, indicating if the point (xy2) is visible
#'
#' @author Sebastian Gatscha
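#'
#' @examples \dontrun{
#' ## Illustrative sketch added for clarity (not by the package author):
#' ## a flat artificial DEM with a 20 m wall between the two points.
#' library(raster)
#' r <- raster(nrows = 50, ncols = 50, xmn = 0, xmx = 50, ymn = 0, ymx = 50)
#' r[] <- 0
#' r[cellFromCol(r, 25)] <- 20
#' print(cansee(r, xy1 = c(5, 25), xy2 = c(45, 25), h1 = 1.8, h2 = 1.8)) # blocked
#' print(cansee(r, xy1 = c(5, 25), xy2 = c(45, 25), h1 = 50, h2 = 1.8))  # visible
#' }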
cansee <- function(r, xy1, xy2, h1=0, h2=0){
# xy1 = c(4653100.36021378, 2744048.65794167);
# xy2 = c(4648381.88040377, 2741196.10301024);
# xy1 = xy1; xy2 = xy2[1,]
### can xy1 see xy2 on DEM r?
### r is a DEM in same x,y, z units
### xy1 and xy2 are 2-length vectors of x,y coords
### h1 and h2 are extra height offsets
### (eg top of mast, observer on a ladder etc)
xyz = rasterprofile(r, xy1, xy2)
np = length(xyz[,1])-1
h1 = xyz[["z"]][1] + h1
h2 = xyz[["z"]][np] + h2
hpath = h1 + (0:np)*(h2-h1)/np
invisible(!any(hpath < xyz[["z"]], na.rm = T))
}
#' @title viewTo
#' @name viewTo
#' @description Check which of multiple points (xy2) are visible
#' from Point 1 (xy1)
#'
#' @export
#' @importFrom plyr aaply
#'
#' @param r A DEM raster
#' @param xy1 A vector/matrix with X and Y coordinates for Point 1
#' @param xy2 A matrix with X and Y coordinates for Points 2
#' @param h1 A numeric giving the extra height offset of Point 1
#' @param h2 A numeric giving the extra height offset of Point 2
#' @param progress Is passed on to plyr::aaply
#'
#' @return A boolean vector indicating, for each element of Points 2 (xy2),
#' whether it is visible from Point 1 (xy1)
#'
#' @author Sebastian Gatscha
#'
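#' @examples \dontrun{
#' ## Illustrative sketch added for clarity (not by the package author):
#' ## which target points can be seen from one observer on a flat DEM with a wall.
#' library(raster)
#' r <- raster(nrows = 50, ncols = 50, xmn = 0, xmx = 50, ymn = 0, ymx = 50)
#' r[] <- 0
#' r[cellFromCol(r, 25)] <- 20
#' targets <- cbind(x = c(10, 30, 45), y = c(25, 25, 25))
#' viewTo(r, xy1 = c(5, 25), xy2 = targets, h1 = 1.8, h2 = 1.8)
#' }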
viewTo <- function(r, xy1, xy2, h1=0, h2=0, progress="none"){
# xy1 = c(x = 4653100.36021378, y = 2744048.65794167);
# xy2 = structure(c(4648381.88040377, 4649001.7726914, 4649621.66497904,
# 4650241.55726667, 4650861.4495543, 4648381.88040377, 2741196.10301024,
# 2741196.10301024, 2741196.10301024, 2741196.10301024, 2741196.10301024,
# 2741815.99529787), .Dim = c(6L, 2L), .Dimnames = list(NULL, c("x1",
# "x2")))
# xy1 = turbine_locs[1,]; xy2 = sample_xy; h1=h2=0
## xy2 is a matrix of x,y coords (not a data frame)
a <- plyr::aaply(xy2, 1, function(d){
cansee(r,xy1 = xy1,xy2 = d,h1,h2)}, .progress=progress)
a[is.na(a)] <- FALSE
return(a)
}
#' @title rasterprofile
#' @name rasterprofile
#' @description Sample a raster along a straight line between 2 points
#'
#' @export
#' @importFrom raster res cellFromXY
#' @importFrom stats complete.cases
#'
#' @param r A DEM raster
#' @param xy1 A matrix with X and Y coordinates for Point 1
#' @param xy2 A matrix with X and Y coordinates for Points 2
#' @param plot Plot the process? Default is FALSE
#'
#' @return A data frame with columns x, y and z: the coordinates sampled along
#' the line and the corresponding raster values
#'
#' @author Sebastian Gatscha
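#'
#' @examples \dontrun{
#' ## Illustrative sketch added for clarity (not by the package author):
#' ## sample a small artificial DEM along a line and inspect the profile.
#' library(raster)
#' r <- raster(nrows = 50, ncols = 50, xmn = 0, xmx = 50, ymn = 0, ymx = 50)
#' r[] <- runif(ncell(r), 0, 10)
#' profile <- rasterprofile(r, xy1 = c(5, 25), xy2 = c(45, 25), plot = TRUE)
#' head(profile)
#' }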
rasterprofile <- function(r, xy1, xy2, plot=FALSE){
# r = DEM_meter[[1]]; xy1 = sample_xy[29,]; xy2 = sample_xy[26,]; plot=T
if (plot==TRUE) {
plot(r)
points(x = xy2[1], y=xy2[2], col="blue", pch=20, cex=1.4)
points(x = xy1[1], y=xy1[2], col="red", pch=20, cex=2)
}
### sample a raster along a straight line between two points
### try to match the sampling size to the raster resolution
dx = sqrt( (xy1[1]-xy2[1])^2 + (xy1[2]-xy2[2])^2 )
nsteps = 1 + round(dx/ min(raster::res(r)))
xc = xy1[1] + (0:nsteps) * (xy2[1]-xy1[1])/nsteps
yc = xy1[2] + (0:nsteps) * (xy2[2]-xy1[2])/nsteps
if (plot==TRUE) {
points(x = xc, y=yc, col="red", pch=20, cex=1.4)
}
rasterVals <- r[raster::cellFromXY(r, cbind(xc,yc))]
# rasterVals <- raster::extract(x = r, y = cbind(xc,yc), buffer=5, df=T)
# rasterVals <- rasterVals[!is.na(rasterVals)]
pointsZ <- data.frame(x = xc, y = yc, z = rasterVals)
if (plot==TRUE) {
points(pointsZ$x, pointsZ$y, pch=20, col="black")
text(pointsZ$x, pointsZ$y, pos=1, pointsZ$z, cex=0.5)
}
if (any(is.na(pointsZ))) {
pointsZ <- pointsZ[stats::complete.cases(pointsZ),]
# browser()
}
return(pointsZ)
}
#' @title viewshed
#' @name viewshed
#' @description Calculate visibility for given points in
#' a given area.
#'
#' @export
#'
#' @importFrom sp coordinates spsample
#' @importFrom raster res ncell
#' @importFrom plyr aaply
#' @importFrom sf st_as_sf
#' @param r A DEM raster
#' @param shape A SpatialPolygon of the windfarm area.
#' @param turbine_locs Coordinates or SpatialPoint representing
#' the wind turbines
#' @param h1 A numeric giving the extra height offset of Point 1
#' @param h2 A numeric giving the extra height offset of Point 2
#' @param progress Is passed on to plyr::aaply
#'
#' @return A list of 5, containing the boolean visibility result for every cell,
#' the raster cell points, a SimpleFeature Polygon of the given area,
#' the DEM raster and the turbine coordinates
#'
#' @examples \dontrun{
#' library(sp)
#' Polygon1 <- Polygon(rbind(c(4488182, 2667172), c(4488182, 2669343),
#' c(4499991, 2669343), c(4499991, 2667172)))
#' Polygon1 <- Polygons(list(Polygon1), 1);
#' Polygon1 <- SpatialPolygons(list(Polygon1))
#' Projection <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000
#' +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
#' proj4string(Polygon1) <- CRS(Projection)
#' DEM_meter <- getDEM(Polygon1)
#'
#' sample_POI <- spsample(DEM_meter[[2]], n = ncell(DEM_meter[[1]]), type = "regular")
#' sample_xy <- coordinates(sample_POI)
#'
#' turbloc = spsample(DEM_meter[[2]], 10, type = "random");
#' res <- viewshed(r = DEM_meter[[1]], shape=DEM_meter[[2]], turbine_locs = turbloc, h1=1.8, h2=50)
#' }
#' @author Sebastian Gatscha
viewshed <- function(r, shape, turbine_locs, h1=0, h2=0, progress="none"){
# r = DEM_meter[[1]]; shape=DEM_meter[[2]]; turbine_locs = turbloc
# h1=0; h2=0; progress="none"
if (class(shape)[1] == "sf") {
shape <- as(shape, "Spatial")
}
if (class(turbine_locs) == "SpatialPoints") {
turbine_locs = sp::coordinates(turbine_locs)
}
smplf <- sf::st_as_sf(shape)
smplf <- sf::st_buffer(smplf, dist = 10)
shape <- as(smplf, "Spatial")
sample_POI <- sp::spsample(shape, n = raster::ncell(r), type = "regular")
sample_xy <- sp::coordinates(sample_POI)
## xy2 is a matrix of x,y coords (not a data frame)
res <- plyr::aaply(turbine_locs, 1, function(d){
viewTo(r, xy1 = d, xy2 = sample_xy, h1, h2)
}, .progress=progress)
if (is.matrix(res)) {
res <- res[1:nrow(res),1:nrow(sample_xy)]
}
  if (is.logical(res)) {
    res <- res[1:nrow(sample_xy)]
  }
return(list("Result"=res, "Raster_POI" = sample_xy,
"Area" = sf::st_as_sf(shape), "DEM" = r, "Turbines" = turbine_locs))
}
## Not working yet
# viewshed_par <- function(r, shape, turbine_locs, h1=0, h2=0, progress="none"){
# # r = DEM_meter; shape=shape_meter; turbine_locs = turbloc
# # h1=0; h2=0;
#
# if (class(shape)[1] == "sf") {
# shape <- as(shape, "Spatial")
# }
# if (class(turbine_locs) == "SpatialPoints") {
# turbine_locs = sp::coordinates(turbine_locs)
# }
#
# sample_POI <- sp::spsample(shape, n = raster::ncell(r), type = "regular")
# sample_xy <- sp::coordinates(sample_POI)
#
#
# library(parallel)
# nCore <- parallel::detectCores()
# cl <- parallel::makeCluster(nCore)
# parallel::clusterEvalQ(cl, {
# library(plyr)
# library(raster)
# })
# parallel::clusterExport(cl, varlist = c("turbine_locs", "sample_xy",
# "viewTo", "cansee", "rasterprofile",
# "r", "h1", "h2", "progress"))
#
# res <- parallel::parApply(cl = cl, X = turbine_locs, 1, function(d){
# viewTo(r, xy1 = d, xy2 = sample_xy, h1, h2, progress)
# })
# res <- t(res)
#
# parallel::stopCluster(cl)
#
# if (is.matrix(res)) {
# res <- res[1:nrow(res),1:nrow(sample_xy)]
# }
# if (is.logical(res)) {
# res[1:nrow(sample_xy)]
# }
#
# return(list("Result"=res, "Raster_POI" = sample_xy,
# "Area" = sf::st_as_sf(shape), "DEM" = r, "Turbines" = turbine_locs))
# }
# res <- viewshed_par(r = DEM_meter, shape=shape_meter, turbine_locs = turbloc, h1=1.8, h2=50)
#' @title plot_viewshed
#' @name plot_viewshed
#' @description Plot the result of viewshed
#'
#' @export
#'
#' @importFrom raster plot
#' @importFrom sf st_geometry
#'
#' @param res The resulting list from viewshed
#' @param legend Plot a legend? Default is FALSE
#'
#' @return NULL
#' @examples \dontrun{
#' library(sp)
#' library(raster)
#' Polygon1 <- Polygon(rbind(c(4488182, 2667172), c(4488182, 2669343),
#' c(4499991, 2669343), c(4499991, 2667172)))
#' Polygon1 <- Polygons(list(Polygon1), 1);
#' Polygon1 <- SpatialPolygons(list(Polygon1))
#' Projection <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000
#' +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
#' proj4string(Polygon1) <- CRS(Projection)
#' DEM_meter <- getDEM(Polygon1)
#'
#' sample_POI <- spsample(DEM_meter[[2]], n = ncell(DEM_meter[[1]]), type = "regular")
#' sample_xy <- coordinates(sample_POI)
#'
#' turbloc = spsample(DEM_meter[[2]], 10, type = "random");
#' res <- viewshed(r = DEM_meter[[1]], shape=DEM_meter[[2]], turbine_locs = turbloc, h1=1.8, h2=50)
#' plot_viewshed(res)
#' }
#' @author Sebastian Gatscha
plot_viewshed <- function(res, legend=FALSE) {
# r=DEM_meter[[1]]; leg=TRUE
raster::plot(res[[4]])
plot(sf::st_geometry(res[[3]]), add = T)
points(res[[2]], col="green", pch=20)
points(res[[5]], cex=1.5, col="black", pch=20)
if (is.matrix(res[[1]])) {
invisible(apply(res[[1]], 1, function(d) {points(res[[2]][d,], col="red", pch=20)}))
} else {
points(res[[2]][res[[1]],], col="red", pch=20)
# invisible(apply(res[[1]], 1, function(d) {points(res[[2]][d,], col="red", pch=20)}))
}
  if (legend) {
    legend("bottomright", title = "Visibility",
           col = c("green", "black", "red"),
           legend = c("Not visible", "Turbines", "Turbine/s visible"), pch = 20)
  }
}
#' @title interpol_view
#' @name interpol_view
#' @description Plot an interpolated view of the viewshed analysis
#'
#' @export
#'
#' @importFrom raster plot rasterize
#' @importFrom stats quantile
#'
#' @param res The result list from viewshed.
#' @param plot Should the result be plotted? Default is TRUE
#' @param breakseq The breaks for value plotting. By default, 5 equal
#' intervals are generated.
#' @param breakform If 'breakseq' is missing, a sampling function to
#' calculate the breaks, like \code{\link{quantile}}, fivenum, etc.
#' @param plotDEM Plot the DEM? Default is FALSE
#' @param fun Function used for rasterize. Default is mean
#' @param ... Arguments passed on to \code{\link[raster]{plot}}.
#'
#' @return An interpolated raster
#'
#' @examples \dontrun{
#' library(sp)
#' library(raster)
#' Polygon1 <- Polygon(rbind(c(4488182, 2667172), c(4488182, 2669343),
#' c(4499991, 2669343), c(4499991, 2667172)))
#' Polygon1 <- Polygons(list(Polygon1), 1);
#' Polygon1 <- SpatialPolygons(list(Polygon1))
#' Projection <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000
#' +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
#' proj4string(Polygon1) <- CRS(Projection)
#' DEM_meter <- getDEM(Polygon1)
#'
#' sample_POI <- spsample(DEM_meter[[2]], n = ncell(DEM_meter[[1]]),
#' type = "regular")
#' sample_xy <- coordinates(sample_POI)
#'
#' turbloc = spsample(DEM_meter[[2]], 10, type = "random");
#' res <- viewshed(r = DEM_meter[[1]], shape=DEM_meter[[2]],
#' turbine_locs = turbloc, h1=1.8, h2=50)
#' interpol_view(res, plotDEM = T)
#'
#' interpol_view(res, breakseq = seq(0,max(colSums(res$Result)),1))
#' interpol_view(res, plotDEM = F, breakform = quantile)
#' interpol_view(res, breakform = factor)
#'
#' ## ... Arguments are passed on to the raster plot method
#' interpol_view(res, plotDEM = T, alpha=0.5)
#' interpol_view(res, plotDEM = F, breakseq = seq(0,10,1), colNA="black")
#'
#' }
#' @author Sebastian Gatscha
interpol_view <- function(res, plot=TRUE, breakseq, breakform = NULL,
plotDEM=FALSE, fun = mean, ...) {
# res <- viewshed(r = DEM_meter[[1]], shape=DEM_meter[[2]], turbine_locs = turbloc, h1=1.8, h2=50)
# fun = mean
  if (is.matrix(res$Result) && nrow(res$Result) > 1) {
    res$Result <- colSums(res$Result)
  }
visible = raster::rasterize(res$Raster_POI, res$DEM, field = res$Result, fun = fun)
rasterpois <- cbind(res$Raster_POI, "z" = res$Result)
if (plot) {
pal <- colorRampPalette(c("green","orange","red"))
maxR = max(rasterpois[,3])
if (missing(breakseq)) {
a = range(rasterpois[,3])
breakseq <- seq(from = a[1], to = a[2], length.out = 5)
if (!is.null(breakform)) {
breakseq <- as.numeric(breakform(rasterpois[,3]))
}
breakseq <- breakseq[!duplicated(breakseq)]
}
if (!any(breakseq == maxR)) {
breakseq <- c(breakseq, maxR)
}
if (plotDEM) {
raster::plot(res$DEM, legend = F)
raster::plot(visible, breaks=breakseq, add = T, col=pal(length(breakseq)), ...)
# raster::plot(visible, breaks=breakseq, add = T, col=pal(length(breakseq)), alpha=0.1)
} else {
raster::plot(visible, breaks=breakseq, col=pal(length(breakseq)), ...)
# raster::plot(visible, breaks=breakseq, col=pal(length(breakseq)))
}
points(res$Turbines, pch=20, col="black", cex=1.5)
}
return(visible)
}
#' @title getISO3
#' @name getISO3
#' @description Get point values from the rworldmap package
#'
#' @export
#'
#' @importFrom rworldmap getMap
#' @importFrom sp over
#' @importFrom sf st_coordinates st_as_sf st_transform
#'
#' @param pp SpatialPoints or matrix
#' @param crs_pp The CRS of the points
#' @param col Which column/s should be returned
#' @param resol The search resolution if high accuracy is needed
#' @param coords The column names of the point matrix
#' @param ask A boolean, to ask which columns can be returned
#'
#' @return A character vector
#'
#' @examples \dontrun{
#' points = cbind(c(4488182.26267016, 4488852.91748256),
#' c(2667398.93118627, 2667398.93118627))
#' getISO3(pp = points, ask = T)
#' getISO3(pp = points, crs_pp = 3035)
#'
#' points <- as.data.frame(points)
#' colnames(points) <- c("x","y")
#' points <- st_as_sf(points, coords = c("x","y"))
#' st_crs(points) <- 3035
#' getISO3(pp = points, crs_pp = 3035)
#' }
#' @author Sebastian Gatscha
getISO3 <- function(pp, crs_pp = 4326, col = "ISO3", resol = "low",
coords = c("LONG", "LAT"), ask=F) {
# pp= points; col = "ISO3"; crs_pp = 3035; resol = "low"; coords = c("LONG", "LAT")
# pp = points; col = "?"; crs_pp = 3035; resol = "low"; coords = c("LONG", "LAT"); ask=T
if (col == "?") {ask=T}
countriesSP <- rworldmap::getMap(resolution=resol)
if (ask == TRUE) {
print(sort(names(countriesSP)))
    col = readline(prompt = "Enter a column name (e.g. ISO3): ")
# col = "afs"
if (!col %in% sort(names(countriesSP))) {
stop("Column not found")
}
}
## if sf
if (class(pp)[1] %in% c("sf")) {
pp <- sf::st_coordinates(pp)
}
pp <- as.data.frame(pp)
colnames(pp) <- coords
pp <- st_as_sf(pp, coords=coords, crs = crs_pp)
pp <- st_transform(pp, crs = countriesSP@proj4string@projargs)
pp1 <- as(pp, "Spatial")
# use 'over' to get indices of the Polygons object containing each point
worldmap_values <- sp::over(pp1, countriesSP)
##-------what if multiple columns?
# return desired column of each country
res <- as.character(unique(worldmap_values[[col]]))
return(res)
}
# points=sample_POI
# getISO3(pp = points, ask = T)
# getISO3(pp = points, crs_pp = 3035)
# points=coordinates(sample_POI)
# dput(head(coordinates(sample_POI), 2))
# getISO3(points, crs_pp = 3035)
# points=st_as_sf(sample_POI)
# getISO3(points, crs_pp = 3035)
#' @title getDEM
#' @name getDEM
#' @description Get a DEM raster for a country based on ISO3 code
#'
#' @export
#'
#' @importFrom raster getData projection crop extent crs projectRaster
#' @importFrom sp over
#' @importFrom sf st_coordinates st_as_sf st_transform
#' @importFrom methods as
#'
#' @param ISO3 The ISO3 code of the country
#' @param clip boolean, indicating if polygon should be cropped.
#' Default is TRUE
#' @param polygon A Spatial / SimpleFeature Polygon to crop the DEM
#'
#' @return A list with the DEM raster, and a SpatialPolygonsDataFrame or NULL
#' if no polygon is given
#'
#' @examples \dontrun{
#' library(sp)
#' library(raster)
#' Polygon1 <- Polygon(rbind(c(4488182, 2667172), c(4488182, 2669343),
#' c(4499991, 2669343), c(4499991, 2667172)))
#' Polygon1 <- Polygons(list(Polygon1), 1);
#' Polygon1 <- SpatialPolygons(list(Polygon1))
#' Projection <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000
#' +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"
#' proj4string(Polygon1) <- CRS(Projection)
#' DEM_meter <- getDEM(Polygon1)
#' plot(DEM_meter[[1]])
#' plot(DEM_meter[[2]], add=T)
#' }
#' @author Sebastian Gatscha
getDEM <- function(polygon, ISO3 = "AUT", clip = TRUE) {
# polygon = shape; ISO3 = "AUT"
PROJ <- "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs"
# DEM <- getData("SRTM", lon = st_bbox(polygon)[1], lat=st_bbox(polygon)[2])
DEM <- raster::getData("alt", country=ISO3)
if (clip) {
## if data.frame / sp object ? -----------------
# shape <- st_as_sf(shape)
if (class(polygon)[1] == "SpatialPolygonsDataFrame" | class(polygon)[1] == "SpatialPolygons" ) {
polygon <- sf::st_as_sf(polygon)
}
shape <- sf::st_transform(polygon, crs = raster::projection(DEM))
shape_SP <- as(shape, "Spatial")
DEM <- raster::crop(x = DEM, raster::extent(shape_SP))
# shape_meter <- sf::st_transform(shape, PROJ)
shape_SP <- sp::spTransform(shape_SP, CRSobj = crs(PROJ))
}
DEM_meter <- raster::projectRaster(DEM, crs = PROJ)
if (clip) {
return(list(DEM_meter, shape_SP))
} else {
return(list(DEM_meter, NULL))
}
}
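## End-to-end sketch (illustrative; it simply mirrors the roxygen examples
## above, so `Polygon1` is assumed to be a projected SpatialPolygons object):
# dem   <- getDEM(Polygon1)                    # list(DEM raster, cropped polygon)
# turbs <- sp::spsample(dem[[2]], 10, type = "random")
# vs    <- viewshed(r = dem[[1]], shape = dem[[2]], turbine_locs = turbs, h1 = 1.8, h2 = 50)
# plot_viewshed(vs)
# interpol_view(vs, plotDEM = TRUE)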
--- /R/visibility.R | daveyrichard/windfarmGA | R (.r) | no_license | 18,522 bytes ---
source("R/init.R")
dst <- commandArgs(TRUE)[1]
### depends: sources/data/bankers_magazine_govt_bonds_quotes_in_text.csv data/greenbacks_fill.csv
src <- "sources/data/bankers_magazine_govt_bonds_quotes_in_text.csv"
greenbacks_fill_file <- "data/greenbacks_fill.csv"
greenbacks <- (mutate(read_csv(greenbacks_fill_file),
date = as.Date(date, "%Y-%m-%d"),
gold_rate = 100 / mean)
%>% select(date, gold_rate))
bankers <- read_csv(src) %>%
mutate(date = as.Date(date, "%Y-%m-%d")) %>%
left_join(greenbacks, by = "date") %>%
rename(price_currency_low = low_price,
price_currency_high = high_price) %>%
mutate(gold_rate = ifelse(date < as.Date("1862-1-1"), 1, gold_rate),
price_gold_low = price_currency_low / gold_rate,
price_gold_high = price_currency_high / gold_rate,
price_gold = exp(0.5 * (log(price_gold_low) +
log(price_gold_high))),
price_currency = exp(0.5 * (log(price_currency_low) +
log(price_currency_high))),
current_yield = interest / price_gold)
write_csv(bankers, file = dst)
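## Worked example (illustrative only, not part of the build pipeline): the mid
## price used above is the geometric mean of the low and high quotes, and the
## current yield divides the coupon by the gold-denominated price. For a low of
## 90, a high of 110 and an interest payment of 6:
# exp(0.5 * (log(90) + log(110)))      # ~99.50, geometric-mean mid price
# 6 / exp(0.5 * (log(90) + log(110)))  # ~0.0603, current yield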
--- /sources/csv/bankers_magazine_govt_state_loans_misc.R | jrnold/civil_war_era_findata | R (.r) | no_license | 1,193 bytes ---
source("R/init.R")
dst <- commandArgs(TRUE)[1]
### depends: sources/data/bankers_magazine_govt_bonds_quotes_in_text.csv data/greenbacks_fill.csv
src <- "sources/data/bankers_magazine_govt_bonds_quotes_in_text.csv"
greenbacks_fill_file <- "data/greenbacks_fill.csv"
greenbacks <- (mutate(read_csv(greenbacks_fill_file),
date = as.Date(date, "%Y-%m-%d"),
gold_rate = 100 / mean)
%>% select(date, gold_rate))
bankers <- read_csv(src) %>%
mutate(date = as.Date(date, "%Y-%m-%d")) %>%
left_join(greenbacks, by = "date") %>%
rename(price_currency_low = low_price,
price_currency_high = high_price) %>%
mutate(gold_rate = ifelse(date < as.Date("1862-1-1"), 1, gold_rate),
price_gold_low = price_currency_low / gold_rate,
price_gold_high = price_currency_high / gold_rate,
price_gold = exp(0.5 * (log(price_gold_low) +
log(price_gold_high))),
price_currency = exp(0.5 * (log(price_currency_low) +
log(price_currency_high))),
current_yield = interest / price_gold)
write_csv(bankers, file = dst)
|
# Copyright (C) 2008-2010 Daniel F. Schwarz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
library(methods)
library(stats)
##' Predict method for rjungle objects
##'
##' Writes the new data to a temporary file, runs the RandomJungle binary with
##' the stored jungle and reads the predictions back in.
##' @title predict
##' @param object a fitted \code{rjungle} object (must have been built with \code{keepJungle = TRUE})
##' @param data a data frame containing the variables used to fit the model
##' @param ... currently ignored
##' @return the predictions read from the RandomJungle output file (via \code{scan})
##' @author Jochen Kruppa
##' @S3method predict rjungle
##' @export
predict.rjungle <- function(object, data, ...) {
rj = object
if (!inherits(rj, "rjungle"))
stop("data argument should be rjungle-class")
if (!rj@keepJungle) stop(RJ__MSG3);
# convert factor to integer
mydata = data
for (i in 1:ncol(mydata)) {
if (is.factor(mydata[,i])) {
mydata[,i] = as.integer(mydata[,i])
}
}
# save file
fileNameIn = tempfile("rjungledata")
fileNameOut = tempfile("rjungledata")
write.table(mydata, file = fileNameIn, row.names = FALSE, quote = FALSE)
# unpack
if(file.exists(paste(rj@tmpFile, ".jungle.xml.gz", sep = ""))) {
system(paste("rm -f ", rj@tmpFile, ".jungle.xml", sep = ""))
system(paste("gunzip ", rj@tmpFile, ".jungle.xml.gz", sep = ""))
}
# do the rjungle
system(paste(
RJ__EXECNAME,
"-f", fileNameIn,
"-D", rj@depVarName,
"-y", rj@treeType,
"-o", fileNameOut,
"-P", paste(rj@tmpFile, ".jungle.xml", sep = ""),
"-v"
))
# save results
rjPred = new(
"rjungle",
tmpDir = tempdir(),
tmpFile = fileNameOut,
depVarName = rj@depVarName,
treeType = rj@treeType,
ntree = rj@ntree,
mtry = rj@mtry,
seed = rj@seed,
importance = rj@importance,
proximity = rj@proximity,
replace = rj@replace,
keepJungle = rj@keepJungle,
balanceData = rj@balanceData,
verbose = rj@verbose
)
file.show(paste(rjPred@tmpFile, ".confusion", sep = ""), pager = "cat")
# show prediction matrix
return(scan(paste(rjPred@tmpFile, ".prediction", sep = "")))
}
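## Minimal usage sketch (hedged: assumes `rj_fit` is an existing "rjungle"
## object fitted with keepJungle = TRUE and `new_df` has the same columns,
## including the dependent variable, as the training data):
# pred <- predict(rj_fit, new_df)
# table(pred)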
--- /R/predict.R | jkruppa/Rjungle | R (.r) | no_license | 2,528 bytes ---
#rankscore on a given user.
rankScore <- function(recommendedIDX, testSetIDX, alpha){
#extract index of the hits
match_TS <- which(recommendedIDX %in% testSetIDX)
if(length(match_TS) == 0 ) return(0)
rankscoreMAX <- getrankscoreMAX(length(match_TS), alpha)
rankscore_user <- (match_TS - 1)
rankscore_user <- -rankscore_user/alpha
rankscore_user <- 2^rankscore_user
rankscore_user <- sum(rankscore_user)
rankscore_user/rankscoreMAX
}
getrankscoreMAX<- function(n,alpha){
rankscoreMAX <- 0
rankscoreMAX <- 1/2^((c(1:n) - 1)/alpha)
sum(rankscoreMAX)
}
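## Worked example (illustrative, not from the package): with alpha = 2, the
## recommendation list c(7, 3, 9, 5) and relevant items c(3, 5), the hits sit
## at ranks 2 and 4, so the user score is 2^(-1/2) + 2^(-3/2) ~ 1.061 while the
## best possible score for two hits is 2^0 + 2^(-1/2) ~ 1.707, giving ~0.621:
# rankScore(c(7, 3, 9, 5), c(3, 5), alpha = 2)  # ~0.621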
--- /rrecsys/R/eval_rankScore.R | akhikolla/InformationHouse | R (.r) | no_license | 626 bytes ---
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/events_guildbanremove.r
\name{events.guild_ban_remove}
\alias{events.guild_ban_remove}
\title{Event, emitted whenever a user ban is being revoked}
\usage{
events.guild_ban_remove(data, client)
}
\arguments{
\item{data}{The event fields}
\item{client}{The client object}
}
\description{
Event, emitted whenever a user ban is being revoked
}
\section{Disclaimer}{
Be aware that whenever the guild is not cached,
the guild parameter will be a guild ID rather than a guild object.
The guild ID can be used to fetch the guild from the API.
}
\examples{
\dontrun{
client$emitter$on("GUILD_BAN_REMOVE", function(guild, user) {
cat(user$name, "'s' ban has been revoked on", guild$name)
})
}
}
--- /man/events.guild_ban_remove.Rd | TheOnlyArtz/Pirate | R (.rd) | no_license | 743 bytes | generated ---
#' Is the object of class factor_pos_neg?
#'
#' @param x An object to be checked if it is a 3-level (positive,
#' neutral, negative) categorical variable.
#' @examples
#' my_categories <- as_factor_pos_neg( c("Better", "DK", "Worse",
#' "Same", "The Same","Inap. not "))
#'
#' is.factor_pos_neg (my_categories)
#'
#' @export
#'
is.factor_pos_neg<- function(x) inherits(x, "factor_pos_neg")
--- /R/is.factor_pos_neg.R | antaldaniel/eurobarometer_old | R (.r) | no_license | 421 bytes ---
library(aws.ses)
### Name: get_id_notification
### Title: Get/Set Notifications
### Aliases: get_id_notification set_id_notification
### ** Examples
## Not run:
##D # get
##D get_id_notification("example@example.com")
##D
##D # set
##D if (require("aws.sns")) {
##D top <- create_topic("ses_email_bounce")
##D set_id_notification("example@example.com", "Bounce", top)
##D get_id_notification("example@example.com")
##D
##D # cleanup
##D delete_topic(top)
##D }
## End(Not run)
--- /data/genthat_extracted_code/aws.ses/examples/idnotification.Rd.R | surayaaramli/typeRrh | R (.r) | no_license | 499 bytes ---
# This file was generated, do not edit by hand
# Please edit inst/srr_template_nonspatial_yardstick.R instead
test_that("srr: ww_willmott_d errors if truth and estimate are different lengths", {
# Note that this test isn't applicable to data-frame input, which enforces
# constant column lengths
expect_snapshot(
ww_willmott_d_vec(1:5, 1:4),
error = TRUE
)
expect_snapshot(
ww_willmott_d_vec(1:4, 1:5),
error = TRUE
)
})
test_that("srr: ww_willmott_d errors if truth and estimate aren't numeric", {
char_df <- tibble::tibble(x = 1:5, y = letters[1:5])
expect_snapshot(
ww_willmott_d(char_df, x, y),
error = TRUE
)
expect_snapshot(
ww_willmott_d(char_df, y, x),
error = TRUE
)
expect_snapshot(
ww_willmott_d_vec(as.character(1:5), 1:4),
error = TRUE
)
expect_snapshot(
ww_willmott_d_vec(1:5, as.character(1:4)),
error = TRUE
)
})
test_that("srr: ww_willmott_d errors if truth and estimate are list columns", {
list_df <- tibble::tibble(x = 1:5, y = lapply(1:5, function(x) x))
expect_snapshot(
ww_willmott_d(list_df, x, y),
error = TRUE
)
expect_snapshot(
ww_willmott_d(list_df, y, x),
error = TRUE
)
})
test_that("srr: ww_willmott_d removes NaN and NA when na_rm = TRUE", {
missing_df <- tibble::tibble(x = c(NaN, 2:5), y = c(1:4, NA))
expect_snapshot(
round(ww_willmott_d(missing_df, x, y)$.estimate, 15),
)
expect_snapshot(
round(ww_willmott_d(missing_df, y, x)$.estimate, 15),
)
expect_snapshot(
round(ww_willmott_d_vec(missing_df$y, missing_df$x), 15),
)
expect_snapshot(
round(ww_willmott_d_vec(missing_df$x, missing_df$y), 15),
)
})
test_that("srr: ww_willmott_d returns NA when na_rm = FALSE and NA is present", {
missing_df <- tibble::tibble(x = c(NaN, 2:5), y = c(1:4, NA))
expect_identical(
ww_willmott_d(missing_df, y, x, na_rm = FALSE)$.estimate,
NA_real_
)
expect_identical(
ww_willmott_d(missing_df, x, y, na_rm = FALSE)$.estimate,
NA_real_
)
expect_identical(
ww_willmott_d_vec(missing_df$y, missing_df$x, na_rm = FALSE),
NA_real_
)
expect_identical(
ww_willmott_d_vec(missing_df$x, missing_df$y, na_rm = FALSE),
NA_real_
)
})
test_that("srr: ww_willmott_d errors on zero-length data", {
expect_snapshot(
ww_willmott_d_vec(numeric(), numeric()),
error = TRUE
)
empty_df <- tibble::tibble(x = numeric(), y = numeric())
expect_snapshot(
ww_willmott_d(empty_df, x, y),
error = TRUE
)
expect_snapshot(
ww_willmott_d(empty_df, y, x),
error = TRUE
)
})
test_that("srr: ww_willmott_d errors on all-NA data", {
expect_snapshot(
ww_willmott_d_vec(rep(NA_real_, 4), 4:1),
error = TRUE
)
expect_snapshot(
ww_willmott_d_vec(1:4, rep(NA_real_, 4)),
error = TRUE
)
all_na <- tibble::tibble(x = rep(NA_real_, 4), y = 1:4)
expect_snapshot(
ww_willmott_d(all_na, x, y),
error = TRUE
)
expect_snapshot(
ww_willmott_d(all_na, y, x),
error = TRUE
)
expect_snapshot(
ww_willmott_d_vec(1:4, 1:4)
)
})
test_that("srr: ww_willmott_d works with all identical data", {
all_identical <- tibble::tibble(x = 1:4, y = 1:4)
expect_snapshot(
ww_willmott_d(all_identical, x, y)
)
expect_snapshot(
ww_willmott_d_vec(1:4, 1:4)
)
all_identical <- tibble::tibble(x = 1:4, y = 1:4)
expect_snapshot(
ww_willmott_d(all_identical, x, y)
)
})
test_that("srr: ww_willmott_d results don't change with trivial noise", {
skip_if_not_installed("withr")
x <- c(6, 8, 9, 10, 11, 14)
y <- c(2, 3, 5, 5, 6, 8)
df <- tibble::tibble(x = x, y = y)
noised_x <- x + rnorm(x, .Machine$double.eps, .Machine$double.eps)
noised_df <- tibble::tibble(x = noised_x, y = y)
expect_equal(
ww_willmott_d(noised_df, x, y),
ww_willmott_d(df, x, y)
)
expect_equal(
ww_willmott_d(noised_df, y, x),
ww_willmott_d(df, y, x)
)
expect_equal(
ww_willmott_d_vec(noised_x, y),
ww_willmott_d_vec(x, y)
)
expect_equal(
ww_willmott_d_vec(y, noised_x),
ww_willmott_d_vec(y, x)
)
})
test_that("srr: ww_willmott_d results don't change with different seeds", {
skip_if_not_installed("withr")
x <- c(6, 8, 9, 10, 11, 14)
y <- c(2, 3, 5, 5, 6, 8)
df <- tibble::tibble(x = x, y = y)
expect_equal(
withr::with_seed(
123,
ww_willmott_d(df, x, y)
),
withr::with_seed(
1107,
ww_willmott_d(df, x, y)
)
)
expect_equal(
withr::with_seed(
123,
ww_willmott_d(df, y, x)
),
withr::with_seed(
1107,
ww_willmott_d(df, y, x)
)
)
expect_equal(
withr::with_seed(
123,
ww_willmott_d_vec(x, y)
),
withr::with_seed(
1107,
ww_willmott_d_vec(x, y)
)
)
expect_equal(
withr::with_seed(
123,
ww_willmott_d_vec(y, x)
),
withr::with_seed(
1107,
ww_willmott_d_vec(y, x)
)
)
})
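# These are testthat (3e) snapshot tests; a hedged sketch of how files like
# this one are typically run and their snapshots refreshed interactively
# (standard testthat tooling, nothing specific to this template):
# testthat::test_file("tests/testthat/test-srr-ww_willmott_d.R")
# testthat::snapshot_review()  # inspect snapshot changes
# testthat::snapshot_accept()  # accept intentional changes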
--- /tests/testthat/test-srr-ww_willmott_d.R | ropensci/waywiser | R (.r) | permissive | 4,983 bytes ---
\name{make.statespace}
\alias{make.ss}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create a state-space grid for use by SCRbayes functions%% ~~function to do ... ~~
}
\description{
This function will make a state-space grid given a set of coordinates
that define the trap locations (or points representative of potential
traps).
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
make.statespace(ll = NA, buffer = 0.1, minx = NA, maxx = NA, miny = NA, maxy = NA, nx = 20, ny = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{ll}{
Coordinates of traps or similar points within the study area.
%% ~~Describe \code{ll} here~~
}
\item{buffer}{
Relative size of the buffer to use in creating the state-space. A
value of 0 produces the minimum area rectangle around the traps.
%% ~~Describe \code{buffer} here~~
}
\item{minx}{
Instead of ll one could provide the minimum and maximum x and y values
to use.
%% ~~Describe \code{minx} here~~
}
\item{maxx}{
%% ~~Describe \code{maxx} here~~
}
\item{miny}{
%% ~~Describe \code{miny} here~~
}
\item{maxy}{
%% ~~Describe \code{maxy} here~~
}
\item{nx}{
Number of state-space points in the x-direction. Don't make this too
large. The total size of the state-space will be nx*ny points (see next
argument). A reasonable total value of nx*ny is the expected population
size N*4. So if you expect N = 100 individuals in the state-space around
the traps, nx*ny should be around 400.
%% ~~Describe \code{nx} here~~
}
  \item{ny}{
Number of state-space points in the y-direction. It is recommended to
leave this as NULL, in which case ny is computed relative to nx.
%% ~~Describe \code{ny} here~~
}
}
\details{
  A future version of this function will have
the user input "average home range size" and compute everything you
need.
The function returns the nG x 2 state-space grid which has a number of
arguments. "area" is the area of each grid point in the units of
"ll". "traps" is the matrix "ll". These arguments will be used by
other functions of the SCRbayes package.
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Andy Royle, aroyle@usgs.gov %% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
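## Illustrative sketch only (not from the package authors); it follows the
## usage signature documented above: traps on a regular grid, the default
## buffer, and nx chosen so that nx*ny is roughly 4 times the expected N.
\dontrun{
traps <- cbind(x = rep(1:10, each = 10), y = rep(1:10, times = 10))
ss <- make.statespace(ll = traps, buffer = 0.1, nx = 20)
}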
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
--- /man/make.statespace.Rd | jaroyle/SCRbayes | R (.rd) | no_license | 3,013 bytes ---
electricity_data <- read.table("household_power_consumption.txt", sep=";", na.strings = "?",
col.names = colnames(read.table("household_power_consumption.txt", sep=";", header = TRUE, nrow =1)),
colClasses = c(rep("character",2), rep("numeric", 7)), skip = 66637, nrows = 2880)
electricity_data[,"TimeStamp"] <- paste(electricity_data[,1], electricity_data[,2], sep = " ")
times <- strptime(electricity_data[,"TimeStamp"], "%d/%m/%Y %H:%M:%S")
png("plot3.png",width = 480, height = 480)
plot(times, electricity_data[,"Sub_metering_1"], type= "n", xlab = "", ylab="Energy sub metering")
lines(times, electricity_data[,"Sub_metering_1"], type="l", col = "black")
lines(times, electricity_data[,"Sub_metering_2"], type="l", col = "red")
lines(times, electricity_data[,"Sub_metering_3"], type="l", col = "blue")
legend("topright",legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black","red","blue"), lty=1)
dev.off();
--- /plot3.R | gchen19/ExData_Plotting1 | R (.r) | no_license | 1,006 bytes ---
#' Declare a null hypothesis
#' @param x a data frame that can be coerced into a \code{\link[dplyr]{tbl_df}}
#' @param null the null hypothesis. Options include "independence" and "point"
#' @param ... arguments passed to downstream functions
#' @return A tibble containing the response (and explanatory, if specified)
#' variable data with parameter information stored as well
#' @importFrom dplyr as.tbl
#' @return a data frame with attributes set
#' @export
#' @examples
#' # Permutation test similar to ANOVA
#' mtcars %>%
#' dplyr::mutate(cyl = factor(cyl)) %>%
#' specify(mpg ~ cyl) %>%
#' hypothesize(null = "independence") %>%
#' generate(reps = 100, type = "permute") %>%
#' calculate(stat = "F")
hypothesize <- function(x, null, ...) {
hypothesize_checks(x, null)
attr(x, "null") <- null
dots <- list(...)
if( (null == "point") && (length(dots) == 0) ){
stop(paste("Provide a parameter and a value to check such as `mu = 30`",
"for the point hypothesis."))
}
if((null == "independence") && (length(dots) > 0)) {
warning(paste("Parameter values are not specified when testing that two",
"variables are independent."))
}
if((length(dots) > 0) && (null == "point")) {
params <- parse_params(dots, x)
attr(x, "params") <- params
if(any(grepl("p.", attr(attr(x, "params"), "names")))){
# simulate instead of bootstrap based on the value of `p` provided
attr(x, "type") <- "simulate"
} else {
attr(x, "type") <- "bootstrap"
}
}
if(!is.null(null) && null == "independence")
attr(x, "type") <- "permute"
# Check one proportion test set up correctly
if(null == "point"){
if(is.factor(response_variable(x))){
if(!any(grepl("p", attr(attr(x, "params"), "names"))))
stop(paste('Testing one categorical variable requires `p`',
'to be used as a parameter.'))
}
}
# Check one numeric test set up correctly
## Not currently able to reach in testing as other checks
## already produce errors
# if(null == "point"){
# if(!is.factor(response_variable(x))
# & !any(grepl("mu|med|sigma", attr(attr(x, "params"), "names"))))
# stop(paste('Testing one numerical variable requires one of',
# '`mu`, `med`, or `sd` to be used as a parameter.'))
# }
return(as.tbl(x))
}
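## Additional hedged sketch (variable and column names here are hypothetical,
## not from the package): a point null for a single proportion, which exercises
## the `p` branch handled above and therefore uses type = "simulate".
# survey_df %>%
#   specify(response = answer, success = "yes") %>%
#   hypothesize(null = "point", p = 0.5) %>%
#   generate(reps = 100, type = "simulate") %>%
#   calculate(stat = "prop")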
--- /R/hypothesize.R | topepo/infer | R (.r) | no_license | 2,414 bytes ---
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reconstruct.R
\name{reconstruct}
\alias{reconstruct}
\alias{reconstruct.DeepBeliefNet}
\alias{reconstruct.RestrictedBolzmannMachine}
\title{Reconstruct data through Deep Belief Nets and Restricted Bolzmann Machines}
\usage{
reconstruct(object, newdata, ...)
\method{reconstruct}{DeepBeliefNet}(object, newdata, drop = TRUE, ...)
\method{reconstruct}{RestrictedBolzmannMachine}(object, newdata, drop = TRUE,
...)
}
\arguments{
\item{object}{the \code{\link{RestrictedBolzmannMachine}} or \code{\link{DeepBeliefNet}} object}
\item{newdata}{a \code{\link{data.frame}} or \code{\link{matrix}} providing the data. Must have the same columns as the input layer of the model.}
\item{drop}{do not return additional dimensions}
\item{\dots}{ignored}
}
\value{
the reconstructed data
}
\description{
Passes the data all the way through an unrolled DeepBeliefNet (in this case, it is identical to predict).
For a RestrictedBolzmannMachine, or a DeepBeliefNet that hasn't been unrolled, it predicts forward and then predicts again through the reversed network.
In the end, the reconstruction has the same dimension as the input.
}
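% Hypothetical usage sketch added for illustration (not generated by roxygen2);
% it assumes a trained DeepBeliefNet object `dbn` and a numeric matrix `x`
% whose columns match the model's input layer.
\examples{
\dontrun{
rec <- reconstruct(dbn, x)
identical(dim(rec), dim(x))  # TRUE: the reconstruction keeps the input dimensions
}
}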
|
/man/reconstruct.Rd
|
no_license
|
marinapavlovicrivas/DeepLearning
|
R
| false | true | 1,198 |
rd
|
# Provide two arguments: 1) file to run Fisher's exact test on, and 2) output file name
args<-commandArgs(TRUE)
file <- args[1]
data <- read.table(file)
# columns 5-8 of each row are treated as a 2x2 contingency table
pvals <- apply(data[, c(5, 6, 7, 8)], 1, function(x) fisher.test(matrix(x, nrow = 2))$p.value)
data[, "p-vals"] <- format(round(pvals, 5))
write.table(data, file=args[2], sep= "\t", col.names=FALSE, row.names=FALSE, quote=FALSE)
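# Example invocation (hypothetical file names), assuming a whitespace-delimited
# input table whose columns 5-8 hold the 2x2 contingency counts for each row:
#   Rscript fishers_exact_test.R counts_input.txt counts_with_pvalues.txt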
|
/fishers_exact_test.R
|
no_license
|
mattjmeier/general_purpose_scripts
|
R
| false | false | 376 |
r
|
#' Correlation Plot
#'
#' This function analyses the correlation between the numeric variables in the dataset and returns the correlation plot
#' based on the correlation matrix.
#' Note: this function requires the package corrplot.
#'
#' This function takes no arguments; it uses the \code{air_safety} dataset shipped with the package.
#'
#' @importFrom stats cor
#'
#' @export
#'
correlation <- function(){
numeric.var<-sapply(airlinesafety::air_safety, is.numeric)
corr.matrix<-cor(airlinesafety::air_safety[,numeric.var])
corrplot::corrplot(corr.matrix,main="\n\nCorrelation Matrix",method="number")
}
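# Illustration of the same pattern on an arbitrary data frame (not part of the
# package; assumes corrplot is installed):
# numeric.var <- sapply(mtcars, is.numeric)
# corrplot::corrplot(cor(mtcars[, numeric.var]), main = "\n\nCorrelation Matrix", method = "number")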
|
/R/correlation.R
|
no_license
|
unimi-dse/f0e7a855
|
R
| false | false | 559 |
r
|
#' Calculate cross correlation by extending reads
#'
#' @param bam_file character. Path to .bam file, must have index at .bam.bai.
#' @param query_gr GRanges. Regions to calculate cross correlation for.
#' @param n_regions integer. query_gr will be downsampled to this many regions
#' for speed. Use NA to skip downsampling.
#' @param max_dupes integer. Duplicate reads above this value will be removed.
#' @param frag_min integer. extension value to start at.
#' @param frag_max integer. extension value to end at.
#' @param step integer. proceed from frag_min measuring correlation every step.
#' @param small_step integer. after measuring correlation every step, a second
#' round of fragment size refinement is done using small_step within +/- step
#' of maximum.
#' @param include_plots logical. Should plots be included in output?
#'
#' @return named list of results
#' @export
#' @import pbapply
#' @examples
#' bam_file = system.file("extdata", "MCF10A_CTCF.random5.bam", package = "peakrefine")
#' np = system.file("extdata", "MCF10A_CTCF.random5.narrowPeak", package = "peakrefine")
#' qgr = rtracklayer::import(np, format = "narrowPeak")
#' crossCorrByExtension(bam_file, qgr[1:2], frag_min = 50,
#' frag_max = 250, step = 50, small_step = 10)
crossCorrByExtension = function(bam_file,
query_gr,
n_regions = 20,
max_dupes = 1,
frag_min = 50,
frag_max = 250,
step = 10,
small_step = 1,
include_plots = TRUE){
which_label = N = id = crank = corr = frag_len = NULL #reserve for data.table
if(is.na(n_regions)) n_regions = length(query_gr)
stopifnot(is.numeric(n_regions))
stopifnot(n_regions >= 1)
if(is.na(n_regions) || n_regions >= length(query_gr)){
test_gr = query_gr
}else{
test_gr = sample(query_gr, n_regions)
}
if(is.null(test_gr$id)){
test_gr$id = paste0("peak_", seq_along(test_gr))
}
if(is.null(names(test_gr))){
names(test_gr) = test_gr$id
}
test_gr = harmonize_seqlengths(test_gr, bam_file)
# browser()
message("fetch reads...")
reads_dt = .fetch_bam_stranded(bam_file, test_gr, max_dupes = max_dupes)
cnt_dt = reads_dt[, .N, by = list(which_label)]
test_dt = data.table(which_label = as.character(test_gr), id = test_gr$id)
cnt_dt = merge(cnt_dt, test_dt, all = TRUE)
cnt_dt[is.na(N), N := 0]
cnt_dt = cnt_dt[, list(id, count = N)]
read_corr = .calc_cross_corr(reads_dt, test_gr)
read_coverage = .calc_stranded_coverage(reads_dt, test_gr)
tab = table(reads_dt$width)
read_length = as.numeric(names(tab[which(tab == max(tab))]))
message("correlate coarse...")
corrVals = pbapply::pblapply(seq(from = frag_min, to = frag_max, by = step), function(frag_len){
dc_dt = .calc_cross_corr(reads_dt, test_gr, frag_len)
dc_dt$frag_len = frag_len
dc_dt
})
corrVals = data.table::rbindlist(corrVals)
# corrVals$crank = NULL
corrVals[, crank := rank(-corr), by = list(id)]
center = round(mean(corrVals[crank < 2 & !is.na(corr)]$frag_len))
message("correlate fine...")
corrValsDetail = pbapply::pblapply(seq(from = center-step, to = center+step, by = small_step), function(frag_len){
dc_dt = .calc_cross_corr(reads_dt, test_gr, frag_len)
dc_dt$frag_len = frag_len
dc_dt
})
corrValsDetail = rbindlist(corrValsDetail)
corrValsDetail[, crank := rank(-corr), by = list(id)]
corrValsDetail[crank == 1]
bestFragLen = round(mean(corrValsDetail[crank < 2]$frag_len))
frag_corr = corrValsDetail[frag_len == bestFragLen, 1:2]
if(include_plots){
tp = sample(unique(corrVals$id), min(12, length(test_gr)))
message("plot sampled regions...")
p = ggplot(corrVals[id %in% tp], aes(x = frag_len, y = corr, group = id)) + geom_path() +
geom_path(data = corrValsDetail[id %in% tp], color = "red") + facet_wrap("id") +
geom_point(data = corrValsDetail[id %in% tp][crank == 1], color = "red")
out = list(
read_length = read_length,
frag_length = bestFragLen,
read_corr = read_corr,
frag_corr = frag_corr,
corr_vals = corrVals,
count = cnt_dt,
sample_plot = p
)
}else{
out = list(
read_length = read_length,
frag_length = bestFragLen,
read_corr = read_corr,
frag_corr = frag_corr,
corr_vals = corrVals,
count = cnt_dt
)
}
return(out)
}
#' Measure cross correlation using specified frag_len for all regions
#'
#' @param bam_file character. Path to .bam file, must have index at .bam.bai.
#' @param query_gr GRanges. Regions to calculate cross correlation for.
#' @param frag_len integer. Fragment length to calculate cross correlation for.
#' @param max_dupes integer. Duplicate reads above this value will be removed.
#' @param ncores integer. ncores to use to split up the cross correlation
#' calculation.
#' @param output_withGRanges logical. Should results be merged back into
#' query_gr? If TRUE output is GRanges. If FALSE output is data.table.
#'
#' @return Either a GRanges equivalent to query_gr with added columns for
#' correlation metrics or a data.table of metrics.
#' @export
#' @import parallel
#' @examples
#' bam_file = system.file("extdata", "MCF10A_CTCF.random5.bam", package = "peakrefine")
#' np = system.file("extdata", "MCF10A_CTCF.random5.narrowPeak", package = "peakrefine")
#' qgr = rtracklayer::import(np, format = "narrowPeak")
#' crossCorrByExtensionFull(bam_file, qgr[1:2], frag_len = 150, ncores = 2)
crossCorrByExtensionFull = function(bam_file, query_gr, frag_len,
max_dupes = 1,
ncores = 1,
output_withGRanges = TRUE){
# browser()
if(is.null(query_gr$id)) query_gr$id = query_gr$name
options(mc.cores = ncores)
assignments = ceiling(seq_along(query_gr) / (length(query_gr)/ncores))
cres = parallel::mclapply(seq_len(ncores), function(i){
crossCorrByExtension(bam_file,
query_gr[assignments == i],
n_regions = NA,
step = 0,
max_dupes = max_dupes,
frag_min = frag_len,
frag_max = frag_len,
include_plots = FALSE
)
})
out = list(
read_corr =
rbindlist(lapply(cres, function(x)x$read_corr)),
frag_corr =
rbindlist(lapply(cres, function(x)x$frag_corr)),
count =
rbindlist(lapply(cres, function(x)x$count))
)
colnames(out$read_corr)[2] = "read_corr"
colnames(out$frag_corr)[2] = "frag_corr"
out = merge(merge(out$read_corr, out$frag_corr), out$count)
if(output_withGRanges){
out = GRanges(merge(out, query_gr, by = "id"))
}
return(out)
}
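# Sketch (not part of the package) combining the two functions above: estimate
# the fragment length on a subsample with crossCorrByExtension(), then score
# every region with crossCorrByExtensionFull(). File paths are the package's
# bundled example data, as in the roxygen examples.
# bam_file = system.file("extdata", "MCF10A_CTCF.random5.bam", package = "peakrefine")
# np = system.file("extdata", "MCF10A_CTCF.random5.narrowPeak", package = "peakrefine")
# qgr = rtracklayer::import(np, format = "narrowPeak")
# est = crossCorrByExtension(bam_file, qgr, frag_min = 50, frag_max = 250,
#                            step = 50, small_step = 10)
# scored = crossCorrByExtensionFull(bam_file, qgr, frag_len = est$frag_length, ncores = 1)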
|
/R/functions_crossCorrExtension.R
|
no_license
|
jrboyd/peakrefine
|
R
| false | false | 7,280 |
r
|
library(base, quietly = TRUE)
library(methods, quietly = TRUE)
library(datasets, quietly = TRUE)
library(utils, quietly = TRUE)
library(grDevices, quietly = TRUE)
library(graphics, quietly = TRUE)
library(stats, quietly = TRUE)
library(pander, quietly = TRUE)
library(png, quietly = TRUE)
library(docopt, quietly = TRUE)
library(rslurm, quietly = TRUE)
.rslurm_func <- readRDS('f.RDS')
.rslurm_params <- readRDS('params.RDS')
.rslurm_id <- as.numeric(Sys.getenv('SLURM_ARRAY_TASK_ID'))
.rslurm_istart <- .rslurm_id * 2 + 1
.rslurm_iend <- min((.rslurm_id + 1) * 2, nrow(.rslurm_params))
.rslurm_result <- do.call(parallel::mcmapply, c(
FUN = .rslurm_func,
.rslurm_params[.rslurm_istart:.rslurm_iend, , drop = FALSE],
mc.cores = 2,
SIMPLIFY = FALSE))
saveRDS(.rslurm_result, file = paste0('results_', .rslurm_id, '.RDS'))
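# Worked example of the chunking above (illustration only, not part of the
# generated rslurm script): with 2 parameter rows per task, SLURM array task 0
# processes rows 1-2, task 1 processes rows 3-4, and so on, capped at nrow(params).
# for (id in 0:4) {
#   istart <- id * 2 + 1
#   iend <- min((id + 1) * 2, 10)   # for a hypothetical 10-row params data frame
#   message("task ", id, " -> rows ", istart, ":", iend)
# }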
|
/LearningSlurm/_rslurm_SVM/slurm_run.R
|
no_license
|
kayhan-batmanghelich/DataScienceCourse
|
R
| false | false | 838 |
r
|
## ----setup, include = FALSE---------------------------------------------------
#rmarkdown::html_vignette
knitr::opts_knit$set(
self.contained = TRUE)
knitr::opts_chunk$set(
#collapse = TRUE,
dpi = 55,
fig.retina = 1,
comment = "#>"
)
require("genBaRcode")
require("ggplot2")
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# if (!requireNamespace("BiocManager", quietly = TRUE)) {
# install.packages("BiocManager")
# }
#
# BiocManager::install(c("Biostrings", "ShortRead", "S4Vectors", "ggtree"))
#
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# require("genBaRcode")
#
# bb <- "ACTNNCGANNCTTNNCGANNCTTNNGGANNCTANNACTNNCGANNCTTNNCGANNCTTNNGGANNCTANNACTNNCGANN"
# source_dir <- system.file("extdata", package = "genBaRcode")
#
# BC_data <- processingRawData(file_name = "test_data.fastq.gz",
# source_dir = source_dir,
# results_dir = "/my/results/directory/",
# mismatch = 0,
# label = "test",
# bc_backbone = bb,
# bc_backbone_label = "BC_1",
# min_score = 30,
# min_reads = 2,
# save_it = FALSE,
# seqLogo = FALSE,
# cpus = 1,
# strategy = "sequential",
# full_output = FALSE,
# wobble_extraction = TRUE,
# dist_measure = "hamming")
#
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
getBackboneSelection()
bb <- getBackboneSelection(1)
show(bb)
bb <- getBackboneSelection("BC32-eBFP")
show(bb)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
bb <- "ACTNNCGANNCTTNNCGANNCTTNNGGANNCTANNACTNNCGANNCTTNNCGANNCTTNNGGANNCTANNACTNNCGANN"
source_dir <- system.file("extdata", package = "genBaRcode")
# if no results_dir is provided the source_dir automatically also becomes the results_dir
BC_data <- processingRawData(file_name = "test_data.fastq.gz",
source_dir = source_dir,
mismatch = 0,
label = "test",
bc_backbone = bb,
bc_backbone_label = "BC_1",
min_score = 30,
min_reads = 2,
save_it = FALSE,
seqLogo = FALSE,
cpus = 1,
strategy = "sequential",
full_output = FALSE,
wobble_extraction = TRUE,
dist_measure = "hamming")
## ----echo = FALSE, eval=TRUE, collapse=TRUE-----------------------------------
methods::slot(BC_data, "results_dir") <- "/my/results/dir/"
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
# if no results_dir is provided the source_dir automatically also becomes the results_dir
BC_data_multiple <- processingRawData(file_name = "test_data.fastq.gz",
source_dir = source_dir,
mismatch = 0,
label = "test",
bc_backbone = getBackboneSelection(1:2),
bc_backbone_label = c("BC_1", "BC_2"),
min_score = 30,
min_reads = 2,
save_it = FALSE,
seqLogo = FALSE,
cpus = 1,
strategy = "sequential",
full_output = FALSE,
wobble_extraction = FALSE,
dist_measure = "hamming")
## ----echo = FALSE, eval=TRUE, collapse=TRUE-----------------------------------
methods::slot(BC_data_multiple[[1]], "results_dir") <- "/my/results/dir/"
methods::slot(BC_data_multiple[[2]], "results_dir") <- "/my/results/dir/"
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data_multiple)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
# if no results_dir is provided the source_dir automatically also becomes the results_dir
BC_data_2 <- processingRawData(file_name = "test_data.fastq.gz",
source_dir = source_dir,
mismatch = 4,
label = "test",
bc_backbone = "none",
min_score = 30,
min_reads = 2,
save_it = FALSE,
seqLogo = FALSE,
cpus = 1,
strategy = "sequential",
full_output = FALSE,
wobble_extraction = FALSE,
dist_measure = "hamming")
## ----echo = FALSE, eval=TRUE, collapse=TRUE-----------------------------------
methods::slot(BC_data_2, "results_dir") <- "/my/results/dir/"
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data_2)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
head(getReads(BC_data))
show(getResultsDir(BC_data))
show(getBackbone(BC_data))
show(getLabel(BC_data))
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# BC_data <- setReads(BC_data, data.frame(read_count = c(1:5), barcode = letters[1:5]))
# BC_data <- setResultsDir(BC_data, "/my/test/folder/")
# BC_data <- setBackbone(BC_data, "AAANNNNGGG")
# BC_data <- setLabel(BC_data, "new label")
#
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# BC_data <- readBCdat(path = "/my/test/folder/",
# label = "test",
# BC_backbone = "AAANNNNCCCC",
# file_name = "test.csv",
# s = ";")
#
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# BC_data_EC <- errorCorrection(BC_dat = BC_data,
# maxDist = 4,
# save_it = FALSE,
# cpus = 1,
# strategy = "sequential",
# m = "hamming",
# type = "standard",
# only_EC_BCs = TRUE,
# EC_analysis = FALSE,
# start_small = TRUE)
#
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
BC_data_EC <- errorCorrection(BC_dat = BC_data,
maxDist = 4,
save_it = FALSE,
cpus = 1,
strategy = "sequential",
m = "hamming",
type = "standard",
only_EC_BCs = TRUE,
EC_analysis = FALSE,
start_small = TRUE)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data_EC)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
BC_data_EC <- errorCorrection(BC_dat = BC_data,
maxDist = 4,
save_it = FALSE,
cpus = 1,
strategy = "sequential",
m = "hamming",
type = "standard",
only_EC_BCs = TRUE,
EC_analysis = FALSE,
start_small = FALSE)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data_EC)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
BC_data_EC <- errorCorrection(BC_dat = BC_data,
maxDist = 4,
save_it = FALSE,
cpus = 1,
strategy = "sequential",
m = "hamming",
type = "graph based",
only_EC_BCs = TRUE,
EC_analysis = FALSE,
start_small = FALSE)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data_EC)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
BC_data_EC <- errorCorrection(BC_dat = BC_data,
maxDist = 4,
save_it = FALSE,
cpus = 1,
strategy = "sequential",
m = "hamming",
type = "connectivity based",
only_EC_BCs = TRUE,
EC_analysis = FALSE,
start_small = FALSE)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data_EC)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
BC_data_EC <- errorCorrection(BC_dat = BC_data,
maxDist = 4,
save_it = FALSE,
cpus = 1,
strategy = "sequential",
m = "hamming",
type = "clustering",
only_EC_BCs = TRUE,
EC_analysis = FALSE,
start_small = FALSE)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(BC_data_EC)
## ----eval=TRUE, fig.width=2.5, fig.height=2, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
s_dir <- system.file("extdata", package = "genBaRcode")
plotNucFrequency(source_dir = s_dir, file_name = "test_data.fastq.gz")
## ----eval=TRUE, fig.height=1.5, fig.width=5, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
plotQualityScoreDis(source_dir = s_dir, file_name = "test_data.fastq.gz", type = "mean")
## ----eval=TRUE, fig.height=1.5, fig.width=5, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
plotQualityScoreDis(source_dir = s_dir, file_name = "test_data.fastq.gz", type = "median")
## ----eval=TRUE, fig.width=6, fig.height=4, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
plotQualityScorePerCycle(source_dir = s_dir, file_name = "test_data.fastq.gz")
## ----eval=TRUE, fig.width=6.5, fig.height=1.5, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
show(BC_data)
plotSeqLogo(BC_dat = BC_data, colrs = NULL)
## ----eval=TRUE, fig.width=6.5, fig.height=1.5, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
# color order correlates to the following nucleotide order A, T, C, G, N
col_vec <- c("#000000",
"#000000",
RColorBrewer::brewer.pal(6, "Paired")[c(5, 6)],
"#000000")
show(col_vec)
plotSeqLogo(BC_dat = BC_data, colrs = col_vec)
## ----eval=TRUE, fig.width=6, fig.height=2, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
show(BC_data)
generateKirchenplot(BC_dat = BC_data)
## ----eval=FALSE, fig.width=6, fig.height=2, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
#
# known_BCs <- c("GGTCGAAGCTTCTTTCGGGCCGCACGGCTGCT",
# "CACGATCCGCTTCTATCGCGTGCACTACATGT",
# "ATTGGGTCCGTCTGAGGGCGTTTCTGCGCCTT")
#
# generateKirchenplot(BC_dat = BC_data, ori_BCs = known_BCs)
#
## ----echo=FALSE, eval=TRUE, fig.width=6, fig.height=2, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
known_BCs <- c("GGTCGAAGCTTCTTTCGGGCCGCACGGCTGCT",
"CACGATCCGCTTCTATCGCGTGCACTACATGT",
"ATTGGGTCCGTCTGAGGGCGTTTCTGCGCCTT")
generateKirchenplot(BC_dat = BC_data, ori_BCs = known_BCs) + ggplot2::theme(legend.text = ggplot2::element_text(size = 6),
legend.key.size = ggplot2::unit(4, "mm"),
legend.title = ggplot2::element_text(size = 7))
## ----eval=TRUE, fig.width=7.2, fig.height=4, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
known_BCs <- c("GGTCGAAGCTTCTTTCGGGCCGCACGGCTGCT",
"CACGATCCGCTTCTATCGCGTGCACTACATGT",
"ATTGGGTCCGTCTGAGGGCGTTTCTGCGCCTT")
contaminations <- c("CACGATCCGCTTCTATCGCGTGCACTACATGC",
"ATTGGGTCCGTCTGAGGGCGTCTCTGCGCCTT",
"CACGATCCGCTTCTATCGCGTGCGCTACATGT",
"TACGATCCGCTTCTATCGCGTGCACTACATGT")
generateKirchenplot(BC_dat = BC_data, ori_BCs = known_BCs, ori_BCs2 = contaminations)
## ----eval=FALSE, fig.width=7.2, fig.height=4, fig.align="center", fig.cap="Figure 5.4: Extracted barcodes and their abundancies.", collapse=TRUE----
#
# known_BCs <- c("GGTCGAAGCTTCTTTCGGGCCGCACGGCTGCT",
# "CACGATCCGCTTCTATCGCGTGCACTACATGT",
# "ATTGGGTCCGTCTGAGGGCGTTTCTGCGCCTT")
# contaminations <- c("CACGATCCGCTTCTATCGCGTGCACTACATGC",
# "ATTGGGTCCGTCTGAGGGCGTCTCTGCGCCTT",
# "CACGATCCGCTTCTATCGCGTGCGCTACATGT",
# "TACGATCCGCTTCTATCGCGTGCACTACATGT")
#
# generateKirchenplot(BC_dat = BC_data,
# ori_BCs = known_BCs, ori_BCs2 = contaminations,
# setLabels = c("known BCs", "stuff", "contaminations"),
# loga = TRUE, col_type = "wild", m = "lv")
#
## ----eval=TRUE, fig.width=2.5, fig.height=2, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
plotReadFrequencies(BC_dat = BC_data)
## ----eval=FALSE, fig.width=2.5, fig.height=2, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
#
# plotReadFrequencies(BC_dat = BC_data, log = TRUE)
# plotReadFrequencies(BC_dat = BC_data, dens = TRUE)
#
## ----eval=FALSE, fig.width=2.5, fig.height=2, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
#
# plotReadFrequencies(BC_dat = BC_data, bw = 30)
# plotReadFrequencies(BC_dat = BC_data, b = 30)
#
## ----eval=FALSE---------------------------------------------------------------
#
# plotDistanceVisNetwork(BC_dat = BC_data, minDist = 1, loga = TRUE, m = "hamming")
# plotDistanceIgraph(BC_dat = BC_data, minDist = 1, loga = TRUE, m = "hamming")
#
## ----eval=TRUE, fig.width=3, fig.height=3, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
ggplotDistanceGraph(BC_dat = BC_data, minDist = 1, loga = TRUE, m = "hamming")
## ----eval=TRUE, fig.width=4.5, fig.height=3, fig.pos = 'H', fig.align='center', fig.show='asis', collapse=TRUE----
known_BCs <- c("GGTCGAAGCTTCTTTCGGGCCGCACGGCTGCT",
"CACGATCCGCTTCTATCGCGTGCACTACATGT",
"ATTGGGTCCGTCTGAGGGCGTTTCTGCGCCTT")
ggplotDistanceGraph(BC_dat = BC_data,
minDist = 1, loga = TRUE, m = "hamming",
ori_BCs = known_BCs, lay = "circle", complete = FALSE,
col_type = "topo.colors", legend_size = 2)
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# createGDF(BC_dat = BC_data, minDist = 1, loga = TRUE, m = "hamming")
#
## ----eval=TRUE, fig.width=4, fig.height=4, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
plotClusterTree(BC_dat = BC_data, tree_est = "UPGMA",
type = "fan", tipLabel = FALSE, m = "hamming")
## ----eval=TRUE, fig.width=3, fig.height=3, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
plotClusterGgTree(BC_dat = BC_data, tree_est = "NJ",
type = "rectangular", m = "hamming")
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# BC_data_EC <- errorCorrection(BC_dat = BC_data,
# maxDist = 4,
# save_it = FALSE,
# cpus = 1,
# strategy = "sequential",
# m = "hamming",
# type = "standard",
# only_EC_BCs = FALSE,
# EC_analysis = TRUE,
# start_small = FALSE)
#
# error_correction_clustered_HDs(datEC = BC_data_EC, size = 0.75)
#
## ----echo=FALSE, fig.width=2, fig.height=2.5, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
BC_data_EC <- errorCorrection(BC_dat = BC_data,
maxDist = 4,
save_it = FALSE,
cpus = 1,
strategy = "sequential",
m = "hamming",
type = "standard",
only_EC_BCs = FALSE,
EC_analysis = TRUE,
start_small = FALSE)
error_correction_clustered_HDs(datEC = BC_data_EC, size = 0.75) + ggplot2::theme(axis.title = ggplot2::element_text(size = 8))
## ----eval=TRUE, fig.width=4, fig.height=4, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
error_correction_circlePlot(edges = BC_data_EC$edges, vertices = BC_data_EC$vertices)
## ----eval=TRUE, fig.width=3, fig.height=3, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
error_correction_treePlot(edges = BC_data_EC$edges, vertices = BC_data_EC$vertices)
## ----eval=TRUE, fig.width=4, fig.height=4, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
ggplotDistanceGraph_EC(BC_dat = BC_data, BC_dat_EC = BC_data_EC,
minDist = 1, loga = TRUE, m = "hamming")
## ----eval=FALSE, fig.width=3, fig.height=3, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
#
# plotDistanceVisNetwork_EC(BC_dat = BC_data, BC_dat_EC = BC_data_EC,
# minDist = 1, loga = TRUE, m = "hamming")
#
## ----eval=TRUE, fig.width=3, fig.height=3, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
known_BCs <- c("GGTCGAAGCTTCTTTCGGGCCGCACGGCTGCT",
"CACGATCCGCTTCTATCGCGTGCACTACATGT",
"ATTGGGTCCGTCTGAGGGCGTTTCTGCGCCTT")
ggplotDistanceGraph_EC(BC_dat = BC_data, BC_dat_EC = BC_data_EC,
minDist = 1, loga = TRUE, m = "hamming", ori_BCs = known_BCs)
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# plotDistanceVisNetwork_EC(BC_dat = BC_data, BC_dat_EC = BC_data_EC,
# minDist = 1, loga = TRUE, m = "hamming", ori_BCs = known_BCs)
#
## ----eval=TRUE, fig.width=3, fig.height=3, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
known_BCs <- c("GGTCGAAGCTTCTTTCGGGCCGCACGGCTGCT",
"CACGATCCGCTTCTATCGCGTGCACTACATGT",
"ATTGGGTCCGTCTGAGGGCGTTTCTGCGCCTT")
ggplotDistanceGraph_EC(BC_dat = BC_data, BC_dat_EC = BC_data_EC,
minDist = 1, loga = TRUE, m = "hamming", BC_threshold = 2)
## ----eval=TRUE, fig.width=3, fig.height=2.5, fig.pos = 'H', fig.align='center', fig.show='hold', collapse=TRUE----
# path to the package internal data file
source_dir <- system.file("extdata", package = "genBaRcode")
BC_data_tp1 <- processingRawData(file_name = "test_data.fastq.gz",
source_dir,
mismatch = 10,
label = "tp1",
bc_backbone = getBackboneSelection(1),
bc_backbone_label = "BC_1",
min_score = 10,
save_it = FALSE)
BC_data_tp1 <- errorCorrection(BC_data_tp1, maxDist = 2)
BC_data_tp2 <- processingRawData(file_name = "test_data.fastq.gz",
source_dir,
mismatch = 1,
label = "tp2",
bc_backbone = getBackboneSelection(1),
bc_backbone_label = "BC_1",
min_score = 30,
min_reads = 1000,
save_it = FALSE)
BC_data_tp2 <- errorCorrection(BC_data_tp2, maxDist = 4, type = "clustering")
BC_data_tp3 <- processingRawData(file_name = "test_data.fastq.gz",
source_dir,
mismatch = 0,
label = "tp3",
bc_backbone = getBackboneSelection(1),
bc_backbone_label = "BC_1",
min_score = 37,
save_it = FALSE)
BC_data_tp3 <- errorCorrection(BC_data_tp3, maxDist = 8, type = "graph based")
BC_list <- list(BC_data_tp1, BC_data_tp2, BC_data_tp3)
BC_matrix <- generateTimeSeriesData(BC_dat_list = BC_list)
plotTimeSeries(ov_dat = BC_matrix)
plotVennDiagram(BC_dat = BC_list)
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# # choose colors
# test_colors <- RColorBrewer::brewer.pal(12, "Set3")
#
# plotTimeSeries(ov_dat = BC_matrix[1:12, ],
# colr = test_colors, tp = c(1,3,4),
# x_label = "test data", y_label = "test freqs")
#
# plotVennDiagram(BC_dat = BC_list, alpha_value = 0.25,
# colrs = c("green", "red", "blue"), border_color = "orange",
# plot_title = "this is the title",
# legend_sort = c("tp2_EC", "tp3_EC", "tp1_EC"),
# annotationSize = 2.5)
#
## ----eval=FALSE---------------------------------------------------------------
#
# # start Shiny app with the package internal test data file
# genBaRcode_app()
#
# # start Shiny app with access to a predefined directory
# genBaRcode_app(dat_dir = "/my/test/directory/")
#
## ----eval=TRUE, out.width = 40, collapse=TRUE---------------------------------
getBackboneSelection()
bb <- getBackboneSelection(1)
show(bb)
bb <- getBackboneSelection("BC32-eBFP")
show(bb)
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# BC_data <- readBCdat(path = "/my/test/directory", label = "test_label", s = ";",
# BC_backbone = "ACTNNGGCNNTGANN", file_name = "test_file.csv")
#
## ----eval=FALSE, collapse=TRUE------------------------------------------------
#
# test_data_frame <- data.frame(read_count = seq(100, 400, 100),
# barcode = c("AAAAAAAA", "GGGGGGGG",
# "TTTTTTTT", "CCCCCCCC"))
#
# BC_data <- asBCdat(dat = test_data_frame,
# label = "test_label",
# BC_backbone = "CCCNNAAANNTTTNNGGGNN",
# resDir = "/my/results/directory/")
#
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
test_data_frame <- data.frame(read_count = seq(100, 400, 100),
barcode = c("AAAAAAAA", "GGGGGGGG",
"TTTTTTTT", "CCCCCCCC"))
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(test_data_frame)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
BC_data_1 <- asBCdat(dat = test_data_frame,
label = "test_label_1",
BC_backbone = "CCCNNAAANNTTTNNGGGNN",
resDir = getwd())
test_data_frame <- data.frame(read_count = c(300, 99, 150, 400),
barcode = c("TTTTTTTT", "AATTTAAA",
"GGGGGGGG", "CCCCCCCC"))
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(test_data_frame)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
BC_data_2 <- asBCdat(dat = test_data_frame,
label = "test_label_2",
BC_backbone = "CCCNNAAANNTTTNNGGGNN",
resDir = getwd())
test <- genBaRcode:::com_pair(BC_dat1 = BC_data_1, BC_dat2 = BC_data_2)
## ----eval=TRUE, collapse=TRUE-------------------------------------------------
show(test)
|
/inst/doc/genBaRcode_Vignette.R
|
no_license
|
cran/genBaRcode
|
R
| false | false | 26,108 |
r
|
## makeCacheMatrix: creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
        inv <<- NULL  # <<- so the cached inverse in the enclosing environment is cleared when the matrix changes
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve: computes the inverse of the special matrix returned by
## makeCacheMatrix
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data,...)
x$setinverse(inv)
inv
}
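## Illustrative usage (added sketch, not part of the original assignment code);
## uncomment to try it on a small invertible matrix:
# m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
# cacheSolve(m)   # first call computes the inverse and caches it
# cacheSolve(m)   # second call prints "getting cached data" and reuses the cache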
|
/cachematrix.R
|
no_license
|
hacheemaster/ProgrammingAssignment2
|
R
| false | false | 780 |
r
|
## makeCacheMatrix: creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
        inv <<- NULL  # <<- so the cached inverse in the enclosing environment is cleared when the matrix changes
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve: computes the inverse of the special matrix returned by
## makeCacheMatrix
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data,...)
x$setinverse(inv)
inv
}
|
##Read Data
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",colClasses = "character")
##convert the Date column from character to Date class (the factor() wrapper is unnecessary)
data$Date<-as.Date(data$Date,"%d/%m/%Y")
data1<-subset(data,Date=="2007-2-1"|Date=="2007-2-2")
data1$Global_active_power<-as.numeric(data1$Global_active_power)
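##optional (added sketch): the raw file also carries a Time column, so Date and
##Time could be combined into a POSIXct timestamp if later plots need it;
##left commented because the histogram below only uses Global_active_power
#data1$datetime <- as.POSIXct(paste(data1$Date, data1$Time), format = "%Y-%m-%d %H:%M:%S")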
##create png file
png("Plot1.png", width = 480, height = 480)
hist(data1$Global_active_power,col="red",
main="Global Active Power",xlab ="Global Active Power(Kilowatts)")
dev.off()
|
/code/Plot1.r
|
no_license
|
Nivi14/ExData_Plotting1
|
R
| false | false | 498 |
r
|
##Read Data
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",colClasses = "character")
##convert the Date column from character to Date class (the factor() wrapper is unnecessary)
data$Date<-as.Date(data$Date,"%d/%m/%Y")
data1<-subset(data,Date=="2007-2-1"|Date=="2007-2-2")
data1$Global_active_power<-as.numeric(data1$Global_active_power)
##create png file
png("Plot1.png", width = 480, height = 480)
hist(data1$Global_active_power,col="red",
main="Global Active Power",xlab ="Global Active Power(Kilowatts)")
dev.off()
|
#' ---
#' output: github_document
#' ---
#remote awesome work
## deja vu from earlier!
library(tidyverse)
library(here)
## create a data frame of your installed packages
## hint: installed.packages() is the function you need
## optional: select just some of the variables, such as
## * Package
## * LibPath
## * Version
## * Priority
## * Built
inst <- installed.packages() %>%
  as_tibble() %>%   # as.tibble() is deprecated; as_tibble() is the current name
select(Package, LibPath, Version, Priority, Built)
nrow(inst)
## write this data frame to data/installed-packages.csv
## hint: readr::write_csv() or write.table()
## idea: try using here::here() to create the file path
#readr::write_csv(inst,here::here("data","installed-packages.csv"))
## YES overwrite the file that is there now (or delete it first)
## that's a old result from me (Jenny)
## it an example of what yours should look like and where it should go
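## base-R equivalent of the readr hint above (illustrative sketch only, so it is
## also left commented out):
# write.table(inst, here::here("data", "installed-packages.csv"),
#             sep = ",", row.names = FALSE)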
|
/test.R
|
no_license
|
KevinKnightIDEXX/packages-report
|
R
| false | false | 880 |
r
|
#' ---
#' output: github_document
#' ---
#remote awesome work
## deja vu from earlier!
library(tidyverse)
library(here)
## create a data frame of your installed packages
## hint: installed.packages() is the function you need
## optional: select just some of the variables, such as
## * Package
## * LibPath
## * Version
## * Priority
## * Built
inst <- installed.packages() %>%
  as_tibble() %>%   # as.tibble() is deprecated; as_tibble() is the current name
select(Package, LibPath, Version, Priority, Built)
nrow(inst)
## write this data frame to data/installed-packages.csv
## hint: readr::write_csv() or write.table()
## idea: try using here::here() to create the file path
#readr::write_csv(inst,here::here("data","installed-packages.csv"))
## YES overwrite the file that is there now (or delete it first)
## that's a old result from me (Jenny)
## it an example of what yours should look like and where it should go
|
#!/usr/bin/env Rscript
# eQTL analysis
library(data.table)
source(file=file.path("utils", "load_data.R"))
source(file=file.path("utils", "QTL-common.R"))
ncpus <- as.integer(commandArgs(trailingOnly=TRUE)[[2]])
# read covariates
patients <- load.patients()
# read gene positions, and add an end position
genepos <- load.genes()
genepos$pos2 <- genepos$pos
# read expression data
gene <- load.edata(patients)
# match gene positions to gene data
genepos <- genepos[na.omit(match(colnames(gene), feature)),]
gene <- gene[,na.omit(match(genepos[,feature], colnames(gene)))]
# match patient IDs to gene data
patients <- patients[na.omit(match(rownames(gene), patients[,projid])),]
gene <- gene[na.omit(match(patients[,projid], rownames(gene))),]
# make sure everything matches
stopifnot(rownames(gene) == patients[,projid])
stopifnot(colnames(gene) == genepos[,as.character(feature)])
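# toy illustration (added, not part of the analysis) of the na.omit(match(...))
# idiom used above to put one table into the row order of another:
local({
  ids_a <- c("g1", "g2", "g3")
  ids_b <- c("g3", "g1")
  ids_a[na.omit(match(ids_b, ids_a))]  # "g3" "g1": a reordered to b's order
})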
# run Matrix eQTL
get.all.qtls(gene, genepos, patients, file.path("results", "eQTL"), ncpus)
|
/scripts/eQTL.R
|
no_license
|
rmcclosk/mostafavi-rotation
|
R
| false | false | 984 |
r
|
#!/usr/bin/env Rscript
# eQTL analysis
library(data.table)
source(file=file.path("utils", "load_data.R"))
source(file=file.path("utils", "QTL-common.R"))
ncpus <- as.integer(commandArgs(trailingOnly=TRUE)[[2]])
# read covariates
patients <- load.patients()
# read gene positions, and add an end position
genepos <- load.genes()
genepos$pos2 <- genepos$pos
# read expression data
gene <- load.edata(patients)
# match gene positions to gene data
genepos <- genepos[na.omit(match(colnames(gene), feature)),]
gene <- gene[,na.omit(match(genepos[,feature], colnames(gene)))]
# match patient IDs to gene data
patients <- patients[na.omit(match(rownames(gene), patients[,projid])),]
gene <- gene[na.omit(match(patients[,projid], rownames(gene))),]
# make sure everything matches
stopifnot(rownames(gene) == patients[,projid])
stopifnot(colnames(gene) == genepos[,as.character(feature)])
# run Matrix eQTL
get.all.qtls(gene, genepos, patients, file.path("results", "eQTL"), ncpus)
|
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
reps<-50
outerreps<-1000
size<-rev(round(10^seq(2, 5, 0.25)))[
12
]
nc<-12
plan(strategy=multisession, workers=nc)
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[12]], l=0, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD12","l",0,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
print(Sys.time()-start)
})
|
/scripts/checkplots_for_parallel_amarel/asy_958.R
|
no_license
|
dushoff/diversity_metrics
|
R
| false | false | 536 |
r
|
source("/home/mr984/diversity_metrics/scripts/checkplot_initials.R")
source("/home/mr984/diversity_metrics/scripts/checkplot_inf.R")
reps<-50
outerreps<-1000
size<-rev(round(10^seq(2, 5, 0.25)))[
12
]
nc<-12
plan(strategy=multisession, workers=nc)
map(rev(1:outerreps), function(x){
start<-Sys.time()
out<-checkplot_inf(flatten(flatten(SADs_list))[[12]], l=0, inds=size, reps=reps)
write.csv(out, paste("/scratch/mr984/SAD12","l",0,"inds", size, "outernew", x, ".csv", sep="_"), row.names=F)
rm(out)
print(Sys.time()-start)
})
|
# check argo profiles
load('analysis/data/jan_march_data.RData')
profDensAggr <- list()
for (i in 1:length(profLatAggr)) {
temp <- profTempAggr[[i]][[1]]
psal <- profPsalAggr[[i]][[1]]
  pressure <- profPresAggr[[i]][[1]]  # was profPsalAggr: pressure must come from the pressure list, as in the code below
profDensAggr[[i]] <- gsw::gsw_pot_rho_t_exact(psal, temp, pressure, p_ref = 0)
if (i %% 2000 == 0) {
print(i)
}
}
library(dplyr)
mean_decreasing <- sapply(1:length(profDensAggr), function(x) {
dens <- profDensAggr[[x]][,1]
pres <- profPresAggr[[x]][[1]]
lagged <- lag(dens)
lagged_pressure <- lag(pres)
weights <- pres - lagged_pressure
sum(weights * ((dens - lagged) >= 0), na.rm = T)/sum(weights, na.rm = T)
})
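# quick illustrative check (added; toy values rather than Argo data) of the
# pressure-weighted proportion computed above
local({
  dens <- c(1025, 1026, 1025.5, 1027)
  pres <- c(0, 10, 30, 60)
  w <- pres - dplyr::lag(pres)
  sum(w * ((dens - dplyr::lag(dens)) >= 0), na.rm = TRUE) / sum(w, na.rm = TRUE)  # 2/3 of the column is non-decreasing
})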
mean_decreasing_550 <- sapply(1:length(profDensAggr), function(x) {
dens <- profDensAggr[[x]][,1]
pres <- profPresAggr[[x]][[1]]
lagged <- lag(dens)
#lagged_pressure <- lag(pres)
#weights <- pres - lagged_pressure
#findInterval(550, pres, all.inside = T)
(dens - lagged)[findInterval(550, pres, all.inside = T)+1] >= 0
})
mean_decreasing_550_val <- sapply(1:length(profDensAggr), function(x) {
dens <- profDensAggr[[x]][,1]
pres <- profPresAggr[[x]][[1]]
lagged <- lag(dens)
lagged_pressure <- lag(pres)
#weights <- pres - lagged_pressure
#findInterval(550, pres, all.inside = T)
interv <- findInterval(550, pres, all.inside = T)+1
(dens[interv] - lagged[interv])/(pres[interv] - lagged_pressure[interv])
})
load('analysis/data/RG_Defined_mask.RData')
df <- data.frame(profLongAggr, profLatAggr, mean_decreasing,mean_decreasing_550,mean_decreasing_550_val,
profYearAggr, profMonthAggr)
df$long_grid <- round(ifelse(df$profLongAggr > 180,df$profLongAggr - 360,
df$profLongAggr)+ .5)- .5
df$lat_grid <- round(df$profLatAggr + .5)- .5
df_comb <- inner_join(df, RG_defined_long, by = c('long_grid' = 'long',
'lat_grid' = 'lat'))
df_comb <- df_comb[df_comb$value > 1999,]
df_comb <- df_comb[!is.na(df_comb$value),]
library(ggplot2)
theme_set(theme_bw() + theme(panel.grid = element_blank(), text = element_text(size = 15)))
ggplot()+
geom_point(data = df_comb,
aes(x = ifelse(profLongAggr >360, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing),
size = .1)+
scale_color_gradient2(low = 'blue', mid = 'white', name = 'Proportion',high = 'red',
midpoint = .6,
limits = c(.18, 1))+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
labs(x='Longitude', y = 'Latitude') +
theme_gray()+ theme(panel.grid = element_blank(), text = element_text(size = 15))
ggsave('analysis/images/misc/dens_monotone.png',
height = 4, width = 7.25)
ggplot()+
geom_point(data = df_comb[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2,],
aes(x = ifelse(profLongAggr >360, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing_550),
size = .01)+
scale_color_discrete()+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
labs(x='Longitude', y = 'Latitude') +
theme_gray()+ theme(panel.grid = element_blank(), text = element_text(size = 15))
library(viridis)
ggplot()+
geom_point(data = df_comb[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2,],
aes(x = ifelse(profLongAggr >360, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing_550_val),
size = .2)+
# scale_color_gradientn(colors = rev(viridis_pal()(50)),values = scales::rescale(c(-.0005, 0, .001, .003, .006, .008)),
# limits = quantile(df_comb$mean_decreasing_550_val[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2],
# probs = c(.01, .99)))+
scale_color_gradient2(limits = quantile(df_comb$mean_decreasing_550_val[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2],
probs = c(.0001, .995)),
low = 'blue', mid = 'white', high = 'red')+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
#g + xlab( expression(Value~is~sigma~R^{2}==0.6))
# labs(x='Longitude', y = 'Latitude',
# color = expression(Density'\n'~is)) +
labs(x='Longitude', y = 'Latitude',
#color = expression(paste('Density\nGradient\n', 'a (', kg, '/',m^{3},'/p)'))) +
color =expression(atop("Density Gradient",
"(kg/"*m^{3}*"/p)"))) +
theme_gray()+ theme(legend.title = element_text(size = 12),
panel.grid = element_blank(), text = element_text(size = 15))
ggsave('analysis/images/misc/dens_monotone_550_profiles.png',
height = 4, width = 7.25)
b <- ggplot()+
geom_polygon(data= map_data('world'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
geom_point(data = df_comb,
aes(x = ifelse(profLongAggr > 180, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing),
size = .1)+
scale_color_gradient2(low = 'blue', mid = 'white',
name = 'Prop Monotone',high = 'red', midpoint = .6,
limits = c(.18, 1))+
labs(x='Longitude', y = 'Latitude') +
coord_cartesian(xlim = c(-100, 20), ylim = c(0, 60))+
#facet_wrap(~profYearAggr, ncol = 3) +
theme_gray()
ggsave('analysis/images/misc/dens_monotone_atl.png',
height = 4, width = 7.25)
load('analysis/results/density_check_pred.RData')
avg_prop_nonnegative <- sapply(density_check, function(x) x[[1]])
library(ggplot2)
df_preds_use_summary <- data.frame(df_preds_use, avg_prop_nonnegative) %>%
group_by(long, lat) %>%
summarise(dens = mean(avg_prop_nonnegative, na.rm = T))
a <- ggplot(data = df_preds_use_summary %>%
inner_join( RG_defined_long) %>% filter(value > 1999),
aes(x =ifelse(long < 0, long + 360, long), y = lat, fill = dens))+
geom_raster()+
scale_fill_gradient2(low = 'blue', mid = 'white', high = 'red',midpoint = .6,
limits = c(.18, 1))+
geom_polygon(data = map_data('world2'), aes(x =long, y = lat, group = group),
fill = 'white', color = 'black', size = .2)+
#coord_cartesian(xlim = c(-100, 20), ylim = c(0, 60))+
labs(x = 'Longitude', y = 'Latitude', fill = 'Proportion')
a
ggsave('analysis/images/misc/dens_prop_ours.png',
height = 4, width = 7.25)
library(patchwork)
a / b
a <- ggplot(data = df_preds_use_summary #%>%
# inner_join( RG_defined_long) %>% filter(value > 1999)
,
aes(x =ifelse(long < 0, long + 360, long), y = lat, fill = dens))+
geom_raster()+
scale_fill_gradient2(low = 'blue', mid = 'white', high = 'red',midpoint = .6,
limits = c(.18, 1))+
geom_polygon(data = map_data('world2'), aes(x =long, y = lat, group = group),
fill = 'white', color = 'black', size = .2)+
coord_cartesian(xlim = c(160, 250), ylim = c(-75, -55))+
labs(x = 'Longitude', y = 'Latitude', fill = 'Proportion',
title = 'Proportion of Pressure Dimension with nonnegative derivative',
subtitle = 'February Predictions, reference pressure 0 dbar')
a
b <- ggplot()+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
geom_point(data = df,
aes(x = profLongAggr ,
y = profLatAggr, color = mean_decreasing),
size = .1)+
scale_color_gradient2(low = 'blue', mid = 'white',
name = 'Prop Monotone',high = 'red', midpoint = .6,
limits = c(.18, 1))+
labs(x='Longitude', y = 'Latitude') +
coord_cartesian(xlim = c(160, 250), ylim = c(-75, -55))+
theme_gray()
b
a/b
|
/analysis/code/09_misc_plots/check_density_profiles.R
|
no_license
|
xingcheg/argofda
|
R
| false | false | 8,146 |
r
|
# check argo profiles
load('analysis/data/jan_march_data.RData')
profDensAggr <- list()
for (i in 1:length(profLatAggr)) {
temp <- profTempAggr[[i]][[1]]
psal <- profPsalAggr[[i]][[1]]
  pressure <- profPresAggr[[i]][[1]]  # was profPsalAggr: pressure must come from the pressure list, as in the code below
profDensAggr[[i]] <- gsw::gsw_pot_rho_t_exact(psal, temp, pressure, p_ref = 0)
if (i %% 2000 == 0) {
print(i)
}
}
library(dplyr)
mean_decreasing <- sapply(1:length(profDensAggr), function(x) {
dens <- profDensAggr[[x]][,1]
pres <- profPresAggr[[x]][[1]]
lagged <- lag(dens)
lagged_pressure <- lag(pres)
weights <- pres - lagged_pressure
sum(weights * ((dens - lagged) >= 0), na.rm = T)/sum(weights, na.rm = T)
})
mean_decreasing_550 <- sapply(1:length(profDensAggr), function(x) {
dens <- profDensAggr[[x]][,1]
pres <- profPresAggr[[x]][[1]]
lagged <- lag(dens)
#lagged_pressure <- lag(pres)
#weights <- pres - lagged_pressure
#findInterval(550, pres, all.inside = T)
(dens - lagged)[findInterval(550, pres, all.inside = T)+1] >= 0
})
mean_decreasing_550_val <- sapply(1:length(profDensAggr), function(x) {
dens <- profDensAggr[[x]][,1]
pres <- profPresAggr[[x]][[1]]
lagged <- lag(dens)
lagged_pressure <- lag(pres)
#weights <- pres - lagged_pressure
#findInterval(550, pres, all.inside = T)
interv <- findInterval(550, pres, all.inside = T)+1
(dens[interv] - lagged[interv])/(pres[interv] - lagged_pressure[interv])
})
load('analysis/data/RG_Defined_mask.RData')
df <- data.frame(profLongAggr, profLatAggr, mean_decreasing,mean_decreasing_550,mean_decreasing_550_val,
profYearAggr, profMonthAggr)
df$long_grid <- round(ifelse(df$profLongAggr > 180,df$profLongAggr - 360,
df$profLongAggr)+ .5)- .5
df$lat_grid <- round(df$profLatAggr + .5)- .5
df_comb <- inner_join(df, RG_defined_long, by = c('long_grid' = 'long',
'lat_grid' = 'lat'))
df_comb <- df_comb[df_comb$value > 1999,]
df_comb <- df_comb[!is.na(df_comb$value),]
library(ggplot2)
theme_set(theme_bw() + theme(panel.grid = element_blank(), text = element_text(size = 15)))
ggplot()+
geom_point(data = df_comb,
aes(x = ifelse(profLongAggr >360, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing),
size = .1)+
scale_color_gradient2(low = 'blue', mid = 'white', name = 'Proportion',high = 'red',
midpoint = .6,
limits = c(.18, 1))+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
labs(x='Longitude', y = 'Latitude') +
theme_gray()+ theme(panel.grid = element_blank(), text = element_text(size = 15))
ggsave('analysis/images/misc/dens_monotone.png',
height = 4, width = 7.25)
ggplot()+
geom_point(data = df_comb[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2,],
aes(x = ifelse(profLongAggr >360, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing_550),
size = .01)+
scale_color_discrete()+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
labs(x='Longitude', y = 'Latitude') +
theme_gray()+ theme(panel.grid = element_blank(), text = element_text(size = 15))
library(viridis)
ggplot()+
geom_point(data = df_comb[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2,],
aes(x = ifelse(profLongAggr >360, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing_550_val),
size = .2)+
# scale_color_gradientn(colors = rev(viridis_pal()(50)),values = scales::rescale(c(-.0005, 0, .001, .003, .006, .008)),
# limits = quantile(df_comb$mean_decreasing_550_val[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2],
# probs = c(.01, .99)))+
scale_color_gradient2(limits = quantile(df_comb$mean_decreasing_550_val[df_comb$profYearAggr == 2015 & df_comb$profMonthAggr==2],
probs = c(.0001, .995)),
low = 'blue', mid = 'white', high = 'red')+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
#g + xlab( expression(Value~is~sigma~R^{2}==0.6))
# labs(x='Longitude', y = 'Latitude',
# color = expression(Density'\n'~is)) +
labs(x='Longitude', y = 'Latitude',
#color = expression(paste('Density\nGradient\n', 'a (', kg, '/',m^{3},'/p)'))) +
color =expression(atop("Density Gradient",
"(kg/"*m^{3}*"/p)"))) +
theme_gray()+ theme(legend.title = element_text(size = 12),
panel.grid = element_blank(), text = element_text(size = 15))
ggsave('analysis/images/misc/dens_monotone_550_profiles.png',
height = 4, width = 7.25)
b <- ggplot()+
geom_polygon(data= map_data('world'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
geom_point(data = df_comb,
aes(x = ifelse(profLongAggr > 180, profLongAggr - 360, profLongAggr) ,
y = profLatAggr, color = mean_decreasing),
size = .1)+
scale_color_gradient2(low = 'blue', mid = 'white',
name = 'Prop Monotone',high = 'red', midpoint = .6,
limits = c(.18, 1))+
labs(x='Longitude', y = 'Latitude') +
coord_cartesian(xlim = c(-100, 20), ylim = c(0, 60))+
#facet_wrap(~profYearAggr, ncol = 3) +
theme_gray()
ggsave('analysis/images/misc/dens_monotone_atl.png',
height = 4, width = 7.25)
load('analysis/results/density_check_pred.RData')
avg_prop_nonnegative <- sapply(density_check, function(x) x[[1]])
library(ggplot2)
df_preds_use_summary <- data.frame(df_preds_use, avg_prop_nonnegative) %>%
group_by(long, lat) %>%
summarise(dens = mean(avg_prop_nonnegative, na.rm = T))
a <- ggplot(data = df_preds_use_summary %>%
inner_join( RG_defined_long) %>% filter(value > 1999),
aes(x =ifelse(long < 0, long + 360, long), y = lat, fill = dens))+
geom_raster()+
scale_fill_gradient2(low = 'blue', mid = 'white', high = 'red',midpoint = .6,
limits = c(.18, 1))+
geom_polygon(data = map_data('world2'), aes(x =long, y = lat, group = group),
fill = 'white', color = 'black', size = .2)+
#coord_cartesian(xlim = c(-100, 20), ylim = c(0, 60))+
labs(x = 'Longitude', y = 'Latitude', fill = 'Proportion')
a
ggsave('analysis/images/misc/dens_prop_ours.png',
height = 4, width = 7.25)
library(patchwork)
a / b
a <- ggplot(data = df_preds_use_summary #%>%
# inner_join( RG_defined_long) %>% filter(value > 1999)
,
aes(x =ifelse(long < 0, long + 360, long), y = lat, fill = dens))+
geom_raster()+
scale_fill_gradient2(low = 'blue', mid = 'white', high = 'red',midpoint = .6,
limits = c(.18, 1))+
geom_polygon(data = map_data('world2'), aes(x =long, y = lat, group = group),
fill = 'white', color = 'black', size = .2)+
coord_cartesian(xlim = c(160, 250), ylim = c(-75, -55))+
labs(x = 'Longitude', y = 'Latitude', fill = 'Proportion',
title = 'Proportion of Pressure Dimension with nonnegative derivative',
subtitle = 'February Predictions, reference pressure 0 dbar')
a
b <- ggplot()+
geom_polygon(data= map_data('world2'), aes(x = long, y = lat, group = group),
fill = 'white', color = 'black', size = .2) +
geom_point(data = df,
aes(x = profLongAggr ,
y = profLatAggr, color = mean_decreasing),
size = .1)+
scale_color_gradient2(low = 'blue', mid = 'white',
name = 'Prop Monotone',high = 'red', midpoint = .6,
limits = c(.18, 1))+
labs(x='Longitude', y = 'Latitude') +
coord_cartesian(xlim = c(160, 250), ylim = c(-75, -55))+
theme_gray()
b
a/b
|
library(pMineR)
### Name: confCheck_easy
### Title: A simple conformance checking class
### Aliases: confCheck_easy
### ** Examples
## Not run:
##D
##D # Create a Conformance Checker obj
##D obj.cc <- confCheck_easy()
##D
##D # Load an XML with the workflow to check
##D obj.cc$loadWorkFlow( WF.fileName='../otherFiles/import_01/rules.v2.xml' )
##D
##D # plot the graph related to the XML
##D obj.cc$plot()
##D
##D # now play 20 processes, 10 correct and 10 mismatching ones
##D # (matching and not matching can be seen thanks to the 'valido' column)
##D aaa <- obj.cc$play(number.of.cases = 20,min.num.of.valid.words = 10)
##D
##D # Build a dataLoaderObject
##D objDL <- dataLoader()
##D
##D # load the previously generated data.frame
##D objDL$load.data.frame(mydata = aaa$valid.data.frame,IDName = "patID",
##D EVENTName = "event",dateColumnName = "date")
##D
##D # now load the data into the obj
##D obj.cc$loadDataset(dataList = objDL$getData())
##D # replay the loaded data
##D obj.cc$replay()
##D
##D # plot the result, showing the terminations in absolute values
##D obj.cc$plot.replay.result(whatToCount = "terminations",
##D kindOfNumber = "absolute")
##D # plot the result, showing the transitions in relative values
##D obj.cc$plot.replay.result(whatToCount = "activations",
##D kindOfNumber = "relative")
##D
##D # get the XML of the replay
##D xmlText <- obj.cc$get.XML.replay.result()
##D # or the same data in form of list
##D list.result <- obj.cc$get.list.replay.result()
##D
##D # plot the timeline of the first patient
##D # and the timeline computed during the re-play
##D obj.cc$plotPatientEventTimeLine(patientID = "1")
##D obj.cc$plotPatientReplayedTimeline(patientID = "1")
##D
## End(Not run)
|
/data/genthat_extracted_code/pMineR/examples/confCheck_easy.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,736 |
r
|
library(pMineR)
### Name: confCheck_easy
### Title: A simple conformance checking class
### Aliases: confCheck_easy
### ** Examples
## Not run:
##D
##D # Create a Conformance Checker obj
##D obj.cc <- confCheck_easy()
##D
##D # Load an XML with the workflow to check
##D obj.cc$loadWorkFlow( WF.fileName='../otherFiles/import_01/rules.v2.xml' )
##D
##D # plot the graph related to the XML
##D obj.cc$plot()
##D
##D # now play 20 processes, 10 correct and 10 mismatching ones
##D # (matching and not matching can be seen thanks to the 'valido' column)
##D aaa <- obj.cc$play(number.of.cases = 20,min.num.of.valid.words = 10)
##D
##D # Build a dataLoaderObject
##D objDL <- dataLoader()
##D
##D # load the previously generated data.frame
##D objDL$load.data.frame(mydata = aaa$valid.data.frame,IDName = "patID",
##D EVENTName = "event",dateColumnName = "date")
##D
##D # now load the data into the obj
##D obj.cc$loadDataset(dataList = objDL$getData())
##D # replay the loaded data
##D obj.cc$replay()
##D
##D # plot the result, showing the terminations in absolute values
##D obj.cc$plot.replay.result(whatToCount = "terminations",
##D kindOfNumber = "absolute")
##D # plot the result, showing the transitions in relative values
##D obj.cc$plot.replay.result(whatToCount = "activations",
##D kindOfNumber = "relative")
##D
##D # get the XML of the replay
##D xmlText <- obj.cc$get.XML.replay.result()
##D # or the same data in form of list
##D list.result <- obj.cc$get.list.replay.result()
##D
##D # plot the timeline of the first patient
##D # and the timeline computed during the re-play
##D obj.cc$plotPatientEventTimeLine(patientID = "1")
##D obj.cc$plotPatientReplayedTimeline(patientID = "1")
##D
## End(Not run)
|
suppressMessages(library(LauraeCE))
suppressMessages(library(parallel))
# Since 2017/12/23, the strategy to generate discrete data has changed
# Therefore, matching results with the old CEoptim is not possible anymore when using discrete data.
# Continuous Testing
fun <- function(x){
return(3 * (1 - x[1]) ^ 2 * exp(-x[1] ^ 2 - (x[2] + 1) ^ 2) - 10 * (x[1] / 5 - x[1] ^ 3 - x[2] ^ 5) * exp(-x[1] ^ 2 - x[2] ^ 2) - 1 / 3 * exp(-(x[1] + 1) ^ 2 - x[2] ^ 2))
}
mu0 <- c(-3, -3)
sigma0 <- c(10, 10)
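# optional cross-check (added sketch, not part of the original benchmark):
# a plain local optimiser on the same surface gives a rough idea of the
# maximum CE should report; stats::optim is assumed to be acceptable here
local({
  opt <- optim(c(0, 0), fn = function(x) -fun(x))
  -opt$value  # a local maximum of fun reached from the origin
})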
system.time({
set.seed(11111)
res1 <- CEoptim::CEoptim(fun,
continuous = list(mean = mu0,
sd = sigma0),
maximize = TRUE)
})
system.time({
set.seed(11111)
res2 <- CEoptim(fun,
continuous = list(mean = mu0,
sd = sigma0),
maximize = TRUE)
})
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(fun,
continuous = list(mean = mu0,
sd = sigma0),
maximize = TRUE,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimum, res2$optimum)
all.equal(res1$optimum, res3$optimum)
# Discrete Testing
data(lesmis)
fmaxcut <- function(x,costs) {
v1 <- which(x == 1)
v2 <- which(x == 0)
return(sum(costs[v1, v2]))
}
p0 <- list()
for (i in 1:77) {
p0 <- c(p0, list(rep(0.5, 2)))
}
p0[[1]] <- c(0, 1)
system.time({
set.seed(11111)
res1 <- CEoptim::CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L)
})
system.time({
set.seed(11111)
res2 <- CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L)
})
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimizer$discrete, res2$optimizer$discrete)
all.equal(res1$optimizer$discrete, res3$optimizer$discrete)
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L,
max_time = 15,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimizer$discrete, res3$optimizer$discrete)
# Mixed Input (Continuous + Discrete) Testing
sumsqrs <- function(theta, rm1, x) {
N <- length(x) #without x[0]
r <- 1 + sort(rm1) # internal end points of regimes
if (r[1] == r[2]) { # test for invalid regime
return(Inf);
}
thetas <- rep(theta, times = c(r, N) - c(1, r + 1) + 1)
xhat <- c(0, head(x, -1)) * thetas
# Compute sum of squared errors
sum((x - xhat) ^ 2)
}
data(yt)
xt <- yt - c(0, yt[-300])
A <- rbind(diag(3), -diag(3))
b <- rep(1, 6)
system.time({
set.seed(11111)
res1 <- CEoptim::CEoptim(f = sumsqrs,
f.arg = list(xt),
continuous = list(mean = c(0, 0,0),
sd = rep(1, 0,3),
conMat = A,
conVec = b),
discrete = list(categories = c(298L, 298L),
smoothProb = 0.5),
N = 10000,
rho = 0.001,
verbose = TRUE)
})
system.time({
set.seed(11111)
res2 <- CEoptim(f = sumsqrs,
f.arg = list(xt),
continuous = list(mean = c(0, 0,0),
sd = rep(1, 0,3),
conMat = A,
conVec = b),
discrete = list(categories = c(298L, 298L),
smoothProb = 0.5),
N = 10000,
rho = 0.001,
verbose = TRUE)
})
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(f = sumsqrs,
f.arg = list(xt),
continuous = list(mean = c(0, 0,0),
sd = rep(1, 0,3),
conMat = A,
conVec = b),
discrete = list(categories = c(298L, 298L),
smoothProb = 0.5),
N = 10000,
rho = 0.001,
verbose = TRUE,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimum, res2$optimum)
all.equal(res1$optimum, res3$optimum)
|
/.break_me.R
|
no_license
|
Laurae2/LauraeCE
|
R
| false | false | 5,407 |
r
|
suppressMessages(library(LauraeCE))
suppressMessages(library(parallel))
# Since 2017/12/23, the strategy to generate discrete data has changed
# Therefore, matching results with the old CEoptim is not possible anymore when using discrete data.
# Continuous Testing
fun <- function(x){
return(3 * (1 - x[1]) ^ 2 * exp(-x[1] ^ 2 - (x[2] + 1) ^ 2) - 10 * (x[1] / 5 - x[1] ^ 3 - x[2] ^ 5) * exp(-x[1] ^ 2 - x[2] ^ 2) - 1 / 3 * exp(-(x[1] + 1) ^ 2 - x[2] ^ 2))
}
mu0 <- c(-3, -3)
sigma0 <- c(10, 10)
system.time({
set.seed(11111)
res1 <- CEoptim::CEoptim(fun,
continuous = list(mean = mu0,
sd = sigma0),
maximize = TRUE)
})
system.time({
set.seed(11111)
res2 <- CEoptim(fun,
continuous = list(mean = mu0,
sd = sigma0),
maximize = TRUE)
})
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(fun,
continuous = list(mean = mu0,
sd = sigma0),
maximize = TRUE,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimum, res2$optimum)
all.equal(res1$optimum, res3$optimum)
# Discrete Testing
data(lesmis)
fmaxcut <- function(x,costs) {
v1 <- which(x == 1)
v2 <- which(x == 0)
return(sum(costs[v1, v2]))
}
p0 <- list()
for (i in 1:77) {
p0 <- c(p0, list(rep(0.5, 2)))
}
p0[[1]] <- c(0, 1)
system.time({
set.seed(11111)
res1 <- CEoptim::CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L)
})
system.time({
set.seed(11111)
res2 <- CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L)
})
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimizer$discrete, res2$optimizer$discrete)
all.equal(res1$optimizer$discrete, res3$optimizer$discrete)
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(fmaxcut,
f.arg = list(costs = lesmis),
maximize = TRUE,
verbose = TRUE,
discrete = list(probs = p0),
N = 3000L,
max_time = 15,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimizer$discrete, res3$optimizer$discrete)
# Mixed Input (Continuous + Discrete) Testing
sumsqrs <- function(theta, rm1, x) {
N <- length(x) #without x[0]
r <- 1 + sort(rm1) # internal end points of regimes
if (r[1] == r[2]) { # test for invalid regime
return(Inf);
}
thetas <- rep(theta, times = c(r, N) - c(1, r + 1) + 1)
xhat <- c(0, head(x, -1)) * thetas
# Compute sum of squared errors
sum((x - xhat) ^ 2)
}
data(yt)
xt <- yt - c(0, yt[-300])
A <- rbind(diag(3), -diag(3))
b <- rep(1, 6)
system.time({
set.seed(11111)
res1 <- CEoptim::CEoptim(f = sumsqrs,
f.arg = list(xt),
continuous = list(mean = c(0, 0,0),
sd = rep(1, 0,3),
conMat = A,
conVec = b),
discrete = list(categories = c(298L, 298L),
smoothProb = 0.5),
N = 10000,
rho = 0.001,
verbose = TRUE)
})
system.time({
set.seed(11111)
res2 <- CEoptim(f = sumsqrs,
f.arg = list(xt),
continuous = list(mean = c(0, 0,0),
sd = rep(1, 0,3),
conMat = A,
conVec = b),
discrete = list(categories = c(298L, 298L),
smoothProb = 0.5),
N = 10000,
rho = 0.001,
verbose = TRUE)
})
cl <- makeCluster(2)
system.time({
set.seed(11111)
res3 <- CEoptim(f = sumsqrs,
f.arg = list(xt),
continuous = list(mean = c(0, 0,0),
sd = rep(1, 0,3),
conMat = A,
conVec = b),
discrete = list(categories = c(298L, 298L),
smoothProb = 0.5),
N = 10000,
rho = 0.001,
verbose = TRUE,
parallelize = TRUE,
cl = cl)
})
stopCluster(cl)
closeAllConnections()
all.equal(res1$optimum, res2$optimum)
all.equal(res1$optimum, res3$optimum)
|
context("read_gff")
read_gff <- function(file) {
spec(file, "gff", "gff3") %>%
infr_skip() %>%
do_read()
}
test_that("output is as expected when reading string", {
intake <- "3R\treg\tbind_site\t46748\t48137\t0.499\t.\t.\tID=enr_reg_1\n"
output <- read_gff(intake)
expect <- dplyr::data_frame(seqid = "3R",
source = "reg",
type = "bind_site",
start = 46748L,
end = 48137L,
score = 0.499,
strand = ".",
phase = ".",
attributes = "ID=enr_reg_1")
expect_equal(output, expect)
expect_equal(length(output), 9)
expect_equal(nrow(output), 1)
})
test_that("output is as expected when reading file", {
intake <- "one-data-field.gff3.gz"
output <- read_gff(intake)
expect <- dplyr::data_frame(seqid = "3R",
source = "Regions_of_sig_enrichment",
type = "binding_site",
start = 46748L,
end = 48137L,
score = 0.49961892708069,
strand = ".",
phase = ".",
attributes = "ID=enriched_region_1")
expect_equal(output, expect)
expect_equal(length(output), 9)
expect_equal(nrow(output), 1)
})
test_that("empty and incorrect data fields error predictably", {
intake <- "\n"
intake2 <- "no-data-fields.gff3.gz"
expect <- "only unexpected number of fields"
expect_error(read_gff(intake), expect)
expect_error(read_gff(intake2), expect)
})
|
/tests/testthat/test-read-gff.R
|
no_license
|
npjc/readrbio
|
R
| false | false | 1,743 |
r
|
context("read_gff")
read_gff <- function(file) {
spec(file, "gff", "gff3") %>%
infr_skip() %>%
do_read()
}
test_that("output is as expected when reading string", {
intake <- "3R\treg\tbind_site\t46748\t48137\t0.499\t.\t.\tID=enr_reg_1\n"
output <- read_gff(intake)
expect <- dplyr::data_frame(seqid = "3R",
source = "reg",
type = "bind_site",
start = 46748L,
end = 48137L,
score = 0.499,
strand = ".",
phase = ".",
attributes = "ID=enr_reg_1")
expect_equal(output, expect)
expect_equal(length(output), 9)
expect_equal(nrow(output), 1)
})
test_that("output is as expected when reading file", {
intake <- "one-data-field.gff3.gz"
output <- read_gff(intake)
expect <- dplyr::data_frame(seqid = "3R",
source = "Regions_of_sig_enrichment",
type = "binding_site",
start = 46748L,
end = 48137L,
score = 0.49961892708069,
strand = ".",
phase = ".",
attributes = "ID=enriched_region_1")
expect_equal(output, expect)
expect_equal(length(output), 9)
expect_equal(nrow(output), 1)
})
test_that("empty and incorrect data fields error predictably", {
intake <- "\n"
intake2 <- "no-data-fields.gff3.gz"
expect <- "only unexpected number of fields"
expect_error(read_gff(intake), expect)
expect_error(read_gff(intake2), expect)
})
|
##------Code to generate dummy media_spend table------##
##------To be executed after generating user_table------##
## Clear Workspace and load required libraries and files
rm(list=ls())
library(data.table)
load('../cleaned/cohort_size_channel.bin')
load('../output/user_table.bin')
# calculate mean/median revenue to create cac for weekly cohorts by channel accordingly
meanrevchannel <- usertable[,{mean(revenue)}, by = "channel"]
usertable$year <- year(usertable$first_transaction)
medianrevchannel <- usertable[,{median(revenue)}, by = c("channel", "year")]
#generating cac for different years and channels
for(i in 1:length(cohort_size$join_cohort)){
if(year(cohort_size$join_cohort[i]) == 2014){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 20, 22)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 29, 31)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 33, 35)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 36, 38)
}
}
if(year(cohort_size$join_cohort[i]) == 2015){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 15, 17)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 20, 22)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 19, 21)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 25, 27)
}
}
if(year(cohort_size$join_cohort[i]) == 2016){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 14, 16)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 18, 21)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 22, 25)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 20, 23)
}
}
if(year(cohort_size$join_cohort[i]) == 2017){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 12, 15)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 19, 20)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 19, 21)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 32, 34)
}
}
}
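# alternative sketch (added, not used below): the nested if-blocks above could be
# replaced by a year/channel lookup table merged onto cohort_size; the bounds
# shown are illustrative and only mirror the 2014 values, a full table would
# list every year/channel pair
# cac_lookup <- data.table(year = 2014,
#                          channel_name = c("Facebook", "Organic", "Paid Search",
#                                           "Pinterest", "Referral"),
#                          lo = c(20, 0, 29, 33, 36),
#                          hi = c(22, 0, 31, 35, 38))
# cohort_size[, year := year(join_cohort)]
# cohort_size <- merge(cohort_size, cac_lookup, by = c("year", "channel_name"))
# cohort_size[, cac := runif(.N, lo, hi)]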
#scaling to make media spend ~ 5% of total revenue
usertable[,{sum(revenue)}, by = c("channel", "year")]
#cohort_size$year <- year(cohort_size$join_cohort)
#cohort_size[,sum(media_spend), by = c("channel_name","year")]
cohort_size$cac <- round((cohort_size$cac * 0.219), 2)
mediaspend <- cohort_size
mediaspend$media_spend <- round((cohort_size$cac * cohort_size$cohort_active_users), 1)
save(mediaspend, file = '../output/media_spend.bin')
|
/dummy_mediaspend.R
|
no_license
|
nabilbhatiya/dummyuser
|
R
| false | false | 3,360 |
r
|
##------Code to generate dummy media_spend table------##
##------To be executed after generating user_table------##
## Clear Workspace and load required libraries and files
rm(list=ls())
library(data.table)
load('../cleaned/cohort_size_channel.bin')
load('../output/user_table.bin')
# calculate mean/median revenue to create cac for weekly cohorts by channel accordingly
meanrevchannel <- usertable[,{mean(revenue)}, by = "channel"]
usertable$year <- year(usertable$first_transaction)
medianrevchannel <- usertable[,{median(revenue)}, by = c("channel", "year")]
#generating cac for different years and channels
for(i in 1:length(cohort_size$join_cohort)){
if(year(cohort_size$join_cohort[i]) == 2014){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 20, 22)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 29, 31)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 33, 35)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 36, 38)
}
}
if(year(cohort_size$join_cohort[i]) == 2015){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 15, 17)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 20, 22)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 19, 21)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 25, 27)
}
}
if(year(cohort_size$join_cohort[i]) == 2016){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 14, 16)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 18, 21)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 22, 25)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 20, 23)
}
}
if(year(cohort_size$join_cohort[i]) == 2017){
if(cohort_size$channel_name[i] == "Facebook"){
cohort_size$cac[i] <- runif(1, 12, 15)
}
if(cohort_size$channel_name[i] == "Organic"){
cohort_size$cac[i] <- runif(1, 0, 0)
}
if(cohort_size$channel_name[i] == "Paid Search"){
cohort_size$cac[i] <- runif(1, 19, 20)
}
if(cohort_size$channel_name[i] == "Pinterest"){
cohort_size$cac[i] <- runif(1, 19, 21)
}
if(cohort_size$channel_name[i] == "Referral"){
cohort_size$cac[i] <- runif(1, 32, 34)
}
}
}
#scaling to make media spend ~ 5% of total revenue
usertable[,{sum(revenue)}, by = c("channel", "year")]
#cohort_size$year <- year(cohort_size$join_cohort)
#cohort_size[,sum(media_spend), by = c("channel_name","year")]
cohort_size$cac <- round((cohort_size$cac * 0.219), 2)
mediaspend <- cohort_size
mediaspend$media_spend <- round((cohort_size$cac * cohort_size$cohort_active_users), 1)
save(mediaspend, file = '../output/media_spend.bin')
|
library(raster)
library(sp)
library(rgeos)
#categoryName <- 'CHU'
#r <- 200
## INIT
city <- shapefile("../data/additional/boundries/bialystok/bialystok.shp")
city <- spTransform(city, CRS("+init=epsg:4326"))
city <- aggregate(city)
crimesPath <- paste('../data/hotspot-grid/bialystokSWD/', categoryName, '.csv', sep = '')
crimes <- read.csv(crimesPath)
crimesDf <- crimes
coordinates(crimes) <- ~ x + y
projection(crimes) = "+proj=aeqd +lat_0=0 +lon_0=0 +x_0=0 +y_0=0"
#projection(crimes) = projection(city)
poiShape <- shapefile("../data/additional/poi/bialystok/gis.osm_pois_free_1.shp")
source('./scripts/additional/poi/osmUtil.R')
## DENSITY
drawCircleAroundPoint <- function(point, radius) {
point <- data.frame(x = point['x'], y = point['y'], name = 'circle')
  coordinates(point) <- ~ x + y
crs(point) <- aeqdGlobal
stopifnot(length(point) == 1)
aeqd <- sprintf("+proj=aeqd +lat_0=0 +lon_0=0 +x_0=%s +y_0=%s",
point@coords[[2]], point@coords[[1]])
projected <- spTransform(point, CRS(aeqd))
buffered <- gBuffer(projected, width=radius, byid=TRUE)
spTransform(buffered, point@proj4string)
}
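# illustrative call (added; assumes aeqdGlobal is the CRS object defined in the
# sourced osmUtil.R, and the coordinates are hypothetical):
# circle_200m <- drawCircleAroundPoint(c(x = 23.15, y = 53.13), radius = 200)
# plot(circle_200m)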
pointsDensity <- data.frame(crimes)
pointsDensity <- pointsDensity[, c('x', 'y')]
result <- computeDensity()
filePath <- paste("../data/hotspot-grid/bialystokSWD/poi/", r, "/", categoryName, "_poi_dens_", r, ".csv", sep = '')
write.csv(result, file = filePath)
|
/scripts/additional/poi/hotspot-grid/bialystokSWDHotspotPOIDens.R
|
no_license
|
kontrabanda/mgr-2
|
R
| false | false | 1,389 |
r
|
library(raster)
library(sp)
library(rgeos)
#categoryName <- 'CHU'
#r <- 200
## INIT
city <- shapefile("../data/additional/boundries/bialystok/bialystok.shp")
city <- spTransform(city, CRS("+init=epsg:4326"))
city <- aggregate(city)
crimesPath <- paste('../data/hotspot-grid/bialystokSWD/', categoryName, '.csv', sep = '')
crimes <- read.csv(crimesPath)
crimesDf <- crimes
coordinates(crimes) <- ~ x + y
projection(crimes) = "+proj=aeqd +lat_0=0 +lon_0=0 +x_0=0 +y_0=0"
#projection(crimes) = projection(city)
poiShape <- shapefile("../data/additional/poi/bialystok/gis.osm_pois_free_1.shp")
source('./scripts/additional/poi/osmUtil.R')
## DENSITY
drawCircleAroundPoint <- function(point, radius) {
point <- data.frame(x = point['x'], y = point['y'], name = 'circle')
  coordinates(point) <- ~ x + y
crs(point) <- aeqdGlobal
stopifnot(length(point) == 1)
aeqd <- sprintf("+proj=aeqd +lat_0=0 +lon_0=0 +x_0=%s +y_0=%s",
point@coords[[2]], point@coords[[1]])
projected <- spTransform(point, CRS(aeqd))
buffered <- gBuffer(projected, width=radius, byid=TRUE)
spTransform(buffered, point@proj4string)
}
pointsDensity <- data.frame(crimes)
pointsDensity <- pointsDensity[, c('x', 'y')]
result <- computeDensity()
filePath <- paste("../data/hotspot-grid/bialystokSWD/poi/", r, "/", categoryName, "_poi_dens_", r, ".csv", sep = '')
write.csv(result, file = filePath)
|
## The following two functions will allow for the inverse of a matrix to be
## calculated and stored in cache.
## The following function will create a list
# First element will set the matrix
# Second will get the matrix
# Third will set the inverse
# Fourth will get the inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y){
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(solve) i <<- solve
getinverse <- function() i
list(set = set, get = get, setinverse = setinverse,
getinverse = getinverse)
}
## The following function will find the inverse of the matrix that was set
## above. First it will check to see if the inverse was found. If not, it
## will find the inverse and the set it in the cache.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
/ProgrammingAssignment2/cachematrix.R
|
no_license
|
dssievewright/ProgrammingAssignment2
|
R
| false | false | 1,118 |
r
|
## The following two functions will allow for the inverse of a matrix to be
## calculated and stored in cache.
## The following function will create a list
# First element will set the matrix
# Second will get the matrix
# Third will set the inverse
# Fourth will get the inverse
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y){
x <<- y
i <<- NULL
}
get <- function() x
setinverse <- function(solve) i <<- solve
getinverse <- function() i
list(set = set, get = get, setinverse = setinverse,
getinverse = getinverse)
}
## The following function will find the inverse of the matrix that was set
## above. First it will check to see if the inverse was found. If not, it
## will find the inverse and the set it in the cache.
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
i
}
|
library(data.table)
input <- data.table::fread("Day 1/input.csv"
, header = FALSE)
# Part 1
param <- input[
,
j = .(value = V1,
remain = 2020 - V1)
]
param[, check := ifelse(remain %in% value, TRUE, FALSE)]
solution <- param[check == TRUE][1, solution := value * remain]
# Part 2
values <- as.vector(input$V1)
differences1 <- as.matrix(outer(values, values, `+`))
differences2 <- 2020 - differences1
matches <- which(matrix(differences2 %in% values, dim(differences2)), arr.ind = TRUE)
pot_match1 <- as.numeric(matches[1, 1])
pot_match2 <- as.numeric(matches[2, 1])
pot_match3 <- as.numeric(matches[3, 1])
match_value1 <- values[pot_match1]
match_value2 <- values[pot_match2]
match_value3 <- values[pot_match3]
solution <- match_value1*match_value2*match_value3
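# added sanity check: the three matched values should form the 2020 triple
stopifnot(match_value1 + match_value2 + match_value3 == 2020)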
|
/Day 1/solution.R
|
permissive
|
tomasokal/adventofcode2020
|
R
| false | false | 816 |
r
|
library(data.table)
input <- data.table::fread("Day 1/input.csv"
, header = FALSE)
# Part 1
param <- input[
,
j = .(value = V1,
remain = 2020 - V1)
]
param[, check := ifelse(remain %in% value, TRUE, FALSE)]
solution <- param[check == TRUE][1, solution := value * remain]
# Part 2
values <- as.vector(input$V1)
differences1 <- as.matrix(outer(values, values, `+`))
differences2 <- 2020 - differences1
matches <- which(matrix(differences2 %in% values, dim(differences2)), arr.ind = TRUE)
pot_match1 <- as.numeric(matches[1, 1])
pot_match2 <- as.numeric(matches[2, 1])
pot_match3 <- as.numeric(matches[3, 1])
match_value1 <- values[pot_match1]
match_value2 <- values[pot_match2]
match_value3 <- values[pot_match3]
solution <- match_value1*match_value2*match_value3
|
testlist <- list(lims = structure(1.18891957015238e-319, .Dim = c(1L, 1L)), points = structure(-2.75946511594154e-48, .Dim = c(1L, 1L )))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988275-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 207 |
r
|
testlist <- list(lims = structure(1.18891957015238e-319, .Dim = c(1L, 1L)), points = structure(-2.75946511594154e-48, .Dim = c(1L, 1L )))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
#reading the dataset
library(ggplot2)
library(readr)
library(dplyr)
myData <- read.csv("C:/Users/amal_/Documents/MLDM1/DataMining/Suicide_India_2001_2012.csv" ,
header = TRUE, sep = ";",
quote = "\"'", dec = ".")
Data1 <- myData[(myData$Typecode == 'Causes'),]
Data2 <- myData[(myData$Typecode == 'Means_adopted'),]
Data3 <- myData[(myData$Typecode == 'Professional_Profile'),]
Data4 <- myData[(myData$Typecode == 'Education_Status'),]
Data5 <- myData[(myData$Typecode == 'Social_Status'),]
#our interest is Data 3 with observations of suicides with the professional profile information about victims
#Data3<- Data3[!(Data3$Total == 0),]
Data3<- Data3[!(Data3$Age_group =='0-100+'),]
Data3['0-14']= 0
Data3['15-29']= 0
Data3['30-44']= 0
Data3['45-59']=0
Data3['60+']= 0
Data3[Data3[,6] == "0-14",8]=1
Data3[Data3[,6] == "15-29",9]=1
Data3[Data3[,6] == "30-44",10]=1
Data3[Data3[,6] == "45-59",11]=1
Data3[Data3[,6] == "60+",12]=1
###########################################################
Data3['Retired']= 0
Data3[Data3[,4] == "Retired Person",13]=1
Data3['Unemployed']= 0
Data3[Data3[,4] == "Unemployed",14]=1
Data3['Undertaking']= 0
Data3[Data3[,4] == "Public Sector Undertaking",15]=1
Data3['Private']= 0
Data3[Data3[,4] == "Service (Private)",16]=1
Data3['Housewife']= 0
Data3[Data3[,4] == "House wife",17]=1
Data3['Selfemployed']= 0
Data3[Data3[,4] == "Self-employed (Business activity)",18]=1
Data3['Professionalactivity']= 0
Data3[Data3[,4] == " Professional Activity",19]=1
Data3['Student']= 0
Data3[Data3[,4] == "Student",20]=1
Data3['Other']= 0
Data3[Data3[,4] == "Others (Please Specify)",21]=1
Data3['Farming']= 0
Data3[Data3[,4] == "Farming/Agriculture Activity",22]=1
Data3['Governmentservice']= 0
Data3[Data3[,4] == "Service (Government)",23]=1
###########################################################
#delete column Gender and replace it by 2 additional columns female and male (0 fale 1 true )
###########################################################
Data3['Female']= 0
Data3[Data3[,5] == "Female",24]=1
Data3['Male']= 0
Data3[Data3[,5] == "Male",25]=1
#we delete column age group
###########################################################
Data3$Age_group <- NULL
Data3$Year <- NULL
Data3$Typecode <- NULL
Data3$Type <- NULL
Data3$Gender <- NULL
Data3$State <- NULL
###########################################################
#creating target from total (0 or !0 ) to predict chance of suicide to
#a specific profile
#our interest is Data 3 with observations of suicides with the professional profile information about victims
# Random sampling
samplesize = 0.60 * nrow(Data3)
set.seed(80)
index = sample( seq_len ( nrow ( Data3 ) ), size = samplesize )
#scaling
maxs <- apply(Data3, 2, max)
mins <- apply(Data3, 2, min)
scaled <- as.data.frame(scale(Data3, center = mins, scale = maxs - mins))  # min-max scaling; without center = mins, scale() would centre each column at its mean
scaled <- scaled[colSums(!is.na(scaled)) > 0]
# Create training and test set
train = scaled[ index, ]
test = scaled[ -index, ]
y=train[,'Total']>0
scaled$Total <- NULL
library(e1071)
library(rpart)
# svm
#svm.model <- svm(y ~ ., data = train, gamma = 1)
#pred <- predict(model, test[,-10])
model <- svm(y~ .,train)
pred <- predict(model, test)
# points() needs an existing plot and equal-length x/y; pred has one value per
# test row, so plot it against the test rows directly
plot(seq_along(pred), pred, col = "red", pch = 16)
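# added evaluation sketch: cross-tabulate thresholded predictions against whether
# any suicides were recorded in the test rows (the 0.5 cut-off is illustrative,
# not a tuned threshold)
table(predicted = pred > 0.5, actual = test$Total > 0)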
##########################################################################
#association rules
######################################################################
#???decretize data
myData$Total <- NULL
myData[,2]<-discretize(myData[,2])
myData[,7]<-discretize(myData[,7])
#####################################################################
library(arules)
# find association rules with default settings
rules <- apriori(myData, parameter=list(support=0.05,confidence=0.5))
inspect(rules)
#appearance = list(rhs=c("Gender=Female", "Gender=Male"))
|
/Documents/MLDM1/DataMining/DMproject/svm.R
|
no_license
|
amalamellal/DataMining
|
R
| false | false | 3,832 |
r
|
#reading the dataset
library(ggplot2)
library(readr)
library(dplyr)
myData <- read.csv("C:/Users/amal_/Documents/MLDM1/DataMining/Suicide_India_2001_2012.csv" ,
header = TRUE, sep = ";",
quote = "\"'", dec = ".")
Data1 <- myData[(myData$Typecode == 'Causes'),]
Data2 <- myData[(myData$Typecode == 'Means_adopted'),]
Data3 <- myData[(myData$Typecode == 'Professional_Profile'),]
Data4 <- myData[(myData$Typecode == 'Education_Status'),]
Data5 <- myData[(myData$Typecode == 'Social_Status'),]
#our interest is Data 3 with observations of suicides with the professional profile information about victims
#Data3<- Data3[!(Data3$Total == 0),]
Data3<- Data3[!(Data3$Age_group =='0-100+'),]
Data3['0-14']= 0
Data3['15-29']= 0
Data3['30-44']= 0
Data3['45-59']=0
Data3['60+']= 0
Data3[Data3[,6] == "0-14",8]=1
Data3[Data3[,6] == "15-29",9]=1
Data3[Data3[,6] == "30-44",10]=1
Data3[Data3[,6] == "45-59",11]=1
Data3[Data3[,6] == "60+",12]=1
###########################################################
Data3['Retired']= 0
Data3[Data3[,4] == "Retired Person",13]=1
Data3['Unemployed']= 0
Data3[Data3[,4] == "Unemployed",14]=1
Data3['Undertaking']= 0
Data3[Data3[,4] == "Public Sector Undertaking",15]=1
Data3['Private']= 0
Data3[Data3[,4] == "Service (Private)",16]=1
Data3['Housewife']= 0
Data3[Data3[,4] == "House wife",17]=1
Data3['Selfemployed']= 0
Data3[Data3[,4] == "Self-employed (Business activity)",18]=1
Data3['Professionalactivity']= 0
Data3[Data3[,4] == " Professional Activity",19]=1
Data3['Student']= 0
Data3[Data3[,4] == "Student",20]=1
Data3['Other']= 0
Data3[Data3[,4] == "Others (Please Specify)",21]=1
Data3['Farming']= 0
Data3[Data3[,4] == "Farming/Agriculture Activity",22]=1
Data3['Governmentservice']= 0
Data3[Data3[,4] == "Service (Government)",23]=1
###########################################################
#delete column Gender and replace it by 2 additional columns female and male (0 fale 1 true )
###########################################################
Data3['Female']= 0
Data3[Data3[,5] == "Female",24]=1
Data3['Male']= 0
Data3[Data3[,5] == "Male",25]=1
#we delete column age group
###########################################################
Data3$Age_group <- NULL
Data3$Year <- NULL
Data3$Typecode <- NULL
Data3$Type <- NULL
Data3$Gender <- NULL
Data3$State <- NULL
###########################################################
#creating target from total (0 or !0 ) to predict chance of suicide to
#a specific profile
#our interest is Data 3 with observations of suicides with the professional profile information about victims
# Random sampling
samplesize = 0.60 * nrow(Data3)
set.seed(80)
index = sample( seq_len ( nrow ( Data3 ) ), size = samplesize )
#scaling
maxs <- apply(Data3, 2, max)
mins <- apply(Data3, 2, min)
scaled <- as.data.frame(scale(Data3, center = mins, scale = maxs - mins))  # min-max scaling; without center = mins, scale() would centre each column at its mean
scaled <- scaled[colSums(!is.na(scaled)) > 0]
# Create training and test set
train = scaled[ index, ]
test = scaled[ -index, ]
y=train[,'Total']>0
scaled$Total <- NULL
library(e1071)
library(rpart)
# svm
#svm.model <- svm(y ~ ., data = train, gamma = 1)
#pred <- predict(model, test[,-10])
model <- svm(y~ .,train)
pred <- predict(model, test)
# points() needs an existing plot and equal-length x/y; pred has one value per
# test row, so plot it against the test rows directly
plot(seq_along(pred), pred, col = "red", pch = 16)
##########################################################################
#association rules
######################################################################
# discretize the numeric columns (arules provides discretize(), so load it first)
library(arules)
myData$Total <- NULL
myData[,2]<-discretize(myData[,2])
myData[,7]<-discretize(myData[,7])
#####################################################################
# find association rules with default settings
rules <- apriori(myData, parameter=list(support=0.05,confidence=0.5))
inspect(rules)
#appearance = list(rhs=c("Gender=Female", "Gender=Male"))
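#Hedged sketch (added for illustration): the commented-out 'appearance' argument above can
#restrict the rule consequent to the gender items; this assumes Gender is stored as a factor
#so that its items are labelled "Gender=Female" / "Gender=Male"
rules_gender <- apriori(myData, parameter=list(support=0.05,confidence=0.5),
                        appearance=list(rhs=c("Gender=Female", "Gender=Male"), default="lhs"))
inspect(sort(rules_gender, by="lift"))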
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/posteriors.R
\name{Posterior.rjags}
\alias{Posterior.rjags}
\title{Returns samples from the posterior distributions of each model parameter using JAGS.}
\usage{
Posterior.rjags(tox, notox, sdose, ff, prior.alpha, burnin.itr, production.itr)
}
\arguments{
\item{tox}{A vector of length \code{k} showing the number of patients who had toxicities at each dose level}
\item{notox}{A vector of length \code{k} showing the number of patients who did not have toxicities at each dose level}
\item{sdose}{A vector of length \code{k} listing the standardised doses to
be used in the CRM model.}
\item{ff}{A string indicating the functional form of the dose-response
curve. Options are \describe{ \item{ht}{ 1-parameter hyperbolic tangent}
\item{logit1}{ 1-parameter logistic} \item{power}{ 1-parameter power}
\item{logit2}{ 2-parameter logistic} }}
\item{prior.alpha}{A list of length 3 containing the distributional
information for the prior. The first element is a number from 1-4 specifying
the type of distribution. Options are \enumerate{ \item Gamma(a, b), where
a=shape, b=scale: mean=a*b, variance=a*b*b \item Uniform(a, b), where a=min,
b=max \item Lognormal(a, b), where a=mean on the log scale, b=variance on the
log scale \item Bivariate Lognormal(a, b), where a=mean vector on the log
scale, b=Variance-covariance matrix on the log scale. This prior should be
used only in conjunction with a two-parameter logistic model. } The second
and third elements of the list are the parameters a and b, respectively.}
\item{burnin.itr}{Number of burn-in iterations (default 2000).}
\item{production.itr}{Number of production iterations (default 2000).}
}
\description{
If \code{ff = "logit2"} (i.e. a two-parameter logistic model is used), a matrix of dimensions
\code{production.itr}-by-2 is returned (the first and second columns containing the posterior samples for the
intercept and slope parameters respectively). Otherwise, a vector of length \code{production.itr}
is returned.
}
\examples{
## Dose-escalation cancer trial example as described in Neuenschwander et al 2008.
## Pre-defined doses
dose <- c(1, 2.5, 5, 10, 15, 20, 25, 30, 40, 50, 75, 100, 150, 200, 250)
## Pre-specified probabilities of toxicity
## [dose levels 11-15 not specified in the paper, and are for illustration only]
p.tox0 <- c(0.010, 0.015, 0.020, 0.025, 0.030, 0.040, 0.050,
0.100, 0.170, 0.300, 0.400, 0.500, 0.650, 0.800, 0.900)
## Data from the first 5 cohorts of 18 patients
tox <- c(0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0)
notox <- c(3, 4, 5, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
## Target toxicity level
target.tox <- 0.30
## Prior distribution for the MTD given a lognormal(0, 1.34^2) distribution for alpha
## and a power model functional form
prior.alpha <- list(3, 0, 1.34^2)
ff <- "power"
samples.alpha <- getprior(prior.alpha, 2000)
mtd <- find.x(ff, target.tox, alpha=samples.alpha)
hist(mtd)
## Standardised doses
sdose <- find.x(ff, p.tox0, alpha=1)
## Posterior distribution of the MTD (on standardised dose scale) using data
## from the cancer trial described in Neuenschwander et al 2008.
## Using rjags
\dontrun{
posterior.samples <- Posterior.rjags(tox, notox, sdose, ff, prior.alpha
, burnin.itr=2000, production.itr=2000)
}
}
\references{
Sweeting M., Mander A., Sabin T. \pkg{bcrm}: Bayesian Continual
Reassessment Method Designs for Phase I Dose-Finding Trials. \emph{Journal
of Statistical Software} (2013) 54: 1--26.
\doi{10.18637/jss.v054.i13}
}
\seealso{
\code{\link{bcrm}}, \code{\link{find.x}}
}
\author{
Michael Sweeting \email{mjs212@medschl.cam.ac.uk} (University of
Cambridge, UK), drawing on code originally developed by J. Jack Lee and Nan
Chen, Department of Biostatistics, the University of Texas M. D. Anderson
Cancer Center
}
|
/man/Posterior.rjags.Rd
|
no_license
|
mikesweeting/bcrm
|
R
| false | true | 3,863 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/posteriors.R
\name{Posterior.rjags}
\alias{Posterior.rjags}
\title{Returns samples from the posterior distributions of each model parameter using JAGS.}
\usage{
Posterior.rjags(tox, notox, sdose, ff, prior.alpha, burnin.itr, production.itr)
}
\arguments{
\item{tox}{A vector of length \code{k} showing the number of patients who had toxicities at each dose level}
\item{notox}{A vector of length \code{k} showing the number of patients who did not have toxicities at each dose level}
\item{sdose}{A vector of length \code{k} listing the standardised doses to
be used in the CRM model.}
\item{ff}{A string indicating the functional form of the dose-response
curve. Options are \describe{ \item{ht}{ 1-parameter hyperbolic tangent}
\item{logit1}{ 1-parameter logistic} \item{power}{ 1-parameter power}
\item{logit2}{ 2-parameter logistic} }}
\item{prior.alpha}{A list of length 3 containing the distributional
information for the prior. The first element is a number from 1-4 specifying
the type of distribution. Options are \enumerate{ \item Gamma(a, b), where
a=shape, b=scale: mean=a*b, variance=a*b*b \item Uniform(a, b), where a=min,
b=max \item Lognormal(a, b), where a=mean on the log scale, b=variance on the
log scale \item Bivariate Lognormal(a, b), where a=mean vector on the log
scale, b=Variance-covariance matrix on the log scale. This prior should be
used only in conjunction with a two-parameter logistic model. } The second
and third elements of the list are the parameters a and b, respectively.}
\item{burnin.itr}{Number of burn-in iterations (default 2000).}
\item{production.itr}{Number of production iterations (default 2000).}
}
\description{
If \code{ff = "logit2"} (i.e. a two-parameter logistic model is used), a matrix of dimensions
\code{production.itr}-by-2 is returned (the first and second columns containing the posterior samples for the
intercept and slope parameters respectively). Otherwise, a vector of length \code{production.itr}
is returned.
}
\examples{
## Dose-escalation cancer trial example as described in Neuenschwander et al 2008.
## Pre-defined doses
dose <- c(1, 2.5, 5, 10, 15, 20, 25, 30, 40, 50, 75, 100, 150, 200, 250)
## Pre-specified probabilities of toxicity
## [dose levels 11-15 not specified in the paper, and are for illustration only]
p.tox0 <- c(0.010, 0.015, 0.020, 0.025, 0.030, 0.040, 0.050,
0.100, 0.170, 0.300, 0.400, 0.500, 0.650, 0.800, 0.900)
## Data from the first 5 cohorts of 18 patients
tox <- c(0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0)
notox <- c(3, 4, 5, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
## Target toxicity level
target.tox <- 0.30
## Prior distribution for the MTD given a lognormal(0, 1.34^2) distribution for alpha
## and a power model functional form
prior.alpha <- list(3, 0, 1.34^2)
ff <- "power"
samples.alpha <- getprior(prior.alpha, 2000)
mtd <- find.x(ff, target.tox, alpha=samples.alpha)
hist(mtd)
## Standardised doses
sdose <- find.x(ff, p.tox0, alpha=1)
## Posterior distribution of the MTD (on standardised dose scale) using data
## from the cancer trial described in Neuenschwander et al 2008.
## Using rjags
\dontrun{
posterior.samples <- Posterior.rjags(tox, notox, sdose, ff, prior.alpha
, burnin.itr=2000, production.itr=2000)
}
}
\references{
Sweeting M., Mander A., Sabin T. \pkg{bcrm}: Bayesian Continual
Reassessment Method Designs for Phase I Dose-Finding Trials. \emph{Journal
of Statistical Software} (2013) 54: 1--26.
\doi{10.18637/jss.v054.i13}
}
\seealso{
\code{\link{bcrm}}, \code{\link{find.x}}
}
\author{
Michael Sweeting \email{mjs212@medschl.cam.ac.uk} (University of
Cambridge, UK), drawing on code originally developed by J. Jack Lee and Nan
Chen, Department of Biostatistics, the University of Texas M. D. Anderson
Cancer Center
}
|
## The following functions compute the inverse of an invertible matrix.
## Since computing a matrix inverse is computationally expensive, the following
## functions store the computed inverse so it can be reused rather than being
## computed again. Also, to avoid errors caused by operating on empty input, the
## matrix argument has a default value, which is NULL.
## The first function, makeCacheMatrix creates a special "matrix",
## which is really a list containing a function to
## set the matrix
## get the matrix
## set the matrix named inverse
## get the matrix named inverse
## default value of x is NULL
makeCacheMatrix <- function(x = NULL) {
m <- NULL
set <- function(y = NULL) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The following function calculates the inverse of the special invertible
## "matrix" created with the above function.
## However, it first checks to see if the inverse matrix has already
## been calculated. If so, it gets the inverse matrix from the cache and
## skips the computation. Otherwise, it calculates the inverse of the matrix
## and sets the inverse matrix in the cache via the setinverse function.
## Since the default value of x is NULL in makeCacheMatrix(), cacheSolve can detect
## whether the matrix has been set via set() or not. If the matrix is not set yet, it outputs a message.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
if(is.null(data)) {
message("please set invertible matrix")
return(data)
}
m <- solve(data, ...)
x$setinverse(m)
m
}
## example usage.
## xxx <- makeCacheMatrix ()
## cacheSolve(xxx)
## you can get message
## xx <- matrix(c(1,0,0,1),2,2)
## xxx$set(xx)
## cacheSolve(xxx)
## function computes inverse matrix of xxx
## cacheSolve(xxx)
## function read from cache
|
/cachematrix.R
|
no_license
|
yamamoto4423/ProgrammingAssignment2
|
R
| false | false | 2,236 |
r
|
## The following functions compute the inverse of an invertible matrix.
## Since computing a matrix inverse is computationally expensive, the following
## functions store the computed inverse so it can be reused rather than being
## computed again. Also, to avoid errors caused by operating on empty input, the
## matrix argument has a default value, which is NULL.
## The first function, makeCacheMatrix creates a special "matrix",
## which is really a list containing a function to
## set the matrix
## get the matrix
## set the matrix named inverse
## get the matrix named inverse
## default value of x is NULL
makeCacheMatrix <- function(x = NULL) {
m <- NULL
set <- function(y = NULL) {
x <<- y
m <<- NULL
}
get <- function() x
setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The following function calculates the inverse of the special invertible
## "matrix" created with the above function.
## However, it first checks to see if the inverse matrix has already
## been calculated. If so, it gets the inverse matrix from the cache and
## skips the computation. Otherwise, it calculates the inverse of the matrix
## and sets the inverse matrix in the cache via the setinverse function.
## Since the default value of x is NULL in makeCacheMatrix(), cacheSolve can detect
## whether the matrix has been set via set() or not. If the matrix is not set yet, it outputs a message.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
if(is.null(data)) {
message("please set invertible matrix")
return(data)
}
m <- solve(data, ...)
x$setinverse(m)
m
}
## example usage.
## xxx <- makeCacheMatrix ()
## cacheSolve(xxx)
## you can get message
## xx <- matrix(c(1,0,0,1),2,2)
## xxx$set(xx)
## cacheSolve(xxx)
## function computes inverse matrix of xxx
## cacheSolve(xxx)
## function read from cache
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeneGeneExpressionPlotter.R
\name{setGenes,GeneGeneExpressionPlotter-method}
\alias{setGenes,GeneGeneExpressionPlotter-method}
\alias{setGenes}
\title{specify the pair of genes, typically a TF and a targetGene}
\usage{
\S4method{setGenes}{GeneGeneExpressionPlotter}(obj, gene1, gene2)
}
\arguments{
\item{obj}{An object of class GeneGeneExpressionPlotter}
\item{gene1}{A character string}
\item{gene2}{A character string}
}
\description{
specify the pair of genes, typically a TF and a targetGene
}
|
/man/setGenes.Rd
|
permissive
|
PriceLab/TrenaViz
|
R
| false | true | 579 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeneGeneExpressionPlotter.R
\name{setGenes,GeneGeneExpressionPlotter-method}
\alias{setGenes,GeneGeneExpressionPlotter-method}
\alias{setGenes}
\title{specify the pair of genes, typically a TF and a targetGene}
\usage{
\S4method{setGenes}{GeneGeneExpressionPlotter}(obj, gene1, gene2)
}
\arguments{
\item{obj}{An object of class GeneGeneExpressionPlotter}
\item{gene1}{A character string}
\item{gene2}{A character string}
}
\description{
specify the pair of genes, typically a TF and a targetGene
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_logistic_regression.R
\name{mod_logistic_regression_ui}
\alias{mod_logistic_regression_ui}
\alias{mod_logistic_regression_server}
\title{mod_logistic_regression_ui and mod_logistic_regression_server}
\usage{
mod_logistic_regression_ui(id)
mod_logistic_regression_server(input, output, session)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module.
}
\keyword{internal}
|
/man/mod_logistic_regression.Rd
|
permissive
|
PascalCrepey/BiostatsAppsMPH
|
R
| false | true | 539 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_logistic_regression.R
\name{mod_logistic_regression_ui}
\alias{mod_logistic_regression_ui}
\alias{mod_logistic_regression_server}
\title{mod_logistic_regression_ui and mod_logistic_regression_server}
\usage{
mod_logistic_regression_ui(id)
mod_logistic_regression_server(input, output, session)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
}
\description{
A shiny Module.
}
\keyword{internal}
|
library(treeducken)
# test species tree output is a list of trees with correct length
test_that("sim_sptree_bdp produces the right number of trees", {
expect_equal(length(sim_sptree_bdp(1.0, 0.5, 10, 10)), 10)
expect_equal(length(sim_sptree_bdp(1.0, 0.5, 5, 10)), 5)
expect_equal(length(sim_sptree_bdp(1.0, 0.5, 20, 10)), 20)
})
get_number_extant_tips <- function(tr){
tip_vec <- vector(length = length(tr))
for(i in 1:length(tr)){
pruned_tr <- geiger::drop.extinct(tr[[i]], tol = 0.001)
tip_vec[i] <- length(pruned_tr$tip.label)
}
mean(tip_vec)
}
# test that tree has correct extant tips (gsa)
test_that("sim_sptree_bdp produces the right number of extant tips", {
expect_equal(get_number_extant_tips(sim_sptree_bdp(1.0, 0.5, 10, 10)), 10)
expect_equal(get_number_extant_tips(sim_sptree_bdp(1.0, 0.5, 10, 5)), 5)
expect_equal(get_number_extant_tips(sim_sptree_bdp(1.0, 0.5, 10, 20)), 20)
})
# test that species tree produces tree within correct distribution (gsa)
get_treesim_treedepth_dist <- function(sbr, sdr, nt, reps){
trees <- TreeSim::sim.bd.taxa(lambda = sbr, mu = sdr, n = nt, numbsim = reps)
    sapply(trees, function(t) max(phytools::nodeHeights(t))) # one tree depth per simulated tree
}
# test that tree has correct extant tips
test_that("sim_sptree_bdp_time produces the right number of trees", {
expect_equal(length(sim_sptree_bdp_time(1.0, 0.5, 10, 2.0)), 10)
expect_equal(length(sim_sptree_bdp_time(1.0, 0.5, 5, 2.0)), 5)
expect_equal(length(sim_sptree_bdp_time(1.0, 0.5, 20, 2.0)), 20)
})
get_length_tree <- function(tr){
tree_depth <- vector(length = length(tr))
for(i in 1:length(tr)){
tree_depth[i] <- max(phytools::nodeHeights(tr[[i]])) + tr[[i]]$root.edge
}
mean(tree_depth)
}
# test that tree has correct length (simple)
test_that("sim_sptree_bdp_time produces the right length trees", {
expect_equal(get_length_tree(sim_sptree_bdp_time(1.0, 0.5, 10, 1.0)), 1.0)
expect_equal(get_length_tree(sim_sptree_bdp_time(1.0, 0.5, 10, 2.0)), 2.0)
expect_equal(get_length_tree(sim_sptree_bdp_time(1.0, 0.5, 10, 5.0)), 5.0)
})
# test that species tree produces tree within correct distribution (simple)
# test_that("sim_sptree_bdp_time produces trees under the right distribution"){
#
# }
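# Hedged sketch (added for illustration) of the distribution test left as a TODO above:
# compare tree depths from sim_sptree_bdp against TreeSim::sim.bd.taxa simulated with the
# same rates using a Kolmogorov-Smirnov test. The replicate count and p-value cutoff are
# assumptions, not values chosen by the package authors.
# test_that("sim_sptree_bdp tree depths are distributed like TreeSim trees", {
#   depths_td <- sapply(sim_sptree_bdp(1.0, 0.5, 100, 10),
#                       function(t) max(phytools::nodeHeights(t)))
#   depths_ts <- sapply(TreeSim::sim.bd.taxa(n = 10, numbsim = 100, lambda = 1.0, mu = 0.5),
#                       function(t) max(phytools::nodeHeights(t)))
#   expect_gt(stats::ks.test(depths_td, depths_ts)$p.value, 0.01)
# })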
|
/tests/testthat/test_sptree_gsa.R
|
no_license
|
jjustison/rtreeducken
|
R
| false | false | 2,261 |
r
|
library(treeducken)
# test species tree output is a list of trees with correct length
test_that("sim_sptree_bdp produces the right number of trees", {
expect_equal(length(sim_sptree_bdp(1.0, 0.5, 10, 10)), 10)
expect_equal(length(sim_sptree_bdp(1.0, 0.5, 5, 10)), 5)
expect_equal(length(sim_sptree_bdp(1.0, 0.5, 20, 10)), 20)
})
get_number_extant_tips <- function(tr){
tip_vec <- vector(length = length(tr))
for(i in 1:length(tr)){
pruned_tr <- geiger::drop.extinct(tr[[i]], tol = 0.001)
tip_vec[i] <- length(pruned_tr$tip.label)
}
mean(tip_vec)
}
# test that tree has correct extant tips (gsa)
test_that("sim_sptree_bdp produces the right number of extant tips", {
expect_equal(get_number_extant_tips(sim_sptree_bdp(1.0, 0.5, 10, 10)), 10)
expect_equal(get_number_extant_tips(sim_sptree_bdp(1.0, 0.5, 10, 5)), 5)
expect_equal(get_number_extant_tips(sim_sptree_bdp(1.0, 0.5, 10, 20)), 20)
})
# test that species tree produces tree within correct distribution (gsa)
get_treesim_treedepth_dist <- function(sbr, sdr, nt, reps){
trees <- TreeSim::sim.bd.taxa(lambda = sbr, mu = sdr, n = nt, numbsim = reps)
    sapply(trees, function(t) max(phytools::nodeHeights(t))) # one tree depth per simulated tree
}
# test that tree has correct extant tips
test_that("sim_sptree_bdp_time produces the right number of trees", {
expect_equal(length(sim_sptree_bdp_time(1.0, 0.5, 10, 2.0)), 10)
expect_equal(length(sim_sptree_bdp_time(1.0, 0.5, 5, 2.0)), 5)
expect_equal(length(sim_sptree_bdp_time(1.0, 0.5, 20, 2.0)), 20)
})
get_length_tree <- function(tr){
tree_depth <- vector(length = length(tr))
for(i in 1:length(tr)){
tree_depth[i] <- max(phytools::nodeHeights(tr[[i]])) + tr[[i]]$root.edge
}
mean(tree_depth)
}
# test that tree has correct length (simple)
test_that("sim_sptree_bdp_time produces the right length trees", {
expect_equal(get_length_tree(sim_sptree_bdp_time(1.0, 0.5, 10, 1.0)), 1.0)
expect_equal(get_length_tree(sim_sptree_bdp_time(1.0, 0.5, 10, 2.0)), 2.0)
expect_equal(get_length_tree(sim_sptree_bdp_time(1.0, 0.5, 10, 5.0)), 5.0)
})
# test that species tree produces tree within correct distribution (simple)
# test_that("sim_sptree_bdp_time produces trees under the right distribution"){
#
# }
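# Hedged sketch (added for illustration) of the distribution test left as a TODO above:
# compare tree depths from sim_sptree_bdp against TreeSim::sim.bd.taxa simulated with the
# same rates using a Kolmogorov-Smirnov test. The replicate count and p-value cutoff are
# assumptions, not values chosen by the package authors.
# test_that("sim_sptree_bdp tree depths are distributed like TreeSim trees", {
#   depths_td <- sapply(sim_sptree_bdp(1.0, 0.5, 100, 10),
#                       function(t) max(phytools::nodeHeights(t)))
#   depths_ts <- sapply(TreeSim::sim.bd.taxa(n = 10, numbsim = 100, lambda = 1.0, mu = 0.5),
#                       function(t) max(phytools::nodeHeights(t)))
#   expect_gt(stats::ks.test(depths_td, depths_ts)$p.value, 0.01)
# })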
|
## This function creates a caching wrapper around a matrix
## and returns the list of get/set operations.
makeCacheMatrix <- function(x = matrix()) {
matrix <- NULL
set <- function(y) {
x <<- y
matrix <<- NULL
}
get <- function() x
setInverse <- function(inv) matrix <<- inv
getInverse <- function() matrix
        list(set = set, get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## This function returns the inverse of the matrix, computing and
## caching it first if a cached inverse does not exist yet.
cacheSolve <- function(x, ...) {
matrix <- x$getInverse()
if(!is.null(matrix)) {
message("getting cached data")
return(matrix)
}
data <- x$get()
matrix <- solve(data, ...)
x$setInverse(matrix)
matrix
}
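## Hedged usage sketch (added for illustration; not part of the original assignment file):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse and stores it in the cache
## cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse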
|
/cachematrix.R
|
no_license
|
PaulNicolasHunter/ProgrammingAssignment2
|
R
| false | false | 690 |
r
|
## This function creates a caching wrapper around a matrix
## and returns the list of get/set operations.
makeCacheMatrix <- function(x = matrix()) {
matrix <- NULL
set <- function(y) {
x <<- y
matrix <<- NULL
}
get <- function() x
setInverse <- function(inv) matrix <<- inv
getInverse <- function() matrix
        list(set = set, get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## This function returns the inverse of the matrix, computing and
## caching it first if a cached inverse does not exist yet.
cacheSolve <- function(x, ...) {
matrix <- x$getInverse()
if(!is.null(matrix)) {
message("getting cached data")
return(matrix)
}
data <- x$get()
matrix <- solve(data, ...)
x$setInverse(matrix)
matrix
}
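## Hedged usage sketch (added for illustration; not part of the original assignment file):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm)   # computes the inverse and stores it in the cache
## cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse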
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix takes 1 argument, a matrix, and returns a list
## with the functions to get and set the matrix and inverse.
makeCacheMatrix <- function(x = matrix()) {
i<-NULL
set<-function(y){
x<<-y
i<<-NULL
}
get<-function()x
setinverse <- function(inverse) i<<-inverse
getinverse <- function() i
list(set=set,get=get,getinverse=getinverse,setinverse=setinverse)
}
## cacheSolve returns the inverse of a matrix set in makeCacheMatrix
## It first checks whether the list created in makeCacheMatrix already holds a cached inverse matrix, i.
## If there is no cached inverse, it computes, caches and returns the inverse
cacheSolve <- function(x, ...) {
i<-x$getinverse()
if(!is.null(i)){
message("getting cached data")
return(i)
}
data<-x$get()
i<-solve(data)
x$setinverse(i)
i
}
|
/cachematrix.R
|
no_license
|
mnandwe/ProgrammingAssignment2
|
R
| false | false | 1,035 |
r
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix takes 1 argument, a matrix, and returns a list
## with the functions to get and set the matrix and inverse.
makeCacheMatrix <- function(x = matrix()) {
i<-NULL
set<-function(y){
x<<-y
i<<-NULL
}
get<-function()x
setinverse <- function(inverse) i<<-inverse
getinverse <- function() i
list(set=set,get=get,getinverse=getinverse,setinverse=setinverse)
}
## cacheSolve returns the inverse of a matrix set in makeCacheMatrix
## It first checks whether the list created in makeCacheMatrix already holds a cached inverse matrix, i.
## If there is no cached inverse, it computes, caches and returns the inverse
cacheSolve <- function(x, ...) {
i<-x$getinverse()
if(!is.null(i)){
message("getting cached data")
return(i)
}
data<-x$get()
i<-solve(data)
x$setinverse(i)
i
}
|
# Keep track of how well we can find results, and how consistent the recommendations are,
# based on varying numbers of features
featureTargetControl <- list(
importance = list(),
fit=NA,
control = NA,
consistency = NA,
currentRow = NA,
sensitivity = NA
)
setupFeatureTargets <- function(saveFolder){
suppressWarnings(dir.create(file.path(saveFolder,"consistency")))
suppressWarnings(dir.create(file.path(saveFolder,"consistency_combined")))
for(item in inputs$featureImportanceResult){
featureTargetControl$importance[item$key] <<- item$max
}
featureTargetControl$consistency <<- matrix(1,nrow=length(inputs$featureTargets), ncol=length(inputs$featureTargets))
featureTargetControl$consistency[upper.tri(featureTargetControl$consistency)] <<- 0
featureTargetControl$consistency <<- as.data.frame(featureTargetControl$consistency)
names(featureTargetControl$consistency) <<- names(featureTargetControl$importance)[which(names(featureTargetControl$importance) %in% names(inputs$featureTargets))]
featureTargetControl$control <<- featureTargetControl$consistency
featureTargetControl$fit <<- featureTargetControl$consistency
featureTargetControl$sensitivity <<- featureTargetControl$consistency
featureTargetControl$currentRow <<- sample(1:nrow(featureTargetControl$control),1)
featuresTemp <- data.frame(matrix(NA,nrow=nrow(featureTargetControl$control),ncol=ncol(featureTargetControl$control)))
names(featuresTemp) <- names(featureTargetControl$control)
#Additional columns for fit: each feature and total
featuresTempFit <- featuresTemp
names(featuresTempFit) <- paste0(names(featuresTemp),"_fit")
featureTargetControl$fit <<- cbind(featureTargetControl$fit,featuresTempFit)
featureTargetControl$fit$total_fit <<- rep(NA,nrow(featureTargetControl$fit))
#Additional columns for sensitivity: each feature
featureTargetControl$sensitivity <<- cbind(featureTargetControl$sensitivity,featuresTemp)
#Additional columns for consistency: each method and total
methodsTemp <- data.frame(matrix(NA,nrow=nrow(featureTargetControl$control),ncol=length(inputs$methods)+1))
names(methodsTemp) <- c(names(inputs$methods),"total")
featureTargetControl$consistency <<- cbind(featureTargetControl$consistency,methodsTemp)
for(i in 1:nrow(featureTargetControl$consistency)){
suppressWarnings(dir.create(file.path(saveFolder,"consistency",i)))
}
}
getFeatureTargets <- function(){
featureTargetControl$currentRow <<- (featureTargetControl$currentRow)%%nrow(featureTargetControl$control)+1
print("currentRow")
print(featureTargetControl$currentRow)
featuresToUse <- names(featureTargetControl$control)[featureTargetControl$control[featureTargetControl$currentRow,]==1]
return(inputs$featureTargets[featuresToUse])
}
classify_performance <- function(perf,methods,metric,tieThreshold){
tempTable <- abs(perf[,paste0(methods,"_",metric)])
bestPerf <- apply(tempTable,1,min)
classified <- 1*(tempTable < bestPerf+tieThreshold)
colnames(classified) <- methods
return(classified)
}
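# Hedged toy illustration (added; the data frame below is made up, not from the pipeline):
# perf_toy <- data.frame(A_MSE = c(0.10, 0.30), B_MSE = c(0.10005, 0.10))
# classify_performance(perf_toy, methods = c("A", "B"), metric = "MSE", tieThreshold = 0.0001)
# # row 1 is a tie within the threshold (both methods flagged 1); row 2 flags only method B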
measure_consistency <- function(saveFolder){
print("measuring consistency")
for(rowIndex in 1:nrow(featureTargetControl$control)){
print(paste0("Row Index: ",rowIndex))
#count performance files
basePath <- file.path(saveFolder,"consistency",rowIndex)
results <- list.files(basePath)
nResults <- length(results)/2
print(nResults)
if(nResults > 10){
            #initialise as NULL so the is.null() checks below stay valid once data frames are assigned
            performance_results <- NULL
            feature_results <- NULL
#load performance and features
for(item in results){
if(strsplit(item,"_")[[1]][[1]]=="performance"){
                    if(is.null(performance_results)){
performance_results <- read.csv(file.path(basePath,item))
}else{
performance_results <- rbind(performance_results,read.csv(file.path(basePath,item)))
}
}
else if(strsplit(item,"_")[[1]][[1]]=="featuresDist"){
                    if(is.null(feature_results)){
feature_results <- read.csv(file.path(basePath,item))
}else{
feature_results <- rbind(feature_results,read.csv(file.path(basePath,item)))
}
}
}
#Order and clear
features_and_performance <- sqldf("select * from feature_results as f join performance_results as p on f.seed=p.seed")
features_and_performance[,which(names(features_and_performance)=="seed")[[2]]]<- NULL
performance_results <- NULL
feature_results <- NULL
if(!("Conf_over_ATE" %in% names(features_and_performance))){
features_and_performance$Conf_over_ATE <- features_and_performance$Conf/features_and_performance$ATE
}
#Classify performance
classified <- classify_performance(features_and_performance,names(inputs$methods),"MSE",0.0001)
features_and_performance <- cbind(features_and_performance,classified)
#Assess fit
featuresToUse <- names(featureTargetControl$control)[featureTargetControl$control[rowIndex,]==1]
fits <- data.frame(matrix(nrow=nrow(features_and_performance),ncol=(length(featuresToUse))))
colnames(fits) <- paste0(featuresToUse,"_fit")
for(fName in featuresToUse){
fits[,paste0(fName,"_fit")] <- abs((features_and_performance[,fName]-inputs$featureTargets[[fName]])/inputs$featureTargets[[fName]])
}
fits$total_fit <- apply(fits,1,sum)
features_and_performance <- cbind(features_and_performance,fits)
featureTargetControl$fit[rowIndex,paste0(c(featuresToUse,"total"),"_fit")] <<- apply(fits[,paste0(c(featuresToUse,"total"),"_fit")],2,mean)
#Limit to well-fit only
cutoff <- length(featuresToUse)*0.05
goodFits <- features_and_performance[which(abs(features_and_performance$total_fit)<cutoff),]
goodFits_Min_N <- 10
if(nrow(goodFits) > goodFits_Min_N){
#Assess consistency
classified_consistency <- 2*abs(0.5-apply(goodFits[,names(inputs$methods)],2,mean))
total_consistency <- mean(classified_consistency)
featureTargetControl$consistency[rowIndex,names(inputs$methods)] <<- classified_consistency
featureTargetControl$consistency[rowIndex,"total"] <<- total_consistency
}
#Assess sensitivity
# TO DO: needs to have some variation in fit to work (could target slight variations, or perturb the CSs)
}
}
baseDir <- file.path(saveFolder,"consistency_combined")
write.csv(featureTargetControl$control,file.path(baseDir,"control.csv"))
write.csv(featureTargetControl$fit,file.path(baseDir,"fit.csv"))
write.csv(featureTargetControl$consistency,file.path(baseDir,"consistency.csv"))
write.csv(featureTargetControl$sensitivity,file.path(baseDir,"sensitivity.csv"))
}
|
/measure_consistency.R
|
no_license
|
ScottZimmerman/SER2018
|
R
| false | false | 6,504 |
r
|
# Keep track of how well we can find results, and how consistent the recommendations are,
# based on varying numbers of features
featureTargetControl <- list(
importance = list(),
fit=NA,
control = NA,
consistency = NA,
currentRow = NA,
sensitivity = NA
)
setupFeatureTargets <- function(saveFolder){
suppressWarnings(dir.create(file.path(saveFolder,"consistency")))
suppressWarnings(dir.create(file.path(saveFolder,"consistency_combined")))
for(item in inputs$featureImportanceResult){
featureTargetControl$importance[item$key] <<- item$max
}
featureTargetControl$consistency <<- matrix(1,nrow=length(inputs$featureTargets), ncol=length(inputs$featureTargets))
featureTargetControl$consistency[upper.tri(featureTargetControl$consistency)] <<- 0
featureTargetControl$consistency <<- as.data.frame(featureTargetControl$consistency)
names(featureTargetControl$consistency) <<- names(featureTargetControl$importance)[which(names(featureTargetControl$importance) %in% names(inputs$featureTargets))]
featureTargetControl$control <<- featureTargetControl$consistency
featureTargetControl$fit <<- featureTargetControl$consistency
featureTargetControl$sensitivity <<- featureTargetControl$consistency
featureTargetControl$currentRow <<- sample(1:nrow(featureTargetControl$control),1)
featuresTemp <- data.frame(matrix(NA,nrow=nrow(featureTargetControl$control),ncol=ncol(featureTargetControl$control)))
names(featuresTemp) <- names(featureTargetControl$control)
#Additional columns for fit: each feature and total
featuresTempFit <- featuresTemp
names(featuresTempFit) <- paste0(names(featuresTemp),"_fit")
featureTargetControl$fit <<- cbind(featureTargetControl$fit,featuresTempFit)
featureTargetControl$fit$total_fit <<- rep(NA,nrow(featureTargetControl$fit))
#Additional columns for sensitivity: each feature
featureTargetControl$sensitivity <<- cbind(featureTargetControl$sensitivity,featuresTemp)
#Additional columns for consistency: each method and total
methodsTemp <- data.frame(matrix(NA,nrow=nrow(featureTargetControl$control),ncol=length(inputs$methods)+1))
names(methodsTemp) <- c(names(inputs$methods),"total")
featureTargetControl$consistency <<- cbind(featureTargetControl$consistency,methodsTemp)
for(i in 1:nrow(featureTargetControl$consistency)){
suppressWarnings(dir.create(file.path(saveFolder,"consistency",i)))
}
}
getFeatureTargets <- function(){
featureTargetControl$currentRow <<- (featureTargetControl$currentRow)%%nrow(featureTargetControl$control)+1
print("currentRow")
print(featureTargetControl$currentRow)
featuresToUse <- names(featureTargetControl$control)[featureTargetControl$control[featureTargetControl$currentRow,]==1]
return(inputs$featureTargets[featuresToUse])
}
classify_performance <- function(perf,methods,metric,tieThreshold){
tempTable <- abs(perf[,paste0(methods,"_",metric)])
bestPerf <- apply(tempTable,1,min)
classified <- 1*(tempTable < bestPerf+tieThreshold)
colnames(classified) <- methods
return(classified)
}
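# Hedged toy illustration (added; the data frame below is made up, not from the pipeline):
# perf_toy <- data.frame(A_MSE = c(0.10, 0.30), B_MSE = c(0.10005, 0.10))
# classify_performance(perf_toy, methods = c("A", "B"), metric = "MSE", tieThreshold = 0.0001)
# # row 1 is a tie within the threshold (both methods flagged 1); row 2 flags only method B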
measure_consistency <- function(saveFolder){
print("measuring consistency")
for(rowIndex in 1:nrow(featureTargetControl$control)){
print(paste0("Row Index: ",rowIndex))
#count performance files
basePath <- file.path(saveFolder,"consistency",rowIndex)
results <- list.files(basePath)
nResults <- length(results)/2
print(nResults)
if(nResults > 10){
            #initialise as NULL so the is.null() checks below stay valid once data frames are assigned
            performance_results <- NULL
            feature_results <- NULL
#load performance and features
for(item in results){
if(strsplit(item,"_")[[1]][[1]]=="performance"){
                    if(is.null(performance_results)){
performance_results <- read.csv(file.path(basePath,item))
}else{
performance_results <- rbind(performance_results,read.csv(file.path(basePath,item)))
}
}
else if(strsplit(item,"_")[[1]][[1]]=="featuresDist"){
                    if(is.null(feature_results)){
feature_results <- read.csv(file.path(basePath,item))
}else{
feature_results <- rbind(feature_results,read.csv(file.path(basePath,item)))
}
}
}
#Order and clear
features_and_performance <- sqldf("select * from feature_results as f join performance_results as p on f.seed=p.seed")
features_and_performance[,which(names(features_and_performance)=="seed")[[2]]]<- NULL
performance_results <- NULL
feature_results <- NULL
if(!("Conf_over_ATE" %in% names(features_and_performance))){
features_and_performance$Conf_over_ATE <- features_and_performance$Conf/features_and_performance$ATE
}
#Classify performance
classified <- classify_performance(features_and_performance,names(inputs$methods),"MSE",0.0001)
features_and_performance <- cbind(features_and_performance,classified)
#Assess fit
featuresToUse <- names(featureTargetControl$control)[featureTargetControl$control[rowIndex,]==1]
fits <- data.frame(matrix(nrow=nrow(features_and_performance),ncol=(length(featuresToUse))))
colnames(fits) <- paste0(featuresToUse,"_fit")
for(fName in featuresToUse){
fits[,paste0(fName,"_fit")] <- abs((features_and_performance[,fName]-inputs$featureTargets[[fName]])/inputs$featureTargets[[fName]])
}
fits$total_fit <- apply(fits,1,sum)
features_and_performance <- cbind(features_and_performance,fits)
featureTargetControl$fit[rowIndex,paste0(c(featuresToUse,"total"),"_fit")] <<- apply(fits[,paste0(c(featuresToUse,"total"),"_fit")],2,mean)
#Limit to well-fit only
cutoff <- length(featuresToUse)*0.05
goodFits <- features_and_performance[which(abs(features_and_performance$total_fit)<cutoff),]
goodFits_Min_N <- 10
if(nrow(goodFits) > goodFits_Min_N){
#Assess consistency
classified_consistency <- 2*abs(0.5-apply(goodFits[,names(inputs$methods)],2,mean))
total_consistency <- mean(classified_consistency)
featureTargetControl$consistency[rowIndex,names(inputs$methods)] <<- classified_consistency
featureTargetControl$consistency[rowIndex,"total"] <<- total_consistency
}
#Assess sensitivity
# TO DO: needs to have some variation in fit to work (could target slight variations, or perturb the CSs)
}
}
baseDir <- file.path(saveFolder,"consistency_combined")
write.csv(featureTargetControl$control,file.path(baseDir,"control.csv"))
write.csv(featureTargetControl$fit,file.path(baseDir,"fit.csv"))
write.csv(featureTargetControl$consistency,file.path(baseDir,"consistency.csv"))
write.csv(featureTargetControl$sensitivity,file.path(baseDir,"sensitivity.csv"))
}
|
##############
#File: plot1.R
##############
#
# reading data
setwd("C:/Users/jb/Documents/GitHub/ExData_Plotting1")
#myfile<-"C:/Users/jb/Documents/Coursera_DataScience/Course_03_GettingCleaningData/PA1/household_power_consumption.txt"
myfile<-"http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp<- tempfile()
download.file(myfile,temp)
con <- unz(temp, "household_power_consumption.txt")
#data_short<-read.table(con,comment.char="", nrows=3, sep=";")
col_class<-c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")
data<-read.table(con,na.strings="?",comment.char="", sep=";",colClasses=col_class,header=T)
unlink(temp)
indices<-(data[,1]=="1/2/2007")|(data[,1]=="2/2/2007")
data_ext<-data[indices,]
dt<-paste(data_ext[,1],data_ext[,2])
dt_new<-strptime(dt,"%d/%m/%Y %H:%M:%S")
data_new<-cbind(dt_new,data_ext[,3:9])
####
#figure1
png("./plot1.png",width = 480, height = 480, units = "px")
hist(data_new[,2],col="red",xlim=c(0.0,6.0),ylim=c(0.0,1200.0),axes=F,main="Global Active Power",xlab="Global active power (kilowatts)",ylab="Frequency")
axis(1,at=seq(0,6,2), labels=T)
axis(2,at=seq(0,1200,200),labels=T)
dev.off()
|
/plot1.R
|
no_license
|
jebestock/ExploreData_PA1
|
R
| false | false | 1,237 |
r
|
##############
#File: plot1.R
##############
#
# reading data
setwd("C:/Users/jb/Documents/GitHub/ExData_Plotting1")
#myfile<-"C:/Users/jb/Documents/Coursera_DataScience/Course_03_GettingCleaningData/PA1/household_power_consumption.txt"
myfile<-"http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
temp<- tempfile()
download.file(myfile,temp)
con <- unz(temp, "household_power_consumption.txt")
#data_short<-read.table(con,comment.char="", nrows=3, sep=";")
col_class<-c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")
data<-read.table(con,na.strings="?",comment.char="", sep=";",colClasses=col_class,header=T)
unlink(temp)
indices<-(data[,1]=="1/2/2007")|(data[,1]=="2/2/2007")
data_ext<-data[indices,]
dt<-paste(data_ext[,1],data_ext[,2])
dt_new<-strptime(dt,"%d/%m/%Y %H:%M:%S")
data_new<-cbind(dt_new,data_ext[,3:9])
####
#figure1
png("./plot1.png",width = 480, height = 480, units = "px")
hist(data_new[,2],col="red",xlim=c(0.0,6.0),ylim=c(0.0,1200.0),axes=F,main="Global Active Power",xlab="Global active power (kilowatts)",ylab="Frequency")
axis(1,at=seq(0,6,2), labels=T)
axis(2,at=seq(0,1200,200),labels=T)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.oneway.R
\name{summary.oneway}
\alias{summary.oneway}
\title{Summary of One Way ANOVA}
\usage{
\method{summary}{oneway}(x, ...)
}
\arguments{
\item{x}{object of class \code{oneway}}
\item{...}{parameters passed to print function}
}
\description{
Prints summary of oneway ANOVA
}
\examples{
mileage <- oneway(hwy ~ class, cars)
summary(mileage)
}
|
/man/summary.oneway.Rd
|
permissive
|
nurahjaradat/onewayAnova
|
R
| false | true | 434 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.oneway.R
\name{summary.oneway}
\alias{summary.oneway}
\title{Summary of One Way ANOVA}
\usage{
\method{summary}{oneway}(x, ...)
}
\arguments{
\item{x}{object of class \code{oneway}}
\item{...}{parameters passed to print function}
}
\description{
Prints summary of oneway ANOVA
}
\examples{
mileage <- oneway(hwy ~ class, cars)
summary(mileage)
}
|
\name{counts.question_type}
\alias{counts.question_type}
\title{Question Counts}
\usage{
\method{counts}{question_type}(x, ...)
}
\arguments{
\item{x}{The \code{\link[qdap]{question_type}} object.}
\item{\ldots}{ignored}
}
\description{
View question_type counts.
}
\details{
question_type Method for counts
}
|
/man/counts.question_type.Rd
|
no_license
|
craigcitro/qdap
|
R
| false | false | 316 |
rd
|
\name{counts.question_type}
\alias{counts.question_type}
\title{Question Counts}
\usage{
\method{counts}{question_type}(x, ...)
}
\arguments{
\item{x}{The \code{\link[qdap]{question_type}} object.}
\item{\ldots}{ignored}
}
\description{
View question_type counts.
}
\details{
question_type Method for counts
}
|
#Getting started with Naive Bayes
#Install the package
#install.packages("e1071")
#Loading the library
library(e1071)
?naiveBayes #The documentation also contains an example implementation of Titanic dataset
#Next load the Titanic dataset
data("Titanic")
#Save into a data frame and view it
Titanic_df=as.data.frame(Titanic)
#Creating data from table
repeating_sequence=rep.int(seq_len(nrow(Titanic_df)), Titanic_df$Freq) #This will repeat each combination equal to the frequency of each combination
#Create the dataset by row repetition created
Titanic_dataset=Titanic_df[repeating_sequence,]
#We no longer need the frequency, drop the feature
Titanic_dataset$Freq=NULL
#Fitting the Naive Bayes model
Naive_Bayes_Model=naiveBayes(Survived ~., data=Titanic_dataset)
#What does the model say? Print the model summary
Naive_Bayes_Model
#Prediction on the dataset
NB_Predictions=predict(Naive_Bayes_Model,Titanic_dataset)
#Confusion matrix to check accuracy
table(NB_Predictions,Titanic_dataset$Survived)
#Getting started with Naive Bayes in mlr
#Install the package
install.packages("mlr", dependencies = T)
#Loading the library
library(mlr)
#Create a classification task for learning on Titanic Dataset and specify the target feature
task = makeClassifTask(data = Titanic_dataset, target = "Survived")
#Initialize the Naive Bayes classifier
selected_model = makeLearner("classif.naiveBayes")
#Train the model
NB_mlr = train(selected_model, task)
#Read the model learned
NB_mlr$learner.model
#Predict on the dataset without passing the target feature
predictions_mlr = as.data.frame(predict(NB_mlr, newdata = Titanic_dataset[,1:3]))
##Confusion matrix to check accuracy
table(predictions_mlr[,1],Titanic_dataset$Survived)
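#Hedged addition for illustration (not part of the original tutorial): overall accuracy is
#the share of cases on the diagonal of the confusion matrix
conf_mlr <- table(predictions_mlr[,1], Titanic_dataset$Survived)
sum(diag(conf_mlr)) / sum(conf_mlr)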
|
/NaiveBayes.R
|
no_license
|
Manoj954/R-with-Machine-learning
|
R
| false | false | 1,735 |
r
|
#Getting started with Naive Bayes
#Install the package
#install.packages("e1071")
#Loading the library
library(e1071)
?naiveBayes #The documentation also contains an example implementation of Titanic dataset
#Next load the Titanic dataset
data("Titanic")
#Save into a data frame and view it
Titanic_df=as.data.frame(Titanic)
#Creating data from table
repeating_sequence=rep.int(seq_len(nrow(Titanic_df)), Titanic_df$Freq) #This will repeat each combination equal to the frequency of each combination
#Create the dataset by row repetition created
Titanic_dataset=Titanic_df[repeating_sequence,]
#We no longer need the frequency, drop the feature
Titanic_dataset$Freq=NULL
#Fitting the Naive Bayes model
Naive_Bayes_Model=naiveBayes(Survived ~., data=Titanic_dataset)
#What does the model say? Print the model summary
Naive_Bayes_Model
#Prediction on the dataset
NB_Predictions=predict(Naive_Bayes_Model,Titanic_dataset)
#Confusion matrix to check accuracy
table(NB_Predictions,Titanic_dataset$Survived)
#Getting started with Naive Bayes in mlr
#Install the package
install.packages("mlr", dependencies = T)
#Loading the library
library(mlr)
#Create a classification task for learning on Titanic Dataset and specify the target feature
task = makeClassifTask(data = Titanic_dataset, target = "Survived")
#Initialize the Naive Bayes classifier
selected_model = makeLearner("classif.naiveBayes")
#Train the model
NB_mlr = train(selected_model, task)
#Read the model learned
NB_mlr$learner.model
#Predict on the dataset without passing the target feature
predictions_mlr = as.data.frame(predict(NB_mlr, newdata = Titanic_dataset[,1:3]))
##Confusion matrix to check accuracy
table(predictions_mlr[,1],Titanic_dataset$Survived)
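#Hedged addition for illustration (not part of the original tutorial): overall accuracy is
#the share of cases on the diagonal of the confusion matrix
conf_mlr <- table(predictions_mlr[,1], Titanic_dataset$Survived)
sum(diag(conf_mlr)) / sum(conf_mlr)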
|
# LiveCoding
#Navigation - editor, console, plots, enviroment, creating a script - see slides
# cmd + enter (cntrl + enter) to run the current line
#get help: in console type ? and the function you want to know about
?length() #gives a description of the function
###################################################################################################
#1 Variables, Vectors
#Important: Vector operations, Classes, Append, remove, add, sum, index
#make a variable that contains a number and has identifier 'box' (identifier can be anything you want)
box <- 9
#run just the variable name to see it in the console
box
#check the class of the variables - the class of the variable depends on the values inside
class(box) #"numeric"
#make a variable that contains a word and has identifier 'name'
#R will attempt to execute letters/words/text as commands; to avoid that, use quotation marks
name <- "peter"
class(name) #"character"
#variables can contain more than just one value
#a variable that contains several values is called 'vector' and is created using function c()
#c() means either concatenate (i.e. link together in a chain) or combine <- depends on who you ask
a_vector <- c(2,3,4)
cats <- c(5,6,7)
#create new vector containing variables we defined before
long_vector <- c(cats, a_vector)
#see new vector - all elements of previous vectors are in there!
long_vector
#we can also make vectors, containing different kinds of elements
varia_vector <- c(3, long_vector, "car")
varia_vector
#the class() will always show the type of the most 'complicated' element in the vector
class(varia_vector) #"character"
#if a variable contains a number or a vector of numbers - you can do all sorts of math with it
long_vector + 10
#if it contains at least one non-numeric element - you can't do math stuff
name+4
varia_vector+13
#just doing math stuff is going to give you an output in the console
#if you want R to remember that output, you should make it a variable
triple_vector <- long_vector * 3
triple_vector
#vector's length is an important property!
length(triple_vector)
#you can access specific elements of the vector by specifying the index number of the element
triple_vector[1] #see first element
triple_vector[3] #see third element
#Removing a single (fifth) element from the vector, and rewriting the vector so it stays this way
triple_vector <- triple_vector[-5]
#removing several elements via their index number - doing the opposite of c() function by adding minus in the front
triple_vector <- triple_vector[-c(1, 2)]
#removing several elements in a row, e.g. remove elements from the first one to the fourth one:
short_vector <- long_vector[-(1:4)]
#what if we take our vector with different kinds of elements and remove the character element?
#if you want to remove element with the exact value you know:
number_vector <- varia_vector[varia_vector != "car"] #or
number_vector2 <- varia_vector[-8]
#number_vector and number_vector2 are the same, so let's remove one of them :)
#variables can be removed using rm()
rm(number_vector2)
#Removing several variables at the same time
rm(box, a_vector, cats, name)
#try to do math stuff to the number_vector from before, e.g. summarize all elements in the vector - now they all are numbers, right?
sum(number_vector) #error says invalid type (character)
typeof(number_vector) #says "character"
#it's fixable!!!! Types/classes of variables can be changed using functions as.numeric, as.character, as.factor, etc...
number_vector <- as.numeric(number_vector)
#do math stuff now, it will work!
sum(number_vector)
######################################################################################################
#2 Dataframes - accessing the dataframe, fixing a datapoint, vector operation (+/-), mean()
#Dataframe is a two-dimensional data structure - containing vectors of equal length
#here are our vectors containing the same number of elements
siblings <- c(1,2,3)
names <- c("Anita", "Fabio", "Karen")
#dataframe is created with the function data.frame()
#data.frame should be filled out like this: data.frame(YourColumnName = CorrespondingVectorOfValues, YourColumnName = CorrespondingVectorOfValues, ...)
df <- data.frame(name = c("Anita","Fabio","Karen"), sibling = siblings)
View(df)
#use $ to look at a specific column (as if it was a vector) in this format: dataframe$columnname
df$sibling
#do stuff you can normally do to vectors with columns from your df
length(df$sibling)
df$sibling + 15
mean(df$sibling)
#use $ to also add a new column to your df (your df is going to update and have it itself)
df$age <- c(21,20,7)
df$siblingplus2 <- df$sibling + 2 #you can use existing columns when you create new ones
df$gender <- c("Female", "Male", "Female") #why is there a problem? replacement has 3 rows, data has 4 -> Vectors should be the same lenght!!!
df$gender <- c("Female", "Male", "Female", "Female") #works
#you can change formats of whole columns if you want to (just like we did with vectors)
df$name <- as.character(df$name)
#you can add a new row to your dataframe using rbind() function like this: rbind(dataframe, c(same amount of values as other rows in the dataframe))
#you need to rewrite your df for it to remember the new row
df <- rbind(df,c("Millie",4,30))
#keep checking on the class of your columns, in case if formats have changed when you added new rows
class(df$name)
#change formats if you need to
df$name <- as.character(df$name)
df$sibling <- as.numeric(df$sibling)
df$age <- as.numeric(df$age)
#we can access single values in the dataframe by specifying [row index, column index]
#Here I want to access just the name "Anita" - 1st row, 1st column
df[1,1]
#we can change single values by finding them and redefining, e.g. changing value in 2nd row 3rd column to 90
df[2,3] <- 90
#We can access full rows by leaving the column index in the brackets empty:
df[2,] #access the whole second row
df[df$name == "Fabio",] #access the whole row with the name "Fabio"; == means equal
#We can access full column by using $ or leaving the row index empty
df[,2] #is the same as:
df$sibling
#if we leave both indices empty, we will get all rows and all columns
df[,]
#we can remove whole rows and columns from the dataframe similarly to vectors, we just need two coordinates now
smaller_df <- df[-1,-2] #remove first row and second column
tinier_df <- smaller_df[-3,] #remove just the third row
#we can use c() for efficiency:
teenytiny_df <- df[-c(1,4),-c(1,2)] #remove 1st and 4th rows and remove 1st and 2nd column
#teenytiny_df is in fact now just a tiny vector
##########################################################################################################
#3 logic - (!=, ==), ; , packages, subset()
# != means not equal
# == means equal
# guess what these mean: '<' '>' '>=' '<='
#these are logical operators and can be used for things such as this:
df[df$sibling == 2,]#the data where siblings = 2
df[df$sibling >= 2,] #the data where siblings >= 2 (bigger than or equal)
#we can also find single values by knowing other values... sounds confusing but stay with me
#we can access the whole column with number of siblings - by writing the vector df$sibling
#and then we can search the df$sibling vector for the value that corresponds to "Fabio" in the other column
df$sibling[df$name == "Fabio"]
subset(df, gender == "Female") #creates a subset of the data based on condition (only females)
?length() #gives a description of the function
install.packages("beepr") #install package
library(beepr) #load package
beep(5) #use a function from the package
#Extra
#You should be able to do this :) Find how to fix the following code:
names <- c("Peter", "Natalie", "Maya")
n_pets <- c(1,3,8)
pet_frame <- data.frame(names=names n_pets=n_pets)
#######################################################################################################3
#Solutions to exercises are in a separate file
|
/Class1_LiveCoding.R
|
no_license
|
anitakurm/Experimental-Methods-1-E19
|
R
| false | false | 7,989 |
r
|
# LiveCoding
#Navigation - editor, console, plots, enviroment, creating a script - see slides
# cmd + enter (cntrl + enter) to run the current line
#get help: in console type ? and the function you want to know about
?length() #gives a description of the function
###################################################################################################
#1 Variables, Vectors
#Important: Vector operations, Classes, Append, remove, add, sum, index
#make a variable that contains a number and has identifier 'box' (identifier can be anything you want)
box <- 9
#run just the variable name to see it in the console
box
#check the class of the variables - the class of the variable depends on the values inside
class(box) #"numeric"
#make a variable that contains a word and has identifier 'name'
#R will attempt to execute letters/words/text as commands; to avoid that, use quotation marks
name <- "peter"
class(name) #"character"
#variables can contain more than just one value
#a variable that contains several values is called 'vector' and is created using function c()
#c() means either concatenate (i.e. link together in a chain) or combine <- depends on who you ask
a_vector <- c(2,3,4)
cats <- c(5,6,7)
#create new vector containing variables we defined before
long_vector <- c(cats, a_vector)
#see new vector - all elements of previous vectors are in there!
long_vector
#we can also make vectors, containing different kinds of elements
varia_vector <- c(3, long_vector, "car")
varia_vector
#the class() will always show the type of the most 'complicated' element in the vector
class(varia_vector) #"character"
#if a variable contains a number or a vector of numbers - you can do all sorts of math with it
long_vector + 10
#if it contains at least one non-numeric element - you can't do math stuff
name+4
varia_vector+13
#just doing math stuff is going to give you an output in the console
#if you want R to remember that output, you should make it a variable
triple_vector <- long_vector * 3
triple_vector
#vector's length is an important property!
length(triple_vector)
#you can access specific elements of the vector by specifying the index number of the element
triple_vector[1] #see first element
triple_vector[3] #see third element
#Removing a single (fifth) element from the vector, and rewriting the vector so it stays this way
triple_vector <- triple_vector[-5]
#removing several elements via their index number - doing the opposite of c() function by adding minus in the front
triple_vector <- triple_vector[-c(1, 2)]
#removing several elements in a row, e.g. remove elements from the first one to the fourth one:
short_vector <- long_vector[-(1:4)]
#what if we take our vector with different kinds of elements and remove the character element?
#if you want to remove element with the exact value you know:
number_vector <- varia_vector[varia_vector != "car"] #or
number_vector2 <- varia_vector[-8]
#number_vector and number_vector2 are the same, so let's remove one of them :)
#variables can be removed using rm()
rm(number_vector2)
#Removing several variables at the same time
rm(box, a_vector, cats, name)
#try to do math stuff to the number_vector from before, e.g. summarize all elements in the vector - now they all are numbers, right?
sum(number_vector) #error says invalid type (character)
typeof(number_vector) #says "character"
#it's fixable!!!! Types/classes of variables can be changed using functions as.numeric, as.character, as.factor, etc...
number_vector <- as.numeric(number_vector)
#do math stuff now, it will work!
sum(number_vector)
######################################################################################################
#2 Dataframes - accessing the dataframe, fixing a datapoint, vector operation (+/-), mean()
#Dataframe is a two-dimensional data structure - containing vectors of equal length
#here are our vectors containing the same number of elements
siblings <- c(1,2,3)
names <- c("Anita", "Fabio", "Karen")
#dataframe is created with the function data.frame()
#data.frame should be filled out like this: data.frame(YourColumnName = CorrespondingVectorOfValues, YourColumnName = CorrespondingVectorOfValues, ...)
df <- data.frame(name = c("Anita","Fabio","Karen"), sibling = siblings)
View(df)
#use $ to look at a specific column (as if it was a vector) in this format: dataframe$columnname
df$sibling
#do stuff you can normally do to vectors with columns from your df
length(df$sibling)
df$sibling + 15
mean(df$sibling)
#use $ to also add a new column to your df (your df is going to update and have it itself)
df$age <- c(21,20,7)
df$siblingplus2 <- df$sibling + 2 #you can use existing columns when you create new ones
df$gender <- c("Female", "Male", "Female") #why is there a problem? replacement has 3 rows, data has 4 -> Vectors should be the same lenght!!!
df$gender <- c("Female", "Male", "Female", "Female") #works
#you can change formats of whole columns if you want to (just like we did with vectors)
df$name <- as.character(df$name)
#you can add a new row to your dataframe using rbind() function like this: rbind(dataframe, c(same amount of values as other rows in the dataframe))
#you need to rewrite your df for it to remember the new row
df <- rbind(df,c("Millie",4,30))
#keep checking on the class of your columns, in case if formats have changed when you added new rows
class(df$name)
#change formats if you need to
df$name <- as.character(df$name)
df$sibling <- as.numeric(df$sibling)
df$age <- as.numeric(df$age)
#we can access single values in the dataframe by specifying [row index, column index]
#Here I want to access just the name "Anita" - 1st row, 1st column
df[1,1]
#we can change single values by finding them and redefining, e.g. changing value in 2nd row 3rd column to 90
df[2,3] <- 90
#We can access full rows by leaving the column index in the brackets empty:
df[2,] #access the whole second row
df[df$name == "Fabio",] #access the whole row with the name "Fabio"; == means equal
#We can access full column by using $ or leaving the row index empty
df[,2] #is the same as:
df$sibling
#if we leave both indices empty, we will get all rows and all columns
df[,]
#we can remove whole rows and columns from the dataframe similarly to vectors, we just need two coordinates now
smaller_df <- df[-1,-2] #remove first row and second column
tinier_df <- smaller_df[-3,] #remove just the third row
#we can use c() for efficiency:
teenytiny_df <- df[-c(1,4),-c(1,2)] #remove 1st and 4th rows and remove 1st and 2nd column
#teenytiny_df is in fact now just a tiny vector
##########################################################################################################
#3 logic - (!=, ==), ; , packages, subset()
# != means not equal
# == means equal
# guess what these means: '<' '>' '>=' <='
#these are logical operators and can be used for things such as this:
df[df$sibling == 2,]#the data where siblings = 2
df[df$sibling >= 2,] #the data where siblings >= 2 (bigger than or equal)
#we can also find single values by knowing other values... sounds confusing but stay with me
#we can access the whole column with number of siblings - by writing the vector df$sibling
#and then we can search the df$sibling vector for the value that corresponds to "Fabio" in the other column
df$sibling[df$name == "Fabio"]
subset(df, gender == "Female") #creates a subset of the data based on condition (only females)
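#extra illustration (not part of the original exercises): conditions can be combined
#inside subset() with & (and) or | (or), e.g. only females with 2 or more siblings:
subset(df, gender == "Female" & sibling >= 2)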
?length() #gives a description of the function
install.packages("beepr") #install package
library(beepr) #load package
beep(5) #use a function from the package
#Extra
#You should be able to do this :) Find how to fix the following code:
names <- c("Peter", "Natalie", "Maya")
n_pets <- c(1,3,8)
pet_frame <- data.frame(names=names n_pets=n_pets)
#######################################################################################################
#Solutions to exercises are in a separate file
|
devtools::install_github("laduplessis/bdskytools", force=TRUE)
library(bdskytools)
library(lubridate)
library(ggplot2)
## Read in BEAST2 logfile for Wave-3 BDSS, discarding initial 10% of samples as burn-in
w3.fname <- "./Wave3_BDSS.log"
w3.lf <- readLogfile(w3.fname, burnin=0.1)
## Extract reproductiveNumber from logfile content
w3.Re_sky <- getSkylineSubset(w3.lf, "reproductiveNumber")
## Extract 95% HPD intervals of reproductiveNumber
w3.Re_hpd <- getMatrixHPD(w3.Re_sky)
## Set number of time-points at which reproductiveNumber was estimated in BDSS
w3.int_num <- 12
## Set time-gridpoints (n=27) from estimated treeHeight to time of most recent sample in Wave-3 dataset
w3.timegrid <- seq(0, 0.3054, length.out=27) # Estimated treeHeight as extracted from logfile using Tracer v1.7.1
w3.Re_gridded <- gridSkyline(w3.Re_sky, w3.lf$origin, w3.timegrid)
w3.Re_gridded_hpd <- getMatrixHPD(w3.Re_gridded)
## Invert time-gridpoints to plot temporal changes of reproductiveNumber from past to present
w3.recent <- 2020.8032786885246 # Date of most recent sample in Wave-3 dataset in decimal format as calculated using TempEst v1.5.3
w3.times <- w3.recent - w3.timegrid
# Transform function for time-axis labelling
x.date_transform <- function(x) {format(date_decimal(x), "%d/%b")}
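# Illustrative note (not in the original script): for example, x.date_transform(2020.5)
# returns roughly "02/Jul" in an English locale, i.e. the decimal-year breaks below are
# turned into readable day/month axis labels.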
## Plot temporal changes in reproductiveNumber
ggplot() +
labs(x='Time (yr)', y='Effective reproductive number, Re') +
scale_x_continuous(labels=x.date_transform, breaks=seq(w3.times[length(w3.times)], w3.times[1], by=0.019165), expand = c(0, 0)) + ## x-label every 1 week (0.019165 year)
geom_ribbon(aes(x=w3.times, ymin=w3.Re_gridded_hpd[3,], ymax=w3.Re_gridded_hpd[1,]), fill="#DBD0DD") +
geom_line(aes(x=w3.times, y=w3.Re_gridded_hpd[2,]), size=0.3, col="#654C6B") +
theme_bw() +
theme(axis.text.x = element_text(angle=90, hjust=1), panel.grid.major = element_blank(), panel.grid.minor = element_blank())
|
/scripts/BDSS/Wave3_BDSS_Re_Plot.R
|
no_license
|
HKU-SPH-COVID-19-Genomics-Consortium/HK-SARS-CoV-2-genomic-epidemiology
|
R
| false | false | 1,914 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/authenticate.R
\name{get_spotify_access_token}
\alias{get_spotify_access_token}
\title{Get Spotify Access Token}
\usage{
get_spotify_access_token(
client_id = Sys.getenv("SPOTIFY_CLIENT_ID"),
client_secret = Sys.getenv("SPOTIFY_CLIENT_SECRET")
)
}
\arguments{
\item{client_id}{Defaults to System Environment variable "SPOTIFY_CLIENT_ID"}
\item{client_secret}{Defaults to System Environment variable "SPOTIFY_CLIENT_SECRET"}
}
\value{
Returns an environment with access token data.
}
\description{
This function creates a Spotify access token.
}
\examples{
\dontrun{
get_spotify_access_token()
}
}
\keyword{auth}
|
/man/get_spotify_access_token.Rd
|
no_license
|
TroyHernandez/tinyspotifyr
|
R
| false | true | 695 |
rd
|
### -----------------------------------------------------------------------
### API
parse_remote_local <- function(specs, config, ...) {
parsed_specs <- re_match(specs, type_local_rx())
parsed_specs$ref <- parsed_specs$.text
cn <- setdiff(colnames(parsed_specs), c(".match", ".text"))
parsed_specs <- parsed_specs[, cn]
parsed_specs$type <- "local"
lapply(
seq_len(nrow(parsed_specs)),
function(i) as.list(parsed_specs[i,])
)
}
resolve_remote_local <- function(remote, direct, config, cache,
dependencies, ...) {
sources <- paste0("file://", normalizePath(remote$path, mustWork = FALSE))
resolve_from_description(remote$path, sources, remote, direct,
config, cache, dependencies[[2 - direct]])
}
download_remote_local <- function(resolution, target, config, cache,
on_progress) {
source_file <- sub("^file://", "", resolution$sources[[1]])
if (! file.copy(source_file, target, overwrite = TRUE)) {
stop("No local file found")
}
"Had"
}
satisfy_remote_local <- function(resolution, candidate, config, ...) {
## TODO: we can probably do better than this
FALSE
}
## ----------------------------------------------------------------------
## Internal functions
type_local_rx <- function() {
paste0(
"^",
"(?:local::)",
"(?<path>.*)",
"$"
)
}
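## Illustrative example (not part of the package source): for a ref such as
## "local::/tmp/mypkg", type_local_rx() captures path = "/tmp/mypkg", so
## parse_remote_local() yields a spec with ref = "local::/tmp/mypkg",
## path = "/tmp/mypkg" and type = "local".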
|
/R/type-local.R
|
permissive
|
dpastoor/pkgdepends
|
R
| false | false | 1,411 |
r
|
##############################################################################################
#' title Workflow to create the NCAR CLM data set
#' author
#' Hannah Holland-Moritz (hhollandmoritz AT gmail.com), based on script by David Durden (eddy4R.info AT gmail.com)
#'
#' description
#' Workflow for collating NIWOT LTER data, gap-filling, and packaging in NCAR CLM netcdf format.
# Modified from David Durden's flow.api.clm.R script for NEON data
# changelog and author contributions / copyrights
# David Durden (2019-07-05)
# original creation
# David Durden (2020-05-31)
# Updating to use neonUtilities for all data retrieval from API
##############################################################################
##############################################################################
# Dependencies
##############################################################################
#Call the R HDF5 Library
packReq <- c("rhdf5","REddyProc", "ncdf4","devtools","magrittr","EML", "dplyr",
"ggplot2", "purrr", "tidyr", "lubridate","RCurl", "httr", "jsonlite")
#Install and load all required packages
lapply(packReq, function(x) {
print(x)
if (require(x, character.only = TRUE) == FALSE) {
install.packages(x)
library(x, character.only = TRUE)
}})
#Setup Environment
options(stringsAsFactors = F)
##############################################################################
#Workflow parameters
##############################################################################
#### Ploting options ####
# Should plots be made of gap-filled data?
makeplots <- TRUE # FALSE
#### Output Options ####
# Base directory for all files
DirBase <- "~/Desktop/Working_files/Niwot/"
# Base directory for output
DirOutBase <- paste0(DirBase,"CLM/data")
#### Download and input options ####
# Directory to download precipitation and radiation data to
DirDnld = paste0(DirBase,"lter_flux")
# Should a newer version of precip data be automatically
# downloaded if one is available?
getNewData = TRUE
# Ameriflux username
# NOTE: you cannot download Ameriflux data without a valid username
# to create an account, visit the Ameriflux website: https://ameriflux.lbl.gov/
# Please also read their data-use policy, by downloading their data you are agreeing
# to follow it. The policy can be found here: https://ameriflux.lbl.gov/data/data-policy/
amf_usr <- "wwieder" # CHANGE ME
#### Tower Use Options ####
# What tvan tower should be used?
tower <- "Both" # Options are "East", "West", or "Both"
# if "Both" the one tower will be used to gapfill the other tower
# basetower provides which tower is the baseline that will be filled
# with the other tower. Currently the East tower record is more complete
# and has fewer gaps and errors, so it is being used as the basetower.
basetower <- "East" # West
#### Tvan data location ####
# Only necessary to set the location of the tower that you are processing, or
# both, if tower = "Both"
# The data should be formatted with ReddyProc file format.
# Briefly the file should be formated as follows: the file should be
# tab-delimited with the first row specifying the name of the variable
# and the second specifying the units of that variable. The columns should have names
# and units that follow the guidelines below:
# Column formating guidelines for Tvan data
# (optional indicates a column is not necessary for producing the final netcdf,
# it includes variables that are necessary for CLM, and also variables that are
# necessary for ReddyProc gapfilling of the data in preparation for CLM).
# | Column Name | Column Description | Units | Optional? |
# | ----------- | -------------------------------- | -------------- | --------- |
# | NEE | Net ecosystem exchange | umol m^-2 s^-1 | Yes |
# | LE | Latent heat flux | W m^-2 | No |
# | H | Sensible heat flux | W m^-2 | No |
# | Ustar | Friction velocity | m s^-1 | Yes |
# | Tair | Air temperature | degC | No |
# | VPD | Vapor pressure density | kPa | No |
# | rH | relative humidity | % | No |
# | U | Wind speed | m s^-1 | No |
# | P | Atmospheric pressure | kPa | No |
# | Tsoil | Soil temperature | degC | Yes |
# | Year | Year | - | No |
# | DoY | The day of year (1-365/366) | - | No |
# | Hour | Decimal hour of the day (0.5-24) | - | No |
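# For illustration only (these values are hypothetical, not an excerpt of the data),
# the first lines of such a tab-delimited file might look like this, with a name row
# followed by a units row:
#   Year  DoY  Hour  NEE         LE    H     Ustar  Tair  VPD  rH  U     P    Tsoil
#   -     -    -     umolm-2s-1  Wm-2  Wm-2  ms-1   degC  kPa  %   ms-1  kPa  degC
#   2007  130  0.5   NA          12.3  -5.1  0.25   3.2   0.41 78  4.6   67.2 1.8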
# The location of the east tvan data filepath, use "", if tower = "West"
DirIN = paste0(DirBase,"Tvan_out_new/supp_filtering/")
east_data_fp <- paste0(DirIN,"tvan_East_2007-05-10_00-30-00_to_2021-03-02_flux_P_reddyproc_cleaned.txt")
# The location of the west tvan data filepath, use "", if tower = "East"
west_data_fp <- paste0(DirIN,"tvan_West_2007-05-10_00-30-00_to_2021-03-02_flux_P_reddyproc_cleaned.txt")
#### Simulated Runoff Option ####
# WARNING THIS FEATURE IS UNTESTED; CHANGE AT YOUR OWN RISK
# The user can provide a data file from a simulated Moist Meadow run that
# contains two columns, a timestamp column (every timestamp represents the
# state at the *end* of the 30 minute sampling period) called "time",
# and a column containing the QRUNOFF amounts in mm/s from a Moist Meadow
# simulation. If provided, this data will be added to the Wet meadow
# precipitation. If not provided, wet meadow precipitation will be 75% of
# observed precipitation.
# As done in Wieder et al. 2017, JGR-B. doi:10.1002/2016JG003704.
# Provide a character string specifying the location of the simulated runoff data
# if NA, no simulated runoff will be used
simulated_runoff_fp <- paste0(DirIN,'QRUNOFF_clm50bgc_NWT_mm_newPHS_lowSLA.csv')
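# A minimal sketch (hypothetical, assuming the two-column layout described above) of
# how that file could be inspected manually before running the workflow:
#   sim_runoff <- read.csv(simulated_runoff_fp)  # columns: time, QRUNOFF (mm/s)
#   head(sim_runoff)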
##############################################################################
# Static workflow parameters - these are unlikely to change
##############################################################################
#Append the data subdirectory to the base output directory
DirOut <- paste0(DirOutBase, "/", "data")
plots_dir <- paste0(DirOutBase, "/plots")
# Check if directory exists and create if not
if (!dir.exists(DirOut)) dir.create(DirOut, recursive = TRUE)
if (!dir.exists(DirDnld)) dir.create(DirDnld, recursive = TRUE)
if (!dir.exists(plots_dir)) dir.create(plots_dir, recursive = TRUE)
# the EDI id for precip data from the saddle and C1 weather stations
saddle_precip_data <- "416" # NWT LTER EDI id
# Lat/long coords - shouldn't need to change unless modified in surface
# dataset lat/long
latSite <- 40.05 # should match the lat of the surface dataset
lonSite <- 360 - 254.42 # should match the long of the surface dataset
# Should simulated runoff mode be activated?
if (is.na(simulated_runoff_fp)) {
simulated_runoff_present <- FALSE
writeLines(paste0("No simulated runoff file supplied. Wet meadow precipitation",
" will be calculated without any added runoff."))
} else {
simulated_runoff_present <- TRUE
writeLines(paste0("You have supplied the following simulated runoff file: \n",
simulated_runoff_fp,
"\nIt will be added when wet meadow precipitation",
" is calculated."))
}
##############################################################################
# Helper functions - for downloading and loading data
##############################################################################
# Functions for downloading LTER Precip data are from Sarah Elmendorf's
# utility_functions_all.R script
# https://github.com/NWTlter/long-term-trends/blob/master/utility_functions/utility_functions_all.R
# function to determine current version of data package on EDI
getCurrentVersion <- function(edi_id){
require(magrittr)
versions = readLines(paste0('https://pasta.lternet.edu/package/eml/knb-lter-nwt/', edi_id),
warn = FALSE) %>%
as.numeric() %>% (max)
packageid = paste0('knb-lter-nwt.', edi_id, '.', versions)
return(packageid)
}
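# Illustrative use (the returned version is hypothetical and depends on current EDI
# holdings): getCurrentVersion("416") would return something like "knb-lter-nwt.416.12"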
#function to download the EML file from EDI
getEML <- function(packageid){
require(magrittr)
myurl<-paste0("https://portal.edirepository.org/nis/metadataviewer?packageid=",
packageid,
"&contentType=application/xml")
#myeml<-xml2::download_html(myurl)%>%xml2::read_xml()%>%EML::read_eml()
  myeml <- xml2::read_xml(myurl) %>% EML::read_eml()
}
# Function for downloading from EDI
download_EDI <- function(edi_id, dest_dir, getNewData = TRUE) {
# This section heavily borrowed from Sarah Elmendorf's generic_timeseries_workflow.R script
# https://github.com/NWTlter/long-term-trends/blob/master/plotting_scripts/generic_timeseries_workflow.R
# Depends on getCurrentVersion() and getEML()
packageid = getCurrentVersion(edi_id)
if (any(grepl(packageid, list.files(dest_dir)) == TRUE)) {
writeLines(paste0("Most recent package version ",
packageid, " is already downloaded. Nothing to do."))
return(list.files(dest_dir, pattern = paste0(packageid, ".{1,}csv"), full.names = T))
} else if (getNewData == FALSE) {
writeLines(paste0("A more recent version of the data (version ",
packageid, ") is available. ",
"But since you have specified getNewData = FALSE, ",
"the latest version will not be downloaded."))
return(list.files(dest_dir, pattern = paste0(".{1,}csv"), full.names = T))
} else {
writeLines(paste0("Downloading package ", packageid, " from EDI."))
myeml = getEML(packageid)
# Create output directory for data
ifelse(!dir.exists(file.path(dest_dir)),
dir.create(file.path(dest_dir)), FALSE)
### eml reading and downloading of csv
if (is.null(names(myeml$dataset$dataTable))) {
attributeList = lapply(myeml$dataset$dataTable, function(x){
EML::get_attributes(x$attributeList)
})
names(attributeList) = lapply(myeml$dataset$dataTable, function(x){
x$physical$objectName})
    if (getNewData) {
      #download all the datatables in the package
      csv_list <- lapply(myeml$dataset$dataTable, function(x){
        url_to_get = x$physical$distribution$online$url$url
        dest_file <- paste0(dest_dir, "/",
                            packageid, "_",
                            x$physical$objectName)
        download.file(url_to_get,
                      destfile = dest_file,
                      method = "curl")
        dest_file
      })
      output_csv_file <- unlist(csv_list)
    }
}else{
#if only one data table
attributeList = list(EML::get_attributes(myeml$dataset$dataTable$attributeList))
names(attributeList) = myeml$dataset$dataTable$physical$objectName
if (getNewData) {
url_to_get = myeml$dataset$dataTable$physical$distribution$online$url$url
download.file(url_to_get,
destfile = paste0(dest_dir, "/",
packageid, "_",
myeml$dataset$dataTable$physical$objectName),
method = "curl")
output_csv_file <- paste0(dest_dir, "/",
packageid, "_",
myeml$dataset$dataTable$physical$objectName)
}
}
# Also save the full xml
write_eml(myeml, file = paste0(dest_dir, "/", packageid, ".xml"))
writeLines(paste0("Downloaded data can be found in: ", dest_dir))
return(output_csv_file)
}
}
# Function for downloading USCRN precip
download_USCRN <- function(start_date, end_date, dest_dir, DoNotOverwrite = TRUE) {
# This function downloads precipitation data from the Boulder USCRN weather
# station at C1. It returns a list of the files it tried to download. By
# default it will not download files that are already in the destination directory.
# Arguments:
# start_date = the start date of tvan data in character form (or other form
# that lubridate can coerce with its `year()` function)
# end_date = the end date of tvan data in character form (or other form
# that lubridate can coerce with its `year()` function)
# dest_dir = the destination directory where the files will be downloaded
# DoNotOverwrite = should existing files with the same name be overwritten? If
# TRUE, files will not be overwritten, if FALSE, files will be
#overwritten.
require(lubridate)
require(RCurl)
# To do: replace this warning with a check for the tvan data
message("Please note, end_date of USCRN data must not be less than the end_date of the tvan data.")
# make dest_dir if it doesn't exist
made_dir <- ifelse(!dir.exists(file.path(dest_dir)),
dir.create(file.path(dest_dir), recursive = TRUE), FALSE)
if (!made_dir) {
writeLines("Data download directory not created, it already exists.")
}
# Create a list of urls - one for each year of data
url_list <- vector(mode = "list",
length = lubridate::year(end_date) - lubridate::year(start_date) + 1)
file_list <- vector(mode = "list",
length = lubridate::year(end_date) - lubridate::year(start_date) + 1)
# get the names for each year (including unfinished partial years at the end)
names(url_list) <- lubridate::year(seq(from = lubridate::ymd(as.Date(start_date)),
length.out = (lubridate::year(end_date) -
lubridate::year(start_date) + 1),
by = "years"))
names(file_list) <- lubridate::year(seq(from = lubridate::ymd(as.Date(start_date)),
length.out = (lubridate::year(end_date) -
lubridate::year(start_date) + 1),
by = "years"))
for (i in seq_along(url_list)) {
url_list[[i]] <- paste0("https://www1.ncdc.noaa.gov/pub/data/uscrn/products/subhourly01/", names(url_list[i]), "/CRNS0101-05-", names(url_list[i]),"-CO_Boulder_14_W.txt")
}
# Check if url exists and if it does, download file
for (i in seq_along(url_list)) {
writeLines(paste0("Checking if ", url_list[[i]], " exists..."))
if (!url.exists(url_list[[i]])) {
stop(paste0("Url ", x, " is not accessible."))
} else {
writeLines("TRUE")
}
# Check if destination file already exists
dest_fp <- paste0(dest_dir, "/CRNS0101-05-",
names(url_list[i]),"-CO_Boulder_14_W.txt")
file_list[[i]] <- dest_fp
if (file.exists(dest_fp) & DoNotOverwrite == TRUE) {
      writeLines(paste0(dest_fp, " already exists, skipping..."))
} else { # if file doesn't exist or if overwrite is TRUE, download
try(download.file(url = url_list[[i]],
destfile = dest_fp))
}
}
return(file_list)
}
# Function for reading in USCRN precip text files
read_USCRN_precip_data <- function(USCRN_precip_fp) {
# This function reads in USCRN precipitation data files. It adds column
# names and then it 1) collapses the time from 5-minute increments to half-
# hourly by summing the precipitation over each 1/2-hour period; 2) Changes -9999
# to NAs; and 3) selects only the local date, local time, and precpitation variables
# for the final data frame. It returns the resulting dataframe.
# Arguments:
# USCRN_precip_fp = file path to the USCRN text file you want to load
# USCRN Fields and information can be found here:
# https://www1.ncdc.noaa.gov/pub/data/uscrn/products/subhourly01/README.txt
# Field# Name Units
# ---------------------------------------------
# 1 WBANNO XXXXX
# 2 UTC_DATE YYYYMMDD
# 3 UTC_TIME HHmm
# 4 LST_DATE YYYYMMDD
# 5 LST_TIME HHmm
# 6 CRX_VN XXXXXX
# 7 LONGITUDE Decimal_degrees
# 8 LATITUDE Decimal_degrees
# 9 AIR_TEMPERATURE Celsius
# 10 PRECIPITATION mm
# 11 SOLAR_RADIATION W/m^2
# 12 SR_FLAG X
# 13 SURFACE_TEMPERATURE Celsius
# 14 ST_TYPE X
# 15 ST_FLAG X
# 16 RELATIVE_HUMIDITY %
# 17 RH_FLAG X
# 18 SOIL_MOISTURE_5 m^3/m^3
# 19 SOIL_TEMPERATURE_5 Celsius
# 20 WETNESS Ohms
# 21 WET_FLAG X
# 22 WIND_1_5 m/s
# 23 WIND_FLAG X
#
# ----------------------- Begin Function -------------------- #
require(dplyr)
# read in text file
writeLines(paste0("Reading in ", USCRN_precip_fp))
precip <- read.table(USCRN_precip_fp, sep = "",
colClasses = c(rep("character", times = 6),
rep("numeric", times = 7),
"character",
rep("numeric", times = 9)))
# Assign column names
names(precip) <- c("WBANNO", "UTC_DATE", "UTC_TIME", "LST_DATE", "LST_TIME",
"CRX_VN", "LONGITUDE", "LATITUDE", "AIR_TEMPERATURE",
"PRECIPITATION", "SOLAR_RADIATION", "SR_FLAG",
"SURFACE_TEMPERATURE", "ST_TYPE", "ST_FLAG",
"RELATIVE_HUMIDITY", "RH_FLAG", "SOIL_MOISTURE_5",
"SOIL_TEMPERATURE_5", "WETNESS", "WET_FLAG", "WIND_1_5",
"WIND_FLAG")
# Clean data frame
precip <- precip %>%
# Split local time string and convert to decimal time
dplyr::mutate(UTC_TIME = gsub("(..)(..)", "\\1:\\2:00", UTC_TIME),
cleanTime_UTC =
strsplit(UTC_TIME, ":") %>%
sapply(function(x){
x <- as.numeric(x)
x[1] + x[2]/60 + x[3]/(60*60)
}),
decimalTime_UTC = floor(cleanTime_UTC * 2)/2) %>%
dplyr::mutate(LST_TIME = gsub("(..)(..)", "\\1:\\2:00", LST_TIME),
cleanTime_LST =
strsplit(LST_TIME, ":") %>%
sapply(function(x){
x <- as.numeric(x)
x[1] + x[2]/60 + x[3]/(60*60)
}),
decimalTime_LST = floor(cleanTime_LST * 2)/2) %>%
# select only columns used for precipitation and time stamp
dplyr::select(UTC_DATE, UTC_TIME, LST_DATE, LST_TIME,
cleanTime_UTC, decimalTime_UTC,
cleanTime_LST, decimalTime_LST, PRECIPITATION) %>%
# set NAs from -9999
dplyr::mutate_all(list(~na_if(., -9999))) %>%
# sum all precip events in each 1/2 period
dplyr::group_by(UTC_DATE, decimalTime_UTC) %>%
dplyr::mutate(PRECIP_TOT = sum(PRECIPITATION)) %>%
# remove extra time steps
dplyr::select(-PRECIPITATION, -LST_TIME, -UTC_TIME,
-cleanTime_UTC, -cleanTime_LST) %>%
unique() %>%
# create 1/2-hourly time stamps
dplyr::mutate(UTC_DATE = as.Date(UTC_DATE, format = "%Y%m%d"),
timestamp_UTC = as.POSIXct(paste0(UTC_DATE," 00:00:00"),
tz = "UTC") + 3600*decimalTime_UTC) %>%
dplyr::mutate(LST_DATE = as.Date(LST_DATE, format = "%Y%m%d"),
timestamp_LST = as.POSIXct(paste0(LST_DATE," 00:00:00"),
tz = "MST") + 3600*decimalTime_LST)
return(precip)
}
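# Worked example of the time-binning above (illustration only): a 5-minute record
# stamped "1436" becomes "14:36:00" -> 14 + 36/60 = 14.6 decimal hours, and
# floor(14.6 * 2)/2 = 14.5, i.e. it is summed into the 14:30-15:00 half-hour bin.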
# Function for downloading radiation data from Ameriflux
download_amflx <- function(dest_dir, username,
site = "US-NR1", DescriptionOfDataUse,
DoNotOverwrite = TRUE,
verbose = FALSE) {
  # This function downloads radiation data from the Ameriflux website
# It returns a list of the files it tried to download. By default it will
# not download files that are already in the destination directory.
# Arguments:
# dest_dir -------------- the destination directory where the files will be
# downloaded
# username -------------- the Ameriflux username of the user - this function
# will fail without a valid username.
# site ------------------ the Ameriflux site to get the data from; defaults to
# US-NR1
# DescriptionOfDataUse --- the description to provide to Ameriflux for the intended
# use of the data. If not provided by the user, the
# description will read:
#
# These data will be used as atmospheric forcings
# to run a local point-simulation for the alpine
# tundra at the Niwot Ridge LTER site.
#
# DoNotOverwrite --------- should existing files with the same name be overwritten?
# If TRUE, files will not be overwritten, if FALSE, files
# will be overwritten.
# verbose ---------------- Should the communication with the website be verbose?
# default is FALSE.
require(httr)
require(jsonlite)
require(RCurl)
# Testing
# site <- "US-NR1"
# username <- amf_usr
# dest_dir <- "~/Downloads/lter_flux/rad2"
writeLines("Connecting with Ameriflux endpoint...")
# NOTE THIS ENDPOINT MAY CHANGE
ameriflux_endpoint <- "https://ameriflux-data.lbl.gov/AmeriFlux/DataDownload.svc/datafileURLs"
if (missing(DescriptionOfDataUse)) {
DescriptionOfDataUse = "These data will be used as atmospheric forcings to run a local point-simulation for the alpine tundra at the Niwot Ridge LTER site."
}
# Construct Payload request for ameriflux endpoint
Payload <- paste0('{',
'"username":"', username, '",',
'"siteList":["', site, '"],',
'"intendedUse": "Research - Land model/Earth system model",',
'"description": "', DescriptionOfDataUse, '"',
'}')
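  # For reference (illustrative, using the site default and the username passed in
  # above), the assembled payload is a JSON string along the lines of:
  # {"username":"wwieder","siteList":["US-NR1"],
  #  "intendedUse": "Research - Land model/Earth system model",
  #  "description": "These data will be used as ..."}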
# Get download information from Ameriflux endpoint
if (verbose) {
tmp <- httr::POST(url = ameriflux_endpoint,
body = Payload, verbose(), content_type_json())
} else {
tmp <- httr::POST(url = ameriflux_endpoint,
body = Payload, content_type_json())
}
# Check that the connection was successful
if (tmp$status_code < 200 | tmp$status_code > 299) {
stop(paste0("Attempt to connect to the website was not successful.\n",
"This may be because Ameriflux has changed its endpoint url \n",
"and you may need to contact Ameriflux support for an updated \n",
"address, or it may be due to a mistake in the request payload \n",
"syntax. Please check that the Ameriflux endpoing url and the \n",
"payload syntax are valid. \n\n",
"Current endpoint: ", ameriflux_endpoint, "\n",
"Current payload: ", Payload))
} else {
writeLines("Connection to Ameriflux successful.")
}
# extract content from the response
r <- content(tmp)
# Check if the content is successfully received
if (class(r) == "raw" | length(r$dataURLsList) == 0) {
stop(paste0("No data was received from Ameriflux. Please check that your ",
"username is valid and that both it and the site name are ",
"spelled correctly."))
}
# Extract list of ftp urls
url_list <- unlist(lapply(1:length(r$dataURLsList),
function(x){r$dataURLsList[[x]]$URL}))
file_list <- vector(mode = "list",
length = length(url_list))
# Notify user of the data policy prior to download
message(paste0("Thank you for using Ameriflux data. Please be aware of the data \n",
"policy. By downloading this data you are acknowledging that you \n",
"have read and agree to that policy. \n\n",
"The following is how you described how you intend to use the data.\n\n",
"\tIntended Use: Research - Land model/Earth system model \n",
"\tDescription: These data will be used as atmospheric forcings \n",
"\tto run a local point-simulation for the alpine tundra at the \n",
"\tNiwot Ridge LTER site)\n\n",
"By downloading the data, the data contributors have been informed \n",
"of your use. If you are planning an in-depth analysis that may \n",
"result in a publication, please contact the data contributors \n",
"directly so they have the opportunity to contribute substantially \n",
"and become a co-author. \n\n",
"The contact email for this site is: ",
unlist(r$manifest$emailForSitePIs), "\n\n",
"You should also acknowledge Ameriflux in your presentations and \n",
"publications. Details about how this should be done can be found \n",
"on the Ameriflux website. \n\n",
"The full policy along with details about how to properly cite the \n",
"data can found here: \n",
"https://ameriflux.lbl.gov/data/data-policy/"))
# make dest_dir if it doesn't exist
made_dir <- ifelse(!dir.exists(file.path(dest_dir)),
dir.create(file.path(dest_dir), recursive = TRUE), FALSE)
writeLines("Downloading data...")
if(!made_dir) {
writeLines("Data download directory not created, it already exists.")
}
# Check if downloaded files already exist and if not, download file
for (i in seq_along(url_list)) {
# Check if destination file already exists
dest_fp <- paste0(dest_dir, "/", basename(url_list[[i]]))
file_list[[i]] <- dest_fp
if (file.exists(dest_fp) & DoNotOverwrite == TRUE) {
      writeLines(paste0(dest_fp, " already exists, skipping..."))
} else { # if file doesn't exist or if overwrite is TRUE, download
# try(download.file(url = url_list[[i]],
# destfile = dest_fp,
# method = "curl"))
try(GET(url = url_list[[i]],
write_disk(dest_fp, overwrite=FALSE), progress(), verbose()))
}
}
return(unlist(file_list))
}
##############################################################################
# Read in L1 flux tower data product
##############################################################################
# Read in East & West tower
if (tower == "East" | tower == "Both") {
# East data
tvan_east <- read.table(file = east_data_fp, sep = "\t",
skip = 2, header = FALSE)
tvan_east_names <- read.table(file = east_data_fp, sep = "\t",
header = TRUE, nrows = 1)
tvan_east_units <- as.character(unname(unlist(tvan_east_names[1,])))
colnames(tvan_east) <- names(tvan_east_names)
}
if (tower == "West" | tower == "Both") {
# West data
tvan_west <- read.csv(file = west_data_fp, sep = "\t",
skip = 2, header = FALSE)
tvan_west_names <- read.table(file = west_data_fp, sep = "\t",
header = TRUE, nrows = 1)
tvan_west_units <- as.character(unname(unlist(tvan_west_names[1,])))
colnames(tvan_west) <- names(tvan_west_names)
}
# Get the start and end dates of the tvan data. If tower = "Both",
# combine East and West data into one dataframe for convenience
if (tower == "Both") {
tvan_east$Tower <- "East"
tvan_west$Tower <- "West"
tvan_all <- bind_rows(tvan_east, tvan_west) %>%
mutate_all(list(~na_if(., -9999))) %>%
mutate(date = as.Date(DoY - 1, origin = paste0(Year, "-01-01")),
timestamp = as.POSIXct(paste0(date," 00:00:00"),
format = "%Y-%m-%d %H:%M:%OS",
tz = "MST") + 3600*Hour) %>%
group_by(Tower, Year, DoY) %>%
mutate_at(vars(NEE:Ustar), list(daily_mean = mean), na.rm = TRUE) %>%
select(date, timestamp, Year, DoY, Hour, Tower, everything())
# Set a start/end date for the precip and radiation data based on the tvan data
# make sure it's a round number or rEddyProc will complain
start_date <- ceiling_date(min(tvan_all$timestamp, na.rm = TRUE), unit = "day")
end_date <- floor_date(max(tvan_all$timestamp, na.rm = TRUE), unit = "day")
} else if (tower == "East") {
tvan_east$Tower <- "East"
# Set a start/end date for the precip and radiation data based on the tvan data
start_date <- min(tvan_east$timestamp, na.rm = TRUE)
end_date <- max(tvan_east$timestamp, na.rm = TRUE)
} else if (tower == "West") {
tvan_west$Tower <- "West"
# Set a start/end date for the precip and radiation data based on the tvan data
start_date <- min(tvan_west$timestamp, na.rm = TRUE)
end_date <- max(tvan_west$timestamp, na.rm = TRUE)
}
# Create a timeseries dataframe with the timestamps (this is in MST since start_date
# and end_date are in MST):
posix_complete <- as.data.frame(seq.POSIXt(start_date, end_date, by = "30 mins"))
colnames(posix_complete) <- "timestamp"
# get rid of first timestep, which is at midnight and not 00:30:00; it makes rEddyProc complain
posix_complete <- data.frame(timestamp = posix_complete[-1,])
##############################################################################
# Download Precipitation
##############################################################################
# Download precip data
# From here: https://portal.edirepository.org/nis/mapbrowse?packageid=knb-lter-nwt.416.10
writeLines("Downloading Saddle Precip data from EDI...")
saddle_precip_data_fp <- download_EDI(edi_id = saddle_precip_data,
dest_dir = paste0(DirDnld, "/precip_data"),
getNewData = getNewData)
writeLines("Downloading C1 precipitation data from USCRN...")
USCRN_precip_data_fp <- download_USCRN(start_date = start_date,
end_date = end_date,
dest_dir = paste0(DirDnld, "/precip_data"),
DoNotOverwrite = TRUE)
##############################################################################
# Handling Precip data
##############################################################################
# Saddle precip data must be corrected for blowing snow events, and extended to
# half-hourly precip using Will's formula (see below for details).
writeLines("Reading in Saddle data...")
# Read in Saddle and USCRN Precip data; also collapse USCRN data into one dataframe
saddle_precip <- read.csv(saddle_precip_data_fp,
sep = ",", quot = '"', check.names = TRUE)
writeLines("Reading in C1 precipitation data from USCRN. This may take a while.")
USCRN_precip_list <- lapply(USCRN_precip_data_fp, read_USCRN_precip_data)
USCRN_precip <- plyr::rbind.fill(USCRN_precip_list) %>%
unique() # make sure to remove duplicates caused by aggregating to 30-minute time steps
# Check for duplicated time stamps - should be 0 (aka no TRUEs)
if (sum(duplicated(USCRN_precip$timestamp_UTC)) > 0) {
warning("USCRN precipitation data still contains ",
sum(duplicated(USCRN_precip$timestamp_UTC)),
" duplicates!")
} else {
writeLines(paste0("USCRN precipitation data has been loaded. ",
sum(duplicated(USCRN_precip$timestamp_UTC)),
" duplicated timestamps have been detected."))
}
# Filter the precip data by exact start and end dates
saddle_precip <- saddle_precip %>%
mutate(date = as.Date(date)) %>%
filter(date >= floor_date(start_date, unit = "day") &
date <= ceiling_date(end_date, unit = "day"))
USCRN_precip <- USCRN_precip %>%
rename(date = LST_DATE) %>%
mutate(timestamp_LST = as.POSIXct(timestamp_LST, tz = "MST")) %>%
filter(timestamp_LST >= floor_date(start_date, unit = "day") &
timestamp_LST <= ceiling_date(end_date, unit = "day"))
# Apply blowing snow correction to months of Oct-May Saddle data
# Due to blowing snow events where the belfort gauge has an oversampling of precipitation,
# it is recommended to add a correction for the precipitation total in the months Oct-May.
# The recommended correction for these events should be (0.39 * the recorded total). More
# information on this can be found in:
# Williams, M.W., Bardsley, T., Rikkers, M., (1998) Overestimation of snow depth and inorganic nitrogen wetfall using NADP data, Niwot Ridge, Colorado. Atmospheric Environment 32 (22) :3827-3833
writeLines("Applying blowing snow correction to Saddle precip data.")
saddle_precip <- saddle_precip %>%
mutate(month = month(date),
ppt_tot_corr = ifelse(month %in% c(10, 11, 12, 1, 2, 3, 4, 5),
ppt_tot * 0.39, ppt_tot))
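# Worked example (illustration only): a 10 mm daily total recorded in January is
# scaled to 10 * 0.39 = 3.9 mm, while a 10 mm total in July is left unchanged.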
# Change any Nas or NaNs to zero
saddle_precip <- saddle_precip %>%
mutate(ppt_tot_corr = ifelse(is.na(ppt_tot_corr), 0, ppt_tot_corr))
USCRN_precip <- USCRN_precip %>%
mutate(PRECIP_TOT = ifelse(is.na(PRECIP_TOT), 0, PRECIP_TOT))
# Apply Will's algorithm for Precip data from paper:
# Use the half-hourly precipitation record from the U.S. Climate Reference Network
# (USCRN; data from https://www1.ncdc.noaa.gov/pub/data/uscrn/products/subhourly01/),
# measured nearby (4 km) at the lower-elevation (3050 m asl) C-1 site. Proportionally
# allocate the daily Saddle precip measurements to the half-hourly precip record from
# USCRN. On days when the Saddle record reports measurable precip but the USCRN does
# not, distribute the daily Saddle precip evenly across the day for model simulations.
# Code modified from his TVAN_daily_ppt.R script
writeLines(paste0("Applying Will Wieder's algorithm for allocating daily Saddle ",
"precipitation totals into 30-minute increments."))
Tvan_ppt <- saddle_precip$ppt_tot_corr
CRNS_ppt <- USCRN_precip$PRECIP_TOT
CRNS_date <- USCRN_precip$date
CRNS_mo <- month(USCRN_precip$date)
CRNS_hour <- USCRN_precip$decimalTime_LST
CRNS_d <- tapply(CRNS_ppt, CRNS_date, sum) # daily precip totals
CRNS_day <- tapply(CRNS_date, CRNS_date, mean) # num of days since 1970-01-01 - see date.mean()
CRNS_month <- tapply(CRNS_mo, CRNS_date, mean) # months
#------------------------------------------------------
# distribute Tvan ppt when observed in half-hourly CRNS
#------------------------------------------------------
ndays <- length(Tvan_ppt)
nsteps <- length(CRNS_ppt)
Tvan_fine <- rep(NA, nsteps)
Tvan_note <- rep(NA, nsteps)
Tvan_flag <- rep(NA, ndays)
Tvan_flag_mo <- rep(NA, ndays)
Tvan_date <- USCRN_precip$date # MST date
Tvan_hour <- USCRN_precip$decimalTime_LST # MST hour
start <- 1
# code below does the following:
# (0) if no daily precip at Tvan, add zeros to half hourly results
# (1) if precip at Tvan, but not recorded @ CRNS, distribute evenly in day and add 1 to the flag
# (2) if both precip at Tvan and CRNS, distribute Tvan in same proportion as CRNS
for (d in 1:ndays) {
end <- start + 47
if (Tvan_ppt[d] == 0) {
Tvan_fine[start:end] <- 0
Tvan_note[start:end] <- 0
} else if (CRNS_d[d] == 0){
Tvan_fine[start:end] <- Tvan_ppt[d] / 48
Tvan_note[start:end] <- 1
Tvan_flag[d] <- 1
Tvan_flag_mo[d] <- CRNS_month[d]
} else {
temp_frac <- CRNS_ppt[start:end] / CRNS_d[d]
Tvan_fine[start:end] <- Tvan_ppt[d] * temp_frac
Tvan_note[start:end] <- 2
}
if (round(sum(Tvan_fine[start:end], na.rm = TRUE), digits = 7) !=
round(sum(Tvan_ppt[d], na.rm = TRUE), digits = 7)) {
warning(paste0("Running precip totals don't match at day ", d))
}
start <- end + 1
}
# Check that the total precip that fell at the saddle is the same as the total precip
# when allocated over 30-minute time steps
if (sum(Tvan_fine, na.rm=T) == sum(Tvan_ppt)) {
writeLines(paste0("Total precip that fell at the Saddle (", sum(Tvan_ppt),
") matches the amount of total precip that has been ",
"allocated to the for the tvan data (", sum(Tvan_fine, na.rm=T), ")."))
} else {
warning(paste0("Total precip that fell at the Saddle (", sum(Tvan_ppt),
") does NOT match the amount of total precip that has been ",
"allocated to the for the tvan data (", sum(Tvan_fine, na.rm=T), ")!"))
}
writeLines(paste0("Number of total days = ",ndays, " [", ddays(ndays), "]"))
writeLines(paste0("Number of days w/ precip at Tvan = ",
length(Tvan_ppt[Tvan_ppt > 0])))
writeLines(paste0("Number of days with Tvan precip but w/o recorded CRNS precip = ",
sum(Tvan_flag, na.rm = T)))
hist(Tvan_flag_mo, xlim = c(1,12),
main = paste0("Montly frequency of days with Tvan precip but ",
"w/o recorded CRNS precip"),
xlab = "Months"
)
# Convert precip from mm/30 minutes into mm/s
Precip = Tvan_fine[1:nsteps] # mm every 30 minutes
PRECTmms <- Precip / (30*60) # mm/s
# Combine date and 1/2-hourly precip into one dataframe and add a timestamp
hlf_hr_precip <- data.frame(PRECTmms = PRECTmms, # mm/s
MST_HOUR = Tvan_hour[1:nsteps], # decimal hours
MST_DATE = Tvan_date[1:nsteps]) %>% # date
mutate(timestamp = as.POSIXct(paste0(MST_DATE," 00:00:00"), tz = "MST") +
3600*MST_HOUR) %>%
# fix date so that "0" hour readings are converted into 24
mutate(MST_DATE = if_else(MST_HOUR == 0, MST_DATE - 1, MST_DATE),
MST_HOUR = if_else(MST_HOUR == 0.0, 24, MST_HOUR))
##############################################################################
# Download Radiation data
##############################################################################
writeLines("Downloading Ameriflux radiation data...")
rad_data_fp <- download_amflx(dest_dir = paste0(DirDnld, "/rad_data"),
username = amf_usr, verbose = TRUE)
# Check if the files have already been unzipped, if not, unzip the zip file
for (i in seq_along(rad_data_fp)) {
if (grepl(".zip", basename(rad_data_fp[i]))) {
writeLines(paste0("Unzipping ", rad_data_fp[i]))
# check if the unzipped files exist
unzip_list <- unzip(zipfile = rad_data_fp[i],
exdir = dirname(rad_data_fp[i]),
overwrite = FALSE)
}
}
amf_data_fp <- list.files(dirname(rad_data_fp[i]),
full.names = TRUE,
pattern = "*.csv")
##############################################################################
# Handle Radiation data
##############################################################################
# Note: Radiation data comes from the Ameriflux US-NR1 site. It is downloaded above
# through the Ameriflux data endpoint, which requires a registered Ameriflux username
# (to create an account, see https://ameriflux.lbl.gov/data/download-data/).
# For CLM we will pull out incoming shortwave (necessary) and incoming longwave (optional).
# The net radiation is provided by the Tvan tower datasets.
# The possible Ameriflux variables are:
# NETRAD_1_1_2 (W m-2): Net radiation (no QA/QC or gapfilling)
# NETRAD_PI_F_1_1_2 (W m-2): Net radiation (gapfilled by tower team)
# SW_IN_1_1_1 (W m-2): Shortwave radiation, incoming (no QA/QC or gapfilling)
# LW_IN_1_1_1 (W m-2): Longwave radiation, incoming (no QA/QC or gapfilling)
# SW_IN_PI_F_1_1_1 (W m-2): Shortwave radiation, incoming (gapfilled by tower team)
# LW_IN_PI_F_1_1_1 (W m-2): Longwave radiation, incoming (gapfilled by tower team)
# SW_OUT_1_1_1 (W m-2): Shortwave radiation, outgoing (no QA/QC or gapfilling)
# LW_OUT_1_1_1 (W m-2): Longwave radiation, outgoing (no QA/QC or gapfilling)
# SW_OUT_PI_F_1_1_1 (W m-2): Shortwave radiation, outgoing (gapfilled by tower team)
# LW_OUT_PI_F_1_1_1 (W m-2): Longwave radiation, outgoing (gapfilled by tower team)
writeLines("Reading in Ameriflux radiation data...")
# Load in Radiation data:
amf_data <- read.csv(file = amf_data_fp[2],
skip = 2,
header = TRUE,
na.strings = "-9999",
as.is = TRUE)
# Select timestamps, and radiation variables
rad_data <- amf_data[,c("TIMESTAMP_START", "TIMESTAMP_END",
"SW_IN_1_1_1", # also sometimes called Rg
"LW_IN_1_1_1", # also sometimes called FLDS
"SW_IN_PI_F_1_1_1", # also sometimes called Rg
"LW_IN_PI_F_1_1_1", # also sometimes called FLDS
"SW_OUT_1_1_1",
"LW_OUT_1_1_1",
"SW_OUT_PI_F_1_1_1",
"LW_OUT_PI_F_1_1_1",
"NETRAD_1_1_2",
"NETRAD_PI_F_1_1_2")]
rad_data$TIMESTAMP_START <- as.POSIXct(as.character(rad_data$TIMESTAMP_START), format = "%Y%m%d%H%M%OS", tz = "MST")
rad_data$TIMESTAMP_END <- as.POSIXct(as.character(rad_data$TIMESTAMP_END), format = "%Y%m%d%H%M%OS", tz = "MST")
# Subset the radiation data to the Tvan time period, reformat the times to get hours
# and dates, finally, select only the radiation, hour, and date variables.
hlf_hr_rad <- rad_data %>%
mutate(date = lubridate::date(TIMESTAMP_END)) %>%
filter(date >= floor_date(start_date, unit = "day") &
date <= floor_date(end_date, unit = "day")) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(MST_HOUR = lubridate::hour(TIMESTAMP_END) +
lubridate::minute(TIMESTAMP_END)/60,
MST_DATE = lubridate::date(TIMESTAMP_START)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(MST_HOUR = if_else(MST_HOUR == 0.0, 24, MST_HOUR)) %>%
# Calculate net radiation from in/out radiation
mutate(radNet = (SW_IN_PI_F_1_1_1 - SW_OUT_PI_F_1_1_1) +
(LW_IN_PI_F_1_1_1 - LW_OUT_PI_F_1_1_1)) %>%
rename(Rg_usnr1 = SW_IN_PI_F_1_1_1, FLDS = LW_IN_PI_F_1_1_1,
SW_OUT = SW_OUT_PI_F_1_1_1, LW_OUT = LW_OUT_PI_F_1_1_1,
timestamp = TIMESTAMP_END) %>%
select(timestamp, MST_DATE, MST_HOUR, Rg_usnr1, FLDS, radNet)
##############################################################################
# Combine flux and met data
##############################################################################
if (tower == "East" | tower == "Both") {
# East tower
tvan_east_tms <- tvan_east %>%
mutate_all(list(~na_if(., -9999))) %>%
mutate(date = as.Date(DoY - 1, origin = paste0(Year, "-01-01")),
timestamp = as.POSIXct(paste0(date," 00:00:00"),
format = "%Y-%m-%d %H:%M:%OS",
tz = "MST") + 3600*Hour)
}
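# Worked example of the timestamp construction above (illustration only): a row with
# Year = 2008, DoY = 1, Hour = 0.5 gives as.Date(0, origin = "2008-01-01") =
# "2008-01-01", so the timestamp is 2008-01-01 00:30:00 MST, i.e. the end of the first
# half-hour period of the year.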
if (tower == "West" | tower == "Both") {
# West tower
tvan_west_tms <- tvan_west %>%
mutate_all(list(~na_if(., -9999))) %>%
mutate(date = as.Date(DoY - 1, origin = paste0(Year, "-01-01")),
timestamp = as.POSIXct(paste0(date," 00:00:00"),
format = "%Y-%m-%d %H:%M:%OS",
tz = "MST") + 3600*Hour)
}
# Join the flux data to the posix_complete date sequence
if (tower == "Both") {
tmp_east <- left_join(posix_complete, tvan_east_tms, by = "timestamp") %>%
mutate(Tower = "East")
tmp_west <- left_join(posix_complete, tvan_west_tms, by = "timestamp") %>%
mutate(Tower = "West")
tvan_comb_tms <- bind_rows(tmp_east, tmp_west)
tvan_tms <- tvan_comb_tms %>%
# Fill in the DoY, Hour, Date, and Year that are NAs
mutate(date = lubridate::date(timestamp)) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(Hour = lubridate::hour(timestamp) +
lubridate::minute(timestamp)/60,
date = lubridate::date(timestamp)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date-1, date),
DoY = yday(date),
Year = year(date))
} else if (tower == "West") {
tmp_west <- left_join(posix_complete, tvan_west_tms, by = "timestamp") %>%
mutate(Tower = "West")
tvan_tms <- tmp_west %>%
# Fill in the DoY, Hour, Date, and Year that are NAs
mutate(date = lubridate::date(timestamp)) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(Hour = lubridate::hour(timestamp) +
lubridate::minute(timestamp)/60,
date = lubridate::date(timestamp)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date-1, date),
DoY = yday(date),
Year = year(date))
} else {
tmp_east <- left_join(posix_complete, tvan_east_tms, by = "timestamp") %>%
mutate(Tower = "East")
tvan_tms <- tmp_east %>%
# Fill in the DoY, Hour, Date, and Year that are NAs
mutate(date = lubridate::date(timestamp)) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(Hour = lubridate::hour(timestamp) +
lubridate::minute(timestamp)/60,
date = lubridate::date(timestamp)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date-1, date),
DoY = yday(date),
Year = year(date))
}
writeLines("Combining precipitation, radiation, and Tvan data.")
# Combine dataframes by date and time
dataDf <- tvan_tms %>%
left_join(hlf_hr_precip, by = c("Hour" = "MST_HOUR", "date" = "MST_DATE",
"timestamp" = "timestamp")) %>%
left_join(hlf_hr_rad, by = c("Hour" = "MST_HOUR", "date" = "MST_DATE",
"timestamp" = "timestamp")) %>%
select(timestamp, date, Year, DoY, Hour, Tower, everything())
# Renaming of variables:
# FLDS - incident longwave (FLDS) (W/m^2)
# FSDS - incident shortwave (FSDS, or Rg) (W/m^2) # Check that these are the same as SW_IN/LW_IN
# PRECTmms - precipitation (PRECTmms = PRECTmms) (mm/s)
# PSRF - pressure at the lowest atmospheric level (PSRF = P) (kPa)
# RH - relative humidity at lowest atm level (RH = rH) (%)
# TBOT - temperature at lowest atm level (TBOT = Tair) (K)
# WIND - wind at lowest atm level (WIND = U) (m/s)
# NEE - net ecosystem exchange (NEE = NEE) (umolm-2s-1)
# FSH - sensible heat flux (FSH = H) (Wm-2)
# EFLX_LH_TOT - latent heat flux (EFLX_LH_TOT = LE) (Wm-2)
# GPP - gross primary productivity (GPP) (umolm-2s-1)
# Rnet - net radiation (Rnet = Rn) (W/m^2)
##############################################################################
# Plot the un-gapfilled data
##############################################################################
if (makeplots == TRUE) {
# needs ggplot and dplyr/tidyr
# change data to longform
# Necessary for model:
# tbot, wind, rh, PSRF, FLDS, FSDS, PRECTmms
getgaplength <- function(gap, y = "notgap") {
res <- rle(gap == y)
res_vec <- rep(res$values*res$lengths,res$lengths)
return(res_vec)
}
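  # Illustration (not part of the workflow): getgaplength(c("gap","gap","notgap"), "gap")
  # returns c(2, 2, 0), i.e. each position is labelled with the length of the run of
  # matching values it sits in (0 where it does not match).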
# Find the minimum and maximum time stamps at which all required forcing variables have values
min_gap_days <- 1 # how many days does a gap have to be at minimum to be plotted
dataClm.forc.gaps <- dataDf %>%
rename(TIMESTAMP = timestamp, EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg_usnr1) %>%
mutate_at(vars(TBOT, WIND, RH, PSRF, FLDS, FSDS, PRECTmms), list(gap = is.na)) %>%
mutate(gap = TBOT_gap | WIND_gap | RH_gap | PSRF_gap | FLDS_gap | FSDS_gap |
PRECTmms_gap) %>%
group_by(Tower) %>%
mutate(gap = ifelse(gap == FALSE, "notgap", "gap"),
ncontiguousgaps = getgaplength(gap, "gap")) %>%
filter(gap == "gap") %>%
select(TIMESTAMP, gap, ncontiguousgaps, Tower) %>%
mutate(ndays = ncontiguousgaps/48,
ncontiguousgaps = as.factor(ncontiguousgaps)) %>%
group_by(Tower, ndays) %>%
summarize(min = min(TIMESTAMP, na.rm = TRUE),
max = max(TIMESTAMP, na.rm = TRUE)) %>%
arrange(desc(ndays)) %>%
mutate(ndays = as.factor(round(ndays, digits = 2))) %>%
mutate(yr1 = year(min),
yr2 = year(max)) %>%
rowwise() %>%
mutate(years = paste0(seq(yr1, yr2), collapse = " | ")) %>%
select(-yr1, -yr2)
# Plot the required forcing variables
dataClm.forc.plot <- dataDf %>%
rename(TIMESTAMP = timestamp, EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg_usnr1) %>%
tidyr::pivot_longer(cols = !matches(c("TIMESTAMP", "date", "Year", "DoY", "Hour",
"Tower")),
names_to = "variable",
values_to = "value") %>%
filter(variable %in% c("TBOT", "WIND", "RH", "PSRF", "FLDS", "FSDS", "PRECTmms"))
plot_gaps <- function(forcings, gaps,
filteryears = NA,
tower = NA,
min_gap_days = 1,
highlightgaps = FALSE,
verbose = FALSE) {
# if filteryear and tower are NA all years and both towers are plotted.
# filteryear takes values of either NA or a vector of character strings
# of years to plot
# if highlightgaps == TRUE, gaps will be highlighted on plot
# min_gaps_days is the minimum length in days of gaps to highlight
forcings.plot <- forcings
gaps.plot <- gaps %>%
filter(as.numeric(as.character(ndays)) >= min_gap_days)
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Both towers \n",
"Years: all")
#### Filter forcing and gap datasets based on settings ####
# create a custom title
if (any(!is.na(filteryears)) & !is.na(tower)) { # filter towers and years
forcings.plot <- forcings %>%
filter(Year %in% filteryears) %>%
filter(Tower == tower) %>%
# the following variables are the same in both towers
filter(!(variable %in% c("FLDS", "FSDS", "PRECTmms")))
gaps.plot <- gaps.plot %>%
filter(grepl(paste0(filteryears, collapse = "|"), years)) %>%
filter(Tower == tower)
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Tower: ", tower, "\n",
"Years: ", paste0(filteryears, collapse = ", "))
} else if (any(!is.na(filteryears))) { # filter only by years
forcings.plot <- forcings %>%
filter(Year %in% filteryears)
gaps.plot <- gaps.plot %>%
filter(grepl(paste0(filteryears, collapse = "|"), years))
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Both towers \n",
"Years: ", paste0(filteryears, collapse = ", "))
} else if (!is.na(tower)) { # filter only by tower
forcings.plot <- forcings %>%
filter(Tower == tower) %>%
# the following variables are the same in both towers
filter(!(variable %in% c("FLDS", "FSDS", "PRECTmms")))
gaps.plot <- gaps.plot %>%
filter(Tower == tower)
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Tower: ", tower, "\n",
"Years: all")
}
# Tell the user what's happening
writeLines(paste0("Plotting from ", min(forcings.plot$Year, na.rm = TRUE), " to ",
max(forcings.plot$Year, na.rm = TRUE)))
if (verbose) {
if (!is.na(tower)) {
writeLines(paste0("Tower is ", tower))
} else {
writeLines(paste0("Plotting both towers"))
}
if (highlightgaps) {
writeLines("Gaps will be highlighted")
writeLines("Note: if a gap exeeds the boundary year, the x-axis will be",
"modified so the entire gap is shown but points for that period ",
"will not be plotted.")
} else {
writeLines("Gaps will not be highlighted")
}
}
#### Plot the data ####
forcing_gaps.plot <- ggplot(forcings.plot) +
geom_point(aes(x = TIMESTAMP, y = value, color = Tower), alpha = 0.05) +
facet_wrap(~variable, scales = "free_y", ncol = 1) +
scale_color_discrete(name = "Tower") +
guides(color = guide_legend(override.aes = list(alpha = 1),
title.position = "top")) +
theme(legend.position = "bottom") +
ggtitle(title)
# Highlight gaps on graphs
if (highlightgaps) {
forcing_gaps.plot <- forcing_gaps.plot +
geom_rect(data = gaps.plot,
aes(xmin = min,
xmax = max,
ymin = -Inf,
ymax = Inf,
fill = Tower), alpha = 0.3) +
geom_vline(aes(xintercept = min), data = gaps.plot) +
geom_vline(aes(xintercept = max), data = gaps.plot) +
theme(legend.position = "bottom") +
scale_fill_discrete(name = paste0("Gaps >", min_gap_days, " days")) +
guides(fill = guide_legend(title.position = "top"))
}
return(forcing_gaps.plot)
}
plot_years <- c(min(dataClm.forc.plot$Year, na.rm = TRUE):max(dataClm.forc.plot$Year, na.rm = TRUE))
plot_years <- set_names(plot_years)
plot_years <- map(plot_years,
~plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = TRUE,
filteryears = .x,
tower = NA,
min_gap_days = 7))
plot_all_years <- plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = FALSE,
filteryears = NA,
tower = NA,
min_gap_days = 7)
writeLines("Saving plots - this may take a while...")
iwalk(plot_years, ~{
suppressWarnings(
ggsave(plot = .x,
filename = paste0(plots_dir,"/","yearly_gap_plots_",
.y, '.png'),
width = 10,
height = 5*7,
dpi = 150)
)
})
forc.plot.out.name <- paste0(plots_dir,"/","all_years_gap_plots.png")
ggsave(plot = plot_all_years,
filename = forc.plot.out.name,
width = 10,
height = 5*7,
dpi = 150)
}
writeLines(paste0("Plot output directory: ", plots_dir))
##############################################################################
# Gap-fill West tower with East tower
##############################################################################
if (tower == "Both") {
writeLines(paste0("Gap-filling ", basetower," tower data with data from the",
" other tower"))
dataDf.wide <- dataDf %>%
select(all_of(c("timestamp", "date", "Year", "DoY", "Hour", "Tower",
"NEE", "LE", "H", "Ustar", "Tair", "VPD", "rH", "U",
"P", "Rg_usnr1", "PRECTmms", "FLDS", "radNet", "Tsoil"))) %>%
rename(Rg = Rg_usnr1) %>%
mutate(BaseTower = ifelse(Tower == basetower, "base", "fill")) %>%
# select(TIMESTAMP, date, Year, DoY, Hour, Tower, EFLX_LH_TOT, FSH,
# TBOT, RH, WIND, PSRF, FSDS, FLDS, PRECTmms) %>%
# for choice
select(-Tower) %>%
pivot_wider(names_from = BaseTower,
values_from = c("NEE", "LE", "H", "Ustar", "Tair", "VPD",
"rH", "U", "P", "Rg", "PRECTmms",
"FLDS", "radNet", "Tsoil")) %>%
# pivot_wider(names_from = Tower,
# values_from = c("NEE", "LE", "H", "Ustar", "Tair", "VPD",
# "rH", "U", "P", "Rg", "PRECTmms",
# "FLDS", "radNet", "Tsoil")) %>%
select(!ends_with("_NA"))
writeLines("Checking to make sure that tower timesteps line up correctly.")
# convert posix_complete to UTC; then remove leap days
#posix_complete$timestamp <- with_tz(posix_complete$timestamp, "UTC")
# posix_complete_noleap <- posix_complete$timestamp[!grepl(".{4}-02-29", posix_complete$timestamp)]
if (any(!(posix_complete$timestamp == dataDf.wide$timestamp))) {
warning(paste0("At least one timestamp value is missing or out of bounds."))
} else {
writeLines(paste0("Timestamps are all present and line up correctly ",
"between \ntowers.",
"\nThere are ", nrow(dataDf.wide),
" timestamps in total which is \n",
ddays(nrow(dataDf.wide)/48)))
}
#### Gap-fill "base" tower with "fill" tower data ####
# we will create a flag variable to show which values were substituted
# s = base tower was gapfilled with fill tower data
# m = missing in both tower datasets
  # n = not missing; original base tower value was used
gap_filled_from_twr <- dataDf.wide %>%
mutate(
# LH (Latent heat flux)
LE = ifelse(is.na(LE_base),
LE_fill,
LE_base),
LE_flag = ifelse(is.na(LE_base) & is.na(LE_fill),
"m",
ifelse(is.na(LE_base) & !is.na(LE_fill),
"s", "n")),
# H (sensible heat flux)
H = ifelse(is.na(H_base),
H_fill,
H_base),
H_flag = ifelse(is.na(H_base) & is.na(H_fill),
"m",
ifelse(is.na(H_base) & !is.na(H_fill),
"s", "n")),
# Air Temperature (TBOT)
Tair = ifelse(is.na(Tair_base),
Tair_fill,
Tair_base),
Tair_flag = ifelse(is.na(Tair_base) & is.na(Tair_fill),
"m",
ifelse(is.na(Tair_base) & !is.na(Tair_fill),
"s", "n")),
# Relative humidity (rH)
rH = ifelse(is.na(rH_base),
rH_fill,
rH_base),
rH_flag = ifelse(is.na(rH_base) & is.na(rH_fill),
"m",
ifelse(is.na(rH_base) & !is.na(rH_fill),
"s", "n")),
# Wind speed (U)
U = ifelse(is.na(U_base),
U_fill,
U_base),
U_flag = ifelse(is.na(U_base) & is.na(U_fill),
"m",
ifelse(is.na(U_base) & !is.na(U_fill),
"s", "n")),
# Atmospheric pressure (P)
P = ifelse(is.na(P_base),
P_fill,
P_base),
P_flag = ifelse(is.na(P_base) & is.na(P_fill),
"m",
ifelse(is.na(P_base) & !is.na(P_fill),
"s", "n")),
# Incident shortwave radiation (Rg_usnr1)
Rg = ifelse(is.na(Rg_base),
Rg_fill,
Rg_base),
Rg_flag = ifelse(is.na(Rg_base) & is.na(Rg_fill),
"m",
ifelse(is.na(Rg_base) & !is.na(Rg_fill),
"s", "n")),
# Incident longwave radiation (FLDS) <- CHECK WITH WILL ON THIS ONE
FLDS = ifelse(is.na(FLDS_base),
FLDS_fill,
FLDS_base),
FLDS_flag = ifelse(is.na(FLDS_base) & is.na(FLDS_fill),
"m",
ifelse(is.na(FLDS_base) & !is.na(FLDS_fill),
"s", "n")),
# Precipitation (PRECTmms)
PRECTmms = ifelse(is.na(PRECTmms_base),
PRECTmms_fill,
PRECTmms_base),
PRECTmms_flag = ifelse(is.na(PRECTmms_base) & is.na(PRECTmms_fill),
"m",
ifelse(is.na(PRECTmms_base) & !is.na(PRECTmms_fill),
"s", "n")),
      # Net Ecosystem Exchange (NEE)
NEE = ifelse(is.na(NEE_base),
NEE_fill,
NEE_base),
NEE_flag = ifelse(is.na(NEE_base) & is.na(NEE_fill),
"m",
ifelse(is.na(NEE_base) & !is.na(NEE_fill),
"s", "n")),
# Ustar friction velocity (Ustar)
Ustar = ifelse(is.na(Ustar_base),
Ustar_fill,
Ustar_base),
Ustar_flag = ifelse(is.na(Ustar_base) & is.na(Ustar_fill),
"m",
ifelse(is.na(Ustar_base) & !is.na(Ustar_fill),
"s", "n")),
# Net radiation (radNet)
radNet = ifelse(is.na(radNet_base),
radNet_fill,
radNet_base),
radNet_flag = ifelse(is.na(radNet_base) & is.na(radNet_fill),
"m",
ifelse(is.na(radNet_base) & !is.na(radNet_fill),
"s", "n")),
# Soil Temperature (Tsoil)
Tsoil = ifelse(is.na(Tsoil_base),
Tsoil_fill,
Tsoil_base),
Tsoil_flag = ifelse(is.na(Tsoil_base) & is.na(Tsoil_fill),
"m",
ifelse(is.na(Tsoil_base) & !is.na(Tsoil_fill),
"s", "n"))
)
#### Save Gap-filled outputs ####
writeLines("Tower gap-filling complete. Saving data with flags...")
dataDf <- gap_filled_from_twr %>%
select(!ends_with(c("base", "fill", "flag")))
dataDf_flag <- gap_filled_from_twr %>%
select(!ends_with(c("base", "fill")))
twr <- ifelse(tower == "Both", "both_towers", paste0(tower, "_tower"))
flagged_fp <- paste0(DirOut, "/", "tvan_forcing_data_flagged_",
twr, '_',lubridate::date(start_date),
'_',lubridate::date(end_date),".txt")
write(paste0("# Flags: \n",
"# Base tower is: ", basetower, "\n",
"# s = base tower was gapfilled with fill tower data \n",
"# m = missing in both tower datasets \n",
"# n = not missing; original west tower value was used"),
flagged_fp)
suppressWarnings(
write.table(dataDf_flag, flagged_fp,
sep = "\t", row.names = FALSE,
append = TRUE)
)
writeLines(paste0("Flagged data can be found here: ", flagged_fp))
}
##############################################################################
# Prepare file for ReddyProc
##############################################################################
# Change NA to -9999
dataDf[is.na(dataDf)] <- -9999
# #Convert time to ReddyProc format
# dataDf$Year <- lubridate::year(dataDf$TIMESTAMP)
# dataDf$DoY <- lubridate::yday(dataDf$TIMESTAMP)
# dataDf$Hour <- lubridate::hour(dataDf$TIMESTAMP) + lubridate::minute(dataDf$TIMESTAMP)/60
#
# Remove timestamp and date
dataDf$timestamp <- NULL
dataDf$date <- NULL
# FLDS - incident longwave (FLDS) (W/m^2)
# FSDS - incident shortwave (FSDS) (W/m^2) # Check that these are the same as SW_IN/LW_IN
# PRECTmms - precipitation (PRECTmms = PRECTmms) (mm/s)
# PSRF - pressure at the lowest atmospheric level (PSRF = P) (Pa; P is in kPa here and is converted to Pa below)
# RH - relative humidity at lowest atm level (RH = rH) (%)
# TBOT - temperature at lowest atm level (TBOT = Tair) (K)
# WIND - wind at lowest atm level (WIND = U) (m/s)
# NEE - net ecosystem exchange (NEE = NEE) (umolm-2s-1)
# FSH - sensible heat flux (FSH = H) (Wm-2)
# EFLX_LH_TOT - latent heat flux (EFLX_LH_TOT = LE) (Wm-2)
# GPP - gross primary productivity (GPP) (umolm-2s-1)
# Rnet - net radiation (Rnet = Rn/Rg) (W/m^2)
# Ustar - friction velocity
# Tsoil
#Vector of units for each variable
unitDf <- c("Year" = "--", "DoY" = "--", "Hour" = "--", "LE" = "Wm-2", "H" = "Wm-2",
"Tair" = "degC",
"rH" = "%", "U" = "ms-1", "P" = "kPa", "Rg" = "Wm-2", "FLDS" = "Wm-2",
"PRECTmms" = "mms-1",
"NEE" = "umolm-2s-1", "Ustar" = "ms-1", "radNet" = "Wm-2",
"Tsoil" = "degC")
#Set the output data column order based off of the units vector
dataDf <- data.table::setcolorder(dataDf, names(unitDf))
#Create filename
twr <- ifelse(tower == "Both", "both_towers", paste0(tower, "_tower"))
fileOut <- paste0(DirOut,"/","tvan_forcing_data_",
twr, '_',lubridate::date(start_date),
'_',lubridate::date(end_date),'.txt')
h1 <- paste(names(unitDf), collapse = "\t")
h2 <- paste(unitDf, collapse = "\t")
#Output data in ReddyProc format
conFile <- file(fileOut, "w")
#write the variable names header
writeLines(text = c(h1,h2), sep = "\n", con = conFile)
#write the variable units header
#writeLines(text = unitDf, sep = "\t", con = conFile)
#Write output in tab delimited format
write.table(x = dataDf, file = conFile, sep = "\t", row.names = FALSE, col.names = FALSE)
#Close file connection
close(conFile)
##############################################################################
# ReddyProc Gap-filling workflow
##############################################################################
EddyData.F <- fLoadTXTIntoDataframe(fileOut)
#Threshold bounds to prevent rH > 100%
EddyData.F$rH[EddyData.F$rH > 100] <- 100
#Threshold bounds to keep Rg (FSDS) within 0-1200 W m-2
EddyData.F$Rg[EddyData.F$Rg < 0] <- 0
EddyData.F$Rg[EddyData.F$Rg > 1200] <- 1200
#Threshold bounds to prevent NEE > 100
EddyData.F$NEE[EddyData.F$NEE > 100] <- NA
#Threshold bounds to prevent NEE < -100
EddyData.F$NEE[EddyData.F$NEE < -100] <- NA
#+++ If not provided, calculate VPD from TBOT and RH
EddyData.F <- cbind(EddyData.F,VPD = fCalcVPDfromRHandTair(EddyData.F$rH,
EddyData.F$Tair))
#+++ Add time stamp in POSIX time format
EddyDataWithPosix.F <- fConvertTimeToPosix(EddyData.F, 'YDH', Year = 'Year',
Day = 'DoY', Hour = 'Hour', tz = "MST")
#+++ Initalize R5 reference class sEddyProc for processing of eddy data
#+++ with all variables needed for processing later
EddyProc.C <- sEddyProc$new(twr, EddyDataWithPosix.F,
c('NEE','Rg','Tair','VPD','rH','LE','H','Ustar','P',
'FLDS','U', 'PRECTmms', 'radNet', 'Tsoil'))
#Set location information
EddyProc.C$sSetLocationInfo(LatDeg = latSite, LongDeg = lonSite, TimeZoneHour = -6)
#+++ Fill gaps in variables with MDS gap filling algorithm (without prior ustar filtering)
# Note, this also takes a long time to complete!
EddyProc.C$sMDSGapFill('NEE', FillAll = TRUE) #Fill all values to estimate flux uncertainties
EddyProc.C$sMDSGapFill('LE', FillAll = TRUE)
EddyProc.C$sMDSGapFill('H', FillAll = TRUE)
EddyProc.C$sMDSGapFill('Ustar', FillAll = TRUE)
EddyProc.C$sMDSGapFill('Tair', FillAll = FALSE)
EddyProc.C$sMDSGapFill('VPD', FillAll = FALSE)
EddyProc.C$sMDSGapFill('rH', FillAll = FALSE)
EddyProc.C$sMDSGapFill('U', FillAll = FALSE) # wind
EddyProc.C$sMDSGapFill('PRECTmms', FillAll = FALSE)
EddyProc.C$sMDSGapFill('P', FillAll = FALSE)
EddyProc.C$sMDSGapFill('FLDS', FillAll = FALSE)
EddyProc.C$sMDSGapFill('Rg', FillAll = FALSE)
EddyProc.C$sMDSGapFill('radNet', FillAll = FALSE)
EddyProc.C$sMDSGapFill('Tsoil', FillAll = FALSE)
EddyProc.C$sMRFluxPartition()
#+++ Export gap filled and partitioned data to standard data frame
FilledEddyData.F <- EddyProc.C$sExportResults()
#Grab just the filled data products
dataClm <- FilledEddyData.F[,grep(pattern = "_f$", x = names(FilledEddyData.F))]
#Grab the POSIX timestamp
dataClm$DateTime <- EddyDataWithPosix.F$DateTime - lubridate::minutes(30) # putting back to original position
names(dataClm) <- gsub("_f", "", names(dataClm))
#Convert degC to K for temperature
dataClm$Tair <- dataClm$Tair + 273.15
attributes(obj = dataClm$Tair)$units <- "K"
#Convert kPa to Pa for pressure
dataClm$P <- dataClm$P * 1000.0
attributes(obj = dataClm$P)$units <- "Pa"
#Create tower height measurement field
dataClm$ZBOT <- rep(2,nrow(dataClm))
#Year month combination for data filtering
dataClm$yearMon <- paste0(year(dataClm$DateTime), "-",
sprintf("%02d", month(dataClm$DateTime)))
##############################################################################
# Plotting and identifying gaps left in data after gapfilling
##############################################################################
if (makeplots == TRUE) {
# needs ggplot and dplyr/tidyr
# change data to longform
# Necessary for model:
# tbot, wind, rh, PSRF, FLDS, FSDS, PRECTmms
getgaplength <- function(gap, y = "notgap") {
res <- rle(gap == y)
res_vec <- rep(res$values*res$lengths,res$lengths)
return(res_vec)
}
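  # Illustration (not run, hypothetical input): for gap = c("gap", "gap", "notgap", "gap"),
  # getgaplength(gap, y = "gap") returns c(2, 2, 0, 1), i.e. every position is labelled
  # with the length of the contiguous run of matching values it belongs to (0 otherwise).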
# Find the minimum and maximum time stamps at which all required forcing variables have values
dataClm.forc.gaps <- dataClm %>%
rename(EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg) %>%
mutate_at(vars(TBOT, WIND, RH, PSRF, FLDS, FSDS, PRECTmms), list(gap = is.na)) %>%
mutate(gap = TBOT_gap | WIND_gap | RH_gap | PSRF_gap | FLDS_gap | FSDS_gap |
PRECTmms_gap) %>%
mutate(gap = ifelse(gap == FALSE, "notgap", "gap"),
ncontiguousgaps = getgaplength(gap, "gap")) %>%
filter(gap == "gap") %>%
select(DateTime, gap, ncontiguousgaps) %>%
mutate(ndays = ncontiguousgaps/48,
#ndays = as.factor(ndays),
ncontiguousgaps = as.factor(ncontiguousgaps)) %>%
group_by(ndays) %>%
summarize(min = min(DateTime, na.rm = TRUE),
max = max(DateTime, na.rm = TRUE)) %>%
arrange(desc(ndays)) %>%
mutate(ndays = as.factor(round(ndays, digits = 2))) %>%
mutate(yr1 = year(min),
yr2 = year(max)) %>%
rowwise() %>%
mutate(years = paste0(seq(yr1, yr2), collapse = " | ")) %>%
select(-yr1, -yr2)
# Plot the required forcing variables
dataClm.forc.plot <- dataClm %>%
rename(EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg) %>%
tidyr::pivot_longer(cols = !matches(c("DateTime", "yearMon")),
names_to = "variable",
values_to = "value") %>%
filter(variable %in% c("TBOT", "WIND", "RH", "PSRF", "FLDS", "FSDS", "PRECTmms"))
plot_gaps <- function(forcings, gaps,
filteryears = NA,
min_gap_days = 1,
highlightgaps = FALSE,
verbose = FALSE) {
# if filteryear and tower are NA all years and both towers are plotted
# filteryear is either NA or a vector of character strings of years to plot
# if highlightgaps == TRUE, gaps will be highlighted on plot
# min_gaps_days is the minimum length in days of gaps to highlight
forcings.plot <- forcings %>%
mutate(Year = year(DateTime))
gaps.plot <- gaps
title <- paste0("Gap-plots for both towers and all years")
if (any(!is.na(filteryears))) {
forcings.plot <- forcings.plot %>%
filter(Year %in% filteryears)
gaps.plot <- gaps.plot %>%
filter(grepl(paste0(filteryears, collapse = "|"), years))
title <- paste0("Gap-plots for gap-filled data: year(s) ",
paste0(filteryears, collapse = ", "))
}
writeLines(paste0("Plotting from ", min(forcings.plot$Year), " to ",
max(forcings.plot$Year)))
if (verbose) {
if (!is.na(tower)) {
writeLines(paste0("Tower is ", tower))
} else {
writeLines(paste0("Plotting both towers"))
}
if (highlightgaps) {
writeLines("Gaps will be highlighted")
writeLines("Note: if a gap exeeds the boundary year, the x-axis will be",
"modified so the entire gap is shown but points for that period ",
"will not be plotted.")
} else {
writeLines("Gaps will not be highlighted")
}
}
forcing_gaps.plot <- ggplot(forcings.plot) +
geom_point(aes(x = DateTime, y = value), alpha = 0.05) +
facet_wrap(~variable, scales = "free_y", ncol = 1) +
ggtitle(title)
if (nrow(gaps.plot) == 0) {
highlightgaps <- FALSE
}
if (highlightgaps) {
forcing_gaps.plot <- forcing_gaps.plot +
geom_rect(data = gaps.plot,
aes(xmin = min,
xmax = max,
ymin = -Inf,
ymax = Inf), alpha = 0.3) +
geom_vline(aes(xintercept = min), data = gaps.plot) +
geom_vline(aes(xintercept = max), data = gaps.plot) +
theme(legend.position = "none") +
scale_fill_discrete(name = paste0("Gaps >", min_gap_days, " days"))
}
return(forcing_gaps.plot)
}
plot_years <- c(min(year(dataClm.forc.plot$DateTime),
na.rm = TRUE):max(year(dataClm.forc.plot$DateTime),
na.rm = TRUE))
plot_years <- set_names(plot_years)
plot_years <- map(plot_years,
~plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = TRUE,
filteryears = .x))
iwalk(plot_years, ~{
ggsave(plot = .x,
filename = paste0(plots_dir,"/",.y,
'_yearly_gap_plots_postgapfilling.png'),
width = 10,
height = 5*7,
dpi = 150)
})
plot_all_years <- plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = TRUE,
filteryears = NA)
forc.plot.out.name <- paste0(plots_dir,"/",
lubridate::date(dataClm$DateTime[1]),'_',
lubridate::date(tail(dataClm$DateTime, n = 1)),
'_required_forcing_postgapfilling.png')
ggsave(plot = plot_all_years,
filename = forc.plot.out.name,
width = 10,
height = 5*7,
dpi = 150)
}
##############################################################################
# Prepare 4 different precipitation regimes for the different vegetation communities
##############################################################################
# There are several vegetation communities at Niwot and they all see slightly
# different precipitation regimes. (See Wieder et al. 2017). We will modify the
# precipitation inputs based on Table 1 in Wieder et al. 2017
# | Community | Snow (% relative to observations) |
# | ----------------- | -------------------------------------- |
# | Fellfield (FF) | 10, but 25 during March, April and May |
# | Dry meadow (DM) | 10, but 25 during March, April and May |
# | Moist meadow (MM) | 100 |
# | Wet meadow (WM) | 75 + runoff simulated from moist meadow |
# | Snowbed (SB) | 200 |
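# Worked example (hypothetical values): for a below-freezing half hour, each mm of
# observed precipitation becomes 0.25 mm for FF/DM in March-May (0.10 mm in other
# months), 1 mm for MM, 0.75 mm for WM (plus simulated moist-meadow runoff, if
# supplied), and 2 mm for SB; above freezing, all communities get the observed amount.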
dataClm_veg_communities <- dataClm %>%
mutate(month = month(DateTime),
PRECTmms_FF = ifelse(Tair >= 273.15, PRECTmms,
ifelse(month %in% c(3,4,5), PRECTmms * 0.25,
PRECTmms*0.1)),
PRECTmms_DM = ifelse(Tair >= 273.15, PRECTmms,
ifelse(month %in% c(3,4,5), PRECTmms * 0.25,
PRECTmms*0.1)),
PRECTmms_MM = PRECTmms,
PRECTmms_WM = ifelse(Tair >= 273.15, PRECTmms, PRECTmms*0.75),
PRECTmms_SB = ifelse(Tair >= 273.15, PRECTmms, PRECTmms*2)) %>%
select(-month)
# Add in simulated runoff from the moist meadow (MM) simulation to the wet meadow (WM):
if (simulated_runoff_present) {
  simulated_runoff <- read.csv(file = simulated_runoff_fp)
  # convert runoff time to a POSIXct timestamp and round to the nearest minute
  simulated_runoff$time <- as.POSIXct(simulated_runoff$time, tz = 'UTC')
  simulated_runoff$time <- as.POSIXct(round(simulated_runoff$time, 'min'))
  # add runoff to precipitation for the wet meadow
  dataClm_veg_communities <- dataClm_veg_communities %>%
    left_join(simulated_runoff, by = c("DateTime" = "time")) %>%
    mutate(PRECTmms_WM = PRECTmms_WM + QRUNOFF) %>%
    select(-QRUNOFF)
}
# Write out modified precipitation data
twr <- ifelse(tower == "Both", "both_towers", paste0(tower, "_tower"))
precip_mods_fp <- paste0(DirOut, "/", "tvan_forcing_data_precip_mods_",
twr, '_',lubridate::date(start_date),
'_',lubridate::date(end_date),".txt")
# ADD UNITS
dataClm_veg_communities_units <- c("NEE" = "umolm-2s-1",
"LE" = "Wm-2",
"H" = "Wm-2",
"Ustar" = "ms-1",
"Tair" = "K",
"VPD" = "kPa",
"rH" = "%",
"U" = "ms-1",
"PRECTmms" = "mms-1",
"PRECTmms_FF" = "mms-1",
"PRECTmms_DM" = "mms-1",
"PRECTmms_MM" = "mms-1",
"PRECTmms_WM" = "mms-1",
"PRECTmms_SB" = "mms-1",
"P" = "Pa",
"FLDS" = "Wm-2",
"Rg" = "Wm-2",
"radNet" = "Wm-2",
"Tsoil" = "degC",
"GPP" = "umolm-2s-1",
"DateTime" = "-",
"yearMon" = "-",
"ZBOT" = "-")
# Reorder the units to match the order of dataClm_veg_communities
dataClm_veg_communities_units <- dataClm_veg_communities_units[names(dataClm_veg_communities)]
dataClm_veg_communities_units.df <- rbind(dataClm_veg_communities_units)
rownames(dataClm_veg_communities_units.df) <- NULL
write.table(dataClm_veg_communities_units.df, precip_mods_fp,
sep = "\t", row.names = FALSE)
write.table(dataClm_veg_communities, precip_mods_fp,
sep = "\t", row.names = FALSE, append = TRUE, col.names = FALSE)
##############################################################################
# Write output to CLM
##############################################################################
write_to_clm <- function(dataClm, veg_community = NA, verbose = FALSE) {
# dataClm = the gap-filled data subsetted according to the precipitation
# regime you want
# veg_community = one of "FF", "DM", "MM", "WM", or "SB" specifying the
# vegetation community you want to simulate, if NA, original
# precip values are used
#
# Set up for vegetation choice
veg_community_list <- c("fell_field", "dry_meadow", "moist_meadow", "wet_meadow",
"snow_bed")
names(veg_community_list) <- c("FF", "DM", "MM", "WM","SB")
if (is.na(veg_community)) { # original precip
dataClm <- dataClm %>%
select(!ends_with(c("_FF", "_DM", "_MM", "_WM", "_SB")))
vegcom <- "original"
} else { # specific vegetation community
precip_col_name <- paste0("PRECTmms_", veg_community)
    dataClm$PRECTmms <- dataClm[[precip_col_name]]
dataClm <- dataClm %>%
select(!ends_with(c("_FF", "_DM", "_MM", "_WM", "_SB")))
vegcom <- veg_community_list[veg_community]
}
#Define missing value fill
mv <- -9999.
#Set of year/month combinations for netCDF output
setYearMon <- unique(dataClm$yearMon)
for (m in setYearMon) {
#m <- setYearMon[10] #for testing
Data.mon <- dataClm[dataClm$yearMon == m,]
timeStep <- seq(0,nrow(Data.mon)-1,1)
time <- timeStep/48
#endStep <- startStep + nsteps[m]-1
if (verbose) {
print(paste(m,"Data date =",Data.mon$DateTime[1], "00:00:00"))
names(Data.mon)
}
#NetCDF output filename
fileOutNcdf <- paste(DirOut,"/",vegcom, "/",m,".nc", sep = "")
if (verbose) {
print(fileOutNcdf)
}
veg_com_dir <- paste0(DirOut,"/",vegcom)
if(!dir.exists(veg_com_dir)) dir.create(veg_com_dir, recursive = TRUE)
#sub(pattern = ".txt", replacement = ".nc", fileOut)
# define the netcdf coordinate variables (name, units, type)
lat <- ncdf4::ncdim_def("lat","degrees_north", as.double(latSite), create_dimvar=TRUE)
lon <- ncdf4::ncdim_def("lon","degrees_east", as.double(lonSite), create_dimvar=TRUE)
#Variables to output to netCDF
time <- ncdf4::ncdim_def("time", paste("days since",Data.mon$DateTime[1], "00:00:00"),
vals=as.double(time),unlim=FALSE, create_dimvar=TRUE,
calendar = "noleap")
LATIXY <- ncdf4::ncvar_def("LATIXY", "degrees N", list(lat), mv,
longname="latitude", prec="double")
LONGXY <- ncdf4::ncvar_def("LONGXY", "degrees E", list(lon), mv,
longname="longitude", prec="double")
FLDS <- ncdf4::ncvar_def("FLDS", "W/m^2", list(lon,lat,time), mv,
longname="incident longwave (FLDS)", prec="double")
FSDS <- ncdf4::ncvar_def("FSDS", "W/m^2", list(lon,lat,time), mv,
longname="incident shortwave (FSDS)", prec="double")
PRECTmms <- ncdf4::ncvar_def("PRECTmms", "mm/s", list(lon,lat,time), mv,
longname="precipitation (PRECTmms)", prec="double")
PSRF <- ncdf4::ncvar_def("PSRF", "Pa", list(lon,lat,time), mv,
longname="pressure at the lowest atmospheric level (PSRF)", prec="double")
RH <- ncdf4::ncvar_def("RH", "%", list(lon,lat,time), mv,
longname="relative humidity at lowest atm level (RH)", prec="double")
TBOT <- ncdf4::ncvar_def("TBOT", "K", list(lon,lat,time), mv,
longname="temperature at lowest atm level (TBOT)", prec="double")
WIND <- ncdf4::ncvar_def("WIND", "m/s", list(lon,lat,time), mv,
longname="wind at lowest atm level (WIND)", prec="double")
ZBOT <- ncdf4::ncvar_def("ZBOT", "m", list(lon,lat,time), mv,
longname="observational height", prec="double")
NEE <- ncdf4::ncvar_def("NEE", "umolm-2s-1", list(lon,lat,time), mv,
longname="net ecosystem exchange", prec="double")
FSH <- ncdf4::ncvar_def("FSH", "Wm-2", list(lon,lat,time), mv,
longname="sensible heat flux", prec="double")
EFLX_LH_TOT <- ncdf4::ncvar_def("EFLX_LH_TOT", "Wm-2", list(lon,lat,time), mv,
longname="latent heat flux", prec="double")
GPP <- ncdf4::ncvar_def("GPP", "umolm-2s-1", list(lon,lat,time), mv,
longname="gross primary productivity", prec="double")
Rnet <- ncdf4::ncvar_def("Rnet", "W/m^2", list(lon,lat,time), mv,
longname="net radiation", prec="double")
#Create the output file
ncnew <- ncdf4::nc_create(fileOutNcdf, list(LATIXY,LONGXY,FLDS,FSDS,PRECTmms,RH,PSRF,TBOT,WIND,ZBOT,FSH,EFLX_LH_TOT,NEE,GPP,Rnet))
# Write some values to this variable on disk.
ncdf4::ncvar_put(ncnew, LATIXY, latSite)
ncdf4::ncvar_put(ncnew, LONGXY, lonSite)
ncdf4::ncvar_put(ncnew, FLDS, Data.mon$FLDS)
ncdf4::ncvar_put(ncnew, FSDS, Data.mon$Rg)
ncdf4::ncvar_put(ncnew, RH, Data.mon$rH)
ncdf4::ncvar_put(ncnew, PRECTmms, Data.mon$PRECTmms)
ncdf4::ncvar_put(ncnew, PSRF, Data.mon$P)
ncdf4::ncvar_put(ncnew, TBOT, Data.mon$Tair)
ncdf4::ncvar_put(ncnew, WIND, Data.mon$U)
ncdf4::ncvar_put(ncnew, ZBOT, Data.mon$ZBOT)
ncdf4::ncvar_put(ncnew, NEE, Data.mon$NEE)
ncdf4::ncvar_put(ncnew, FSH, Data.mon$H)
ncdf4::ncvar_put(ncnew, EFLX_LH_TOT, Data.mon$LE)
ncdf4::ncvar_put(ncnew, GPP, Data.mon$GPP)
ncdf4::ncvar_put(ncnew, Rnet, Data.mon$radNet)
#add attributes
# ncdf4::ncatt_put(ncnew, time,"calendar", "noleap" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, FLDS,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, FSDS,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, RH ,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, PRECTmms,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, PSRF,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, TBOT,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, WIND,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, ZBOT,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, NEE,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, FSH,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, EFLX_LH_TOT,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, GPP,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, Rnet,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "veg_community_type", veg_community_list[veg_community],prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_on",date() ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_by","Will Wieder",prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_from",fileOut ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_with", "flow.lter.clm.R",prec=NA,verbose=FALSE,definemode=FALSE )
#Close Netcdf file connection
ncdf4::nc_close(ncnew)
#Add step
#startStep <- endStep + 1
#Remove not needed variables
remove(time, timeStep, fileOutNcdf, ncnew, Data.mon,
FLDS,FSDS,RH,PRECTmms,PSRF,TBOT,WIND,ZBOT)
} #End of monthloop
}
# Prepare file for CLM simulations - convert to UTC and filter out leapdays
dataClm_veg_communities_modelready <- dataClm_veg_communities %>%
# Convert time into UTC
mutate(timestamp_UTC = with_tz(DateTime, tzone = "UTC"),
date = as.Date(timestamp_UTC),
Hour = lubridate::hour(timestamp_UTC) +
lubridate::minute(timestamp_UTC)/60) %>%
# Remove leap years
filter(!grepl(".{4}-02-29", date)) %>%
# Fix Hours, date, DoY, and Year; Hour is 0.5-24.0; Adjust date accordingly
# get new doy now that leap years are filtered out
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date - 1, date),
Year = year(date),
DoY = yday(date),
DoY = ifelse(leap_year(Year) & (yday(date) > 59),
(yday(date) - 1), yday(date))) %>%
# Remove MST timestamp and replace it with UTC timestamp; also remove other
# extraneous time indicators
select(-DateTime, -date, -Hour, -Year, -DoY) %>%
rename(DateTime = timestamp_UTC) %>%
# overwrite yearMon with updated timezone yearMon
mutate(yearMon = paste0(year(DateTime), "-",
sprintf("%02d", month(DateTime))))
# Create NC files
community_list <- c("Fell Field", "Dry Meadow", "Moist Meadow", "Wet Meadow",
"Snow Bed", "Original Precipitation")
names(community_list) <- c("FF", "DM", "MM", "WM","SB", NA)
for (i in seq_along(community_list)) {
writeLines(paste0("Writing .nc files for ",
community_list[i], "..."))
write_to_clm(dataClm = dataClm_veg_communities_modelready,
veg_community = names(community_list[i]))
}
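# Optional sanity check (not run): open one of the generated files and confirm the
# forcing variables and units look right. The path below is hypothetical -- substitute
# a vegetation community and yearMon that exist in your output.
if (FALSE) {
  nc_check <- ncdf4::nc_open(paste0(DirOut, "/moist_meadow/2010-07.nc"))
  print(nc_check)                                   # lists dimensions, variables, units
  summary(ncdf4::ncvar_get(nc_check, "TBOT"))       # air temperature, K
  ncdf4::nc_close(nc_check)
}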
print(DirOut)
print('The met (.nc) forcings for Tvan are ready to be used! Time to run CLM')
# Source file: prepare_forcings_for_clm.R (R, ~90 kB) from the hhollandmoritz/NWT_CLM repository; no license specified.
##############################################################################################
#' title Workflow to NCAR CLM data set
#' author
#' Hannah Holland-Moritz (hhollandmoritz AT gmail.com), based on script by David Durden (eddy4R.info AT gmail.com)
#'
#' description
#' Workflow for collating NIWOT LTER data, gap-filling, and packaging in NCAR CLM netcdf format.
# Modified from David Durden's flow.api.clm.R script for NEON data
# changelog and author contributions / copyrights
# David Durden (2019-07-05)
# original creation
# David Durden (2020-05-31)
# Updating to use neonUtilities for all data retrieval from API
##############################################################################
##############################################################################
# Dependencies
##############################################################################
#Call the R HDF5 Library
packReq <- c("rhdf5","REddyProc", "ncdf4","devtools","magrittr","EML", "dplyr",
"ggplot2", "purrr", "tidyr", "lubridate","RCurl", "httr", "jsonlite")
#Install and load all required packages
lapply(packReq, function(x) {
print(x)
if (require(x, character.only = TRUE) == FALSE) {
install.packages(x)
library(x, character.only = TRUE)
}})
#Setup Environment
options(stringsAsFactors = F)
##############################################################################
#Workflow parameters
##############################################################################
#### Plotting options ####
# Should plots be made of gap-filled data?
makeplots <- TRUE # FALSE
#### Output Options ####
# Base directory for all files
DirBase <- "~/Desktop/Working_files/Niwot/"
# Base directory for output
DirOutBase <- paste0(DirBase,"CLM/data")
#### Download and input options ####
# Directory to download precipitation and radiation data to
DirDnld = paste0(DirBase,"lter_flux")
# Should a newer version of precip data be automatically
# downloaded if one is available?
getNewData = TRUE
# Ameriflux username
# NOTE: you cannot download Ameriflux data without a valid username
# to create an account, visit the Ameriflux website: https://ameriflux.lbl.gov/
# Please also read their data-use policy; by downloading their data you are agreeing
# to follow it. The policy can be found here: https://ameriflux.lbl.gov/data/data-policy/
amf_usr <- "wwieder" # CHANGE ME
#### Tower Use Options ####
# What tvan tower should be used?
tower <- "Both" # Options are "East", "West", or "Both"
# if "Both" the one tower will be used to gapfill the other tower
# basetower provides which tower is the baseline that will be filled
# with the other tower. Currently the East tower record is more complete
# and has fewer gaps and errors, so it is being used as the basetower.
basetower <- "East" # West
#### Tvan data location ####
# Only necessary to set the location of the tower that you are processing, or
# both, if tower = "Both"
# The data should be formatted with ReddyProc file format.
# Briefly, the file should be formatted as follows: the file should be
# tab-delimited with the first row specifying the name of the variable
# and the second specifying the units of that variable. The columns should have names
# and units that follow the guidelines below:
# Column formatting guidelines for Tvan data
# (optional indicates a column is not necessary for producing the final netcdf,
# it includes variables that are necessary for CLM, and also variables that are
# necessary for ReddyProc gapfilling of the data in preparation for CLM).
# | Column Name | Column Description | Units | Optional? |
# | ----------- | -------------------------------- | -------------- | --------- |
# | NEE | Net ecosystem exchange | umol m^-2 s^-1 | Yes |
# | LE | Latent heat flux | W m^-2 | No |
# | H | Sensible heat flux | W m^-2 | No |
# | Ustar | Friction velocity | m s^-1 | Yes |
# | Tair | Air temperature | degC | No |
# | VPD | Vapor pressure density | kPa | No |
# | rH | relative humidity | % | No |
# | U | Wind speed | m s^-1 | No |
# | P | Atmospheric pressure | kPa | No |
# | Tsoil | Soil temperature | degC | Yes |
# | Year | Year | - | No |
# | DoY | The day of year (1-365/366) | - | No |
# | Hour | Decimal hour of the day (0.5-24) | - | No |
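# For illustration only, the first lines of a correctly formatted (tab-delimited)
# input file might look like the following -- the values are made up and the column
# order is flexible (missing values are typically coded as -9999):
#   Year  DoY  Hour  NEE         LE    H     Ustar  Tair  VPD   rH    U     P
#   -     -    -     umolm-2s-1  Wm-2  Wm-2  ms-1   degC  kPa   %     ms-1  kPa
#   2007  130  0.5   -9999       12.3  45.6  0.35   3.2   0.41  67.0  4.1   67.5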
# The location of the east tvan data filepath, use "", if tower = "West"
DirIN = paste0(DirBase,"Tvan_out_new/supp_filtering/")
east_data_fp <- paste0(DirIN,"tvan_East_2007-05-10_00-30-00_to_2021-03-02_flux_P_reddyproc_cleaned.txt")
# The location of the west tvan data filepath, use "", if tower = "East"
west_data_fp <- paste0(DirIN,"tvan_West_2007-05-10_00-30-00_to_2021-03-02_flux_P_reddyproc_cleaned.txt")
#### Simulated Runoff Option ####
# WARNING THIS FEATURE IS UNTESTED; CHANGE AT YOUR OWN RISK
# The user can provide a data file from a simulated Moist Meadow run that
# contains two columns, a timestamp column (every timestamp represents the
# state at the *end* of the 30 minute sampling period) called "time",
# and a column containing the QRUNOFF amounts in mm/s from a Moist Meadow
# simulation. If provided, this data will be added to the Wet meadow
# precipitation. If not provided, wet meadow precipitation will be 75% of
# observed precipitation.
# As done in Wieder et al. 2017, JGR-B. doi:10.1002/2016JG003704.
# Provide a character string specifying the location of the simulated runoff data
# if NA, no simulated runoff will be used
simulated_runoff_fp <- paste0(DirIN,'QRUNOFF_clm50bgc_NWT_mm_newPHS_lowSLA.csv')
##############################################################################
# Static workflow parameters - these are unlikely to change
##############################################################################
#Append the site to the base output directory
DirOut <- paste0(DirOutBase, "/", "data")
plots_dir <- paste0(DirOutBase, "/plots")
# Check if directory exists and create if not
if (!dir.exists(DirOut)) dir.create(DirOut, recursive = TRUE)
if (!dir.exists(DirDnld)) dir.create(DirDnld, recursive = TRUE)
if (!dir.exists(plots_dir)) dir.create(plots_dir, recursive = TRUE)
# the EDI id for precip data from the saddle and C1 weather stations
saddle_precip_data <- "416" # NWT LTER EDI id
# Lat/long coords - shouldn't need to change unless modified in surface
# dataset lat/long
latSite <- 40.05 # should match the lat of the surface dataset
lonSite <- 360 - 254.42 # should match the long of the surface dataset
# Should simulated runoff mode be activated?
if (is.na(simulated_runoff_fp)) {
simulated_runoff_present <- FALSE
writeLines(paste0("No simulated runoff file supplied. Wet meadow precipitation",
" will be calculated without any added runoff."))
} else {
simulated_runoff_present <- TRUE
writeLines(paste0("You have supplied the following simulated runoff file: \n",
simulated_runoff_fp,
"\nIt will be added when wet meadow precipitation",
" is calculated."))
}
##############################################################################
# Helper functions - for downloading and loading data
##############################################################################
# Functions for downloading LTER Precip data are from Sarah Elmendorf's
# utility_functions_all.R script
# https://github.com/NWTlter/long-term-trends/blob/master/utility_functions/utility_functions_all.R
# function to determine current version of data package on EDI
getCurrentVersion <- function(edi_id){
require(magrittr)
versions = readLines(paste0('https://pasta.lternet.edu/package/eml/knb-lter-nwt/', edi_id),
warn = FALSE) %>%
as.numeric() %>% (max)
packageid = paste0('knb-lter-nwt.', edi_id, '.', versions)
return(packageid)
}
#function to download the EML file from EDI
getEML <- function(packageid){
require(magrittr)
myurl<-paste0("https://portal.edirepository.org/nis/metadataviewer?packageid=",
packageid,
"&contentType=application/xml")
#myeml<-xml2::download_html(myurl)%>%xml2::read_xml()%>%EML::read_eml()
myeml<-xml2::read_xml(paste0("https://portal.edirepository.org/nis/metadataviewer?packageid=",
packageid,
"&contentType=application/xml")) %>% EML::read_eml()
}
# Function for downloading from EDI
download_EDI <- function(edi_id, dest_dir, getNewData = TRUE) {
# This section heavily borrowed from Sarah Elmendorf's generic_timeseries_workflow.R script
# https://github.com/NWTlter/long-term-trends/blob/master/plotting_scripts/generic_timeseries_workflow.R
# Depends on getCurrentVersion() and getEML()
packageid = getCurrentVersion(edi_id)
if (any(grepl(packageid, list.files(dest_dir)) == TRUE)) {
writeLines(paste0("Most recent package version ",
packageid, " is already downloaded. Nothing to do."))
return(list.files(dest_dir, pattern = paste0(packageid, ".{1,}csv"), full.names = T))
} else if (getNewData == FALSE) {
writeLines(paste0("A more recent version of the data (version ",
packageid, ") is available. ",
"But since you have specified getNewData = FALSE, ",
"the latest version will not be downloaded."))
return(list.files(dest_dir, pattern = paste0(".{1,}csv"), full.names = T))
} else {
writeLines(paste0("Downloading package ", packageid, " from EDI."))
myeml = getEML(packageid)
# Create output directory for data
ifelse(!dir.exists(file.path(dest_dir)),
dir.create(file.path(dest_dir)), FALSE)
### eml reading and downloading of csv
if (is.null(names(myeml$dataset$dataTable))) {
attributeList = lapply(myeml$dataset$dataTable, function(x){
EML::get_attributes(x$attributeList)
})
names(attributeList) = lapply(myeml$dataset$dataTable, function(x){
x$physical$objectName})
if (getNewData) {
      #download all the datatables in the package
      csv_list <- lapply(myeml$dataset$dataTable, function(x){
        url_to_get = x$physical$distribution$online$url$url
        download.file(url_to_get,
                      destfile = paste0(dest_dir, "/",
                                        packageid, "_",
                                        x$physical$objectName),
                      method = "curl")
        # return the path of the downloaded file for this table
        paste0(dest_dir, "/",
               packageid, "_",
               x$physical$objectName)
      })
      output_csv_file <- unlist(csv_list)
    }
}else{
#if only one data table
attributeList = list(EML::get_attributes(myeml$dataset$dataTable$attributeList))
names(attributeList) = myeml$dataset$dataTable$physical$objectName
if (getNewData) {
url_to_get = myeml$dataset$dataTable$physical$distribution$online$url$url
download.file(url_to_get,
destfile = paste0(dest_dir, "/",
packageid, "_",
myeml$dataset$dataTable$physical$objectName),
method = "curl")
output_csv_file <- paste0(dest_dir, "/",
packageid, "_",
myeml$dataset$dataTable$physical$objectName)
}
}
# Also save the full xml
write_eml(myeml, file = paste0(dest_dir, "/", packageid, ".xml"))
writeLines(paste0("Downloaded data can be found in: ", dest_dir))
return(output_csv_file)
}
}
# Function for downloading USCRN precip
download_USCRN <- function(start_date, end_date, dest_dir, DoNotOverwrite = TRUE) {
# This function downloads precipitation data from the Boulder USCRN weather
# station at C1. It returns a list of the files it tried to download. By
# default it will not download files that are already in the destination directory.
# Arguments:
# start_date = the start date of tvan data in character form (or other form
# that lubridate can coerce with its `year()` function)
# end_date = the end date of tvan data in character form (or other form
# that lubridate can coerce with its `year()` function)
# dest_dir = the destination directory where the files will be downloaded
# DoNotOverwrite = should existing files with the same name be overwritten? If
# TRUE, files will not be overwritten, if FALSE, files will be
#overwritten.
require(lubridate)
require(RCurl)
# To do: replace this warning with a check for the tvan data
message("Please note, end_date of USCRN data must not be less than the end_date of the tvan data.")
# make dest_dir if it doesn't exist
made_dir <- ifelse(!dir.exists(file.path(dest_dir)),
dir.create(file.path(dest_dir), recursive = TRUE), FALSE)
if (!made_dir) {
writeLines("Data download directory not created, it already exists.")
}
# Create a list of urls - one for each year of data
url_list <- vector(mode = "list",
length = lubridate::year(end_date) - lubridate::year(start_date) + 1)
file_list <- vector(mode = "list",
length = lubridate::year(end_date) - lubridate::year(start_date) + 1)
# get the names for each year (including unfinished partial years at the end)
names(url_list) <- lubridate::year(seq(from = lubridate::ymd(as.Date(start_date)),
length.out = (lubridate::year(end_date) -
lubridate::year(start_date) + 1),
by = "years"))
names(file_list) <- lubridate::year(seq(from = lubridate::ymd(as.Date(start_date)),
length.out = (lubridate::year(end_date) -
lubridate::year(start_date) + 1),
by = "years"))
for (i in seq_along(url_list)) {
url_list[[i]] <- paste0("https://www1.ncdc.noaa.gov/pub/data/uscrn/products/subhourly01/", names(url_list[i]), "/CRNS0101-05-", names(url_list[i]),"-CO_Boulder_14_W.txt")
}
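  # For example, for 2010 the loop above builds:
  # https://www1.ncdc.noaa.gov/pub/data/uscrn/products/subhourly01/2010/CRNS0101-05-2010-CO_Boulder_14_W.txt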
# Check if url exists and if it does, download file
for (i in seq_along(url_list)) {
writeLines(paste0("Checking if ", url_list[[i]], " exists..."))
if (!url.exists(url_list[[i]])) {
stop(paste0("Url ", x, " is not accessible."))
} else {
writeLines("TRUE")
}
# Check if destination file already exists
dest_fp <- paste0(dest_dir, "/CRNS0101-05-",
names(url_list[i]),"-CO_Boulder_14_W.txt")
file_list[[i]] <- dest_fp
if (file.exists(dest_fp) & DoNotOverwrite == TRUE) {
writeLines(paste0(dest_fp, " already exits, skipping..."))
} else { # if file doesn't exist or if overwrite is TRUE, download
try(download.file(url = url_list[[i]],
destfile = dest_fp))
}
}
return(file_list)
}
# Function for reading in USCRN precip text files
read_USCRN_precip_data <- function(USCRN_precip_fp) {
# This function reads in USCRN precipitation data files. It adds column
# names and then it 1) collapses the time from 5-minute increments to half-
# hourly by summing the precipitation over each 1/2-hour period; 2) Changes -9999
# to NAs; and 3) selects only the local date, local time, and precpitation variables
# for the final data frame. It returns the resulting dataframe.
# Arguments:
# USCRN_precip_fp = file path to the USCRN text file you want to load
# USCRN Fields and information can be found here:
# https://www1.ncdc.noaa.gov/pub/data/uscrn/products/subhourly01/README.txt
# Field# Name Units
# ---------------------------------------------
# 1 WBANNO XXXXX
# 2 UTC_DATE YYYYMMDD
# 3 UTC_TIME HHmm
# 4 LST_DATE YYYYMMDD
# 5 LST_TIME HHmm
# 6 CRX_VN XXXXXX
# 7 LONGITUDE Decimal_degrees
# 8 LATITUDE Decimal_degrees
# 9 AIR_TEMPERATURE Celsius
# 10 PRECIPITATION mm
# 11 SOLAR_RADIATION W/m^2
# 12 SR_FLAG X
# 13 SURFACE_TEMPERATURE Celsius
# 14 ST_TYPE X
# 15 ST_FLAG X
# 16 RELATIVE_HUMIDITY %
# 17 RH_FLAG X
# 18 SOIL_MOISTURE_5 m^3/m^3
# 19 SOIL_TEMPERATURE_5 Celsius
# 20 WETNESS Ohms
# 21 WET_FLAG X
# 22 WIND_1_5 m/s
# 23 WIND_FLAG X
#
# ----------------------- Begin Function -------------------- #
require(dplyr)
# read in text file
writeLines(paste0("Reading in ", USCRN_precip_fp))
precip <- read.table(USCRN_precip_fp, sep = "",
colClasses = c(rep("character", times = 6),
rep("numeric", times = 7),
"character",
rep("numeric", times = 9)))
# Assign column names
names(precip) <- c("WBANNO", "UTC_DATE", "UTC_TIME", "LST_DATE", "LST_TIME",
"CRX_VN", "LONGITUDE", "LATITUDE", "AIR_TEMPERATURE",
"PRECIPITATION", "SOLAR_RADIATION", "SR_FLAG",
"SURFACE_TEMPERATURE", "ST_TYPE", "ST_FLAG",
"RELATIVE_HUMIDITY", "RH_FLAG", "SOIL_MOISTURE_5",
"SOIL_TEMPERATURE_5", "WETNESS", "WET_FLAG", "WIND_1_5",
"WIND_FLAG")
# Clean data frame
precip <- precip %>%
# Split local time string and convert to decimal time
dplyr::mutate(UTC_TIME = gsub("(..)(..)", "\\1:\\2:00", UTC_TIME),
cleanTime_UTC =
strsplit(UTC_TIME, ":") %>%
sapply(function(x){
x <- as.numeric(x)
x[1] + x[2]/60 + x[3]/(60*60)
}),
decimalTime_UTC = floor(cleanTime_UTC * 2)/2) %>%
dplyr::mutate(LST_TIME = gsub("(..)(..)", "\\1:\\2:00", LST_TIME),
cleanTime_LST =
strsplit(LST_TIME, ":") %>%
sapply(function(x){
x <- as.numeric(x)
x[1] + x[2]/60 + x[3]/(60*60)
}),
decimalTime_LST = floor(cleanTime_LST * 2)/2) %>%
# select only columns used for precipitation and time stamp
dplyr::select(UTC_DATE, UTC_TIME, LST_DATE, LST_TIME,
cleanTime_UTC, decimalTime_UTC,
cleanTime_LST, decimalTime_LST, PRECIPITATION) %>%
# set NAs from -9999
dplyr::mutate_all(list(~na_if(., -9999))) %>%
# sum all precip events in each 1/2 period
dplyr::group_by(UTC_DATE, decimalTime_UTC) %>%
dplyr::mutate(PRECIP_TOT = sum(PRECIPITATION)) %>%
# remove extra time steps
dplyr::select(-PRECIPITATION, -LST_TIME, -UTC_TIME,
-cleanTime_UTC, -cleanTime_LST) %>%
unique() %>%
# create 1/2-hourly time stamps
dplyr::mutate(UTC_DATE = as.Date(UTC_DATE, format = "%Y%m%d"),
timestamp_UTC = as.POSIXct(paste0(UTC_DATE," 00:00:00"),
tz = "UTC") + 3600*decimalTime_UTC) %>%
dplyr::mutate(LST_DATE = as.Date(LST_DATE, format = "%Y%m%d"),
timestamp_LST = as.POSIXct(paste0(LST_DATE," 00:00:00"),
tz = "MST") + 3600*decimalTime_LST)
return(precip)
}
# Function for downloading radiation data from Ameriflux
download_amflx <- function(dest_dir, username,
site = "US-NR1", DescriptionOfDataUse,
DoNotOverwrite = TRUE,
verbose = FALSE) {
  # This function downloads radiation data from the Ameriflux website
# It returns a list of the files it tried to download. By default it will
# not download files that are already in the destination directory.
# Arguments:
# dest_dir -------------- the destination directory where the files will be
# downloaded
# username -------------- the Ameriflux username of the user - this function
# will fail without a valid username.
# site ------------------ the Ameriflux site to get the data from; defaults to
# US-NR1
# DescriptionOfDataUse --- the description to provide to Ameriflux for the intended
# use of the data. If not provided by the user, the
# description will read:
#
# These data will be used as atmospheric forcings
# to run a local point-simulation for the alpine
# tundra at the Niwot Ridge LTER site.
#
# DoNotOverwrite --------- should existing files with the same name be overwritten?
# If TRUE, files will not be overwritten, if FALSE, files
# will be overwritten.
# verbose ---------------- Should the communication with the website be verbose?
# default is FALSE.
require(httr)
require(jsonlite)
require(RCurl)
# Testing
# site <- "US-NR1"
# username <- amf_usr
# dest_dir <- "~/Downloads/lter_flux/rad2"
writeLines("Connecting with Ameriflux endpoint...")
# NOTE THIS ENDPOINT MAY CHANGE
ameriflux_endpoint <- "https://ameriflux-data.lbl.gov/AmeriFlux/DataDownload.svc/datafileURLs"
if (missing(DescriptionOfDataUse)) {
DescriptionOfDataUse = "These data will be used as atmospheric forcings to run a local point-simulation for the alpine tundra at the Niwot Ridge LTER site."
}
# Construct Payload request for ameriflux endpoint
Payload <- paste0('{',
'"username":"', username, '",',
'"siteList":["', site, '"],',
'"intendedUse": "Research - Land model/Earth system model",',
'"description": "', DescriptionOfDataUse, '"',
'}')
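  # For a hypothetical username "jdoe" and the default description, Payload resolves
  # to a one-line JSON string along these lines:
  # {"username":"jdoe","siteList":["US-NR1"],"intendedUse": "Research - Land model/Earth system model","description": "These data will be used ..."}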
# Get download information from Ameriflux endpoint
if (verbose) {
tmp <- httr::POST(url = ameriflux_endpoint,
body = Payload, verbose(), content_type_json())
} else {
tmp <- httr::POST(url = ameriflux_endpoint,
body = Payload, content_type_json())
}
# Check that the connection was successful
if (tmp$status_code < 200 | tmp$status_code > 299) {
stop(paste0("Attempt to connect to the website was not successful.\n",
"This may be because Ameriflux has changed its endpoint url \n",
"and you may need to contact Ameriflux support for an updated \n",
"address, or it may be due to a mistake in the request payload \n",
"syntax. Please check that the Ameriflux endpoing url and the \n",
"payload syntax are valid. \n\n",
"Current endpoint: ", ameriflux_endpoint, "\n",
"Current payload: ", Payload))
} else {
writeLines("Connection to Ameriflux successful.")
}
# extract content from the response
r <- content(tmp)
# Check if the content is successfully received
if (class(r) == "raw" | length(r$dataURLsList) == 0) {
stop(paste0("No data was received from Ameriflux. Please check that your ",
"username is valid and that both it and the site name are ",
"spelled correctly."))
}
# Extract list of ftp urls
url_list <- unlist(lapply(1:length(r$dataURLsList),
function(x){r$dataURLsList[[x]]$URL}))
file_list <- vector(mode = "list",
length = length(url_list))
# Notify user of the data policy prior to download
message(paste0("Thank you for using Ameriflux data. Please be aware of the data \n",
"policy. By downloading this data you are acknowledging that you \n",
"have read and agree to that policy. \n\n",
"The following is how you described how you intend to use the data.\n\n",
"\tIntended Use: Research - Land model/Earth system model \n",
"\tDescription: These data will be used as atmospheric forcings \n",
"\tto run a local point-simulation for the alpine tundra at the \n",
"\tNiwot Ridge LTER site)\n\n",
"By downloading the data, the data contributors have been informed \n",
"of your use. If you are planning an in-depth analysis that may \n",
"result in a publication, please contact the data contributors \n",
"directly so they have the opportunity to contribute substantially \n",
"and become a co-author. \n\n",
"The contact email for this site is: ",
unlist(r$manifest$emailForSitePIs), "\n\n",
"You should also acknowledge Ameriflux in your presentations and \n",
"publications. Details about how this should be done can be found \n",
"on the Ameriflux website. \n\n",
"The full policy along with details about how to properly cite the \n",
"data can found here: \n",
"https://ameriflux.lbl.gov/data/data-policy/"))
# make dest_dir if it doesn't exist
made_dir <- ifelse(!dir.exists(file.path(dest_dir)),
dir.create(file.path(dest_dir), recursive = TRUE), FALSE)
writeLines("Downloading data...")
if(!made_dir) {
writeLines("Data download directory not created, it already exists.")
}
# Check if downloaded files already exist and if not, download file
for (i in seq_along(url_list)) {
# Check if destination file already exists
dest_fp <- paste0(dest_dir, "/", basename(url_list[[i]]))
file_list[[i]] <- dest_fp
if (file.exists(dest_fp) & DoNotOverwrite == TRUE) {
writeLines(paste0(dest_fp, " already exits, skipping..."))
} else { # if file doesn't exist or if overwrite is TRUE, download
# try(download.file(url = url_list[[i]],
# destfile = dest_fp,
# method = "curl"))
try(GET(url = url_list[[i]],
write_disk(dest_fp, overwrite=FALSE), progress(), verbose()))
}
}
return(unlist(file_list))
}
##############################################################################
# Read in L1 flux tower data product
##############################################################################
# Read in East & West tower
if (tower == "East" | tower == "Both") {
# East data
tvan_east <- read.table(file = east_data_fp, sep = "\t",
skip = 2, header = FALSE)
tvan_east_names <- read.table(file = east_data_fp, sep = "\t",
header = TRUE, nrows = 1)
tvan_east_units <- as.character(unname(unlist(tvan_east_names[1,])))
colnames(tvan_east) <- names(tvan_east_names)
}
if (tower == "West" | tower == "Both") {
# West data
tvan_west <- read.csv(file = west_data_fp, sep = "\t",
skip = 2, header = FALSE)
tvan_west_names <- read.table(file = west_data_fp, sep = "\t",
header = TRUE, nrows = 1)
tvan_west_units <- as.character(unname(unlist(tvan_west_names[1,])))
colnames(tvan_west) <- names(tvan_west_names)
}
# Get the start and end dates of the tvan data. If tower = "Both",
# combine East and West data into one dataframe for convenience
if (tower == "Both") {
tvan_east$Tower <- "East"
tvan_west$Tower <- "West"
tvan_all <- bind_rows(tvan_east, tvan_west) %>%
mutate_all(list(~na_if(., -9999))) %>%
mutate(date = as.Date(DoY - 1, origin = paste0(Year, "-01-01")),
timestamp = as.POSIXct(paste0(date," 00:00:00"),
format = "%Y-%m-%d %H:%M:%OS",
tz = "MST") + 3600*Hour) %>%
group_by(Tower, Year, DoY) %>%
mutate_at(vars(NEE:Ustar), list(daily_mean = mean), na.rm = TRUE) %>%
select(date, timestamp, Year, DoY, Hour, Tower, everything())
# Set a start/end date for the precip and radiation data based on the tvan data
# make sure it's a round number or rEddyProc will complain
start_date <- ceiling_date(min(tvan_all$timestamp, na.rm = TRUE), unit = "day")
end_date <- floor_date(max(tvan_all$timestamp, na.rm = TRUE), unit = "day")
} else if (tower == "East") {
tvan_east$Tower <- "East"
# Set a start/end date for the precip and radiation data based on the tvan data
start_date <- min(tvan_east$timestamp, na.rm = TRUE)
end_date <- max(tvan_east$timestamp, na.rm = TRUE)
} else if (tower == "West") {
tvan_west$Tower <- "West"
# Set a start/end date for the precip and radiation data based on the tvan data
start_date <- min(tvan_west$timestamp, na.rm = TRUE)
end_date <- max(tvan_west$timestamp, na.rm = TRUE)
}
# Create a timeseries dataframe with the timestamps (this is in MST since start_date
# and end_date are in MST):
posix_complete <- as.data.frame(seq.POSIXt(start_date, end_date, by = "30 mins"))
colnames(posix_complete) <- "timestamp"
# get rid of first timestep, which is at midnight and not 00:30:00; it makes rEddyProc complain
posix_complete <- data.frame(timestamp = posix_complete[-1,])
##############################################################################
# Download Precipitation
##############################################################################
# Download precip data
# From here: https://portal.edirepository.org/nis/mapbrowse?packageid=knb-lter-nwt.416.10
writeLines("Downloading Saddle Precip data from EDI...")
saddle_precip_data_fp <- download_EDI(edi_id = saddle_precip_data,
dest_dir = paste0(DirDnld, "/precip_data"),
getNewData = getNewData)
writeLines("Downloading C1 precipitation data from USCRN...")
USCRN_precip_data_fp <- download_USCRN(start_date = start_date,
end_date = end_date,
dest_dir = paste0(DirDnld, "/precip_data"),
DoNotOverwrite = TRUE)
##############################################################################
# Handling Precip data
##############################################################################
# Saddle precip data must be corrected for blowing snow events, and extended to
# half-hourly precip using Will's formula (see below for details).
writeLines("Reading in Saddle data...")
# Read in Saddle and USCRN Precip data; also collapse USCRN data into one dataframe
saddle_precip <- read.csv(saddle_precip_data_fp,
sep = ",", quot = '"', check.names = TRUE)
writeLines("Reading in C1 precipitation data from USCRN. This may take a while.")
USCRN_precip_list <- lapply(USCRN_precip_data_fp, read_USCRN_precip_data)
USCRN_precip <- plyr::rbind.fill(USCRN_precip_list) %>%
unique() # make sure to remove duplicates caused by aggregating to 30-minute time steps
# Check for duplicated time stamps - should be 0 (aka no TRUEs)
if (sum(duplicated(USCRN_precip$timestamp_UTC)) > 0) {
warning("USCRN precipitation data still contains ",
sum(duplicated(USCRN_precip$timestamp_UTC)),
" duplicates!")
} else {
writeLines(paste0("USCRN precipitation data has been loaded. ",
sum(duplicated(USCRN_precip$timestamp_UTC)),
" duplicated timestamps have been detected."))
}
# Filter the precip data by exact start and end dates
saddle_precip <- saddle_precip %>%
mutate(date = as.Date(date)) %>%
filter(date >= floor_date(start_date, unit = "day") &
date <= ceiling_date(end_date, unit = "day"))
USCRN_precip <- USCRN_precip %>%
rename(date = LST_DATE) %>%
mutate(timestamp_LST = as.POSIXct(timestamp_LST, tz = "MST")) %>%
filter(timestamp_LST >= floor_date(start_date, unit = "day") &
timestamp_LST <= ceiling_date(end_date, unit = "day"))
# Apply blowing snow correction to months of Oct-May Saddle data
# Due to blowing snow events where the belfort gauge has an oversampling of precipitation,
# it is recommended to add a correction for the precipitation total in the months Oct-May.
# The recommended correction for these events should be (0.39 * the recorded total). More
# information on this can be found in:
# Williams, M.W., Bardsley, T., Rikkers, M., (1998) Overestimation of snow depth and inorganic nitrogen wetfall using NADP data, Niwot Ridge, Colorado. Atmospheric Environment 32 (22) :3827-3833
writeLines("Applying blowing snow correction to Saddle precip data.")
saddle_precip <- saddle_precip %>%
mutate(month = month(date),
ppt_tot_corr = ifelse(month %in% c(10, 11, 12, 1, 2, 3, 4, 5),
ppt_tot * 0.39, ppt_tot))
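# e.g. 10 mm recorded on a January day becomes 10 * 0.39 = 3.9 mm after the
# blowing-snow correction, while 10 mm recorded on a July day is left unchanged.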
# Change any Nas or NaNs to zero
saddle_precip <- saddle_precip %>%
mutate(ppt_tot_corr = ifelse(is.na(ppt_tot_corr), 0, ppt_tot_corr))
USCRN_precip <- USCRN_precip %>%
mutate(PRECIP_TOT = ifelse(is.na(PRECIP_TOT), 0, PRECIP_TOT))
# Apply Will's algorithm for Precip data from paper:
# Use the half-hourly precipitation record from the U.S. Climate Reference Network
# (USCRN; data from https://www1.ncdc.noaa.gov/pub/data/uscrn/products/subhourly01/),
# measured nearby (4 km) at the lower-elevation (3050 m asl) C-1 site.
# Proportionally allocate the daily Saddle precip measurements to the half-hourly
# precip record from USCRN. On days when the Saddle record reports measurable precip
# but the USCRN does not, distribute the daily Saddle precip evenly across the day
# for model simulations.
# Code modified from his TVAN_daily_ppt.R script
writeLines(paste0("Applying Will Wieder's algorithm for allocating daily Saddle ",
"precipitation totals into 30-minute increments."))
Tvan_ppt <- saddle_precip$ppt_tot_corr
CRNS_ppt <- USCRN_precip$PRECIP_TOT
CRNS_date <- USCRN_precip$date
CRNS_mo <- month(USCRN_precip$date)
CRNS_hour <- USCRN_precip$decimalTime_LST # MST decimal hour
CRNS_d <- tapply(CRNS_ppt, CRNS_date, sum) # daily precip totals
CRNS_day <- tapply(CRNS_date, CRNS_date, mean) # num of days since 1970-01-01 - see date.mean()
CRNS_month <- tapply(CRNS_mo, CRNS_date, mean) # months
#------------------------------------------------------
# distribute Tvan ppt when observed in half-hourly CRNS
#------------------------------------------------------
ndays <- length(Tvan_ppt)
nsteps <- length(CRNS_ppt)
Tvan_fine <- rep(NA, nsteps)
Tvan_note <- rep(NA, nsteps)
Tvan_flag <- rep(NA, ndays)
Tvan_flag_mo <- rep(NA, ndays)
Tvan_date <- USCRN_precip$date # MST date
Tvan_hour <- USCRN_precip$decimalTime_LST # MST hour
start <- 1
# code below does the following:
# (0) if no daily precip at Tvan, add zeros to half hourly results
# (1) if precip at Tvan, but not recorded @ CRNS, distribute evenly in day and add 1 the flag
# (2) if both precip at Tvan and CRNS, distribute Tvan in same proportion as CRNS
for (d in 1:ndays) {
end <- start + 47
if (Tvan_ppt[d] == 0) {
Tvan_fine[start:end] <- 0
Tvan_note[start:end] <- 0
} else if (CRNS_d[d] == 0){
Tvan_fine[start:end] <- Tvan_ppt[d] / 48
Tvan_note[start:end] <- 1
Tvan_flag[d] <- 1
Tvan_flag_mo[d] <- CRNS_month[d]
} else {
temp_frac <- CRNS_ppt[start:end] / CRNS_d[d]
Tvan_fine[start:end] <- Tvan_ppt[d] * temp_frac
Tvan_note[start:end] <- 2
}
if (round(sum(Tvan_fine[start:end], na.rm = TRUE), digits = 7) !=
round(sum(Tvan_ppt[d], na.rm = TRUE), digits = 7)) {
warning(paste0("Running precip totals don't match at day ", d))
}
start <- end + 1
}
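# Toy illustration of the three cases handled above (not run). For a day with
# 10 mm at the Saddle and CRNS half-hourly totals of 2, 3 and 5 mm, the
# proportional split returns 2, 3 and 5 mm, which sums back to the daily total.
if (FALSE) {
  tvan_day <- 10                 # daily Saddle total (mm)
  crns_halfhourly <- c(2, 3, 5)  # CRNS half-hourly totals (mm)
  if (sum(crns_halfhourly) == 0) {
    rep(tvan_day / length(crns_halfhourly), length(crns_halfhourly))
  } else {
    tvan_day * crns_halfhourly / sum(crns_halfhourly)
  }
}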
# Check that the total precip that fell at the saddle is the same as the total precip
# when allocated over 30-minute time steps
if (sum(Tvan_fine, na.rm=T) == sum(Tvan_ppt)) {
  writeLines(paste0("Total precip that fell at the Saddle (", sum(Tvan_ppt),
                    ") matches the amount of total precip that has been ",
                    "allocated to the Tvan record (", sum(Tvan_fine, na.rm=T), ")."))
} else {
  warning(paste0("Total precip that fell at the Saddle (", sum(Tvan_ppt),
                 ") does NOT match the amount of total precip that has been ",
                 "allocated to the Tvan record (", sum(Tvan_fine, na.rm=T), ")!"))
}
writeLines(paste0("Number of total days = ",ndays, " [", ddays(ndays), "]"))
writeLines(paste0("Number of days w/ precip at Tvan = ",
length(Tvan_ppt[Tvan_ppt > 0])))
writeLines(paste0("Number of days with Tvan precip but w/o recorded CRNS precip = ",
sum(Tvan_flag, na.rm = T)))
hist(Tvan_flag_mo, xlim = c(1,12),
main = paste0("Montly frequency of days with Tvan precip but ",
"w/o recorded CRNS precip"),
xlab = "Months"
)
# Convert precip from mm/30 minutes into mm/s
Precip = Tvan_fine[1:nsteps] # mm every 30 minutes
PRECTmms <- Precip / (30*60) # mm/s
# Combine date and 1/2-hourly precip into one dataframe and add a timestamp
hlf_hr_precip <- data.frame(PRECTmms = PRECTmms, # mm/s
MST_HOUR = Tvan_hour[1:nsteps], # decimal hours
MST_DATE = Tvan_date[1:nsteps]) %>% # date
mutate(timestamp = as.POSIXct(paste0(MST_DATE," 00:00:00"), tz = "MST") +
3600*MST_HOUR) %>%
# fix date so that "0" hour readings are converted into 24
mutate(MST_DATE = if_else(MST_HOUR == 0, MST_DATE - 1, MST_DATE),
MST_HOUR = if_else(MST_HOUR == 0.0, 24, MST_HOUR))
##############################################################################
# Download Radiation data
##############################################################################
writeLines("Downloading Ameriflux radiation data...")
rad_data_fp <- download_amflx(dest_dir = paste0(DirDnld, "/rad_data"),
username = amf_usr, verbose = TRUE)
# Check if the files have already been unzipped, if not, unzip the zip file
for (i in seq_along(rad_data_fp)) {
if (grepl(".zip", basename(rad_data_fp[i]))) {
writeLines(paste0("Unzipping ", rad_data_fp[i]))
# check if the unzipped files exist
unzip_list <- unzip(zipfile = rad_data_fp[i],
exdir = dirname(rad_data_fp[i]),
overwrite = FALSE)
}
}
amf_data_fp <- list.files(dirname(rad_data_fp[i]),
full.names = TRUE,
pattern = "*.csv")
##############################################################################
# Handle Radiation data
##############################################################################
# Note: Radiation data comes from the Ameriflux NR-1 site. Currently this
# data cannot be downloaded automatically and has to be downloaded by hand from
# the Ameriflux site after getting a user account: https://ameriflux.lbl.gov/data/download-data/
# For CLM we will pull out incoming shortwave (necessary) and incoming longwave (optional).
# The net radiation is provided by the Tvan tower datasets.
# The possible Ameriflux variables are:
# NETRAD_1_1_2 (W m-2): Net radiation (no QA/QC or gapfilling)
# NETRAD_PI_F_1_1_2 (W m-2): Net radiation (gapfilled by tower team)
# SW_IN_1_1_1 (W m-2): Shortwave radiation, incoming (no QA/QC or gapfilling)
# LW_IN_1_1_1 (W m-2): Longwave radiation, incoming (no QA/QC or gapfilling)
# SW_IN_PI_F_1_1_1 (W m-2): Shortwave radiation, incoming (gapfilled by tower team)
# LW_IN_PI_F_1_1_1 (W m-2): Longwave radiation, incoming (gapfilled by tower team)
# SW_OUT_1_1_1 (W m-2): Shortwave radiation, outgoing (no QA/QC or gapfilling)
# LW_OUT_1_1_1 (W m-2): Longwave radiation, outgoing (no QA/QC or gapfilling)
# SW_OUT_PI_F_1_1_1 (W m-2): Shortwave radiation, outgoing (gapfilled by tower team)
# LW_OUT_PI_F_1_1_1 (W m-2): Longwave radiation, outgoing (gapfilled by tower team)
writeLines("Reading in Ameriflux radiation data...")
# Load in Radiation data:
amf_data <- read.csv(file = amf_data_fp[2],
skip = 2,
header = TRUE,
na.strings = "-9999",
as.is = TRUE)
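# Defensive check (an addition, not part of the original script): Ameriflux
# variable names can differ between sites and data releases, so confirm the
# columns used below are present before subsetting.
expected_rad_cols <- c("TIMESTAMP_START", "TIMESTAMP_END",
                       "SW_IN_1_1_1", "LW_IN_1_1_1",
                       "SW_IN_PI_F_1_1_1", "LW_IN_PI_F_1_1_1",
                       "SW_OUT_1_1_1", "LW_OUT_1_1_1",
                       "SW_OUT_PI_F_1_1_1", "LW_OUT_PI_F_1_1_1",
                       "NETRAD_1_1_2", "NETRAD_PI_F_1_1_2")
missing_rad_cols <- setdiff(expected_rad_cols, names(amf_data))
if (length(missing_rad_cols) > 0) {
  warning("Ameriflux file is missing expected columns: ",
          paste(missing_rad_cols, collapse = ", "))
}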
# Select timestamps, and radiation variables
rad_data <- amf_data[,c("TIMESTAMP_START", "TIMESTAMP_END",
"SW_IN_1_1_1", # also sometimes called Rg
"LW_IN_1_1_1", # also sometimes called FLDS
"SW_IN_PI_F_1_1_1", # also sometimes called Rg
"LW_IN_PI_F_1_1_1", # also sometimes called FLDS
"SW_OUT_1_1_1",
"LW_OUT_1_1_1",
"SW_OUT_PI_F_1_1_1",
"LW_OUT_PI_F_1_1_1",
"NETRAD_1_1_2",
"NETRAD_PI_F_1_1_2")]
rad_data$TIMESTAMP_START <- as.POSIXct(as.character(rad_data$TIMESTAMP_START), format = "%Y%m%d%H%M%OS", tz = "MST")
rad_data$TIMESTAMP_END <- as.POSIXct(as.character(rad_data$TIMESTAMP_END), format = "%Y%m%d%H%M%OS", tz = "MST")
# Subset the radiation data to the Tvan time period, reformat the times to get hours
# and dates, finally, select only the radiation, hour, and date variables.
hlf_hr_rad <- rad_data %>%
mutate(date = lubridate::date(TIMESTAMP_END)) %>%
filter(date >= floor_date(start_date, unit = "day") &
date <= floor_date(end_date, unit = "day")) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(MST_HOUR = lubridate::hour(TIMESTAMP_END) +
lubridate::minute(TIMESTAMP_END)/60,
MST_DATE = lubridate::date(TIMESTAMP_START)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(MST_HOUR = if_else(MST_HOUR == 0.0, 24, MST_HOUR)) %>%
# Calculate net radiation from in/out radiation
mutate(radNet = (SW_IN_PI_F_1_1_1 - SW_OUT_PI_F_1_1_1) +
(LW_IN_PI_F_1_1_1 - LW_OUT_PI_F_1_1_1)) %>%
rename(Rg_usnr1 = SW_IN_PI_F_1_1_1, FLDS = LW_IN_PI_F_1_1_1,
SW_OUT = SW_OUT_PI_F_1_1_1, LW_OUT = LW_OUT_PI_F_1_1_1,
timestamp = TIMESTAMP_END) %>%
select(timestamp, MST_DATE, MST_HOUR, Rg_usnr1, FLDS, radNet)
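# Optional consistency check (illustrative, not run): the net radiation derived
# from the four gap-filled components should track the tower team's own
# gap-filled NETRAD_PI_F_1_1_2, apart from sensor and footprint differences.
if (FALSE) {
  radNet_check <- with(rad_data,
                       (SW_IN_PI_F_1_1_1 - SW_OUT_PI_F_1_1_1) +
                         (LW_IN_PI_F_1_1_1 - LW_OUT_PI_F_1_1_1))
  summary(radNet_check - rad_data$NETRAD_PI_F_1_1_2)
  cor(radNet_check, rad_data$NETRAD_PI_F_1_1_2, use = "complete.obs")
}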
##############################################################################
# Combine flux and met data
##############################################################################
if (tower == "East" | tower == "Both") {
# East tower
tvan_east_tms <- tvan_east %>%
mutate_all(list(~na_if(., -9999))) %>%
mutate(date = as.Date(DoY - 1, origin = paste0(Year, "-01-01")),
timestamp = as.POSIXct(paste0(date," 00:00:00"),
format = "%Y-%m-%d %H:%M:%OS",
tz = "MST") + 3600*Hour)
}
if (tower == "West" | tower == "Both") {
# West tower
tvan_west_tms <- tvan_west %>%
mutate_all(list(~na_if(., -9999))) %>%
mutate(date = as.Date(DoY - 1, origin = paste0(Year, "-01-01")),
timestamp = as.POSIXct(paste0(date," 00:00:00"),
format = "%Y-%m-%d %H:%M:%OS",
tz = "MST") + 3600*Hour)
}
# Join the flux data to the posix_complete date sequence
if (tower == "Both") {
tmp_east <- left_join(posix_complete, tvan_east_tms, by = "timestamp") %>%
mutate(Tower = "East")
tmp_west <- left_join(posix_complete, tvan_west_tms, by = "timestamp") %>%
mutate(Tower = "West")
tvan_comb_tms <- bind_rows(tmp_east, tmp_west)
tvan_tms <- tvan_comb_tms %>%
# Fill in the DoY, Hour, Date, and Year that are NAs
mutate(date = lubridate::date(timestamp)) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(Hour = lubridate::hour(timestamp) +
lubridate::minute(timestamp)/60,
date = lubridate::date(timestamp)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date-1, date),
DoY = yday(date),
Year = year(date))
} else if (tower == "West") {
tmp_west <- left_join(posix_complete, tvan_west_tms, by = "timestamp") %>%
mutate(Tower = "West")
tvan_tms <- tmp_west %>%
# Fill in the DoY, Hour, Date, and Year that are NAs
mutate(date = lubridate::date(timestamp)) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(Hour = lubridate::hour(timestamp) +
lubridate::minute(timestamp)/60,
date = lubridate::date(timestamp)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date-1, date),
DoY = yday(date),
Year = year(date))
} else {
tmp_east <- left_join(posix_complete, tvan_east_tms, by = "timestamp") %>%
mutate(Tower = "East")
tvan_tms <- tmp_east %>%
# Fill in the DoY, Hour, Date, and Year that are NAs
mutate(date = lubridate::date(timestamp)) %>%
# Take reading from end of period, keep the date at midnight as the day before
# to be consistent with other variables
mutate(Hour = lubridate::hour(timestamp) +
lubridate::minute(timestamp)/60,
date = lubridate::date(timestamp)) %>%
# fix date so that "0" hour readings are converted into 24
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date-1, date),
DoY = yday(date),
Year = year(date))
}
writeLines("Combining precipitation, radiation, and Tvan data.")
# Combine dataframes by date and time
dataDf <- tvan_tms %>%
left_join(hlf_hr_precip, by = c("Hour" = "MST_HOUR", "date" = "MST_DATE",
"timestamp" = "timestamp")) %>%
left_join(hlf_hr_rad, by = c("Hour" = "MST_HOUR", "date" = "MST_DATE",
"timestamp" = "timestamp")) %>%
select(timestamp, date, Year, DoY, Hour, Tower, everything())
# Renaming of variables:
# FLDS - incident longwave (FLDS) (W/m^2)
# FSDS - incident shortwave (FSDS, or Rg) (W/m^2) # Check that these are the same as SW_IN/LW_IN
# PRECTmms - precipitation (PRECTmms = PRECTmms) (mm/s)
# PSRF - pressure at the lowest atmospheric level (PSRF = P) (kPa)
# RH - relative humidity at lowest atm level (RH = rH) (%)
# TBOT - temperature at lowest atm level (TBOT = Tair) (K)
# WIND - wind at lowest atm level (WIND = U) (m/s)
# NEE - net ecosystem exchange (NEE = NEE) (umolm-2s-1)
# FSH - sensible heat flux (FSH = H) (Wm-2)
# EFLX_LH_TOT - latent heat flux (EFLX_LH_TOT = LE) (Wm-2)
# GPP - gross primary productivity (GPP) (umolm-2s-1)
# Rnet - net radiation (Rnet = Rn) (W/m^2)
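# For reference, the mapping above can be collected in a single lookup vector
# (illustrative only; the renames are applied inline further below, and with
# dplyr >= 1.0.0 the vector could be used as rename(dataDf, any_of(clm_name_map))).
clm_name_map <- c(FLDS = "FLDS", FSDS = "Rg_usnr1", PRECTmms = "PRECTmms",
                  PSRF = "P", RH = "rH", TBOT = "Tair", WIND = "U",
                  NEE = "NEE", FSH = "H", EFLX_LH_TOT = "LE", Rnet = "radNet")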
##############################################################################
# Plot the un-gapfilled data
##############################################################################
if (makeplots == TRUE) {
# needs ggplot and dplyr/tidyr
# change data to longform
# Necessary for model:
# tbot, wind, rh, PSRF, FLDS, FSDS, PRECTmms
getgaplength <- function(gap, y = "notgap") {
res <- rle(gap == y)
res_vec <- rep(res$values*res$lengths,res$lengths)
return(res_vec)
}
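  # Quick illustration of getgaplength() (not run): each position is labelled
  # with the length of the contiguous matching run it belongs to, or 0 otherwise.
  if (FALSE) {
    getgaplength(c("gap", "gap", "notgap", "gap"), "gap")  # returns 2 2 0 1
  }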
# Find the minimum and maximum time stamps at which all required forcing variables have values
min_gap_days <- 1 # how many days does a gap have to be at minimum to be plotted
dataClm.forc.gaps <- dataDf %>%
rename(TIMESTAMP = timestamp, EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg_usnr1) %>%
mutate_at(vars(TBOT, WIND, RH, PSRF, FLDS, FSDS, PRECTmms), list(gap = is.na)) %>%
mutate(gap = TBOT_gap | WIND_gap | RH_gap | PSRF_gap | FLDS_gap | FSDS_gap |
PRECTmms_gap) %>%
group_by(Tower) %>%
mutate(gap = ifelse(gap == FALSE, "notgap", "gap"),
ncontiguousgaps = getgaplength(gap, "gap")) %>%
filter(gap == "gap") %>%
select(TIMESTAMP, gap, ncontiguousgaps, Tower) %>%
mutate(ndays = ncontiguousgaps/48,
ncontiguousgaps = as.factor(ncontiguousgaps)) %>%
group_by(Tower, ndays) %>%
summarize(min = min(TIMESTAMP, na.rm = TRUE),
max = max(TIMESTAMP, na.rm = TRUE)) %>%
arrange(desc(ndays)) %>%
mutate(ndays = as.factor(round(ndays, digits = 2))) %>%
mutate(yr1 = year(min),
yr2 = year(max)) %>%
rowwise() %>%
mutate(years = paste0(seq(yr1, yr2), collapse = " | ")) %>%
select(-yr1, -yr2)
# Plot the required forcing variables
dataClm.forc.plot <- dataDf %>%
rename(TIMESTAMP = timestamp, EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg_usnr1) %>%
tidyr::pivot_longer(cols = !matches(c("TIMESTAMP", "date", "Year", "DoY", "Hour",
"Tower")),
names_to = "variable",
values_to = "value") %>%
filter(variable %in% c("TBOT", "WIND", "RH", "PSRF", "FLDS", "FSDS", "PRECTmms"))
plot_gaps <- function(forcings, gaps,
filteryears = NA,
tower = NA,
min_gap_days = 1,
highlightgaps = FALSE,
verbose = FALSE) {
# if filteryear and tower are NA all years and both towers are plotted.
# filteryear takes values of either NA or a vector of character strings
# of years to plot
# if highlightgaps == TRUE, gaps will be highlighted on plot
# min_gaps_days is the minimum length in days of gaps to highlight
forcings.plot <- forcings
gaps.plot <- gaps %>%
filter(as.numeric(as.character(ndays)) >= min_gap_days)
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Both towers \n",
"Years: all")
#### Filter forcing and gap datasets based on settings ####
# create a custom title
if (any(!is.na(filteryears)) & !is.na(tower)) { # filter towers and years
forcings.plot <- forcings %>%
filter(Year %in% filteryears) %>%
filter(Tower == tower) %>%
# the following variables are the same in both towers
filter(!(variable %in% c("FLDS", "FSDS", "PRECTmms")))
gaps.plot <- gaps.plot %>%
filter(grepl(paste0(filteryears, collapse = "|"), years)) %>%
filter(Tower == tower)
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Tower: ", tower, "\n",
"Years: ", paste0(filteryears, collapse = ", "))
} else if (any(!is.na(filteryears))) { # filter only by years
forcings.plot <- forcings %>%
filter(Year %in% filteryears)
gaps.plot <- gaps.plot %>%
filter(grepl(paste0(filteryears, collapse = "|"), years))
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Both towers \n",
"Years: ", paste0(filteryears, collapse = ", "))
} else if (!is.na(tower)) { # filter only by tower
forcings.plot <- forcings %>%
filter(Tower == tower) %>%
# the following variables are the same in both towers
filter(!(variable %in% c("FLDS", "FSDS", "PRECTmms")))
gaps.plot <- gaps.plot %>%
filter(Tower == tower)
if (nrow(gaps.plot) < 1) {highlightgaps = FALSE}
title <- paste0("Gap-plots\n",
"Tower: ", tower, "\n",
"Years: all")
}
# Tell the user what's happening
writeLines(paste0("Plotting from ", min(forcings.plot$Year, na.rm = TRUE), " to ",
max(forcings.plot$Year, na.rm = TRUE)))
if (verbose) {
if (!is.na(tower)) {
writeLines(paste0("Tower is ", tower))
} else {
writeLines(paste0("Plotting both towers"))
}
if (highlightgaps) {
writeLines("Gaps will be highlighted")
writeLines("Note: if a gap exeeds the boundary year, the x-axis will be",
"modified so the entire gap is shown but points for that period ",
"will not be plotted.")
} else {
writeLines("Gaps will not be highlighted")
}
}
#### Plot the data ####
forcing_gaps.plot <- ggplot(forcings.plot) +
geom_point(aes(x = TIMESTAMP, y = value, color = Tower), alpha = 0.05) +
facet_wrap(~variable, scales = "free_y", ncol = 1) +
scale_color_discrete(name = "Tower") +
guides(color = guide_legend(override.aes = list(alpha = 1),
title.position = "top")) +
theme(legend.position = "bottom") +
ggtitle(title)
# Highlight gaps on graphs
if (highlightgaps) {
forcing_gaps.plot <- forcing_gaps.plot +
geom_rect(data = gaps.plot,
aes(xmin = min,
xmax = max,
ymin = -Inf,
ymax = Inf,
fill = Tower), alpha = 0.3) +
geom_vline(aes(xintercept = min), data = gaps.plot) +
geom_vline(aes(xintercept = max), data = gaps.plot) +
theme(legend.position = "bottom") +
scale_fill_discrete(name = paste0("Gaps >", min_gap_days, " days")) +
guides(fill = guide_legend(title.position = "top"))
}
return(forcing_gaps.plot)
}
plot_years <- c(min(dataClm.forc.plot$Year, na.rm = TRUE):max(dataClm.forc.plot$Year, na.rm = TRUE))
plot_years <- set_names(plot_years)
plot_years <- map(plot_years,
~plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = TRUE,
filteryears = .x,
tower = NA,
min_gap_days = 7))
plot_all_years <- plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = FALSE,
filteryears = NA,
tower = NA,
min_gap_days = 7)
writeLines("Saving plots - this may take a while...")
iwalk(plot_years, ~{
suppressWarnings(
ggsave(plot = .x,
filename = paste0(plots_dir,"/","yearly_gap_plots_",
.y, '.png'),
width = 10,
height = 5*7,
dpi = 150)
)
})
forc.plot.out.name <- paste0(plots_dir,"/","all_years_gap_plots.png")
ggsave(plot = plot_all_years,
filename = forc.plot.out.name,
width = 10,
height = 5*7,
dpi = 150)
}
plots_dir
##############################################################################
# Gap-fill West tower with East tower
##############################################################################
if (tower == "Both") {
writeLines(paste0("Gap-filling ", basetower," tower data with data from the",
" other tower"))
dataDf.wide <- dataDf %>%
select(all_of(c("timestamp", "date", "Year", "DoY", "Hour", "Tower",
"NEE", "LE", "H", "Ustar", "Tair", "VPD", "rH", "U",
"P", "Rg_usnr1", "PRECTmms", "FLDS", "radNet", "Tsoil"))) %>%
rename(Rg = Rg_usnr1) %>%
mutate(BaseTower = ifelse(Tower == basetower, "base", "fill")) %>%
# select(TIMESTAMP, date, Year, DoY, Hour, Tower, EFLX_LH_TOT, FSH,
# TBOT, RH, WIND, PSRF, FSDS, FLDS, PRECTmms) %>%
# for choice
select(-Tower) %>%
pivot_wider(names_from = BaseTower,
values_from = c("NEE", "LE", "H", "Ustar", "Tair", "VPD",
"rH", "U", "P", "Rg", "PRECTmms",
"FLDS", "radNet", "Tsoil")) %>%
# pivot_wider(names_from = Tower,
# values_from = c("NEE", "LE", "H", "Ustar", "Tair", "VPD",
# "rH", "U", "P", "Rg", "PRECTmms",
# "FLDS", "radNet", "Tsoil")) %>%
select(!ends_with("_NA"))
writeLines("Checking to make sure that tower timesteps line up correctly.")
# convert posix_complete to UTC; then remove leap days
#posix_complete$timestamp <- with_tz(posix_complete$timestamp, "UTC")
# posix_complete_noleap <- posix_complete$timestamp[!grepl(".{4}-02-29", posix_complete$timestamp)]
if (any(!(posix_complete$timestamp == dataDf.wide$timestamp))) {
warning(paste0("At least one timestamp value is missing or out of bounds."))
} else {
writeLines(paste0("Timestamps are all present and line up correctly ",
"between \ntowers.",
"\nThere are ", nrow(dataDf.wide),
" timestamps in total which is \n",
ddays(nrow(dataDf.wide)/48)))
}
#### Gap-fill "base" tower with "fill" tower data ####
# we will create a flag variable to show which values were substituted
# s = base tower was gapfilled with fill tower data
# m = missing in both tower datasets
  # n = not missing; original base tower value was used
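  # The per-variable blocks below all repeat the same fill/flag pattern; a
  # compact helper (shown for reference only -- the explicit blocks are kept
  # for transparency) would look like this:
  if (FALSE) {
    fill_from_other_tower <- function(base, fill) {
      list(value = ifelse(is.na(base), fill, base),
           flag = ifelse(is.na(base) & is.na(fill), "m",
                         ifelse(is.na(base) & !is.na(fill), "s", "n")))
    }
  }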
gap_filled_from_twr <- dataDf.wide %>%
mutate(
# LH (Latent heat flux)
LE = ifelse(is.na(LE_base),
LE_fill,
LE_base),
LE_flag = ifelse(is.na(LE_base) & is.na(LE_fill),
"m",
ifelse(is.na(LE_base) & !is.na(LE_fill),
"s", "n")),
# H (sensible heat flux)
H = ifelse(is.na(H_base),
H_fill,
H_base),
H_flag = ifelse(is.na(H_base) & is.na(H_fill),
"m",
ifelse(is.na(H_base) & !is.na(H_fill),
"s", "n")),
# Air Temperature (TBOT)
Tair = ifelse(is.na(Tair_base),
Tair_fill,
Tair_base),
Tair_flag = ifelse(is.na(Tair_base) & is.na(Tair_fill),
"m",
ifelse(is.na(Tair_base) & !is.na(Tair_fill),
"s", "n")),
# Relative humidity (rH)
rH = ifelse(is.na(rH_base),
rH_fill,
rH_base),
rH_flag = ifelse(is.na(rH_base) & is.na(rH_fill),
"m",
ifelse(is.na(rH_base) & !is.na(rH_fill),
"s", "n")),
# Wind speed (U)
U = ifelse(is.na(U_base),
U_fill,
U_base),
U_flag = ifelse(is.na(U_base) & is.na(U_fill),
"m",
ifelse(is.na(U_base) & !is.na(U_fill),
"s", "n")),
# Atmospheric pressure (P)
P = ifelse(is.na(P_base),
P_fill,
P_base),
P_flag = ifelse(is.na(P_base) & is.na(P_fill),
"m",
ifelse(is.na(P_base) & !is.na(P_fill),
"s", "n")),
# Incident shortwave radiation (Rg_usnr1)
Rg = ifelse(is.na(Rg_base),
Rg_fill,
Rg_base),
Rg_flag = ifelse(is.na(Rg_base) & is.na(Rg_fill),
"m",
ifelse(is.na(Rg_base) & !is.na(Rg_fill),
"s", "n")),
# Incident longwave radiation (FLDS) <- CHECK WITH WILL ON THIS ONE
FLDS = ifelse(is.na(FLDS_base),
FLDS_fill,
FLDS_base),
FLDS_flag = ifelse(is.na(FLDS_base) & is.na(FLDS_fill),
"m",
ifelse(is.na(FLDS_base) & !is.na(FLDS_fill),
"s", "n")),
# Precipitation (PRECTmms)
PRECTmms = ifelse(is.na(PRECTmms_base),
PRECTmms_fill,
PRECTmms_base),
PRECTmms_flag = ifelse(is.na(PRECTmms_base) & is.na(PRECTmms_fill),
"m",
ifelse(is.na(PRECTmms_base) & !is.na(PRECTmms_fill),
"s", "n")),
# Net Ecosystem Excahange (NEE)
NEE = ifelse(is.na(NEE_base),
NEE_fill,
NEE_base),
NEE_flag = ifelse(is.na(NEE_base) & is.na(NEE_fill),
"m",
ifelse(is.na(NEE_base) & !is.na(NEE_fill),
"s", "n")),
# Ustar friction velocity (Ustar)
Ustar = ifelse(is.na(Ustar_base),
Ustar_fill,
Ustar_base),
Ustar_flag = ifelse(is.na(Ustar_base) & is.na(Ustar_fill),
"m",
ifelse(is.na(Ustar_base) & !is.na(Ustar_fill),
"s", "n")),
# Net radiation (radNet)
radNet = ifelse(is.na(radNet_base),
radNet_fill,
radNet_base),
radNet_flag = ifelse(is.na(radNet_base) & is.na(radNet_fill),
"m",
ifelse(is.na(radNet_base) & !is.na(radNet_fill),
"s", "n")),
# Soil Temperature (Tsoil)
Tsoil = ifelse(is.na(Tsoil_base),
Tsoil_fill,
Tsoil_base),
Tsoil_flag = ifelse(is.na(Tsoil_base) & is.na(Tsoil_fill),
"m",
ifelse(is.na(Tsoil_base) & !is.na(Tsoil_fill),
"s", "n"))
)
#### Save Gap-filled outputs ####
writeLines("Tower gap-filling complete. Saving data with flags...")
dataDf <- gap_filled_from_twr %>%
select(!ends_with(c("base", "fill", "flag")))
dataDf_flag <- gap_filled_from_twr %>%
select(!ends_with(c("base", "fill")))
twr <- ifelse(tower == "Both", "both_towers", paste0(tower, "_tower"))
flagged_fp <- paste0(DirOut, "/", "tvan_forcing_data_flagged_",
twr, '_',lubridate::date(start_date),
'_',lubridate::date(end_date),".txt")
write(paste0("# Flags: \n",
"# Base tower is: ", basetower, "\n",
"# s = base tower was gapfilled with fill tower data \n",
"# m = missing in both tower datasets \n",
"# n = not missing; original west tower value was used"),
flagged_fp)
suppressWarnings(
write.table(dataDf_flag, flagged_fp,
sep = "\t", row.names = FALSE,
append = TRUE)
)
writeLines(paste0("Flagged data can be found here: ", flagged_fp))
}
##############################################################################
# Prepare file for ReddyProc
##############################################################################
# Change NA to -9999
dataDf[is.na(dataDf)] <- -9999
# #Convert time to ReddyProc format
# dataDf$Year <- lubridate::year(dataDf$TIMESTAMP)
# dataDf$DoY <- lubridate::yday(dataDf$TIMESTAMP)
# dataDf$Hour <- lubridate::hour(dataDf$TIMESTAMP) + lubridate::minute(dataDf$TIMESTAMP)/60
#
# Remove timestamp and date
dataDf$timestamp <- NULL
dataDf$date <- NULL
# FLDS - incident longwave (FLDS) (W/m^2)
# FSDS - incident shortwave (FSDS) (W/m^2) # Check that these are the same as SW_IN/LW_IN
# PRECTmms - precipitation (PRECTmms = PRECTmms) (mm/s)
# PSRF - pressure at the lowest atmospheric level (PSRF = P) (kPa) - CONVERT TO kPa
# RH - relative humidity at lowest atm level (RH = rH) (%)
# TBOT - temperature at lowest atm level (TBOT = Tair) (K)
# WIND - wind at lowest atm level (WIND = U) (m/s)
# NEE - net ecosystem exchange (NEE = NEE) (umolm-2s-1)
# FSH - sensible heat flux (FSH = H) (Wm-2)
# EFLX_LH_TOT - latent heat flux (EFLX_LH_TOT = LE) (Wm-2)
# GPP - gross primary productivity (GPP) (umolm-2s-1)
# Rnet - net radiation (Rnet = Rn/Rg) (W/m^2)
# Ustar - friction velocity
# Tsoil
#Vector of units for each variable
unitDf <- c("Year" = "--", "DoY" = "--", "Hour" = "--", "LE" = "Wm-2", "H" = "Wm-2",
"Tair" = "degC",
"rH" = "%", "U" = "ms-1", "P" = "kPa", "Rg" = "Wm-2", "FLDS" = "Wm-2",
"PRECTmms" = "mms-1",
"NEE" = "umolm-2s-1", "Ustar" = "ms-1", "radNet" = "Wm-2",
"Tsoil" = "degC")
#Set the output data column order based off of the units vector
dataDf <- data.table::setcolorder(dataDf, names(unitDf))
#Create filename
twr <- ifelse(tower == "Both", "both_towers", paste0(tower, "_tower"))
fileOut <- paste0(DirOut,"/","tvan_forcing_data_",
twr, '_',lubridate::date(start_date),
'_',lubridate::date(end_date),'.txt')
h1 <- paste(names(unitDf), collapse = "\t")
h2 <- paste(unitDf, collapse = "\t")
#Output data in ReddyProc format
conFile <- file(fileOut, "w")
#write the variable names header
writeLines(text = c(h1,h2), sep = "\n", con = conFile)
#write the variable units header
#writeLines(text = unitDf, sep = "\t", con = conFile)
#Write output in tab delimited format
write.table(x = dataDf, file = conFile, sep = "\t", row.names = FALSE, col.names = FALSE)
#Close file connection
close(conFile)
##############################################################################
# ReddyProc Gap-filling workflow
##############################################################################
EddyData.F <- fLoadTXTIntoDataframe(fileOut)
#Threshold bounds to prevent rH > 100%
EddyData.F$rH[EddyData.F$rH > 100] <- 100
#Threshold bounds to prevent Rg (FSDS) < 0
EddyData.F$Rg[EddyData.F$Rg < 0] <- 0
EddyData.F$Rg[EddyData.F$Rg > 1200 ] <- 1200
#Threshold bounds to prevent NEE > 100
EddyData.F$NEE[EddyData.F$NEE > 100] <- NA
#Threshold bounds to prevent NEE < -100
EddyData.F$NEE[EddyData.F$NEE < -100] <- NA
#+++ If not provided, calculate VPD from TBOT and RH
EddyData.F <- cbind(EddyData.F,VPD = fCalcVPDfromRHandTair(EddyData.F$rH,
EddyData.F$Tair))
#+++ Add time stamp in POSIX time format
EddyDataWithPosix.F <- fConvertTimeToPosix(EddyData.F, 'YDH', Year = 'Year',
Day = 'DoY', Hour = 'Hour', tz = "MST")
#+++ Initalize R5 reference class sEddyProc for processing of eddy data
#+++ with all variables needed for processing later
EddyProc.C <- sEddyProc$new(twr, EddyDataWithPosix.F,
c('NEE','Rg','Tair','VPD','rH','LE','H','Ustar','P',
'FLDS','U', 'PRECTmms', 'radNet', 'Tsoil'))
#Set location information
EddyProc.C$sSetLocationInfo(LatDeg = latSite, LongDeg = lonSite, TimeZoneHour = -6)
#+++ Fill gaps in variables with MDS gap filling algorithm (without prior ustar filtering)
# Note, this also takes a long time to complete!
EddyProc.C$sMDSGapFill('NEE', FillAll = TRUE) #Fill all values to estimate flux uncertainties
EddyProc.C$sMDSGapFill('LE', FillAll = TRUE)
EddyProc.C$sMDSGapFill('H', FillAll = TRUE)
EddyProc.C$sMDSGapFill('Ustar', FillAll = TRUE)
EddyProc.C$sMDSGapFill('Tair', FillAll = FALSE)
EddyProc.C$sMDSGapFill('VPD', FillAll = FALSE)
EddyProc.C$sMDSGapFill('rH', FillAll = FALSE)
EddyProc.C$sMDSGapFill('U', FillAll = FALSE) # wind
EddyProc.C$sMDSGapFill('PRECTmms', FillAll = FALSE)
EddyProc.C$sMDSGapFill('P', FillAll = FALSE)
EddyProc.C$sMDSGapFill('FLDS', FillAll = FALSE)
EddyProc.C$sMDSGapFill('Rg', FillAll = FALSE)
EddyProc.C$sMDSGapFill('radNet', FillAll = FALSE)
EddyProc.C$sMDSGapFill('Tsoil', FillAll = FALSE)
EddyProc.C$sMRFluxPartition()
#+++ Export gap filled and partitioned data to standard data frame
FilledEddyData.F <- EddyProc.C$sExportResults()
#Grab just the filled data products
dataClm <- FilledEddyData.F[,grep(pattern = "_f$", x = names(FilledEddyData.F))]
#Grab the POSIX timestamp
dataClm$DateTime <- EddyDataWithPosix.F$DateTime - lubridate::minutes(30) # putting back to original position
names(dataClm) <- gsub("_f", "", names(dataClm))
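# The full ReddyProc output also carries quality flags for each filled series
# (e.g. NEE_fqc: 0 = original data, 1-3 = gap-filled with decreasing confidence).
# A quick look at how much of the record was filled (illustrative, not run):
if (FALSE) {
  table(FilledEddyData.F$NEE_fqc, useNA = "ifany")
}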
#Convert degC to K for temperature
dataClm$Tair <- dataClm$Tair + 273.15
attributes(obj = dataClm$Tair)$units <- "K"
#Convert kPa to Pa for pressure
dataClm$P <- dataClm$P * 1000.0
attributes(obj = dataClm$P)$units <- "Pa"
#Create tower height measurement field
dataClm$ZBOT <- rep(2,nrow(dataClm))
#Year month combination for data filtering
dataClm$yearMon <- paste0(year(dataClm$DateTime), "-",
sprintf("%02d", month(dataClm$DateTime)))
##############################################################################
# Plotting and identifying gaps left in data after gapfilling
##############################################################################
if (makeplots == TRUE) {
# needs ggplot and dplyr/tidyr
# change data to longform
# Necessary for model:
# tbot, wind, rh, PSRF, FLDS, FSDS, PRECTmms
getgaplength <- function(gap, y = "notgap") {
res <- rle(gap == y)
res_vec <- rep(res$values*res$lengths,res$lengths)
return(res_vec)
}
# Find the minimum and maximum time stamps at which all required forcing variables have values
dataClm.forc.gaps <- dataClm %>%
rename(EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg) %>%
mutate_at(vars(TBOT, WIND, RH, PSRF, FLDS, FSDS, PRECTmms), list(gap = is.na)) %>%
mutate(gap = TBOT_gap | WIND_gap | RH_gap | PSRF_gap | FLDS_gap | FSDS_gap |
PRECTmms_gap) %>%
mutate(gap = ifelse(gap == FALSE, "notgap", "gap"),
ncontiguousgaps = getgaplength(gap, "gap")) %>%
filter(gap == "gap") %>%
select(DateTime, gap, ncontiguousgaps) %>%
mutate(ndays = ncontiguousgaps/48,
#ndays = as.factor(ndays),
ncontiguousgaps = as.factor(ncontiguousgaps)) %>%
group_by(ndays) %>%
summarize(min = min(DateTime, na.rm = TRUE),
max = max(DateTime, na.rm = TRUE)) %>%
arrange(desc(ndays)) %>%
mutate(ndays = as.factor(round(ndays, digits = 2))) %>%
mutate(yr1 = year(min),
yr2 = year(max)) %>%
rowwise() %>%
mutate(years = paste0(seq(yr1, yr2), collapse = " | ")) %>%
select(-yr1, -yr2)
# Plot the required forcing variables
dataClm.forc.plot <- dataClm %>%
rename(EFLX_LH_TOT = LE, FSH = H, TBOT = Tair, RH = rH,
WIND = U, PSRF = P, FSDS = Rg) %>%
tidyr::pivot_longer(cols = !matches(c("DateTime", "yearMon")),
names_to = "variable",
values_to = "value") %>%
filter(variable %in% c("TBOT", "WIND", "RH", "PSRF", "FLDS", "FSDS", "PRECTmms"))
plot_gaps <- function(forcings, gaps,
filteryears = NA,
min_gap_days = 1,
highlightgaps = FALSE,
verbose = FALSE) {
# if filteryear and tower are NA all years and both towers are plotted
# filteryear is either NA or a vector of character strings of years to plot
# if highlightgaps == TRUE, gaps will be highlighted on plot
# min_gaps_days is the minimum length in days of gaps to highlight
forcings.plot <- forcings %>%
mutate(Year = year(DateTime))
gaps.plot <- gaps
title <- paste0("Gap-plots for both towers and all years")
if (any(!is.na(filteryears))) {
forcings.plot <- forcings.plot %>%
filter(Year %in% filteryears)
gaps.plot <- gaps.plot %>%
filter(grepl(paste0(filteryears, collapse = "|"), years))
title <- paste0("Gap-plots for gap-filled data: year(s) ",
paste0(filteryears, collapse = ", "))
}
writeLines(paste0("Plotting from ", min(forcings.plot$Year), " to ",
max(forcings.plot$Year)))
if (verbose) {
if (!is.na(tower)) {
writeLines(paste0("Tower is ", tower))
} else {
writeLines(paste0("Plotting both towers"))
}
if (highlightgaps) {
writeLines("Gaps will be highlighted")
writeLines("Note: if a gap exeeds the boundary year, the x-axis will be",
"modified so the entire gap is shown but points for that period ",
"will not be plotted.")
} else {
writeLines("Gaps will not be highlighted")
}
}
forcing_gaps.plot <- ggplot(forcings.plot) +
geom_point(aes(x = DateTime, y = value), alpha = 0.05) +
facet_wrap(~variable, scales = "free_y", ncol = 1) +
ggtitle(title)
if (nrow(gaps.plot) == 0) {
highlightgaps <- FALSE
}
if (highlightgaps) {
forcing_gaps.plot <- forcing_gaps.plot +
geom_rect(data = gaps.plot,
aes(xmin = min,
xmax = max,
ymin = -Inf,
ymax = Inf), alpha = 0.3) +
geom_vline(aes(xintercept = min), data = gaps.plot) +
geom_vline(aes(xintercept = max), data = gaps.plot) +
theme(legend.position = "none") +
scale_fill_discrete(name = paste0("Gaps >", min_gap_days, " days"))
}
return(forcing_gaps.plot)
}
plot_years <- c(min(year(dataClm.forc.plot$DateTime),
na.rm = TRUE):max(year(dataClm.forc.plot$DateTime),
na.rm = TRUE))
plot_years <- set_names(plot_years)
plot_years <- map(plot_years,
~plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = TRUE,
filteryears = .x))
iwalk(plot_years, ~{
ggsave(plot = .x,
filename = paste0(plots_dir,"/",.y,
'_yearly_gap_plots_postgapfilling.png'),
width = 10,
height = 5*7,
dpi = 150)
})
plot_all_years <- plot_gaps(forcings = dataClm.forc.plot,
gaps = dataClm.forc.gaps,
highlightgaps = TRUE,
filteryears = NA)
forc.plot.out.name <- paste0(plots_dir,"/",
lubridate::date(dataClm$DateTime[1]),'_',
lubridate::date(tail(dataClm$DateTime, n = 1)),
'_required_forcing_postgapfilling.png')
ggsave(plot = plot_all_years,
filename = forc.plot.out.name,
width = 10,
height = 5*7,
dpi = 150)
}
##############################################################################
# Prepare 4 different precipitation regimes for the different vegetation communities
##############################################################################
# There are several vegetation communities at Niwot and they all see slightly
# different precipitation regimes. (See Wieder et al. 2017). We will modify the
# precipitation inputs based on Table 1 in Wieder et al. 2017
# | Community | Snow (% relative to observations) |
# | ----------------- | -------------------------------------- |
# | Fellfield (FF) | 10, but 25 during March, April and May |
# | Dry meadow (DM) | 10, but 25 during March, April and May |
# | Moist meadow (MM) | 100 |
# | Wet meadow (WM) | 75 + runoff simulated from moist meadow |
# | Snowbed (SB) | 200 |
dataClm_veg_communities <- dataClm %>%
mutate(month = month(DateTime),
PRECTmms_FF = ifelse(Tair >= 273.15, PRECTmms,
ifelse(month %in% c(3,4,5), PRECTmms * 0.25,
PRECTmms*0.1)),
PRECTmms_DM = ifelse(Tair >= 273.15, PRECTmms,
ifelse(month %in% c(3,4,5), PRECTmms * 0.25,
PRECTmms*0.1)),
PRECTmms_MM = PRECTmms,
PRECTmms_WM = ifelse(Tair >= 273.15, PRECTmms, PRECTmms*0.75),
PRECTmms_SB = ifelse(Tair >= 273.15, PRECTmms, PRECTmms*2)) %>%
select(-month)
# Add in simulated runoff from the moist meadow (MM) to the wet meadow (WM):
if (simulated_runoff_present) {
  simulated_runoff <- read.csv(file = simulated_runoff_fp)
  # convert runoff time to a POSIXct timestamp that can be joined on DateTime
  simulated_runoff$time <- as.POSIXct(simulated_runoff$time, tz = 'UTC')
  simulated_runoff$time <- lubridate::round_date(simulated_runoff$time, unit = "minute")
  # add runoff to precipitation for the wet meadow
  dataClm_veg_communities <- dataClm_veg_communities %>%
    left_join(simulated_runoff, by = c("DateTime" = "time")) %>%
    mutate(PRECTmms_WM = PRECTmms_WM + QRUNOFF) %>%
    select(-QRUNOFF)
}
# Write out modified precipitation data
twr <- ifelse(tower == "Both", "both_towers", paste0(tower, "_tower"))
precip_mods_fp <- paste0(DirOut, "/", "tvan_forcing_data_precip_mods_",
twr, '_',lubridate::date(start_date),
'_',lubridate::date(end_date),".txt")
# ADD UNITS
dataClm_veg_communities_units <- c("NEE" = "umolm-2s-1",
"LE" = "Wm-2",
"H" = "Wm-2",
"Ustar" = "ms-1",
"Tair" = "K",
"VPD" = "kPa",
"rH" = "%",
"U" = "ms-1",
"PRECTmms" = "mms-1",
"PRECTmms_FF" = "mms-1",
"PRECTmms_DM" = "mms-1",
"PRECTmms_MM" = "mms-1",
"PRECTmms_WM" = "mms-1",
"PRECTmms_SB" = "mms-1",
"P" = "Pa",
"FLDS" = "Wm-2",
"Rg" = "Wm-2",
"radNet" = "Wm-2",
"Tsoil" = "degC",
"GPP" = "umolm-2s-1",
"DateTime" = "-",
"yearMon" = "-",
"ZBOT" = "-")
# Reorder the units to match the order of dataClm_veg_communities
dataClm_veg_communities_units <- dataClm_veg_communities_units[names(dataClm_veg_communities)]
dataClm_veg_communities_units.df <- rbind(dataClm_veg_communities_units)
rownames(dataClm_veg_communities_units.df) <- NULL
write.table(dataClm_veg_communities_units.df, precip_mods_fp,
sep = "\t", row.names = FALSE)
write.table(dataClm_veg_communities, precip_mods_fp,
sep = "\t", row.names = FALSE, append = TRUE, col.names = FALSE)
##############################################################################
# Write output to CLM
##############################################################################
write_to_clm <- function(dataClm, veg_community = NA, verbose = FALSE) {
# dataClm = the gap-filled data subsetted according to the precipitation
# regime you want
# veg_community = one of "FF", "DM", "MM", "WM", or "SB" specifying the
# vegetation community you want to simulate, if NA, original
# precip values are used
#
# Set up for vegetation choice
veg_community_list <- c("fell_field", "dry_meadow", "moist_meadow", "wet_meadow",
"snow_bed")
names(veg_community_list) <- c("FF", "DM", "MM", "WM","SB")
if (is.na(veg_community)) { # original precip
dataClm <- dataClm %>%
select(!ends_with(c("_FF", "_DM", "_MM", "_WM", "_SB")))
vegcom <- "original"
} else { # specific vegetation community
precip_col_name <- paste0("PRECTmms_", veg_community)
dataClm$PRECTmms <- dataClm[,precip_col_name]
dataClm <- dataClm %>%
select(!ends_with(c("_FF", "_DM", "_MM", "_WM", "_SB")))
vegcom <- veg_community_list[veg_community]
}
#Define missing value fill
mv <- -9999.
#Set of year/month combinations for netCDF output
setYearMon <- unique(dataClm$yearMon)
for (m in setYearMon) {
#m <- setYearMon[10] #for testing
Data.mon <- dataClm[dataClm$yearMon == m,]
timeStep <- seq(0,nrow(Data.mon)-1,1)
time <- timeStep/48
#endStep <- startStep + nsteps[m]-1
if (verbose) {
print(paste(m,"Data date =",Data.mon$DateTime[1], "00:00:00"))
names(Data.mon)
}
#NetCDF output filename
fileOutNcdf <- paste(DirOut,"/",vegcom, "/",m,".nc", sep = "")
if (verbose) {
print(fileOutNcdf)
}
veg_com_dir <- paste0(DirOut,"/",vegcom)
if(!dir.exists(veg_com_dir)) dir.create(veg_com_dir, recursive = TRUE)
#sub(pattern = ".txt", replacement = ".nc", fileOut)
# define the netcdf coordinate variables (name, units, type)
lat <- ncdf4::ncdim_def("lat","degrees_north", as.double(latSite), create_dimvar=TRUE)
lon <- ncdf4::ncdim_def("lon","degrees_east", as.double(lonSite), create_dimvar=TRUE)
#Variables to output to netCDF
time <- ncdf4::ncdim_def("time", paste("days since",Data.mon$DateTime[1], "00:00:00"),
vals=as.double(time),unlim=FALSE, create_dimvar=TRUE,
calendar = "noleap")
LATIXY <- ncdf4::ncvar_def("LATIXY", "degrees N", list(lat), mv,
longname="latitude", prec="double")
LONGXY <- ncdf4::ncvar_def("LONGXY", "degrees E", list(lon), mv,
longname="longitude", prec="double")
FLDS <- ncdf4::ncvar_def("FLDS", "W/m^2", list(lon,lat,time), mv,
longname="incident longwave (FLDS)", prec="double")
FSDS <- ncdf4::ncvar_def("FSDS", "W/m^2", list(lon,lat,time), mv,
longname="incident shortwave (FSDS)", prec="double")
PRECTmms <- ncdf4::ncvar_def("PRECTmms", "mm/s", list(lon,lat,time), mv,
longname="precipitation (PRECTmms)", prec="double")
PSRF <- ncdf4::ncvar_def("PSRF", "Pa", list(lon,lat,time), mv,
longname="pressure at the lowest atmospheric level (PSRF)", prec="double")
RH <- ncdf4::ncvar_def("RH", "%", list(lon,lat,time), mv,
longname="relative humidity at lowest atm level (RH)", prec="double")
TBOT <- ncdf4::ncvar_def("TBOT", "K", list(lon,lat,time), mv,
longname="temperature at lowest atm level (TBOT)", prec="double")
WIND <- ncdf4::ncvar_def("WIND", "m/s", list(lon,lat,time), mv,
longname="wind at lowest atm level (WIND)", prec="double")
ZBOT <- ncdf4::ncvar_def("ZBOT", "m", list(lon,lat,time), mv,
longname="observational height", prec="double")
NEE <- ncdf4::ncvar_def("NEE", "umolm-2s-1", list(lon,lat,time), mv,
longname="net ecosystem exchange", prec="double")
FSH <- ncdf4::ncvar_def("FSH", "Wm-2", list(lon,lat,time), mv,
longname="sensible heat flux", prec="double")
EFLX_LH_TOT <- ncdf4::ncvar_def("EFLX_LH_TOT", "Wm-2", list(lon,lat,time), mv,
longname="latent heat flux", prec="double")
GPP <- ncdf4::ncvar_def("GPP", "umolm-2s-1", list(lon,lat,time), mv,
longname="gross primary productivity", prec="double")
Rnet <- ncdf4::ncvar_def("Rnet", "W/m^2", list(lon,lat,time), mv,
longname="net radiation", prec="double")
#Create the output file
ncnew <- ncdf4::nc_create(fileOutNcdf, list(LATIXY,LONGXY,FLDS,FSDS,PRECTmms,RH,PSRF,TBOT,WIND,ZBOT,FSH,EFLX_LH_TOT,NEE,GPP,Rnet))
# Write some values to this variable on disk.
ncdf4::ncvar_put(ncnew, LATIXY, latSite)
ncdf4::ncvar_put(ncnew, LONGXY, lonSite)
ncdf4::ncvar_put(ncnew, FLDS, Data.mon$FLDS)
ncdf4::ncvar_put(ncnew, FSDS, Data.mon$Rg)
ncdf4::ncvar_put(ncnew, RH, Data.mon$rH)
ncdf4::ncvar_put(ncnew, PRECTmms, Data.mon$PRECTmms)
ncdf4::ncvar_put(ncnew, PSRF, Data.mon$P)
ncdf4::ncvar_put(ncnew, TBOT, Data.mon$Tair)
ncdf4::ncvar_put(ncnew, WIND, Data.mon$U)
ncdf4::ncvar_put(ncnew, ZBOT, Data.mon$ZBOT)
ncdf4::ncvar_put(ncnew, NEE, Data.mon$NEE)
ncdf4::ncvar_put(ncnew, FSH, Data.mon$H)
ncdf4::ncvar_put(ncnew, EFLX_LH_TOT, Data.mon$LE)
ncdf4::ncvar_put(ncnew, GPP, Data.mon$GPP)
ncdf4::ncvar_put(ncnew, Rnet, Data.mon$radNet)
#add attributes
# ncdf4::ncatt_put(ncnew, time,"calendar", "noleap" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, FLDS,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, FSDS,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, RH ,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, PRECTmms,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, PSRF,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, TBOT,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, WIND,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, ZBOT,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, NEE,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, FSH,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, EFLX_LH_TOT,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, GPP,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, Rnet,"mode","time-dependent" ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "veg_community_type", veg_community_list[veg_community],prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_on",date() ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_by","Will Wieder",prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_from",fileOut ,prec=NA,verbose=FALSE,definemode=FALSE )
ncdf4::ncatt_put(ncnew, 0, "created_with", "flow.lter.clm.R",prec=NA,verbose=FALSE,definemode=FALSE )
#Close Netcdf file connection
ncdf4::nc_close(ncnew)
#Add step
#startStep <- endStep + 1
#Remove not needed variables
remove(time, timeStep, fileOutNcdf, ncnew, Data.mon,
FLDS,FSDS,RH,PRECTmms,PSRF,TBOT,WIND,ZBOT)
} #End of monthloop
}
# Prepare file for CLM simulations - convert to UTC and filter out leapdays
dataClm_veg_communities_modelready <- dataClm_veg_communities %>%
# Convert time into UTC
mutate(timestamp_UTC = with_tz(DateTime, tzone = "UTC"),
date = as.Date(timestamp_UTC),
Hour = lubridate::hour(timestamp_UTC) +
lubridate::minute(timestamp_UTC)/60) %>%
# Remove leap years
filter(!grepl(".{4}-02-29", date)) %>%
# Fix Hours, date, DoY, and Year; Hour is 0.5-24.0; Adjust date accordingly
# get new doy now that leap years are filtered out
mutate(Hour = if_else(Hour == 0.0, 24, Hour),
date = if_else(Hour == 24, date - 1, date),
Year = year(date),
DoY = yday(date),
DoY = ifelse(leap_year(Year) & (yday(date) > 59),
(yday(date) - 1), yday(date))) %>%
# Remove MST timestamp and replace it with UTC timestamp; also remove other
# extraneous time indicators
select(-DateTime, -date, -Hour, -Year, -DoY) %>%
rename(DateTime = timestamp_UTC) %>%
# overwrite yearMon with updated timezone yearMon
mutate(yearMon = paste0(year(DateTime), "-",
sprintf("%02d", month(DateTime))))
# Create NC files
community_list <- c("Fell Field", "Dry Meadow", "Moist Meadow", "Wet Meadow",
"Snow Bed", "Original Precipitation")
names(community_list) <- c("FF", "DM", "MM", "WM","SB", NA)
for (i in seq_along(community_list)) {
writeLines(paste0("Writing .nc files for ",
community_list[i], "..."))
write_to_clm(dataClm = dataClm_veg_communities_modelready,
veg_community = names(community_list[i]))
}
print(DirOut)
print('The met (.nc) forcings for Tvan are ready to be used! Time to run CLM')
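# Optional check on one of the generated forcing files (illustrative, not run;
# the file name below is hypothetical -- the actual names depend on DirOut, the
# vegetation community, and the year-months present in the record):
if (FALSE) {
  nc_check <- ncdf4::nc_open(file.path(DirOut, "moist_meadow", "2010-01.nc"))
  print(names(nc_check$var))  # FLDS, FSDS, PRECTmms, PSRF, RH, TBOT, WIND, ...
  ncdf4::nc_close(nc_check)
}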
|
SKAT_Optimal_PValue_Davies<-function(pmin.q,param.m,r.all){
re<-try(integrate(SKAT_Optimal_Integrate_Func_Davies, lower=0, upper=30, subdivisions=500,pmin.q=pmin.q,param.m=param.m,r.all=r.all,abs.tol = 10^-15), silent = TRUE)
if(class(re) == "try-error"){
re<-SKAT_Optimal_PValue_Liu(pmin.q,param.m,r.all)
return(re)
}
pvalue<-1-re[[1]]
if(pvalue < 0){
pvalue=0
}
return(pvalue)
}
|
/SKATr/SKAT_Optimal_PValue_Davies.R
|
no_license
|
cailab-tamu/SKATr2matlab
|
R
| false | false | 397 |
r
|
|
#' @title To show the Euclidean distance formula.
#' @description To show the Euclidean distance formula and to calculate the Euclidean distance of two clusters.
#' @param x is a numeric vector or a matrix. It represents the values of a cluster.
#' @param y is a numeric vector or a matrix. It represents the values of a cluster.
#' @details This function is part of the hierarchical clustering method. The function calculates the
#' Euclidean distance value from \code{x} and \code{y}.
#' @author Roberto Alcántara \email{roberto.alcantara@@edu.uah.es}
#' @author Juan José Cuadrado \email{jjcg@@uah.es}
#' @author Universidad de Alcalá de Henares
#' @return Euclidean distance value and formula.
#' @examples
#'
#' x <- c(1,2)
#' y <- c(1,3)
#'
#' cluster1 <- matrix(x,ncol=2)
#' cluster2 <- matrix(y,ncol=2)
#'
#' edistance(x,y)
#'
#' edistance(cluster1,cluster2)
#'
#' @export
edistance.details <- function(x,y){
initImages("../man/images/euclideanDistance.PNG")
sqrt(((y[1] - x[1])^2) + ((y[2] - x[2])^2))
}
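# A generalised form for clusters with an arbitrary number of coordinates
# (an illustrative sketch only, not part of the package API):
edistance.general <- function(x, y) {
  sqrt(sum((as.numeric(y) - as.numeric(x))^2))
}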
|
/R/euclideanDistance.details.R
|
no_license
|
cran/LearnClust
|
R
| false | false | 1,051 |
r
|
|
#######################################################################################
###### Configuration options ######
#######################################################################################
###directories
scripts.dir="scripts"
bowtie.build.path="../windows/bowtie2-2.1.0-win/bowtie2-build.exe"
bowtie.align.path="../windows/bowtie2-2.1.0-win/bowtie2-align.exe"
samtools.path="../windows/samtools-win/samtools.exe"
picard.samtofastq.jar="../SamToFastq.jar"
###input_reads
read.length=76
#paired-end reads
fastq1="examples/tel_reads1.fq"
fastq2="examples/tel_reads2.fq"
#single-end reads (if single is T)
single=T
files.with.prefix = F
#if files.with.prefix is F
#specify one or many fastq files
fastq="examples/tel_reads.fq,../examples/tel_reads1.fq ../examples/tel_reads2.fq"
#if files.with.prefix is T
#specify fastq files with their prefix and directory
fastq.dir="examples"
fastq.prefix="tel_reads"
###algorithm_options
pattern='TTAGGG'
num.haploid.chr=23
min.seed=12
mode.local=F
###base_coverage_calculation_options
compute.base.cov=T
base.cov=5.4
base.index.pathtoprefix="examples/base.index/base_index"
###output_options
output.dir='examples/output'
###system_options
num.proc=3
###additional_options
quals="--phred33" #default: --phred33, alternatives: --phred64, --solexa-quals
ignore.err=T
################################################################
#### assemble options in config.table for validation ####
################################################################
config.table = NULL
config.table['scripts.dir'] = scripts.dir
config.table['bowtie.build.path'] = bowtie.build.path
config.table['bowtie.align.path'] = bowtie.align.path
config.table['samtools.path'] = samtools.path
config.table['picard.samtofastq.jar'] = picard.samtofastq.jar
config.table['fastq1'] = fastq1
config.table['fastq2'] = fastq2
config.table['single'] = single
config.table['fastq'] = fastq
config.table['files.with.prefix'] = files.with.prefix
config.table['fastq.dir'] = fastq.dir
config.table['fastq.prefix']=fastq.prefix
config.table['read.length'] = read.length
config.table['pattern'] = pattern
config.table['num.haploid.chr'] = num.haploid.chr
config.table['min.seed'] = min.seed
config.table['mode.local'] = mode.local
config.table['compute.base.cov'] = compute.base.cov
config.table['base.cov'] = base.cov
config.table['base.index.pathtoprefix'] = base.index.pathtoprefix
config.table['output.dir'] = output.dir
config.table['num.proc'] = num.proc
config.table['quals']=quals
config.table['ignore.err'] = ignore.err
config.table = as.matrix(config.table)
validate.R = file.path(scripts.dir, "validate.options.R")
if (!file.exists(validate.R))
validate.R = "validate.options.R"
if (!file.exists(validate.R)){
stop("validate.options.R not found.\n Provide scripts.dir containing the script.")
} else {
config.set = T
source(validate.R)
}
dir.create(output.dir, showWarnings=F)
if (!config.set){
stop("configuration not set successfully. Scripts will not execute.\n")
} else {
source(pipeline.R)
}
|
/src/scripts/computel.R
|
no_license
|
BioinformaticsArchive/computel
|
R
| false | false | 3,258 |
r
|
|
/데이터 과학/Homework3.R
|
no_license
|
yeseongcho/-
|
R
| false | false | 27,680 |
r
| ||
# Tune algorithm parameters using a manual grid search.
# load the library
library(caret)
# load the dataset
data(iris)
# prepare training scheme
control <- trainControl(method="repeatedcv", number=10, repeats=3)
# design the parameter tuning grid
grid <- expand.grid(size=c(5,10,20,50), k=c(1,2,3,4,5))
# train the model
model <- train(Species~., data=iris, method="lvq", trControl=control, tuneGrid=grid)
# summarize the model
print(model)
# plot the effect of parameters on accuracy
plot(model)
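# Alternative (illustrative): instead of an explicit grid, caret can generate one
# of a given size automatically via tuneLength, e.g.:
# model2 <- train(Species~., data=iris, method="lvq", trControl=control, tuneLength=5)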
|
/08_Machine_Learning_Mastery_with_R/05_ImproveResults/01_TuneAlgorithms/manual_grid_search.R
|
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
R
| false | false | 499 |
r
|
|
## version: 1.31
## method: get
## path: /tasks/{id}
## code: 200
## response: {"ID":"0kzzo1i0y4jz6027t0k7aezc7","Version":{"Index":71},"CreatedAt":"2016-06-07T21:07:31.171892745Z","UpdatedAt":"2016-06-07T21:07:31.376370513Z","Spec":{"ContainerSpec":{"Image":"redis"},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0},"Placement":{}},"ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"NodeID":"60gvrl6tm78dmak4yl7srz94v","Status":{"Timestamp":"2016-06-07T21:07:31.290032978Z","State":"running","Message":"started","ContainerStatus":{"ContainerID":"e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035","PID":677}},"DesiredState":"running","NetworksAttachments":[{"Network":{"ID":"4qvuz4ko70xaltuqbt8956gd1","Version":{"Index":18},"CreatedAt":"2016-06-07T20:31:11.912919752Z","UpdatedAt":"2016-06-07T21:07:29.955277358Z","Spec":{"Name":"ingress","Labels":{"com.docker.swarm.internal":"true"},"DriverConfiguration":{},"IPAMOptions":{"Driver":{},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"IPAMOptions":{"Driver":{"Name":"default"},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"Addresses":"10.255.0.10/16"}]}
NULL
container_spec <- list(
image = "redis",
labels = NULL,
command = character(0),
args = character(0),
hostname = NA_character_,
env = character(0),
dir = NA_character_,
user = NA_character_,
groups = character(0),
privileges = NULL,
tty = NA,
open_stdin = NA,
read_only = NA,
mounts = data_frame(
target = character(0),
source = character(0),
type = character(0),
read_only = logical(0),
consistency = character(0),
bind_options = I(list()),
volume_options = I(list()),
tmpfs_options = I(list())),
stop_signal = NA_character_,
stop_grace_period = NA_integer_,
health_check = NULL,
hosts = character(0),
dns_config = NULL,
secrets = data_frame(
file = I(list()),
secret_id = character(0),
secret_name = character(0)),
configs = data_frame(
file = I(list()),
config_id = character(0),
config_name = character(0)))
spec <- list(
plugin_spec = NULL,
container_spec = container_spec,
resources = list(
limits = list(
nano_cpus = NA_integer_,
memory_bytes = NA_integer_),
reservation = NULL),
restart_policy = list(
condition = "any",
delay = NA_integer_,
max_attempts = 0L,
window = NA_integer_),
placement = list(
constraints = character(0),
preferences = data_frame(
spread = I(list())),
platforms = data_frame(architecture = character(0),
os = character(0))),
force_update = NA_integer_,
runtime = NA_character_,
networks = data_frame(
target = character(),
aliases = I(list())),
log_driver = NULL)
list(
id = "0kzzo1i0y4jz6027t0k7aezc7",
version = list(index = 71L),
created_at = "2016-06-07T21:07:31.171892745Z",
updated_at = "2016-06-07T21:07:31.376370513Z",
name = NA_character_,
labels = NULL,
spec = spec,
service_id = "9mnpnzenvg8p8tdbtq4wvbkcz",
slot = 1L,
node_id = "60gvrl6tm78dmak4yl7srz94v",
status = list(
timestamp = "2016-06-07T21:07:31.290032978Z",
state = "running",
message = "started",
err = NA_character_,
container_status = list(
container_id = "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
pid = 677L,
exit_code = NA_integer_)),
desired_state = "running")
|
/tests/testthat/sample_responses/v1.31/task_inspect.R
|
no_license
|
cran/stevedore
|
R
| false | false | 3,575 |
r
|
|
install.packages("hexbin")
library(hexbin)
library(RColorBrewer)
library(ggplot2)
#this is the section where I download the data, change as needed
setwd("H:/internship/Internship-Work")
the <- read.csv("all-data.csv")
#viewed the data just to make sure it works, commented out after
View(the)
#drop non-participants
big <-subset(the, io!="o")
View(big)
big$Total.Grade <- as.numeric(as.character(gsub(",","",big$Total.Grade)))
#scatterplots here show both data points as well as correlation trends
scatter.smooth(x=big$teamSatisfaction, y=big$M1.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$M2.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$M3.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$M4.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$Grade.Total)
scatter.smooth(x=big$M1FB.Interdependence, y=big$M1.Grade)
scatter.smooth(x=big$M1FB.Inclusion, y=big$M1.Grade)
scatter.smooth(x=big$M1FB.Interaction, y=big$M1.Grade)
scatter.smooth(x=big$M2FB.Interdependence, y=big$M2.Grade)
scatter.smooth(x=big$M2FB.Inclusion, y=big$M2.Grade)
scatter.smooth(x=big$M2FB.Interaction, y=big$M2.Grade)
scatter.smooth(x=big$M2FB.Interdependence, y=big$M3.Grade)
scatter.smooth(x=big$M2FB.Inclusion, y=big$M3.Grade)
scatter.smooth(x=big$M2FB.Interaction, y=big$M3.Grade)
scatter.smooth(x=big$M2FB.Interdependence, y=big$M4.Grade)
scatter.smooth(x=big$M2FB.Inclusion, y=big$M4.Grade)
scatter.smooth(x=big$M2FB.Interaction, y=big$M4.Grade)
scatter.smooth(x=big$adjFactorNoSelf, y=big$Total.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$Total.Grade)
scatter.smooth(x=big$adjFactorNoSelf, y=big$teamSatisfaction)
#did some boxplots just to see if there were outliers in the data
boxplot(big$Total.Grade)
boxplot(big$teamInterdep)
boxplot(big$teamSatisfaction)
boxplot(big$adjFactorSelf)
boxplot(big$adjFactorNoSelf)
#did some hexagonal plots here to show some loose groupings
a <- hexbin(big$adjFactorNoSelf,big$teamSatisfaction,xbins=20)
plot(a)
b <- hexbin(big$teamSatisfaction,big$Grade.Total,xbins=20)
plot(b)
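# Optional variant (not in the original script): the plot method for hexbin
# objects accepts a colour ramp, so the RColorBrewer palette loaded above can
# shade the bins by count instead of the default greys.
plot(a, colramp = colorRampPalette(brewer.pal(9, "Blues")))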
#update this with all the data
big$Interdependence <- (big$M1FB.Interdependence + big$M2FB.Interdependence + big$M3FB.Interdependence + big$M4FB.Interdependence + big$M5FB.Interdependence + big$M6FB.Interdependence + big$M7FB.Interdependence + big$M8FB.Interdependence)/8
big$Inclusion <- (big$M1FB.Inclusion + big$M2FB.Inclusion + big$M3FB.Inclusion + big$M4FB.Inclusion + big$M5FB.Inclusion + big$M6FB.Inclusion + big$M7FB.Inclusion + big$M8FB.Inclusion)/8
big$Interaction <- (big$M1FB.Interaction + big$M2FB.Interaction + big$M3FB.Interaction + big$M4FB.Interaction + big$M5FB.Interaction + big$M6FB.Interaction + big$M7FB.Interaction + big$M8FB.Interaction)/8
big$Group_Performance <- (big$Interdependence + big$Inclusion + big$Interaction)/3
View(big)
big$Group_Performance <- (big$Interdependence + big$Inclusion + big$Interaction)/3
scatter.smooth(x=big$Group_Performance, y=big$adjFactorNoSelf)
scatter.smooth(x=big$adjFactorNoSelf, y=big$Group_Performance)
scatter.smooth(x=big$teamSatisfaction, y=big$Group_Performance)
scatter.smooth(x=big$teamSatisfaction, y=big$Total.Grade)
scatter.smooth(x=big$Total.Grade, y=big$teamSatisfaction)
scatter.smooth(x=big$Total.Grade, y=big$Group_Performance)
scatter.smooth(x=big$Group_Performance, y=big$Total.Grade)
c <- hexbin(big$Group_Performance,big$Grade.Total,xbins=20)
plot(c)
View(big)
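# Supplementary check (not in the original script): the scatter plots above
# hint at trends; cor() puts a number on them. Column names are taken from
# the code above and assumed to exist in all-data.csv.
cor(big$teamSatisfaction, big$Total.Grade, use = "complete.obs")
cor(big$Group_Performance, big$Total.Grade, use = "complete.obs")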
|
/exploratory-analysis.R
|
no_license
|
STDillon/CodeSamples
|
R
| false | false | 3,472 |
r
|
install.packages("hexbin")
library(hexbin)
library(RColorBrewer)
library(ggplot2)
#this is the section where I download the data, change as needed
setwd("H:/internship/Internship-Work")
the <- read.csv("all-data.csv")
#viewed the data just to make sure it works, commented out after
View(the)
#drop non-participants
big <-subset(the, io!="o")
View(big)
big$Total.Grade <- as.numeric(as.character(gsub(",","",big$Total.Grade)))
#scatterplots here show both data points as well as correlation trends
scatter.smooth(x=big$teamSatisfaction, y=big$M1.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$M2.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$M3.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$M4.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$Grade.Total)
scatter.smooth(x=big$M1FB.Interdependence, y=big$M1.Grade)
scatter.smooth(x=big$M1FB.Inclusion, y=big$M1.Grade)
scatter.smooth(x=big$M1FB.Interaction, y=big$M1.Grade)
scatter.smooth(x=big$M2FB.Interdependence, y=big$M2.Grade)
scatter.smooth(x=big$M2FB.Inclusion, y=big$M2.Grade)
scatter.smooth(x=big$M2FB.Interaction, y=big$M2.Grade)
scatter.smooth(x=big$M2FB.Interdependence, y=big$M3.Grade)
scatter.smooth(x=big$M2FB.Inclusion, y=big$M3.Grade)
scatter.smooth(x=big$M2FB.Interaction, y=big$M3.Grade)
scatter.smooth(x=big$M2FB.Interdependence, y=big$M4.Grade)
scatter.smooth(x=big$M2FB.Inclusion, y=big$M4.Grade)
scatter.smooth(x=big$M2FB.Interaction, y=big$M4.Grade)
scatter.smooth(x=big$adjFactorNoSelf, y=big$Total.Grade)
scatter.smooth(x=big$teamSatisfaction, y=big$Total.Grade)
scatter.smooth(x=big$adjFactorNoSelf, y=big$teamSatisfaction)
#did some boxplots just to see if there were outliers in the data
boxplot(big$Total.Grade)
boxplot(big$teamInterdep)
boxplot(big$teamSatisfaction)
boxplot(big$adjFactorSelf)
boxplot(big$adjFactorNoSelf)
#did some hexagonal plots here to show some loose groupings
a <- hexbin(big$adjFactorNoSelf,big$teamSatisfaction,xbins=20)
plot(a)
b <- hexbin(big$teamSatisfaction,big$Grade.Total,xbins=20)
plot(b)
#update this with all the data
big$Interdependence <- (big$M1FB.Interdependence + big$M2FB.Interdependence + big$M3FB.Interdependence + big$M4FB.Interdependence + big$M5FB.Interdependence + big$M6FB.Interdependence + big$M7FB.Interdependence + big$M8FB.Interdependence)/8
big$Inclusion <- (big$M1FB.Inclusion + big$M2FB.Inclusion + big$M3FB.Inclusion + big$M4FB.Inclusion + big$M5FB.Inclusion + big$M6FB.Inclusion + big$M7FB.Inclusion + big$M8FB.Inclusion)/8
big$Interaction <- (big$M1FB.Interaction + big$M2FB.Interaction + big$M3FB.Interaction + big$M4FB.Interaction + big$M5FB.Interaction + big$M6FB.Interaction + big$M7FB.Interaction + big$M8FB.Interaction)/8
big$Group_Performance <- (big$Interdependence + big$Inclusion + big$Interaction)/3
View(big)
big$Group_Performance <- (big$Interdependence + big$Inclusion + big$Interaction)/3
scatter.smooth(x=big$Group_Performance, y=big$adjFactorNoSelf)
scatter.smooth(x=big$adjFactorNoSelf, y=big$Group_Performance)
scatter.smooth(x=big$teamSatisfaction, y=big$Group_Performance)
scatter.smooth(x=big$teamSatisfaction, y=big$Total.Grade)
scatter.smooth(x=big$Total.Grade, y=big$teamSatisfaction)
scatter.smooth(x=big$Total.Grade, y=big$Group_Performance)
scatter.smooth(x=big$Group_Performance, y=big$Total.Grade)
c <- hexbin(big$Group_Performance,big$Grade.Total,xbins=20)
plot(c)
View(big)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PatternMatching.R
\name{crossMerge}
\alias{crossMerge}
\title{crossMerge}
\usage{
crossMerge(ind1, ind2, x, y, useMatrixToDataFrame = TRUE)
}
\arguments{
\item{ind1}{ind1}
\item{ind2}{ind2}
\item{x}{x}
\item{y}{y}
}
\description{
crossMerge
}
\keyword{internal}
|
/man/crossMerge.Rd
|
no_license
|
cran/SSBtools
|
R
| false | true | 364 |
rd
|
|
library(plink)
### Name: as.irt.pars
### Title: irt.pars objects
### Aliases: as.irt.pars as.irt.pars-methods
### as.irt.pars,numeric,missing-method
### as.irt.pars,data.frame,missing-method
### as.irt.pars,matrix,missing-method as.irt.pars,list,missing-method
### as.irt.pars,sep.pars,missing-method as.irt.pars,list,matrix-method
### as.irt.pars,list,list-method
### Keywords: utilities
### ** Examples
# Create object for three dichotomous (1PL) items with difficulties
# equal to -1, 0, 1
x <- as.irt.pars(c(-1,0,1))
# Create object for three dichotomous (3PL) items and two polytomous
# (gpcm) items without a location parameter
# (use signature matrix, missing)
dichot <- matrix(c(1.2, .8, .9, 2.3, -1.1, -.2, .24, .19, .13),3,3)
poly <- matrix(c(.64, -1.8, -.73, .45, NA, .88, .06, 1.4, 1.9, 2.6),
2,5,byrow=TRUE)
pars <- rbind(cbind(dichot,matrix(NA,3,2)),poly)
cat <- c(2,2,2,4,5)
pm <- as.poly.mod(5, c("drm","gpcm"), list(1:3,4:5))
x <- as.irt.pars(pars, cat=cat, poly.mod=pm)
summary(x)
# Create object for three dichotomous (3PL) items and two polytomous
# (gpcm) items without a location parameter
# (use signature list, missing)
a <- c(1.2, .8, .9, .64, .88)
b <- matrix(c(
2.3, rep(NA,3),
-1.1, rep(NA,3),
-.2, rep(NA,3),
-1.8, -.73, .45, NA,
.06, 1.4, 1.9, 2.6),5,4,byrow=TRUE)
c <- c(.24, .19, .13, NA, NA)
pars <- list(a,b,c)
cat <- c(2,2,2,4,5)
pm <- as.poly.mod(5, c("drm","gpcm"), list(1:3,4:5))
x <- as.irt.pars(pars, cat=cat, poly.mod=pm)
summary(x)
# Create object for three dichotomous (3PL) items, four polytomous items,
# two gpcm items and two nrm items. Include a location parameter for the
# gpcm items (use signature list, missing)
a <- matrix(c(
1.2, rep(NA,4),
.8, rep(NA,4),
.9, rep(NA,4),
.64, rep(NA,4),
.88, rep(NA,4),
.905, .522, -.469, -.959, NA,
.828, .375, -.357, -.079, -.817),7,5,byrow=TRUE)
b <- matrix(c(
2.3, rep(NA,4),
-1.1, rep(NA,4),
-.2, rep(NA,4),
-.69, -1.11, -.04, 1.14, NA,
1.49, -1.43, -.09, .41, 1.11,
.126, -.206, -.257, .336, NA,
.565, .865, -1.186, -1.199, .993),7,5,byrow=TRUE)
c <- c(.14, .19, .26, rep(NA,4))
pars <- list(a,b,c)
cat <- c(2,2,2,4,5,4,5)
pm <- as.poly.mod(7, c("drm","gpcm","nrm"), list(1:3,4:5,6:7))
x <- as.irt.pars(pars, cat=cat, poly.mod=pm, location=TRUE)
summary(x, TRUE)
# Create object with two groups (all dichotomous items)
pm <- as.poly.mod(36)
x <- as.irt.pars(KB04$pars, KB04$common, cat=list(rep(2,36),rep(2,36)),
list(pm,pm), grp.names=c("form.x","form.y"))
summary(x, TRUE)
# Create object with six groups (all dichotomous items)
pars <- TK07$pars
common <- TK07$common
cat <- list(rep(2,26),rep(2,34),rep(2,37),rep(2,40),rep(2,41),rep(2,43))
pm1 <- as.poly.mod(26)
pm2 <- as.poly.mod(34)
pm3 <- as.poly.mod(37)
pm4 <- as.poly.mod(40)
pm5 <- as.poly.mod(41)
pm6 <- as.poly.mod(43)
pm <- list(pm1, pm2, pm3, pm4, pm5, pm6)
x <- as.irt.pars(pars, common, cat, pm,
grp.names=paste("grade",3:8,sep=""))
# Create an object with two groups using mixed-format items and
# a mixed placement of common items. This example uses the dgn dataset.
pm1=as.poly.mod(55,c("drm","gpcm","nrm"),dgn$items$group1)
pm2=as.poly.mod(55,c("drm","gpcm","nrm"),dgn$items$group2)
x=as.irt.pars(dgn$pars,dgn$common,dgn$cat,list(pm1,pm2))
summary(x, TRUE)
|
/data/genthat_extracted_code/plink/examples/as.irt.pars.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 3,304 |
r
|
|
main <- function () {
if (!exists("LAControllerDatabase")) {
LAControllerDatabase <- read.csv("https://controllerdata.lacity.org/api/views/3ctd-sjrm/rows.csv?accessType=DOWNLOAD")
}
mainPhyloPlot(LAControllerDatabase)
return(c( getNumPayments(LAControllerDatabase),
getMeanPayment(LAControllerDatabase),
getMedianPayment(LAControllerDatabase),
getSDPayment(LAControllerDatabase)))
}
mainPhyloPlot <- function(LAControllerDatabase) {
LAControllerDatabase$EXPENDITURES <- as.numeric(gsub("\\$", "", as.character(LAControllerDatabase$EXPENDITURES)))
  departmentExpenditure <- aggregate(LAControllerDatabase$EXPENDITURES, by=list(Category=LAControllerDatabase$DEPARTMENT.NAME), FUN=sum)
cluster <- hclust((dist(departmentExpenditure[2]))^(1/2), "ave")
labels <- t(departmentExpenditure[1])
labels <- substring(labels, 0, 10)
plot(cluster, labels, hang = -1, main = "Departments by Net Expenditure")
}
getNumPayments <- function(LAControllerDatabase) {
return (nrow(LAControllerDatabase))
}
getMeanPayment <- function(LAControllerDatabase) {
Expenditures <- as.numeric(gsub("\\$", "", as.character(LAControllerDatabase$EXPENDITURES)))
return (mean(Expenditures))
}
getMedianPayment <- function(LAControllerDatabase) {
Expenditures <- as.numeric(gsub("\\$", "", as.character(LAControllerDatabase$EXPENDITURES)))
return (median(Expenditures))
}
getSDPayment <- function(LAControllerDatabase) {
Expenditures <- as.numeric(gsub("\\$", "", as.character(LAControllerDatabase$EXPENDITURES)))
return (sd(Expenditures))
}
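# Hypothetical usage sketch (an assumption, not in the original script):
# running main() downloads the CSV, draws the dendrogram, and returns the
# four summary statistics, which are labelled here for readability.
stats <- main()
names(stats) <- c("n_payments", "mean_payment", "median_payment", "sd_payment")
print(stats)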
|
/Schwarzer/SchwarzerRscript.r
|
no_license
|
Kaspect/pipeline-templates
|
R
| false | false | 1,565 |
r
|
|
# Dennis & Schnabel,1996,"Numerical methods for unconstrained optimization and nonlinear equations", SIAM
# example 6.5.1 page 149
library(nleqslv)
dslnex <- function(x) {
y <- numeric(2)
y[1] <- x[1]^2 + x[2]^2 - 2
y[2] <- exp(x[1]-1) + x[2]^3 - 2
y
}
jacdsln <- function(x) {
n <- length(x)
Df <- matrix(numeric(n*n),n,n)
Df[1,1] <- 2*x[1]
Df[1,2] <- 2*x[2]
Df[2,1] <- exp(x[1]-1)
Df[2,2] <- 3*x[2]^2
Df
}
do.print.xf <- FALSE
do.trace <- 0
print.result <- function(z) {
if( do.print.xf ) {
print(z$x)
print(z$fvec)
}
print(z$message)
print(all(abs(z$fvec)<=1e-8))
}
xstart <- c(2,.5)
z <- nleqslv(xstart,dslnex, jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
all.equal(z$jac,jacdsln(z$x), tolerance=0.05)
z <- nleqslv(xstart,dslnex,jacdsln, jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
all.equal(z$jac,jacdsln(z$x), tolerance=0.05)
z <- nleqslv(xstart,dslnex, method="Newton", jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
all.equal(z$jac,jacdsln(z$x), tolerance=10^3*.Machine$double.eps^0.5)
z <- nleqslv(xstart,dslnex, jacdsln, method="Newton", jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
identical(z$jac,jacdsln(z$x))
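# Additional check (not part of the original test): the same system solved
# with an explicit global strategy; "dbldog" is one of the documented options
# for nleqslv's global argument.
z <- nleqslv(xstart, dslnex, jacdsln, method="Newton", global="dbldog",
             jacobian=TRUE, control=list(trace=do.trace))
print.result(z)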
|
/tests/dslnexjacout.R
|
no_license
|
cran/nleqslv
|
R
| false | false | 1,273 |
r
|
# Dennis & Schnabel,1996,"Numerical methods for unconstrained optimization and nonlinear equations", SIAM
# example 6.5.1 page 149
library(nleqslv)
dslnex <- function(x) {
y <- numeric(2)
y[1] <- x[1]^2 + x[2]^2 - 2
y[2] <- exp(x[1]-1) + x[2]^3 - 2
y
}
jacdsln <- function(x) {
n <- length(x)
Df <- matrix(numeric(n*n),n,n)
Df[1,1] <- 2*x[1]
Df[1,2] <- 2*x[2]
Df[2,1] <- exp(x[1]-1)
Df[2,2] <- 3*x[2]^2
Df
}
do.print.xf <- FALSE
do.trace <- 0
print.result <- function(z) {
if( do.print.xf ) {
print(z$x)
print(z$fvec)
}
print(z$message)
print(all(abs(z$fvec)<=1e-8))
}
xstart <- c(2,.5)
z <- nleqslv(xstart,dslnex, jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
all.equal(z$jac,jacdsln(z$x), tolerance=0.05)
z <- nleqslv(xstart,dslnex,jacdsln, jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
all.equal(z$jac,jacdsln(z$x), tolerance=0.05)
z <- nleqslv(xstart,dslnex, method="Newton", jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
all.equal(z$jac,jacdsln(z$x), tolerance=10^3*.Machine$double.eps^0.5)
z <- nleqslv(xstart,dslnex, jacdsln, method="Newton", jacobian=TRUE, control=list(trace=do.trace))
print.result(z)
identical(z$jac,jacdsln(z$x))
|
\name{AIRPORT.RASTER}
\alias{AIRPORT.RASTER}
\docType{data}
\title{File name of a raster of airport locations
}
\description{The airport locations are rasterized into a raster
dataset which allows for simple calculations of the distance
from an airport. The raster is a 1km raster with a value of
1 if an airport is present in the grid square and NA if there
is no airport in the grid.
}
\usage{ AIRPORT.RASTER }
\format{
The format is:
chr "airports.grd"
}
\details{The raster is created by the \code{createRaster} function,
	saved to the "Airport" directory, and named using the string
	associated with AIRPORT.RASTER. The format is the native
	\pkg{raster} package format.
}
\source{
\url{http://www.ourairports.com/}
}
\references{
\url{http://www.ourairports.com/about.html#credits}
They include the FAA and several dedicated individuals.
}
\examples{
print(AIRPORT.RASTER)
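\dontrun{
# Hypothetical sketch (assumes the raster package and the grid file are
# available): distance() gives, for every NA cell, the distance to the
# nearest airport cell.
r <- raster::raster(AIRPORT.RASTER)
d <- raster::distance(r)
}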
}
\keyword{rasters}
|
/man/AIRPORT.RASTER.Rd
|
no_license
|
cran/Metadata
|
R
| false | false | 954 |
rd
|
|
## Transparent colors
## Mark Gardener 2015
## www.dataanalytics.org.uk
t_col <- function(color, percent = 50, name = NULL) {
# color = color name
# percent = % transparency
# name = an optional name for the color
## Get RGB values for named color
rgb.val <- col2rgb(color)
## Make new color using input color as base and alpha set by transparency
t.col <- rgb(rgb.val[1], rgb.val[2], rgb.val[3],
max = 255,
alpha = (100 - percent) * 255 / 100,
names = name)
## Save the color
invisible(t.col)
}
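## Example usage (illustrative only, not part of the original analysis):
## a 30% transparent red; t_col() returns the colour invisibly, so print it.
demo_col <- t_col("red", percent = 30, name = "lt.red")
print(demo_col)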
#Set working directory location
(WD <- dirname(rstudioapi::getSourceEditorContext()$path))
if (!is.null(WD)) setwd(WD)
#Load required packages
library(ggplot2)
library(dplyr)
#Load counts
Viral_Reads <- read.table("./Viral_Reads.txt", header = T)#Load viral read table
Viral_Reads$Temperature <- gsub("33", "33°C", Viral_Reads$Temperature)
Viral_Reads$Temperature <- gsub("37", "37°C", Viral_Reads$Temperature)
## Mean fraction of total viral counts
#SARS-CoV-2
p1 <- Viral_Reads %>% filter(Genome == "SARS.CoV.2") %>%
ggplot(., aes(x=Condition, y=Frac_Total_Viral_Counts, fill=Condition)) +
geom_boxplot(coef=1e30) +
geom_jitter(aes(colour = Donor)) +
#geom_col(position=position_dodge(1), width = 0.5) +
scale_fill_manual(values = c("#1b9e77","#7570b3","#d95f02"), labels = c("Mock", "SARS-CoV", "SARS-CoV-2")) +
facet_grid(Temperature ~ Time) + theme_bw() +
theme(axis.text.x = element_text(angle = 90), axis.text = element_text(size = 14, family = "sans"),
legend.text = element_text(size = 14, family = "sans"), strip.text = element_text(size = 14),
axis.title = element_text(size = 16, family = "sans"),
legend.title = element_text(size = 18, family = "sans")) +
scale_x_discrete(breaks = c("Mock", "SARS.CoV", "SARS.CoV.2"),
labels = c("Mock", "SARS-CoV", "SARS-CoV-2")) +
labs(y= "Fraction of Viral Counts", x = "", fill = "")
# Modify colors facet rectangle to match condition palette
e <- ggplot_gtable(ggplot_build(p1))
strip_t <- which(grepl('strip-t', e$layout$name))
time <- c("#ffffcc", "#a1dab4", "#41b6c4", "#225ea8")
k <- 1
for (i in strip_t) {
j <- which(grepl('rect', e$grobs[[i]]$grobs[[1]]$childrenOrder))
e$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill <-time[k]
k <- k+1
}
strip_r1 <- which(grepl('strip-r', e$layout$name))
temp <- c("#66a61e", "#e7298a")
k <- 1
for (i in strip_r1) {
j <- which(grepl('rect', e$grobs[[i]]$grobs[[1]]$childrenOrder))
e$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill <- t_col(temp[k])
k <- k+1
}
ggsave(filename = "SARS.CoV.2_Viral_Reads.pdf", grid::grid.draw(e), width = 30, height = 20, units = "cm")
#SARS-CoV
p2 <- Viral_Reads %>% filter(Genome == "SARS.CoV") %>%
ggplot(., aes(x=Condition, y=Frac_Total_Viral_Counts, fill=Condition)) +
geom_boxplot(coef=1e30) +
scale_fill_manual(values = c("#1b9e77","#7570b3","#d95f02"), labels = c("Mock", "SARS-CoV", "SARS-CoV-2")) +
geom_jitter(aes(colour = Donor)) +
#geom_col(position=position_dodge(1), width = 0.5) +
facet_grid(Temperature ~ Time) +
theme_bw() +
theme(axis.text.x = element_text(angle = 90), axis.text = element_text(size = 14, family = "sans"),
legend.text = element_text(size = 14, family = "sans"), strip.text = element_text(size = 14),
axis.title = element_text(size = 16, family = "sans"),
        legend.title = element_text(size = 18, family = "sans")) +
scale_x_discrete(breaks = c("Mock", "SARS.CoV", "SARS.CoV.2"),
labels = c("Mock", "SARS-CoV", "SARS-CoV-2")) +
labs(y= "Fraction of Viral Counts", x = "", fill = "Condition")
# Modify colors facet rectangle to match condition palette
e <- ggplot_gtable(ggplot_build(p2))
strip_t <- which(grepl('strip-t', e$layout$name))
time <- c("#ffffcc", "#a1dab4", "#41b6c4", "#225ea8")
k <- 1
for (i in strip_t) {
j <- which(grepl('rect', e$grobs[[i]]$grobs[[1]]$childrenOrder))
e$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill <-time[k]
k <- k+1
}
strip_r1 <- which(grepl('strip-r', e$layout$name))
temp <- c("#66a61e", "#e7298a")
k <- 1
for (i in strip_r1) {
j <- which(grepl('rect', e$grobs[[i]]$grobs[[1]]$childrenOrder))
e$grobs[[i]]$grobs[[1]]$children[[j]]$gp$fill <- t_col(temp[k])
k <- k+1
}
ggsave(filename = "SARS.CoV_Viral_Reads.pdf", grid::grid.draw(e), width = 30, height = 20, units = "cm")
|
/Figures/FigureS4/Virus counts.R
|
no_license
|
IFIK-virology/Temperature
|
R
| false | false | 4,374 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict}
\alias{predict}
\alias{predictive_interval}
\alias{posterior_linpred}
\alias{posterior_predict}
\alias{predictive_interval.glmmfields}
\alias{posterior_linpred.glmmfields}
\alias{posterior_predict.glmmfields}
\alias{predict.glmmfields}
\title{Predict from a glmmfields model}
\usage{
\method{predictive_interval}{glmmfields}(object, ...)
\method{posterior_linpred}{glmmfields}(object, ...)
\method{posterior_predict}{glmmfields}(object, ...)
\method{predict}{glmmfields}(object, newdata = NULL,
estimate_method = c("median", "mean"), conf_level = 0.95,
interval = c("confidence", "prediction"), type = c("link",
"response"), return_mcmc = FALSE, iter = "all", ...)
}
\arguments{
\item{object}{An object returned by \code{\link[=glmmfields]{glmmfields()}}.}
\item{...}{Ignored currently}
\item{newdata}{Optionally, a data frame to predict on}
\item{estimate_method}{Method for computing point estimate ("mean" or
"median")}
\item{conf_level}{Probability level for the credible intervals.}
\item{interval}{Type of interval calculation. Same as for
\code{\link[stats:predict.lm]{stats::predict.lm()}}.}
\item{type}{Whether the predictions are returned on "link" scale or
"response" scale (Same as for \code{\link[stats:predict.glm]{stats::predict.glm()}}).}
\item{return_mcmc}{Logical. Should the full MCMC draws be returned for the
predictions?}
\item{iter}{Number of MCMC iterations to draw. Defaults to all.}
}
\description{
These functions extract posterior draws or credible intervals. The helper
functions are named to match those in the \pkg{rstanarm} package and call the
function \code{predict()} with appropriate argument values.
}
\examples{
\donttest{
library(ggplot2)
# simulate:
set.seed(1)
s <- sim_glmmfields(
n_draws = 12, n_knots = 12, gp_theta = 2.5,
gp_sigma = 0.2, sd_obs = 0.1
)
# fit:
# options(mc.cores = parallel::detectCores()) # for parallel processing
m <- glmmfields(y ~ 0,
data = s$dat, time = "time",
lat = "lat", lon = "lon",
nknots = 12, iter = 800, chains = 1
)
# Predictions:
# Link scale credible intervals:
p <- predict(m, type = "link", interval = "confidence")
head(p)
# Prediction intervals on new observations (include observation error):
p <- predictive_interval(m)
head(p)
# Posterior prediction draws:
p <- posterior_predict(m, iter = 100)
dim(p) # rows are iterations and columns are data elements
# Draws from the linear predictor (not in link space):
p <- posterior_linpred(m, iter = 100)
dim(p) # rows are iterations and columns are data elements
# Use the `tidy` method to extract parameter estimates as a data frame:
head(tidy(m, conf.int = TRUE, conf.method = "HPDinterval"))
# Make predictions on a fine-scale spatial grid:
pred_grid <- expand.grid(
lat = seq(min(s$dat$lat), max(s$dat$lat), length.out = 25),
lon = seq(min(s$dat$lon), max(s$dat$lon), length.out = 25),
time = unique(s$dat$time)
)
pred_grid$prediction <- predict(m,
newdata = pred_grid, type = "response", iter = 100,
estimate_method = "median"
)$estimate
ggplot(pred_grid, aes(lon, lat, fill = prediction)) +
facet_wrap(~time) +
geom_raster() +
scale_fill_gradient2()
}
}
|
/man/predict.Rd
|
no_license
|
Climostatistics/glmmfields
|
R
| false | true | 3,244 |
rd
|
|
testlist <- list(x = c(NA_integer_, NA_integer_), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961908-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 139 |
r
|
|
library(dplyr)
library(ggplot2)
args = commandArgs(trailingOnly=TRUE)
# if (length(args)!=2) {
# stop("please provide the path to 1) cellphoneDB output folder, 2) differentially expressed genes folder", call.=FALSE)
# }else{
# message('cellphoneDB output folder: ', args[1])
# message('DEGs folder: ', args[2])
# }
CPdb_folder = '~/cellphoneDB/analysis/CVID/out/' #args[1]
DEG_folder = '~/cellphoneDB/analysis/CVID/DEG_MAST_20200131/' #args[2]
# Adapt deconvoluted
my_deconvoluted = read.delim(paste0(CPdb_folder, 'deconvoluted.txt'), stringsAsFactors = F)
my_deconvoluted[, grep('celltype', colnames(my_deconvoluted)) ] = 10
colnames(my_deconvoluted) = gsub('celltype_', '', colnames(my_deconvoluted))
colnames(my_deconvoluted) = gsub('\\._', '_', colnames(my_deconvoluted))
colnames(my_deconvoluted) = gsub('\\.', '-', colnames(my_deconvoluted))
# Load DEG
DEGs_f = list.files(DEG_folder, full.names = T)
DEGs = lapply(DEGs_f, read.csv, stringsAsFactors=F)
# Filter significant DEGs
# expressed_G = lapply(DEGs, subset, percentExpr_cluster+percentExpr_rest > 0.2) %>%
# lapply(., subset, Gene %in% my_deconvoluted$gene_name )
expressed_G = lapply(DEGs, subset, pct.1+pct.2 > 0.2) %>%
lapply(., subset, Gene %in% my_deconvoluted$gene_name )
names(expressed_G) = sapply(strsplit(DEGs_f, '/'), tail, 1) %>% strsplit(., '_CVID_vs_') %>% sapply(., head, 1) %>% gsub('\\+', '', .)
DEGs = lapply(expressed_G, subset, adj.P.Val < 0.01)
names(DEGs) = names(expressed_G)
DEGs = lapply(DEGs, subset, abs(logFC) >= 0.1)
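# Quick sanity check (not in the original script): number of significant DEGs
# retained per cell type after the FDR and fold-change filters.
print(sapply(DEGs, nrow))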
# Fill deconvoluted with DEGs p-values
nrow(my_deconvoluted)
# build genes2pvalue dictionary
get_DEG_pval = function(gene)
sapply(DEGs, function(x) x$adj.P.Val[ x$Gene == gene] * sign(x$logFC[ x$Gene == gene]) ) %>% unlist(.)
get_DEG_foldchange = function(gene)
sapply(DEGs, function(x) x$logFC[ x$Gene == gene] ) %>% unlist(.)
genes2logFold = lapply(unique(my_deconvoluted$gene_name), get_DEG_foldchange)
names(genes2logFold) = unique(my_deconvoluted$gene_name)
# build genes2percent dictionary
# get_percent = function(gene)
# sapply(expressed_G, function(x) x$percentExpr_cluster[ x$Gene == gene] ) %>% unlist(.)
get_percent = function(gene)
sapply(expressed_G, function(x) x$pct.1[ x$Gene == gene] ) %>% unlist(.)
genes2percent = lapply(unique(my_deconvoluted$gene_name), get_percent)
names(genes2percent) = unique(my_deconvoluted$gene_name)
# Filter interactions with no DEGs
genes_in_DEGs = sapply(DEGs, function(x) x$Gene) %>% unlist(.) %>% unique(.)
my_deconvoluted = subset(my_deconvoluted, gene_name %in% genes_in_DEGs)
# Substitute the placeholder value with the DEG log fold change (sign gives direction)
genes2logFold = genes2logFold[ sapply(genes2logFold, length) > 0 ]
for(gene in names(genes2logFold)){
for ( celltype in names(genes2logFold[[gene]]) )
my_deconvoluted[ my_deconvoluted$gene_name == gene, celltype ] = genes2logFold[[gene]][celltype]
}
# Remove genes not in the L/R collection
rows2remove = apply(my_deconvoluted[, 7:ncol(my_deconvoluted)], 1, min) != 10
my_deconvoluted = my_deconvoluted[ rows2remove, ]
# Remove celltypes with no DEGs in the L/R collection
celltype2remove = names(which(apply(my_deconvoluted[, 7:ncol(my_deconvoluted)], 2, min) == 10))
my_deconvoluted = my_deconvoluted[ , !(names(my_deconvoluted) %in% celltype2remove)]
nrow(my_deconvoluted)
# Adapt means matrix
means_file = read.delim(paste0(CPdb_folder, 'means.txt'), stringsAsFactors = F)
# Remove non-curated interactions
means_file = subset(means_file, annotation_strategy == "user_curated")
means_file = means_file[ ! duplicated(means_file$id_cp_interaction), ]
# Add genes in complexes
complexes = read.csv('~/farm/CellPhoneDB-data_smallmolecules/data/sources/complex_curated.csv', stringsAsFactors = F)
complexes$complex_name = paste0('complex:', complexes$complex_name)
genes = read.csv('~/farm/CellPhoneDB-data_smallmolecules/data/gene_input_all.csv', stringsAsFactors = F)
complexes2genes = lapply(complexes$complex_name, function(cx) subset(genes, uniprot %in% complexes[complexes$complex_name == cx, 2:5] )$gene_name )
complexes2genes = lapply(complexes2genes, unique)
names(complexes2genes) = complexes$complex_name
# Build means matrix de novo
my_means = unique(means_file[1:11])
my_means$gene_a[ my_means$partner_a %in% names(complexes2genes)] = sapply(complexes2genes[my_means$partner_a[my_means$partner_a %in% names(complexes2genes)]], paste, collapse=';')
my_means$gene_b[ my_means$partner_b %in% names(complexes2genes)] = sapply(complexes2genes[my_means$partner_b[my_means$partner_b %in% names(complexes2genes)]], paste, collapse=';')
# Add reverse partnerA -> B and vice versa
my_means_reverse = my_means
my_means_reverse$id_cp_interaction = paste0(my_means$id_cp_interaction, '_rev')
my_means_reverse$gene_a = my_means$gene_b
my_means_reverse$partner_a = my_means$partner_b
my_means_reverse$gene_b = my_means$gene_a
my_means_reverse$partner_b = my_means$partner_a
my_means_reverse$interacting_pair = paste(my_means_reverse$gene_a, my_means_reverse$gene_b, sep='_')
my_means = rbind(my_means, my_means_reverse)
my_means = my_means[ ! duplicated(my_means$interacting_pair) , ]
# We define relevant interactions as those where partnerB has expression > 10% and any partnerA member is a DEG
int_of_interest = function(int, ctA, ctB){
partnersA = strsplit(int[1], ';') %>% unlist(.)
partnersB = strsplit(int[2], ';') %>% unlist(.)
A = all(ctA %in% sapply(genes2percent[partnersA], names))
B = all(ctB %in% sapply(genes2percent[partnersB], names))
Adeg = any(ctA %in% sapply(genes2logFold[partnersA], names))
if(B & Adeg){
max_fold = sapply(genes2logFold[partnersA], function(x) x[ctA] ) %>% unlist(.)
max_fold = max_fold[ which.max(abs(max_fold)) ]
return(max_fold)
}else{
return(10)
}
}
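# Illustrative call (hypothetical gene and cell-type names, not from the
# data): when the receiver is not expressed or no partnerA member is a DEG,
# the sentinel value 10 is returned; otherwise the largest-magnitude log fold
# change among the partnerA members is returned.
int_of_interest(c("GENE_A", "GENE_B"), "B_naive", "T_CD4")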
# For each pair of interacting cell types, check if the interaction is relevant because a partner is differentially expressed, and retrieve the fold change
for (ctA in names(DEGs) )
for (ctB in names(DEGs) ){
    if( ctA == ctB | length(grep('B', c(ctA, ctB))) == 0) # skip self-pairs and pairs without a B cell
next()
foldchangeA = apply(my_means[,5:6], 1, int_of_interest, ctA, ctB)
if( all(foldchangeA == 10) )
next()
df = data.frame(foldchangeA)
names(df) = paste0(ctA, '.DEGs---', ctB)
my_means = cbind(my_means, df)
}
# Remove interactions that are not relevant
idx = which(apply(my_means[, 12:ncol(my_means)], 1, sum) != 10*(ncol(my_means)-11) )
my_means = my_means[idx, ]
# Fix L/R names
genes_a = my_means$gene_a
genes_a[ grep('complex', my_means$partner_a) ] = grep('complex', my_means$partner_a, value = T) %>% gsub('complex:', '', .)
genes_b = my_means$gene_b
genes_b[ grep('complex', my_means$partner_b) ] = grep('complex', my_means$partner_b, value = T) %>% gsub('complex:', '', .)
rownames(my_means) = paste(genes_a, genes_b, sep = '---')
# Plot the results - as retrieved
results = as.matrix(my_means[, 12:ncol(my_means)])
results[ results == 10 ] = 0
results = results[ rowSums(results) != 0 , ]
library("RColorBrewer")
library("gplots")
col <- colorRampPalette(brewer.pal(9, "RdBu"))(256)
par(mar=c(1,1,1,1))
pdf('~/cellphoneDB/analysis/CVID/cellphoneDB_DEGs_significant_FDR01_heatmap_alternative.pdf', width = 22, height = 22)
heatmap.2(t(results), scale = "none", col = bluered(100), Rowv = NA, Colv = NA,
trace = "none", density.info = "none",
sepwidth=c(0.01,0.01),
sepcolor="black",
colsep=0:ncol(t(results)),
rowsep=0:nrow(t(results)),
keysize = 0.5,
key=TRUE, symkey=FALSE, cexRow=1,cexCol=1,margins=c(12,25),srtCol=45)
graphics.off()
# Plot the results - alternative format
library(reshape2)
results = melt(as.matrix(my_means[, 12:ncol(my_means)]), factorsAsStrings = F)
results$Var1 = as.character(results$Var1)
results$Var2 = as.character(results$Var2)
results = subset(results, value != 10)
results$partnerA_DE = strsplit(results$Var1, split = '---') %>% sapply(., head, 1)
results$partnerB = strsplit(results$Var1, split = '---') %>% sapply(., tail, 1)
results$celltypeA_DE_in_CVID = strsplit(results$Var2, split = '---') %>% sapply(., head, 1) %>% gsub('\\.DEGs', '', .)
results$celltypeB = strsplit(results$Var2, split = '---') %>% sapply(., tail, 1)
results$logFC = results$value
results$Y = paste(results$partnerA_DE, results$celltypeA_DE_in_CVID, sep = ' --- ')
results$X = paste(results$partnerB, results$celltypeB, sep = ' --- ')
head(results)
library(ggplot2)
ggplot(results, aes(x = X, y = Y)) +
geom_tile(aes(fill = logFC), colour = "black") +
xlab('interacting partner / cell type') + ylab('genes differentially expressed in CVID') +
scale_fill_gradientn(colors = rev(brewer.pal(11, "RdBu"))) +
ggtitle("Cell-cell communication events differentially expressed in CVID") +
theme(#panel.background = element_blank(),
#panel.grid.major = element_blank(),
#panel.grid.minor = element_blank(),
panel.border = element_blank(),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
ggsave(filename = '~/cellphoneDB/analysis/CVID/cellphoneDB_DEGs_significant_FDR01_heatmap.pdf', dpi = 300, width = 30, height = 12)
# Add partner expression
partner_expression = melt(unlist(genes2percent))
results$partnerB_percentExpr = partner_expression[ gsub(' --- ', '.', results$X), ]
write.csv(results[, -c(1:3)], file = '~/cellphoneDB/analysis/CVID/cellphoneDB_DEGs_significant_FDR01.csv', quote = F, row.names = F)
|
/202003_initial_analysis_scTranscriptomics/scTranscriptomics/M3b_retrieve_DEGs_from_CellPhoneDB.r
|
permissive
|
ventolab/CVID
|
R
| false | false | 9,431 |
r
|
library(dplyr)
library(ggplot2)
args = commandArgs(trailingOnly=TRUE)
# if (length(args)!=2) {
# stop("please provide the path to 1) cellphoneDB output folder, 2) differentially expressed genes folder", call.=FALSE)
# }else{
# message('cellphoneDB output folder: ', args[1])
# message('DEGs folder: ', args[2])
# }
CPdb_folder = '~/cellphoneDB/analysis/CVID/out/' #args[1]
DEG_folder = '~/cellphoneDB/analysis/CVID/DEG_MAST_20200131/' #args[2]
# Adapt deconvoluted
my_deconvoluted = read.delim(paste0(CPdb_folder, 'deconvoluted.txt'), stringsAsFactors = F)
my_deconvoluted[, grep('celltype', colnames(my_deconvoluted)) ] = 10
colnames(my_deconvoluted) = gsub('celltype_', '', colnames(my_deconvoluted))
colnames(my_deconvoluted) = gsub('\\._', '_', colnames(my_deconvoluted))
colnames(my_deconvoluted) = gsub('\\.', '-', colnames(my_deconvoluted))
# Load DEG
DEGs_f = list.files(DEG_folder, full.names = T)
DEGs = lapply(DEGs_f, read.csv, stringsAsFactors=F)
# Filter significant DEGs
# expressed_G = lapply(DEGs, subset, percentExpr_cluster+percentExpr_rest > 0.2) %>%
# lapply(., subset, Gene %in% my_deconvoluted$gene_name )
expressed_G = lapply(DEGs, subset, pct.1+pct.2 > 0.2) %>%
lapply(., subset, Gene %in% my_deconvoluted$gene_name )
names(expressed_G) = sapply(strsplit(DEGs_f, '/'), tail, 1) %>% strsplit(., '_CVID_vs_') %>% sapply(., head, 1) %>% gsub('\\+', '', .)
DEGs = lapply(expressed_G, subset, adj.P.Val < 0.01)
names(DEGs) = names(expressed_G)
DEGs = lapply(DEGs, subset, abs(logFC) >= 0.1)
# Fill deconvoluted with DEGs p-values
nrow(my_deconvoluted)
# build genes2pvalue dictionary
get_DEG_pval = function(gene)
sapply(DEGs, function(x) x$adj.P.Val[ x$Gene == gene] * sign(x$logFC[ x$Gene == gene]) ) %>% unlist(.)
get_DEG_foldchange = function(gene)
sapply(DEGs, function(x) x$logFC[ x$Gene == gene] ) %>% unlist(.)
genes2logFold = lapply(unique(my_deconvoluted$gene_name), get_DEG_foldchange)
names(genes2logFold) = unique(my_deconvoluted$gene_name)
# build genes2pvalue dictionary
# get_percent = function(gene)
# sapply(expressed_G, function(x) x$percentExpr_cluster[ x$Gene == gene] ) %>% unlist(.)
get_percent = function(gene)
sapply(expressed_G, function(x) x$pct.1[ x$Gene == gene] ) %>% unlist(.)
genes2percent = lapply(unique(my_deconvoluted$gene_name), get_percent)
names(genes2percent) = unique(my_deconvoluted$gene_name)
# Filter interactions with no DEGs
genes_in_DEGs = sapply(DEGs, function(x) x$Gene) %>% unlist(.) %>% unique(.)
my_deconvoluted = subset(my_deconvoluted, gene_name %in% genes_in_DEGs)
# Substitute value by the adj.P.Val signed according to the fold change
genes2logFold = genes2logFold[ sapply(genes2logFold, length) > 0 ]
for(gene in names(genes2logFold)){
for ( celltype in names(genes2logFold[[gene]]) )
my_deconvoluted[ my_deconvoluted$gene_name == gene, celltype ] = genes2logFold[[gene]][celltype]
}
# Remove genes not in the L/R collection
rows2remove = apply(my_deconvoluted[, 7:ncol(my_deconvoluted)], 1, min) != 10
my_deconvoluted = my_deconvoluted[ rows2remove, ]
# Remove celltypes with no DEGs in the L/R collection
celltype2remove = names(which(apply(my_deconvoluted[, 7:ncol(my_deconvoluted)], 2, min) == 10))
my_deconvoluted = my_deconvoluted[ , !(names(my_deconvoluted) %in% celltype2remove)]
nrow(my_deconvoluted)
# Adapt means matrix
means_file = read.delim(paste0(CPdb_folder, 'means.txt'), stringsAsFactors = F)
# Remove non-curated interactions
means_file = subset(means_file, annotation_strategy == "user_curated")
means_file = means_file[ ! duplicated(means_file$id_cp_interaction), ]
# Add genes in complexes
complexes = read.csv('~/farm/CellPhoneDB-data_smallmolecules/data/sources/complex_curated.csv', stringsAsFactors = F)
complexes$complex_name = paste0('complex:', complexes$complex_name)
genes = read.csv('~/farm/CellPhoneDB-data_smallmolecules/data/gene_input_all.csv', stringsAsFactors = F)
complexes2genes = lapply(complexes$complex_name, function(cx) subset(genes, uniprot %in% complexes[complexes$complex_name == cx, 2:5] )$gene_name )
complexes2genes = lapply(complexes2genes, unique)
names(complexes2genes) = complexes$complex_name
# Build means matrix de novo
my_means = unique(means_file[1:11])
my_means$gene_a[ my_means$partner_a %in% names(complexes2genes)] = sapply(complexes2genes[my_means$partner_a[my_means$partner_a %in% names(complexes2genes)]], paste, collapse=';')
my_means$gene_b[ my_means$partner_b %in% names(complexes2genes)] = sapply(complexes2genes[my_means$partner_b[my_means$partner_b %in% names(complexes2genes)]], paste, collapse=';')
# Add reverse partnerA -> B and vice versa
my_means_reverse = my_means
my_means_reverse$id_cp_interaction = paste0(my_means$id_cp_interaction, '_rev')
my_means_reverse$gene_a = my_means$gene_b
my_means_reverse$partner_a = my_means$partner_b
my_means_reverse$gene_b = my_means$gene_a
my_means_reverse$partner_b = my_means$partner_a
my_means_reverse$interacting_pair = paste(my_means_reverse$gene_a, my_means_reverse$gene_b, sep='_')
my_means = rbind(my_means, my_means_reverse)
my_means = my_means[ ! duplicated(my_means$interacting_pair) , ]
# We define relevant interactions as those where the partnerB have expression > 10% and any partnerA member is DEG
int_of_interest = function(int, ctA, ctB){
partnersA = strsplit(int[1], ';') %>% unlist(.)
partnersB = strsplit(int[2], ';') %>% unlist(.)
A = all(ctA %in% sapply(genes2percent[partnersA], names))
B = all(ctB %in% sapply(genes2percent[partnersB], names))
Adeg = any(ctA %in% sapply(genes2logFold[partnersA], names))
if(B & Adeg){
max_fold = sapply(genes2logFold[partnersA], function(x) x[ctA] ) %>% unlist(.)
max_fold = max_fold[ which.max(abs(max_fold)) ]
return(max_fold)
}else{
return(10)
}
}
# For each pair of interacting cell types, chek if interaction is relevant because a partner is DE and retrieve forl change
for (ctA in names(DEGs) )
for (ctB in names(DEGs) ){
if( ctA == ctB | length(grep('B', c(ctA, ctB))) == 0) # if any B cell there
next()
foldchangeA = apply(my_means[,5:6], 1, int_of_interest, ctA, ctB)
if( all(foldchangeA == 10) )
next()
df = data.frame(foldchangeA)
names(df) = paste0(ctA, '.DEGs---', ctB)
my_means = cbind(my_means, df)
}
# Remove interactions that are not relevant
idx = which(apply(my_means[, 12:ncol(my_means)], 1, sum) != (10*(ncol(my_means)-11)) )
my_means = my_means[idx, ]
# Fix L/R names
genes_a = my_means$gene_a
genes_a[ grep('complex', my_means$partner_a) ] = grep('complex', my_means$partner_a, value = T) %>% gsub('complex:', '', .)
genes_b = my_means$gene_b
genes_b[ grep('complex', my_means$partner_b) ] = grep('complex', my_means$partner_b, value = T) %>% gsub('complex:', '', .)
rownames(my_means) = paste(genes_a, genes_b, sep = '---')
# Plot the results - as retrieved
results = as.matrix(my_means[, 12:ncol(my_means)])
results[ results == 10 ] = 0
results = results[ rowSums(results) != 0 , ]
library("RColorBrewer")
library("gplots")
col <- colorRampPalette(brewer.pal(9, "RdBu"))(256)
par(mar=c(1,1,1,1))
pdf('~/cellphoneDB/analysis/CVID/cellphoneDB_DEGs_significant_FDR01_heatmap_alternative.pdf', width = 22, height = 22)
heatmap.2(t(results), scale = "none", col = bluered(100), Rowv = NA, Colv = NA,
trace = "none", density.info = "none",
sepwidth=c(0.01,0.01),
sepcolor="black",
colsep=0:ncol(t(results)),
rowsep=0:nrow(t(results)),
keysize = 0.5,
key=TRUE, symkey=FALSE, cexRow=1,cexCol=1,margins=c(12,25),srtCol=45)
graphics.off()
# Plot the results - alternative format
library(reshape2)
results = melt(as.matrix(my_means[, 12:ncol(my_means)]), factorsAsStrings = F)
results$Var1 = as.character(results$Var1)
results$Var2 = as.character(results$Var2)
results = subset(results, value != 10)
results$partnerA_DE = strsplit(results$Var1, split = '---') %>% sapply(., head, 1)
results$partnerB = strsplit(results$Var1, split = '---') %>% sapply(., tail, 1)
results$celltypeA_DE_in_CVID = strsplit(results$Var2, split = '---') %>% sapply(., head, 1) %>% gsub('\\.DEGs', '', .)
results$celltypeB = strsplit(results$Var2, split = '---') %>% sapply(., tail, 1)
results$logFC = results$value
results$Y = paste(results$partnerA_DE, results$celltypeA_DE_in_CVID, sep = ' --- ')
results$X = paste(results$partnerB, results$celltypeB, sep = ' --- ')
head(results)
library(ggplot2)
ggplot(results, aes(x = X, y = Y)) +
geom_tile(aes(fill = logFC), colour = "black") +
xlab('interacting partner / cell type') + ylab('genes differentially expressed in CVID') +
scale_fill_gradientn(colors = rev(brewer.pal(11, "RdBu"))) +
ggtitle("Cell-cell communication events differentially expressed in CVID") +
theme(#panel.background = element_blank(),
#panel.grid.major = element_blank(),
#panel.grid.minor = element_blank(),
panel.border = element_blank(),
axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
ggsave(filename = '~/cellphoneDB/analysis/CVID/cellphoneDB_DEGs_significant_FDR01_heatmap.pdf', dpi = 300, width = 30, height = 12)
# Add partner expression
partner_expression = melt(unlist(genes2percent))
results$partnerB_percentExpr = partner_expression[ gsub(' --- ', '.', results$X), ]
write.csv(results[, -c(1:3)], file = '~/cellphoneDB/analysis/CVID/cellphoneDB_DEGs_significant_FDR01.csv', quote = F, row.names = F)
|
/nohup/man.r
|
no_license
|
unix-history/tropix-cmd
|
R
| false | false | 2,842 |
r
| ||
## makeCacheMatrix() and cacheSolve() explore caching and lexical scoping in R.
## The code structure and many object names in this file were laid out by Dr. R Peng.
## Major thanks to classmate Randeep Grewall for helping to understand how components of Dr. Peng's code actually work.
## I hope this file doesn't contain too many comments. I need them (and any forthcoming edits) for learning.
## makeCacheMatrix performs many tasks, including these key tasks:
## Makes methods (i.e. functions) available to another function, cacheSolve()
## Receives a cached value of a matrix inverse from cacheSolve() via the global environment
makeCacheMatrix <-function(x = matrix()) {
m <- NULL ## initialize "m" as NULL
## "m" eventually receives the cached inverse
set <- function(y) { ## "set" passes a matrix to "x" in the parent environment
x <<- y ## "y" is initialized by the first argument in makeCacheMatrix()
m <<- NULL
}
get <- function() x ## get is a function that enables
## cacheSolve() to retrieve the value of "x" from
## the calling environment in makeCacheMatrix()
setinverse <- function(solve) m <<- solve
## setinverse is a function to be used by cacheSolve()
## takes "m" from parent env. wherever setinverse is called
## calls the solve() function to find the inverse of "m"
getinverse <- function() m ## enables cacheSolve to retrieve m from parent environment
list(set = set, get = get, ## returns a list of four methods (i.e. functions)
setinverse = setinverse,
getinverse = getinverse)
## Call class(makeCacheMatrix(Z)) and press Enter to confirm a list is returned.
}
## cacheSolve() performs many tasks, including these key tasks:
## Receives methods and the value of matrix "x" from makeCacheMatrix
## Returns the matrix inverse to the global environment as "m"
cacheSolve <- function(x, ...) {
m <- x$getinverse() ## a local variable "m" (thanks to Randeep Grewal for that insight)
## receives getinverse method; retrieves "m" from parent
## env., which is the global environment in this case
if(!is.null(m)) { ## checks whether "m" is NULL
message("getting cached data") ## if "m" is NOT NULL, a message is printed
return(m) ## and the cached "m" is returned
}
temp <- x$get() ## creates local storage for matrix "x"
## "get" method assigns matrix "x" to "temp"
m <- solve(temp, ...) ## calls solve() to find inverse of "temp"
## assigns inverse of "temp" to "m" in calling environment
x$setinverse(m) ## copies inverse from "m" to "x"
m ## returns inverse to "m" in the global environment
}
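## Example usage (illustrative; any invertible matrix works):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(cm)   # computes the inverse via solve() and caches it
## cacheSolve(cm)   # prints "getting cached data" and returns the cached inverse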
|
/cachematrix.R
|
no_license
|
SOTCK/ProgrammingAssignment2
|
R
| false | false | 3,148 |
r
|
## File Name: SRM_PARTABLE_FLAT_DYAD.R
## File Version: 0.11
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
## Function for the Dyad
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
SRM_PARTABLE_FLAT_DYAD <- function(PARLIST,
# definitions for default parameters
# 1. covariance relationship effects of one latent rr
auto.cov.lv.dy = TRUE,
# 2. covariance relationship effects of one observed rr
auto.cov.ov.dy = TRUE,
  # 3. covariance of relationship effects across latent rrs
auto.cov.lv.block = FALSE,
# 4. meanstructure
auto.int.ov = FALSE,
auto.int.lv = FALSE,
# definitions for fixed values
auto.fix.loa.first.ind.ij=TRUE,
auto.fix.loa.first.ind.ji=TRUE,
  auto.fix.loa.ind.ij.ji = TRUE, # loadings of the ij and ji parts are constrained to be equal
auto.fix.int.first.ind.ij=FALSE,
auto.fix.int.first.ind.ji=FALSE,
ngroups = 1L )
{
# Step 1: extract `names' of various types of variables:
# there are a number of possibilities
# IMPORTANT: We make this selection for one group only!!!
idx = which(PARLIST$group == 1)
TMP.PARLIST = lapply(PARLIST,function(x) x[idx])
# 1. regular latent round robin variable (defined by =~)
  # 2. observed round robin variables that are used to define lv-rr vars (in =~)
# 3. observed round robin variables that are not 1. or 2. but that are
# used as predictors or outcomes
  # 4. true exogenous variables used to predict latent rr variables
  # 5. true exogenous variables used to predict observed rr vars
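  # Note: throughout this table the ij and ji parts of a round-robin variable are
  # distinguished by the "@AP" and "@PA" suffixes (see the grepl()/gsub() calls below).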
# the regular rr-lvs
rr.lv.regular.names.ij <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.lv.ij")
rr.lv.regular.names.ji <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.lv.ji")
# regular rr-lvs that are used as predictors or are the outcomes
rr.lv.names.y.ij <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.lv.y.ij") # dependent rr-lv actors
rr.lv.names.y.ji <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.lv.y.ji") # dependent rr-lv partners
rr.lv.names.x.ij <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.lv.x.ij") # independent rr-lv actors
rr.lv.names.x.ji <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.lv.x.ji") # independent rr-lv partners
# observed rrs that are the indicators of the regular rr-lvs
rr.ov.ind.names.ij <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.ov.ind.ij")
rr.ov.ind.names.ji <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.ov.ind.ji")
# observed rrs that are used as predictors or are the outcomes
rr.ov.names.y.ij <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.ov.y.ij") # dependent rr-ov actors
rr.ov.names.y.ji <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.ov.y.ji") # dependent rr-ov partners
rr.ov.names.x.ij <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.ov.x.ij") # independent rr-ov actors
rr.ov.names.x.ji <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.ov.x.ji") # independent rr-ov partners
rr.cov.names.ij <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.cov.ij") # covariance ij
rr.cov.names.ji <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="rr.cov.ji") # covariance ji
# some computations with these variables:
# ov-rrs that are not defined as regular lv-rrs (they are outcomes or they
# are predictors, but they are not allowed to be indicators)
rr.ov.notind.names.ij <- setdiff(Reduce(union, list(rr.ov.names.y.ij, rr.ov.names.x.ij, rr.cov.names.ij)),rr.ov.ind.names.ij)
rr.ov.notind.names.ji <- setdiff(Reduce(union, list(rr.ov.names.y.ji, rr.ov.names.x.ji, rr.cov.names.ji)),rr.ov.ind.names.ji)
  # it's possible that only the a-part or the p-part was defined as an ov-rr, so
  # we have to expand the respective other vector
if ( length(rr.ov.notind.names.ij) > 0L ) {
tmp.ij <- gsub("@AP","",rr.ov.notind.names.ij,perl=TRUE)
tmp.ji <- gsub("@PA","",rr.ov.notind.names.ji,perl=TRUE)
    if (!(tmp.ij %in% tmp.ji)) { # elements in ij are missing in ji
tmp <- setdiff(tmp.ij,tmp.ji)
rr.ov.notind.names.ji <- c(rr.ov.notind.names.ji,paste(tmp,"@PA",sep=""))
}
}
if ( length(rr.ov.notind.names.ji) > 0L ) {
tmp.ij <- gsub("@AP","",rr.ov.notind.names.ij,perl=TRUE)
tmp.ji <- gsub("@PA","",rr.ov.notind.names.ji,perl=TRUE)
    if (!(tmp.ji %in% tmp.ij)) { # elements in ji are missing in ij
tmp <- setdiff(tmp.ji,tmp.ij)
rr.ov.notind.names.ij <- c(rr.ov.notind.names.ij,paste(tmp,"@AP",sep=""))
}
}
# save all rrs (latents and observed)
rr.all.lv.names.ij <- c(rr.lv.regular.names.ij,rr.ov.notind.names.ij)
rr.all.lv.names.ji <- c(rr.lv.regular.names.ji,rr.ov.notind.names.ji)
  # true exogenous covariates
sv.eqs.x <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="sv.eqs.x")
sv.eqs.y <- SRM_PARTABLE_VNAMES_DYAD(TMP.PARLIST, type="sv.eqs.y")
## +++++++++++++++++++++++++++++++++++++++++++
## 2. We construct a default parameter table
## +++++++++++++++++++++++++++++++++++++++++++
lhs <- rhs <- character(0)
mod.idx <- integer(0)
#equal <- character(0)
## 2.1 ALWAYS: variances of latent actor and partner effects
## and residual variances of actor and partner effects
lhs <- c(lhs, rr.lv.regular.names.ij, rr.lv.regular.names.ji, rr.ov.ind.names.ij, rr.ov.ind.names.ji, rr.ov.notind.names.ij, rr.ov.notind.names.ji )
rhs <- c(rhs, rr.lv.regular.names.ij, rr.lv.regular.names.ji, rr.ov.ind.names.ij, rr.ov.ind.names.ji, rr.ov.notind.names.ij, rr.ov.notind.names.ji )
## 2.3 Default covariance parameters:
## per Default, we always include the covariance between the a-part and the
## p-part of ONE rr-variable
if ( auto.cov.lv.dy & length(rr.all.lv.names.ij) > 0L & length(rr.all.lv.names.ji) > 0L) {
lhs <- c(lhs, sort(rr.all.lv.names.ij))
rhs <- c(rhs, sort(rr.all.lv.names.ji))
#equal <- c(equal,rep(as.numeric(NA),length(rr.all.lv.names.ij)))
}
if ( auto.cov.ov.dy & length(rr.ov.ind.names.ij) > 0L & length(rr.ov.ind.names.ji) > 0L) {
lhs <- c(lhs, sort(rr.ov.ind.names.ij))
rhs <- c(rhs, sort(rr.ov.ind.names.ji))
#equal <- c(equal,rep(as.numeric(NA),length(rr.ov.ind.names.ij)))
}
## Covariance block in PHI_U
## These covariances are added for those rr-lvs elements, that are not part
## of a regression model; when there is thus a regression of f1@A~f2@A, we have
## to delete the respective variable --> THIS HAS TO BE DONE
if ( auto.cov.lv.block & length(rr.all.lv.names.ij) > 1L & length(rr.all.lv.names.ji) > 1L ) {
tmp <- utils::combn(c(rr.all.lv.names.ij,rr.all.lv.names.ji), 2)
tmp <- SRM_PARTABLE_DELETE_SAME(tmp) # delete all same elements
lhs <- c(lhs, tmp[1,])
rhs <- c(rhs, tmp[2,])
#equal <- c(equal,rep(as.numeric(NA),length(tmp)))
}
op <- rep("~~", length(lhs))
mod.idx <- rep(0,length(lhs))
## 2.2 If there are rr-ovs that are not used to define rr-lvs, we treat them
## as single-indicator lvs that have a factor loading of one
if (length(rr.ov.notind.names.ij) != 0L) {
lhs <- c(lhs, rr.ov.notind.names.ij, rr.ov.notind.names.ji)
rhs <- c(rhs, rr.ov.notind.names.ij, rr.ov.notind.names.ji)
op <- c(op,rep("=~",length(c(rr.ov.notind.names.ij,rr.ov.notind.names.ji))))
#equal <- c(equal,rep(as.numeric(NA),length(c(rr.ov.notind.names.ij,rr.ov.notind.names.ji))))
mod.idx <- rep(0,length(lhs))
}
## ADD EXOGENOUS COVARIATES HERE?
## 2.3 Default Observed Variable Intercepts
#if(auto.int.ov && length(rr.ov.names.a) > 0L && length(rr.ov.names.p) > 0L) {
  # ## Note: does this really need to be intersect()?
# tmp <- Reduce(union, list(rr.ov.names.a,rr.ov.names.p))
# lhs <- c(lhs, tmp)
# rhs <- c(rhs, tmp)
# op <- c(op, rep("~1", length(tmp)))
#}
## 2.4 Default Lv intercepts --> only those that are predicted
#if(auto.int.lv && length(rr.lv.names.y.a) > 0L && length(rr.lv.names.y.p) > 0L) {
# tmp <- c(rr.lv.names.y.a,rr.lv.names.y.p)
# lhs <- c(lhs, tmp)
# rhs <- c(rhs, tmp)
# op <- c(op, rep("~1", length(tmp)))
#}
DEFAULT <- data.frame(lhs=lhs, op=op, rhs=rhs,
mod.idx=mod.idx,
#equal=equal,
stringsAsFactors=FALSE)
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## 3. We construct the user parameter table and compare with the default table
## ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# USER table
lhs <- TMP.PARLIST$lhs
op <- TMP.PARLIST$op
rhs <- TMP.PARLIST$rhs
mod.idx <- TMP.PARLIST$mod.idx
group <- TMP.PARLIST$group
fixed <- TMP.PARLIST$fixed
starts <- TMP.PARLIST$starts
equal <- TMP.PARLIST$equal
free <- TMP.PARLIST$free
USER <- data.frame(lhs=lhs, op=op, rhs=rhs,
mod.idx=mod.idx,
group=group,fixed=fixed,starts=starts,equal=equal,
free=free, stringsAsFactors=FALSE)
# check for duplicated elements in USER
TMP <- USER[,1:3]
idx <- which(duplicated(TMP))
if(length(idx) > 0L) {
warning("There are duplicated elements in model syntax.
They have been ignored.")
USER <- USER[-idx,]
}
# We combine USER and DEFAULT and check for duplicated elements
# These elements are then deleted from DEFAULT
TMP <- rbind(DEFAULT[,1:3], USER[,1:3])
idx <- which(duplicated(TMP, fromLast=TRUE)) # idx should be in DEFAULT
if(length(idx)) { DEFAULT <- DEFAULT[-idx,] }
## +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## 4. We construct the final parameter table
## +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
lhs <- c(USER$lhs, DEFAULT$lhs)
op <- c(USER$op, DEFAULT$op)
rhs <- c(USER$rhs, DEFAULT$rhs)
user <- c(rep(1L, length(USER$lhs)),
rep(0L, length(DEFAULT$lhs))) # user-specified or not
fixed <- c(USER$fixed,rep(as.numeric(NA),length(DEFAULT$lhs)))
starts <- c(USER$starts,rep(as.numeric(NA),length(DEFAULT$lhs))) # user svs
#equal <- c(USER$equal,DEFAULT$equal)
equal <- c(USER$equal,rep(as.numeric(NA),length(DEFAULT$lhs)))
free <- c(USER$free,rep(1,length(DEFAULT$lhs)))
mod.idx <- c(USER$mod.idx, DEFAULT$mod.idx) # modified or not
#label <- rep(character(1), length(lhs))
#exo <- rep(0L, length(lhs))
## some additional definitions
## fix first loading of latent actor factor indicator to one
if(auto.fix.loa.first.ind.ij) {
# fix metric by fixing the loading of the first indicator
mm.idx <- which(op == "=~" & grepl("@AP",lhs))
first.idx <- mm.idx[which(!duplicated(lhs[mm.idx]))]
fixed[first.idx] <- 1.0
free[first.idx] <- 0L
}
if(auto.fix.loa.first.ind.ji) {
# fix metric by fixing the loading of the first indicator
mm.idx <- which(op == "=~" & grepl("@PA",lhs))
first.idx <- mm.idx[which(!duplicated(lhs[mm.idx]))]
fixed[first.idx] <- 1.0
free[first.idx] <- 0L
}
if (auto.fix.loa.ind.ij.ji) {
    # we have to constrain the factor loadings of the AP and the PA vector to the same value
mm.idx.ap <- which(op == "=~" & grepl("@AP",lhs))
mm.idx.pa <- which(op == "=~" & grepl("@PA",lhs))
all.idx.ap <- mm.idx.ap[which(duplicated(lhs[mm.idx.ap]))]
all.idx.pa <- mm.idx.pa[which(duplicated(lhs[mm.idx.pa]))]
# check
zz <- paste("eqload",rep(1:length(all.idx.ap)),sep="")
if ( length( all.idx.ap ) != 0 ) {
for ( i in 1:length( all.idx.ap ) ) {
if ( is.na(equal[all.idx.ap[i]] == equal[all.idx.pa[i]]) ) {
equal[all.idx.ap[i]] = zz[i]
equal[all.idx.pa[i]] = zz[i]
} else if ( equal[all.idx.ap[i]] != equal[all.idx.pa[i]] ) {
warning("There is an error in the definition of the model syntax.
Syntax has been corrected in terms of the defaults.")
equal[all.idx.ap[i]] = zz[i]
equal[all.idx.pa[i]] = zz[i]
}
}
}
}
## Now, we have the Parameter table for one group; we now expand it for the case
## of multiple groups:
group <- rep(1L, length(lhs))
if(ngroups > 1) {
group <- rep(1:ngroups, each=length(lhs))
user <- rep(user, times=ngroups)
lhs <- rep(lhs, times=ngroups)
op <- rep(op, times=ngroups)
rhs <- rep(rhs, times=ngroups)
fixed <- rep(fixed, times=ngroups)
free <- rep(free, times=ngroups)
starts <- rep(starts, times=ngroups)
equal <- rep(equal, times=ngroups)
mod.idx <- rep(mod.idx, times=ngroups)
    ## consider group-specific defaults?
for (g in 2:ngroups) {
###
}
}
# Handling of exogenous variables?
LIST <- list( lhs = lhs,
op = op,
rhs = rhs,
user = user,
group = group)
# other columns
LIST2 <- list(fixed = fixed,
starts = starts,
equal = equal,
mod.idx = mod.idx,
free = free)
LIST <- c(LIST, LIST2)
return(LIST)
}
|
/R/SRM_PARTABLE_FLAT_DYAD.R
|
no_license
|
alexanderrobitzsch/srm
|
R
| false | false | 13,611 |
r
|
The state machine handles the states and transitions of each role.
State machine: an object composed of a set of states plus the events that trigger transitions between those states; such an object is called a state machine.
The RMApp state machine records all states of an Application (RMAppState) and the events that trigger state changes (RMAppEvent).
Its function is to receive events emitted by other objects and, based on the current state and the event type, move the current state to another state and trigger an action.
ME: a state machine records all of an object's states and is used to maintain that object's lifecycle; it receives events emitted by other objects,
then, based on the current state and the event type, transitions the current state to another state and triggers an action.
http://bubuko.com/infodetail-296314.html
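A minimal sketch of the idea in R (illustrative only -- the state names and events below are made up and this is not the actual Hadoop/YARN RMApp transition table):
transitions <- list(
  NEW     = list(START  = "RUNNING"),
  RUNNING = list(FINISH = "FINISHED", KILL = "KILLED")
)
step <- function(state, event) {
  nxt <- transitions[[state]][[event]]   # look up (current state, event) -> next state
  if (is.null(nxt)) state else nxt       # unknown events leave the state unchanged
}
step("NEW", "START")       # "RUNNING"
step("RUNNING", "FINISH")  # "FINISHED"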
|
/ColonelHouNote/src/main/java/com/hn/cluster/hadoop/doc/状态机/状态机.rd
|
no_license
|
jiangsy163/ColonelHouNote
|
R
| false | false | 768 |
rd
|
# This file is part of the standard setup for testthat.
# It is recommended that you do not modify it.
#
# Where should you do additional test configuration?
# Learn more about the roles of various files in:
# * https://r-pkgs.org/tests.html
# * https://testthat.r-lib.org/reference/test_package.html#special-files
library(testthat)
library(Fyw)
test_check("Fyw")
|
/tests/testthat.R
|
no_license
|
tamnva/Fyw
|
R
| false | false | 366 |
r
|
# ---- A function to take the last n characters (the right side) of a string ---- #
substrRight <- function(x, n){
substr(x, nchar(x)-n+1, nchar(x))
}
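# Example (illustrative): substrRight("filename.csv", 3) returns "csv"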
# ---- a function to calculate the percentage of values not equal to something in a column
perc <- function(x, n){ 100*length((which(x != n))) / length(x) }
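# Example (illustrative): perc(c(0, 2, 0, 5), 0) returns 50, i.e. half the values differ from 0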
# coefficient of variation
coef.variation <- function(x) {
sqrt(var(x))/mean(x)
}
##############################################
# ------------------------------------------ #
# Constant Annual Growth Rate (CAGR)Function #
# Takes two time series and the difference #
# in years as inputs and spits out the rate #
# converted to a rounded percentage value #
# ------------------------------------------ #
CAGR <- function(yt,ytn,n){
  r <-((yt/ytn)^(1/n)-1)*100
  round(r, digits = 2)
}
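# Example (illustrative): a series growing from 100 to 150 over 5 years
# CAGR(150, 100, 5) returns 8.45, i.e. about 8.45% constant annual growth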
|
/A collection of functions.R
|
permissive
|
chrisferris3/SimpleTimeSeriesFunctions
|
R
| false | false | 818 |
r
|
# Chapter 7 - Snakes Homework
# 21 April 2021
# Biostatistics
# Author: Amicia Canterbury
library(readr)
library(tidyverse)
library(ggplot2)
library(plotly)
library(lubridate)
snakes <- read_csv("data/snakes.csv")
snakes$day = as.factor(snakes$day) # treat day as a factor (categorical) rather than numeric
# When you want to change the data of a specific column - group_by
view(snakes)
#The first thing we do is to create some summaries of the data. Refer to the summary statistics Chapter.
snakes.summary <- snakes %>%
  group_by(day, snake) %>% # averages of everything; grouping by day alone makes more sense because it summarises per day
summarise(mean_openings = mean(openings),
sd_openings = sd(openings)) %>%
ungroup()
snakes.summary
#To fix this problem, let us ignore the grouping by both snake and day.
snakes.summary <- snakes %>%
group_by(day) %>%
summarise(mean_openings = mean(openings),
sd_openings = sd(openings)) %>%
ungroup()
snakes.summary
library(Rmisc)
snakes.summary2 <- summarySE(data = snakes, measurevar = "openings", groupvars = c("day"))
snakes.summary2
# Make plots:
ggplot(data = snakes, aes(x = day, y = openings)) +
geom_segment(data = snakes.summary2, aes(x = day, xend = day, y = openings - ci, yend = openings + ci, colour = day),
size = 2.0, linetype = "solid", show.legend = F) +
geom_boxplot(aes(fill = day), alpha = 0.6, show.legend = F) +
geom_jitter(width = 0.05)+
labs(x = "Day", y = "Openings", title = "Boxplot representing the amount of releases that occur during openings")+
theme_bw()
#What are our null hypotheses?
#H0: There is no difference between snakes with respect to the number of openings at which they habituate.
#H0: There is no difference between days in terms of the number of openings at which the snakes habituate.
#Fit the ANOVA model to test these hypotheses:
snakes.aov <- aov(openings ~ day + snake, data = snakes)
summary(snakes.aov)
#Now we need to test if the assumptions hold true (i.e. errors are normally distributed and homoscedastic). Also, where are the differences?
par(mfrow = c(2, 2))
# Checking assumptions...
# make a histogram of the residuals;
# they must be normal
snakes.res <- residuals(snakes.aov)
hist(snakes.res, col = "red")
# make a plot of residuals and the fitted values;
# they must be normal and homoscedastic
plot(fitted(snakes.aov), residuals(snakes.aov), col = "red")
snakes.tukey <- TukeyHSD(snakes.aov, which = "day", conf.level = 0.90)
plot(snakes.tukey, las = 1, col = "red")
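# Intervals that do not cross zero indicate pairs of days whose mean number of openings differ (at the 90% level set above)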
# Own plot:
ggplot(data = snakes, aes(x = openings, y = day, fill = day)) +
geom_bar(stat = "identity") +
labs(x = "Openings", y = "Day") +
theme(legend.position = "none",
axis.text.x = element_text(angle = 90)) +
ggtitle("Bar graph representing the amount of releases that occur during openings")+
theme_bw()+
theme(panel.border = element_blank(), legend.position = "none")
# Second plot:
ggplot(data = snakes, aes(x = day, y = openings, fill = snake))+
geom_col(position = "dodge", col = "black")+
labs(x = "Day", y = "Openings", title = "Showing the relationship between each snakes and the amount of releases",
fill = "Snake")+
theme_bw()+
scale_fill_manual(values =c("navy blue", "cornflower blue", "blue", "cyan1",
"cadetblue2", "skyblue"))
#scale_fill_brewer(palette = "Set3")
# scale_fill_gradient(low = "yellow", high = "red", na.value = NA) - try it
# on a temp. scale
|
/Assignments/Amicia_Canterbury_Snakes.R
|
no_license
|
amiciacanterbury/Biostats_2021
|
R
| false | false | 3,573 |
r
|
#' The Complete KSEA App Analysis
#'
#' Takes a formatted phosphoproteomics data input and performs KSEA calculations to infer relative kinase activities
#'
#' @param KSData the Kinase-Substrate dataset uploaded from the file
#' prefaced with "PSP&NetworKIN_"
#' available from github.com/casecpb/KSEA/
#' @param PX the experimental data file formatted exactly as described below;
#' must have 6 columns in the exact order: Protein, Gene, Peptide, Residue.Both, p, FC;
#' cannot have NA values, or else the entire peptide row is deleted;
#' Description of each column in PX:
#' \itemize{
#' \item{"Protein"}{ the Uniprot ID for the parent protein}
#' \item{"Gene"}{ the HUGO gene name for the parent protein}
#' \item{"Peptide"}{ the peptide sequence}
#' \item{"Residue.Both"}{ all phosphosites from that peptide, separated by semicolons if applicable;
#' must be formatted as the single amino acid abbrev. with the residue position (e.g. S102)}
#' \item{"p"}{ the p-value of that peptide (if none calculated, please write "NULL", cannot be NA)}
#' \item{"FC"}{ the fold change (not log-transformed); usually the control sample is the denominator}
#' }
#' @param NetworKIN a binary input of TRUE or FALSE, indicating whether or not to include NetworKIN predictions;
#' NetworKIN = TRUE means inclusion of NetworKIN predictions
#' @param NetworKIN.cutoff a numeric value between 1 and infinity setting the minimum NetworKIN score
#' (can be left out if NetworKIN = FALSE)
#' @param m.cutoff a numeric value between 0 and infinity indicating the min. # of substrates
#' a kinase must have to be included in the bar plot output
#' @param p.cutoff a numeric value between 0 and 1 indicating the p-value cutoff for
#' indicating significant kinases in the bar plot
#'
#' @return creates the following outputs that are deposited into your working directory:
#' a bar plot highlighting key kinase results, a .csv file of all KSEA kinase scores,
#' and a .csv file listing all kinase-substrate relationships used for the calculations
#'
#' @references
#' Casado et al. (2013) Sci Signal. 6(268):rs6
#'
#' Hornbeck et al. (2015) Nucleic Acids Res. 43:D512-20
#'
#' Horn et al. (2014) Nature Methods 11(6):603-4
#'
#' @examples
#' KSEA.Complete(KSData, PX, NetworKIN=TRUE, NetworKIN.cutoff=5, m.cutoff=5, p.cutoff=0.01)
#' KSEA.Complete(KSData, PX, NetworKIN=FALSE, m.cutoff=2, p.cutoff=0.05)
#'
#' @importFrom grDevices dev.off png tiff
#' @importFrom graphics barplot par
#' @importFrom stats aggregate complete.cases p.adjust pnorm sd
#' @importFrom utils write.csv
#'
#' @export
#----------------------------#
# IMPORTANT OVERVIEW OF PX INPUT REQUIREMENTS
# PX input requirements:
# must have exact 6 columns in the following order: Protein, Gene, Peptide, Residue.Both, p, FC
# cannot have NA values, or else the entire peptide row is deleted
# Description of each column in PX:
# - Protein = the Uniprot ID for the parent protein
# - Gene = the HUGO gene name for the parent protein
# - Peptide = the peptide sequence
# - Residue.Both = all phosphosites from that peptide, separated by semicolons if applicable; must be formatted as the single amino acid abbrev. with the residue position (e.g. S102)
# - p = the p-value of that peptide (if none calculated, please write "NULL", cannot be NA)
# - FC = the fold change (not log-transformed); usually recommended to have the control sample as the denominator
#----------------------------#
KSEA.Complete = function (KSData, PX, NetworKIN, NetworKIN.cutoff, m.cutoff, p.cutoff){
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
# Process the input data files
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
#--------------
# Process the PX data file
# Check if each peptide row has multiple phosphorylated residues and create new dataframe with a single residue per row
if (length(grep(";", PX$Residue.Both))==0){
new = PX
colnames(new)[c(2,4)] = c("SUB_GENE", "SUB_MOD_RSD")
new$log2FC = log2(abs(as.numeric(as.character(new$FC)))) # the as.numeric(as.character()) fixes an issue with the FC values as factors
new = new[complete.cases(new$log2FC),]
}
else {
double = PX[grep(";",PX$Residue.Both),]
residues = as.character(double$Residue.Both)
residues = as.matrix(residues, ncol = 1)
split = strsplit(residues, split = ";")
x = sapply(split, length)
single = data.frame(Protein = rep(double$Protein, x),
Gene = rep(double$Gene, x),
Peptide = rep(double$Peptide, x),
Residue.Both = unlist(split),
p = rep(double$p, x),
FC = rep(double$FC, x))
# create new object of PX that has all residues in separate rows
new = PX[-grep(";", PX$Residue.Both),]
new = rbind(new, single)
colnames(new)[c(2,4)] = c("SUB_GENE", "SUB_MOD_RSD")
new$log2FC = log2(abs(as.numeric(as.character(new$FC)))) # the as.numeric(as.character()) fixes an issue with the FC values as factors
new = new[complete.cases(new$log2FC),]
}
#----------------
# Process KSData dataset based on user input (NetworKIN=T/F and NetworKIN cutoff score)
if (NetworKIN == TRUE){
KSData.filtered = KSData[grep("[a-z]", KSData$Source),]
KSData.filtered = KSData.filtered[(KSData.filtered$networkin_score >= NetworKIN.cutoff),]
}
else{
KSData.filtered = KSData[grep("PhosphoSitePlus", KSData$Source),]
}
#----------------
# Extract KSData.filtered annotations that are only found in new
KSData.dataset = merge(KSData.filtered, new)
KSData.dataset = KSData.dataset[order(KSData.dataset$GENE),]
KSData.dataset$Uniprot.noIsoform = sapply(KSData.dataset$KIN_ACC_ID, function(x) unlist(strsplit(as.character(x), split="-"))[1])
# last expression collapses isoforms of the same protein for easy processing
KSData.dataset.abbrev = KSData.dataset[,c(5,1,2,16:19,14)]
colnames(KSData.dataset.abbrev) = c("Kinase.Gene", "Substrate.Gene", "Substrate.Mod", "Peptide", "p", "FC", "log2FC", "Source")
KSData.dataset.abbrev = KSData.dataset.abbrev[order(KSData.dataset.abbrev$Kinase.Gene, KSData.dataset.abbrev$Substrate.Gene, KSData.dataset.abbrev$Substrate.Mod, KSData.dataset.abbrev$p),]
# take the mean of the log2FC amongst phosphosite duplicates
KSData.dataset.abbrev = aggregate(log2FC ~ Kinase.Gene+Substrate.Gene+Substrate.Mod+Source, data=KSData.dataset.abbrev, FUN=mean)
KSData.dataset.abbrev = KSData.dataset.abbrev[order(KSData.dataset.abbrev$Kinase.Gene),]
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
# Do analysis for KSEA
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
kinase.list = as.vector(KSData.dataset.abbrev$Kinase.Gene)
kinase.list = as.matrix(table(kinase.list))
Mean.FC = aggregate(log2FC ~ Kinase.Gene, data=KSData.dataset.abbrev, FUN=mean)
Mean.FC = Mean.FC[order(Mean.FC[,1]),]
Mean.FC$mS = Mean.FC[,2]
Mean.FC$Enrichment = Mean.FC$mS/abs(mean(new$log2FC, na.rm=T))
Mean.FC$m = kinase.list
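  # KSEA z-score (Casado et al. 2013): z = (mS - mP) * sqrt(m) / delta, where mS is the mean
  # log2FC of the kinase's substrates, mP the mean log2FC of all sites, m the substrate count,
  # and delta the standard deviation of all log2FC values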
Mean.FC$z.score = ((Mean.FC$mS- mean(new$log2FC, na.rm=T))*sqrt(Mean.FC$m))/sd(new$log2FC, na.rm=T)
Mean.FC$p.value = pnorm(-abs(Mean.FC$z.score)) # 1-tailed p-value
Mean.FC$FDR = p.adjust(Mean.FC$p.value, method="fdr")
Mean.FC.filtered = Mean.FC[(Mean.FC$m >= m.cutoff),-2] # filter dataset by m.cutoff
Mean.FC.filtered = Mean.FC.filtered[order(Mean.FC.filtered$z.score),]
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
# Create Outputs
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
#----------------
# Create bar plot for Kinase z-score
plot.height = length(Mean.FC.filtered$z.score)^0.55
# create color coding according to the p.cutoff
Mean.FC.filtered$color = "black"
Mean.FC.filtered[(Mean.FC.filtered$p.value < p.cutoff)&(Mean.FC.filtered$z.score < 0),ncol(Mean.FC.filtered)] = "blue"
Mean.FC.filtered[(Mean.FC.filtered$p.value < p.cutoff)&(Mean.FC.filtered$z.score > 0),ncol(Mean.FC.filtered)] = "red"
tiff("KSEA Bar Plot.tiff",
width = 6*300,
height = 300*plot.height,
res = 300, # 300 pixels per inch
pointsize = 13)
par(mai=c(1,1,.4,.4))
barplot(as.numeric(Mean.FC.filtered$z.score), col=Mean.FC.filtered$color,
border = NA,
xpd=F, cex.names= .6, cex.axis = 0.8,
xlab = "Kinase z-score",
names.arg=Mean.FC.filtered$Kinase.Gene, horiz=T, las=1)
dev.off()
#----------------
# Create tables
write.csv(KSData.dataset.abbrev, file="Kinase-Substrate Links.csv", quote=F, row.names=F)
write.csv(Mean.FC[order(Mean.FC$Kinase.Gene),-ncol(Mean.FC)], file="KSEA Kinase Scores.csv", quote=F, row.names=F)
}
|
/R/KSEA.Complete.R
|
no_license
|
cran/KSEAapp
|
R
| false | false | 9,204 |
r
|
#' The Complete KSEA App Analysis
#'
#' Takes a formatted phoshoproteomics data input and performs KSEA calculations to infer relative kinase activities
#'
#' @param KSData the Kinase-Substrate dataset uploaded from the file
#' prefaced with "PSP&NetworKIN_"
#' available from github.com/casecpb/KSEA/
#' @param PX the experimental data file formatted exactly as described below;
#' must have 6 columns in the exact order: Protein, Gene, Peptide, Residue.Both, p, FC;
#' cannot have NA values, or else the entire peptide row is deleted;
#' Description of each column in PX:
#' \itemize{
#' \item{"Protein"}{ the Uniprot ID for the parent protein}
#' \item{"Gene"}{ the HUGO gene name for the parent protein}
#' \item{"Peptide"}{ the peptide sequence}
#' \item{"Residue.Both"}{ all phosphosites from that peptide, separated by semicolons if applicable;
#' must be formatted as the single amino acid abbrev. with the residue position (e.g. S102)}
#' \item{"p"}{ the p-value of that peptide (if none calculated, please write "NULL", cannot be NA)}
#' \item{"FC"}{ the fold change (not log-transformed); usually the control sample is the denominator}
#' }
#' @param NetworKIN a binary input of TRUE or FALSE, indicating whether or not to include NetworKIN predictions;
#' NetworKIN = TRUE means inclusion of NetworKIN predictions
#' @param NetworKIN.cutoff a numeric value between 1 and infinity setting the minimum NetworKIN score
#' (can be left out if NetworKIN = FALSE)
#' @param m.cutoff a numeric value between 0 and infinity indicating the min. # of substrates
#' a kinase must have to be included in the bar plot output
#' @param p.cutoff a numeric value between 0 and 1 indicating the p-value cutoff for
#' indicating significant kinases in the bar plot
#'
#' @return creates the following outputs that are deposited into your working directory:
#' a bar plot highlighting key kinase results, a .csv file of all KSEA kinase scores,
#' and a .csv file listing all kinase-substrate relationships used for the calculations
#'
#' @references
#' Casado et al. (2013) Sci Signal. 6(268):rs6
#'
#' Hornbeck et al. (2015) Nucleic Acids Res. 43:D512-20
#'
#' Horn et al. (2014) Nature Methods 11(6):603-4
#'
#' @examples
#' KSEA.Complete(KSData, PX, NetworKIN=TRUE, NetworKIN.cutoff=5, m.cutoff=5, p.cutoff=0.01)
#' KSEA.Complete(KSData, PX, NetworKIN=FALSE, m.cutoff=2, p.cutoff=0.05)
#'
#' @importFrom grDevices dev.off png tiff
#' @importFrom graphics barplot par
#' @importFrom stats aggregate complete.cases p.adjust pnorm sd
#' @importFrom utils write.csv
#'
#' @export
#----------------------------#
# IMPORTANT OVERVIEW OF PX INPUT REQUIREMENTS
# PX input requirements:
# must have exact 6 columns in the following order: Protein, Gene, Peptide, Residue.Both, p, FC
# cannot have NA values, or else the entire peptide row is deleted
# Description of each column in PX:
# - Protein = the Uniprot ID for the parent protein
# - Gene = the HUGO gene name for the parent protein
# - Peptide = the peptide sequence
# - Residue.Both = all phosphosites from that peptide, separated by semicolons if applicable; must be formatted as the single amino acid abbrev. with the residue position (e.g. S102)
# - p = the p-value of that peptide (if none calculated, please write "NULL", cannot be NA)
# - FC = the fold change (not log-transformed); usually recommended to have the control sample as the denominator
#----------------------------#
KSEA.Complete = function (KSData, PX, NetworKIN, NetworKIN.cutoff, m.cutoff, p.cutoff){
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
# Process the input data files
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
#--------------
# Process the PX data file
# Check if each peptide row has multiple phosphorylated residues and create new dataframe with a single residue per row
if (length(grep(";", PX$Residue.Both))==0){
new = PX
colnames(new)[c(2,4)] = c("SUB_GENE", "SUB_MOD_RSD")
new$log2FC = log2(abs(as.numeric(as.character(new$FC)))) # the as.numeric(as.character()) fixes an issue with the FC values as factors
new = new[complete.cases(new$log2FC),]
}
else {
double = PX[grep(";",PX$Residue.Both),]
residues = as.character(double$Residue.Both)
residues = as.matrix(residues, ncol = 1)
split = strsplit(residues, split = ";")
x = sapply(split, length)
single = data.frame(Protein = rep(double$Protein, x),
Gene = rep(double$Gene, x),
Peptide = rep(double$Peptide, x),
Residue.Both = unlist(split),
p = rep(double$p, x),
FC = rep(double$FC, x))
# create new object of PX that has all residues in separate rows
new = PX[-grep(";", PX$Residue.Both),]
new = rbind(new, single)
colnames(new)[c(2,4)] = c("SUB_GENE", "SUB_MOD_RSD")
new$log2FC = log2(abs(as.numeric(as.character(new$FC)))) # the as.numeric(as.character()) fixes an issue with the FC values as factors
new = new[complete.cases(new$log2FC),]
}
#----------------
# Process KSData dataset based on user input (NetworKIN=T/F and NetworKIN cutoff score)
if (NetworKIN == TRUE){
KSData.filtered = KSData[grep("[a-z]", KSData$Source),]
KSData.filtered = KSData.filtered[(KSData.filtered$networkin_score >= NetworKIN.cutoff),]
}
else{
KSData.filtered = KSData[grep("PhosphoSitePlus", KSData$Source),]
}
#----------------
# Extract KSData.filtered annotations that are only found in new
KSData.dataset = merge(KSData.filtered, new)
KSData.dataset = KSData.dataset[order(KSData.dataset$GENE),]
KSData.dataset$Uniprot.noIsoform = sapply(KSData.dataset$KIN_ACC_ID, function(x) unlist(strsplit(as.character(x), split="-"))[1])
# last expression collapses isoforms of the same protein for easy processing
KSData.dataset.abbrev = KSData.dataset[,c(5,1,2,16:19,14)]
colnames(KSData.dataset.abbrev) = c("Kinase.Gene", "Substrate.Gene", "Substrate.Mod", "Peptide", "p", "FC", "log2FC", "Source")
KSData.dataset.abbrev = KSData.dataset.abbrev[order(KSData.dataset.abbrev$Kinase.Gene, KSData.dataset.abbrev$Substrate.Gene, KSData.dataset.abbrev$Substrate.Mod, KSData.dataset.abbrev$p),]
# take the mean of the log2FC amongst phosphosite duplicates
KSData.dataset.abbrev = aggregate(log2FC ~ Kinase.Gene+Substrate.Gene+Substrate.Mod+Source, data=KSData.dataset.abbrev, FUN=mean)
KSData.dataset.abbrev = KSData.dataset.abbrev[order(KSData.dataset.abbrev$Kinase.Gene),]
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
# Do analysis for KSEA
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
kinase.list = as.vector(KSData.dataset.abbrev$Kinase.Gene)
kinase.list = as.matrix(table(kinase.list))
Mean.FC = aggregate(log2FC ~ Kinase.Gene, data=KSData.dataset.abbrev, FUN=mean)
Mean.FC = Mean.FC[order(Mean.FC[,1]),]
Mean.FC$mS = Mean.FC[,2]
Mean.FC$Enrichment = Mean.FC$mS/abs(mean(new$log2FC, na.rm=T))
Mean.FC$m = kinase.list
Mean.FC$z.score = ((Mean.FC$mS- mean(new$log2FC, na.rm=T))*sqrt(Mean.FC$m))/sd(new$log2FC, na.rm=T)
Mean.FC$p.value = pnorm(-abs(Mean.FC$z.score)) # 1-tailed p-value
Mean.FC$FDR = p.adjust(Mean.FC$p.value, method="fdr")
Mean.FC.filtered = Mean.FC[(Mean.FC$m >= m.cutoff),-2] # filter dataset by m.cutoff
Mean.FC.filtered = Mean.FC.filtered[order(Mean.FC.filtered$z.score),]
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
# Create Outputs
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
#----------------
# Create bar plot for Kinase z-score
plot.height = length(Mean.FC.filtered$z.score)^0.55
# create color coding according to the p.cutoff
Mean.FC.filtered$color = "black"
Mean.FC.filtered[(Mean.FC.filtered$p.value < p.cutoff)&(Mean.FC.filtered$z.score < 0),ncol(Mean.FC.filtered)] = "blue"
Mean.FC.filtered[(Mean.FC.filtered$p.value < p.cutoff)&(Mean.FC.filtered$z.score > 0),ncol(Mean.FC.filtered)] = "red"
tiff("KSEA Bar Plot.tiff",
width = 6*300,
height = 300*plot.height,
res = 300, # 300 pixels per inch
pointsize = 13)
par(mai=c(1,1,.4,.4))
barplot(as.numeric(Mean.FC.filtered$z.score), col=Mean.FC.filtered$color,
border = NA,
xpd=F, cex.names= .6, cex.axis = 0.8,
xlab = "Kinase z-score",
names.arg=Mean.FC.filtered$Kinase.Gene, horiz=T, las=1)
dev.off()
#----------------
# Create tables
write.csv(KSData.dataset.abbrev, file="Kinase-Substrate Links.csv", quote=F, row.names=F)
write.csv(Mean.FC[order(Mean.FC$Kinase.Gene),-ncol(Mean.FC)], file="KSEA Kinase Scores.csv", quote=F, row.names=F)
}
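#----------------------------#
# Example (not run). The CSV names below are placeholders: KSData must be a
# PhosphoSitePlus/NetworKIN kinase-substrate table and PX must follow the column
# layout described above; the cutoffs are illustrative, not recommendations.
# Note that the function writes the bar plot and two CSV tables to the working directory.
# KSData = read.csv("KSData.csv", stringsAsFactors = FALSE)
# PX = read.csv("PX.csv", stringsAsFactors = FALSE)
# KSEA.Complete(KSData, PX, NetworKIN = TRUE, NetworKIN.cutoff = 5,
#               m.cutoff = 5, p.cutoff = 0.05)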
|
#' @importFrom data.table fread
#' @importFrom dplyr select
NULL
#' Efficiently loads an EDGE-produced Kraken taxonomic assignment from a file.
#' An assumption has been made -- since Kraken/EDGE tables are generated in an automated fashion,
#' they should be properly formatted -- so the code checks nothing beyond the existence of the
#' file itself. Note, however, that entries not assigned to any taxon are removed.
#' This implementation relies on the fread function from the data.table package,
#' gaining performance over traditional R techniques.
#'
#' @param filepath A path to an EDGE-generated, tab-delimited Kraken taxonomy assignment file.
#'
#' @return a data frame containing four columns: TAXA, LEVEL, COUNT, and ABUNDANCE, representing
#' taxonomically anchored sequences from the sample.
#'
#' @export
load_kraken_assignment <- function(filepath) {
TAXA <- LEVEL <- COUNT <- ABUNDANCE <- NULL
# check for the file existence
#
if ( !file.exists(filepath) ) {
stop(paste("Specified file \"", filepath, "\" doesn't exist!"))
}
# read the file
#
df <- data.table::fread(filepath, sep = "\t", header = T)
# remove empty (non-assigned) lines
#
df <- df[df$LEVEL != "", ]
  # add a normalized abundance
#
max_rollup <- df[df$LEVEL == "root", ]$ROLLUP
df$ABUNDANCE <- df$ROLLUP / max_rollup * 100
# rename the abundance column
#
names(df) <- sub("ROLLUP", "COUNT", names(df))
# return results, "as a data frame" to avoid any confusion...
#
as.data.frame( dplyr::select(df, LEVEL, TAXA, COUNT, ABUNDANCE))
}
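# Example (not run); the path is a placeholder for an EDGE-generated Kraken table:
# kraken <- load_kraken_assignment("sample1.kraken.tsv")
# head(kraken[order(-kraken$ABUNDANCE), ])   # most abundant taxa first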
|
/R/load_kraken_assignment.R
|
no_license
|
mshakya/MetaComp
|
R
| false | false | 1,590 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format.POSIXct.R
\name{format.POSIXct}
\alias{format.POSIXct}
\title{fun_name}
\usage{
format.POSIXct(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
another function swapped in
}
\keyword{Gruba}
\keyword{Przy}
\keyword{boski}
\keyword{chillout}
\keyword{piwerku}
\keyword{rozkmina}
\keyword{sie}
\keyword{toczy}
|
/man/format.POSIXct.Rd
|
no_license
|
granatb/RapeR
|
R
| false | true | 404 |
rd
|
## vim:textwidth=128:expandtab:shiftwidth=4:softtabstop=4
setMethod(f="initialize",
signature="gps",
definition=function(.Object, longitude, latitude, filename="") {
if (!missing(longitude)) .Object@data$longitude <- as.numeric(longitude)
if (!missing(latitude)) .Object@data$latitude <- as.numeric(latitude)
.Object@metadata$filename <- filename
.Object@processingLog$time <- as.POSIXct(Sys.time())
.Object@processingLog$value <- "create 'gps' object"
return(.Object)
})
setMethod(f="summary",
signature="gps",
definition=function(object, ...) {
threes <- matrix(nrow=2, ncol=3)
threes[1,] <- threenum(object@data$latitude)
threes[2,] <- threenum(object@data$longitude)
colnames(threes) <- c("Min.", "Mean", "Max.")
rownames(threes) <- c("Latitude", "Longitude")
cat("GPX Summary\n-----------------\n\n")
cat("* Number of points:", length(object@data$latitude), ", of which",
sum(is.na(object@data$latitude)), "are NA.\n")
cat("\n",...)
cat("* Statistics of subsample::\n\n", ...)
print(threes)
cat("\n")
processingLogShow(object)
invisible(NULL)
})
setMethod(f="[[",
signature(x="gps", i="ANY", j="ANY"),
definition=function(x, i, j, drop) {
## I use 'as' because I could not figure out callNextMethod() etc
#as(x, "oce")[[i, j, drop]]
as(x, "oce")[[i]]
})
setMethod(f="plot",
signature=signature("gps"),
definition=function (x,
xlab="", ylab="",
asp,
clongitude, clatitude, span,
projection, parameters=NULL, orientation=NULL,
## center, span,
expand=1,
mgp=getOption("oceMgp"),
mar=c(mgp[1]+1,mgp[1]+1,1,1),
bg,
axes=TRUE, cex.axis=par('cex.axis'),
add=FALSE, inset=FALSE,
geographical=0,
debug=getOption("oceDebug"),
...)
{
oceDebug(debug, "plot.gps(...",
", clongitude=", if(missing(clongitude)) "(missing)" else clongitude,
", clatitude=", if(missing(clatitude)) "(missing)" else clatitude,
", span=", if(missing(span)) "(missing)" else span,
", geographical=", geographical,
", cex.axis=", cex.axis,
", inset=", inset,
", ...) {\n", sep="", unindent=1)
if (!missing(projection)) {
if (missing(span))
span <- 1000
if (missing(clongitude))
longitudelim <- c(-180, 180)
else
longitudelim <- clongitude + c(-1, 1) * span / 111
if (missing(clatitude))
latitudelim <- c(-90, 90)
else
latitudelim <- clatitude + c(-1, 1) * span / 111
return(mapPlot(x[['longitude']], x[['latitude']], longitudelim, latitudelim,
mgp=mgp, mar=mar,
bg="white", type='l', axes=TRUE,
projection=projection, parameters=parameters, orientation=orientation,
debug=debug, ...))
}
geographical <- round(geographical)
if (geographical < 0 || geographical > 2)
stop("argument geographical must be 0, 1, or 2")
if (is.list(x) && "latitude" %in% names(x)) {
if (!("longitude" %in% names(x)))
stop("list must contain item named 'longitude'")
x <- as.gps(longitude=x$longitude, latitude=x$latitude)
} else {
if (!inherits(x, "gps"))
stop("method is only for gps objects, or lists that contain 'latitude' and 'longitude'")
}
longitude <- x[["longitude"]]
latitude <- x[["latitude"]]
dots <- list(...)
dotsNames <- names(dots)
gave.center <- !missing(clongitude) && !missing(clatitude)
if ("center" %in% dotsNames)
stop("use 'clongitude' and 'clatitude' instead of 'center'")
if ("xlim" %in% dotsNames) stop("cannot supply 'xlim'; use 'clongitude' and 'span' instead")
if ("ylim" %in% dotsNames) stop("cannot supply 'ylim'; use 'clatitude' and 'span' instead")
if (!inset)
par(mar=mar)
par(mgp=mgp)
if (add) {
lines(longitude, latitude, ...)
} else {
gaveSpan <- !missing(span)
if (!missing(clatitude) && !missing(clongitude)) {
if (!missing(asp))
warning("argument 'asp' being ignored, because argument 'clatitude' and 'clongitude' were given")
asp <- 1 / cos(clatitude * atan2(1, 1) / 45) # ignore any provided asp, because lat from center over-rides it
xr <- clongitude + span * c(-1/2, 1/2) / 111.11 / asp
yr <- clatitude + span * c(-1/2, 1/2) / 111.11
xr0 <- xr
yr0 <- yr
oceDebug(debug, "xr=", xr," yr=", yr, " asp=", asp, "\n")
} else {
xr0 <- range(longitude, na.rm=TRUE)
yr0 <- range(latitude, na.rm=TRUE)
oceDebug(debug, "xr0=", xr0, " yr0=", yr0, "\n")
if (missing(asp)) {
if ("ylim" %in% dotsNames)
asp <- 1 / cos(mean(range(dots$ylim, na.rm=TRUE)) * atan2(1, 1) / 45) # dy/dx
else
asp <- 1 / cos(mean(yr0) * atan2(1, 1) / 45) # dy/dx
}
## Expand
if (missing(span)) {
if (expand >= 0 && max(abs(xr0)) < 100 && max(abs(yr0) < 70)) { # don't expand if full map
xr <- mean(xr0) + expand * diff(xr0) * c(-1/2, 1/2)
yr <- mean(yr0) + expand * diff(yr0) * c(-1/2, 1/2)
} else {
xr <- xr0
yr <- yr0
}
} else {
xr <- mean(xr0) + span * c(-1/2, 1/2) / 111.11 / asp
yr <- mean(yr0)+ span * c(-1/2, 1/2) / 111.11
}
oceDebug(debug, "xr=", xr, " yr=", yr, "\n")
}
## Trim lat or lon, to avoid empty margin space
asp.page <- par("fin")[2] / par("fin")[1] # dy / dx
oceDebug(debug, "par('pin')=", par('pin'), "\n")
oceDebug(debug, "par('fin')=", par('fin'), "\n")
oceDebug(debug, "asp=", asp, "\n")
oceDebug(debug, "asp.page=", asp.page, "\n")
if (!is.finite(asp))
asp <- 1 / cos(clatitude * atan2(1, 1) / 45)
if (asp < asp.page) {
oceDebug(debug, "type 1 (will narrow x range)\n")
d <- asp.page / asp * diff(xr)
oceDebug(debug, " xr original:", xr, "\n")
xr <- mean(xr) + d * c(-1/2, 1/2)
oceDebug(debug, " xr narrowed:", xr, "\n")
} else {
oceDebug(debug, "type 2 (will narrow y range)\n")
d <- asp.page / asp * diff(yr)
oceDebug(debug, " yr original:", yr, "\n")
yr <- mean(yr) + d * c(-1/2, 1/2)
oceDebug(debug, " yr narrowed:", yr, "\n")
}
## Avoid looking beyond the poles, or the dateline
if (xr[1] < (-180))
xr[1] <- (-180)
if (xr[2] > 180)
xr[2] <- 180
if (yr[1] < (-90))
yr[1] <- (-90)
if (yr[2] > 90)
yr[2] <- 90
oceDebug(debug, "after range trimming, xr=", xr, " yr=", yr, "\n")
## Draw underlay, if desired
plot(xr, yr, asp=asp, xlab=xlab, ylab=ylab, type="n", xaxs="i", yaxs="i", axes=FALSE, ...)
if (!missing(bg)) {
plot.window(xr, yr, asp=asp, xlab=xlab, ylab=ylab, xaxs="i", yaxs="i", log="", ...)
usr <- par("usr")
oceDebug(debug, "drawing background; usr=", par('usr'), "bg=", bg, "\n")
rect(usr[1], usr[3], usr[2], usr[4], col=bg)
par(new=TRUE)
}
## Ranges
##plot(xr, yr, asp=asp, xlab=xlab, ylab=ylab, type="n", xaxs="i", yaxs="i", axes=FALSE, ...)
usrTrimmed <- par('usr')
## Construct axes "manually" because axis() does not know the physical range
if (axes) {
prettyLat <- function(yr, ...)
{
res <- pretty(yr, ...)
if (diff(yr) > 100)
res <- seq(-90, 90, 45)
res
}
prettyLon <- function(xr, ...)
{
res <- pretty(xr, ...)
if (diff(xr) > 100)
res <- seq(-180, 180, 45)
res
}
oceDebug(debug, "xr:", xr, ", yr:", yr, ", xr0:", xr0, ", yr0:", yr0, "\n")
##xr.pretty <- prettyLon(xr, n=if (geographical)3 else 5, high.u.bias=20)
xr.pretty <- prettyLon(par('usr')[1:2], n=if (geographical)3 else 5, high.u.bias=20)
##yr.pretty <- prettyLat(yr, n=if (geographical)3 else 5, high.u.bias=20)
yr.pretty <- prettyLat(par('usr')[3:4], n=if (geographical)3 else 5, high.u.bias=20)
oceDebug(debug, "xr.pretty=", xr.pretty, "\n")
oceDebug(debug, "yr.pretty=", yr.pretty, "\n")
oceDebug(debug, "usrTrimmed", usrTrimmed, "(original)\n")
usrTrimmed[1] <- max(-180, usrTrimmed[1])
usrTrimmed[2] <- min( 180, usrTrimmed[2])
usrTrimmed[3] <- max( -90, usrTrimmed[3])
usrTrimmed[4] <- min( 90, usrTrimmed[4])
oceDebug(debug, "usrTrimmed", usrTrimmed, "\n")
oceDebug(debug, "par('usr')", par('usr'), "\n")
xlabels <- format(xr.pretty)
ylabels <- format(yr.pretty)
if (geographical >= 1) {
xlabels <- sub("-", "", xlabels)
ylabels <- sub("-", "", ylabels)
}
if (geographical == 2) {
xr.pretty <- prettyPosition(xr.pretty, debug=debug-1)
yr.pretty <- prettyPosition(yr.pretty, debug=debug-1)
xlabels <- formatPosition(xr.pretty, type='expression')
ylabels <- formatPosition(yr.pretty, type='expression')
}
axis(1, at=xr.pretty, labels=xlabels, pos=usrTrimmed[3], cex.axis=cex.axis)
oceDebug(debug, "putting bottom x axis at", usrTrimmed[3], "with labels:", xlabels, "\n")
axis(2, at=yr.pretty, labels=ylabels, pos=usrTrimmed[1], cex.axis=cex.axis, cex=cex.axis)
oceDebug(debug, "putting left y axis at", usrTrimmed[1], "\n")
axis(3, at=xr.pretty, labels=rep("", length.out=length(xr.pretty)), pos=usrTrimmed[4], cex.axis=cex.axis)
##axis(3, at=xr.pretty, pos=usrTrimmed[4], labels=FALSE)
##oceDebug(debug, "putting top x axis at", usrTrimmed[4], "\n")
axis(4, at=yr.pretty, pos=usrTrimmed[2], labels=FALSE, cex.axis=cex.axis)
oceDebug(debug, "putting right y axis at", usrTrimmed[2], "\n")
}
yaxp <- par("yaxp")
oceDebug(debug, "par('yaxp')", par("yaxp"), "\n")
oceDebug(debug, "par('pin')", par("pin"), "\n")
if (yaxp[1] < -90 | yaxp[2] > 90) {
oceDebug(debug, "trimming latitude; pin=", par("pin"), "FIXME: not working\n")
oceDebug(debug, "trimming latitdue; yaxp=", yaxp, "FIXME: not working\n")
yscale <- 180 / (yaxp[2] - yaxp[1])
## FIXME: should allow type as an arg
points(x[["longitude"]], x[["latitude"]], ...)
} else {
points(longitude, latitude, ...)
if (axes)
rect(usrTrimmed[1], usrTrimmed[3], usrTrimmed[2], usrTrimmed[4])
}
}
##box()
oceDebug(debug, "par('usr')=", par('usr'), "\n")
oceDebug(debug, "} # plot.gps()\n", unindent=1)
invisible()
})
as.gps <- function(longitude, latitude, filename="")
{
names <- names(longitude)
if ("longitude" %in% names && "latitude" %in% names) {
latitude <- longitude[["latitude"]]
longitude <- longitude[["longitude"]]
}
    rval <- new('gps', longitude=longitude, latitude=latitude, filename=filename)
    rval
}
read.gps <- function(file, type=NULL, debug=getOption("oceDebug"), processingLog)
{
oceDebug(debug, "read.gps(...) {\n", sep="", unindent=1)
filename <- NULL
if (is.character(file)) {
filename <- fullFilename(file)
file <- file(file, "r")
on.exit(close(file))
}
if (!inherits(file, "connection"))
stop("argument `file' must be a character string or connection")
if (!isOpen(file)) {
open(file, "r")
on.exit(close(file))
}
if (is.null(type)) {
tokens <- scan(file, "character", n=5)
found <- grep("gpx", tokens)
if (length(found) > 0) {
type <- "gpx"
} else {
warning("cannot determine file type; assuming 'gpx'")
}
}
type <- match.arg(type, c("gpx"))
oceDebug(debug, "file type:", type, "\n")
lines <- readLines(file)
look <- grep("lat=", lines)
latlon <- lines[look]
latlonCleaned <- gsub("[a-zA-Z<>=\"/]*", "", latlon)
latlon <- read.table(text=latlonCleaned)
rval <- new("gps", longitude=latlon[,2], latitude=latlon[,1], file=filename)
oceDebug(debug, "} # read.gps()\n", sep="", unindent=1)
rval
}
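## Example (not run; coordinates are made up). A 'gps' object can be built
## directly from vectors with as.gps(), or from a GPX file with read.gps(),
## and then summarized and plotted with the methods defined above.
# g <- as.gps(longitude=c(-63.58, -63.57, -63.55), latitude=c(44.65, 44.66, 44.67))
# summary(g)
# plot(g)
# g2 <- read.gps("track.gpx")   # "track.gpx" is a placeholder file name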
|
/R/gps.R
|
no_license
|
marie-geissler/oce
|
R
| false | false | 15,654 |
r
|
#' Logistic regression of vaccine completion on medical assistance
#'
#' Fits \code{Completed ~ MedAssist} as a binomial GLM on the \code{gardasil}
#' data and formats counts, completion rates, odds ratios (95\% CI) and p-values
#' into a publication-style table.
#'
#' @return A data frame with one row per \code{MedAssist} group.
#' @export
#'
#' @examples
glm_MedAssist <- function() {
the_glm <- glm(Completed ~ MedAssist, family = binomial, data = gardasil) %>% summary() %>% coef() %>% as.data.frame()
or_values <- list("1.0")
for (i in seq_along(the_glm$Estimate)[-1]) {
    # 95% Wald CI computed on the log-odds scale, then exponentiated
    or_values <- c(or_values, paste0(round(exp(the_glm$Estimate[i]), 2), " (", round(exp(the_glm$Estimate[i] - the_glm$`Std. Error`[i] * qnorm(0.975)), 2), "-", round(exp(the_glm$Estimate[i] + the_glm$`Std. Error`[i] * qnorm(0.975)), 2), ")"))
}
p_values <- list("")
for (i in seq_along(the_glm$`Pr(>|z|)`)[-1]) {
p_values <- c(p_values, round(the_glm$`Pr(>|z|)`[i], 2))
}
column_one <- group_by(gardasil, MedAssist) %>% summarise("n" = length(MedAssist))
column_two <- filter(gardasil, Completed == "Completer") %>% group_by(MedAssist) %>% summarise("Completed 3 Vaccinations in 12 Mo (%)" = length(MedAssist))
column_three <- group_by(gardasil, MedAssist) %>% summarise("OR (95% CI)" = "")
column_four <- group_by(gardasil, MedAssist) %>% summarise("P" = "")
all_columns <- list(column_one, column_two, column_three, column_four)
glm_table <- Reduce(full_join, all_columns) %>% rename(., Group = MedAssist) %>% mutate("Completed 3 Vaccinations in 12 Mo (%)" = paste0(`Completed 3 Vaccinations in 12 Mo (%)`, " (", round(`Completed 3 Vaccinations in 12 Mo (%)` / n * 100, 1), ")"))
glm_table[, 4] <- unlist(or_values)
glm_table[, 5] <- unlist(p_values)
glm_table
}
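# Example (not run); assumes the `gardasil` data frame used above is available
# on the search path, with `Completed` and `MedAssist` columns, and that dplyr
# is attached for the pipe:
# library(dplyr)
# glm_MedAssist()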
|
/R/glm_MedAssist.R
|
permissive
|
schifferl/assignment1
|
R
| false | false | 1,474 |
r
|
.valid.ChromMaintainers <- function(x){
if(class(x@maintainers) != "HLDAResult")
return("the maintainers slot should an HLDAResult object");
if(!is.matrix(x@topEdges))
return("topEdges should be a matrix")
if(!is.matrix(x@topNodes))
return("topNodes should be a matrix")
if(ncol(x@topEdges) != ncol(x@topNodes))
return("topEdges and topNodes should have the same number of networks")
if(!is.list(x@networks)){
return("networks should be a list")
}
else{
## if there are some elements check if they are of class igraph
if(length(x@networks) >0){
alligraph <- all(sapply(x@networks, function(elem) class(elem) == "igraph"))
if(!alligraph)
return("the networks slot should be a list of igraph objects")
}
}
return(TRUE)
}
.ConvertToHDA<-function(Nets,tfspace){
#tenPercent<- floor(length(Nets)/10);
Documents<-list();
for(i in 1:length(Nets)){
termCount<-length(unique(Nets[[i]]));
if(termCount >0){
counts<-table(Nets[[i]]);
pos<-match(names(counts), tfspace);
ord<-order(pos);
netName<-names(Nets)[i];
if(is.null(netName)){ netName<-i; }
counts<-counts[ord];
names(counts)<-pos[ord];
res<-matrix(0,nrow=2,ncol=length(counts))
res[1,]<-as.numeric(names(counts))-1; #Should be 0 indexed
res[2,]<-counts
Documents[[netName]]<-res;
}
}
return(Documents);
}
.plot.clustOrderHeatmap<-function(cluster,data, path, W=2048, H=1024){
elementsOrder<-order(cluster);
data<-data[elementsOrder,];
cluster<-cluster[elementsOrder];
lbls<-paste("cluster",as.numeric(sort(unique(cluster))),sep="");
annot<-data.frame(Cluster=factor(cluster,labels = lbls));
rownames(annot)<-rownames(data);
Var1<- sample(colors()[100:700],length(lbls))
names(Var1)<-lbls;
ann_colors<-list(Cluster=Var1)
if(path != ""){
filename <- file.path(path,"ClusHeatmap.png");
png(filename,height=H,width=W)
}
p <- pheatmap(t(data), cluster_rows=FALSE,cluster_cols=FALSE,border_color=NA, show_colnames=FALSE,
color= colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")) (200),
annotation= annot, annotation_colors=ann_colors)
p
if(path != ""){
dev.off()
}
return(p)
}
.plot.TopicsSimilarity <- function(topics){
simMat<-matrix(0,nrow=ncol(topics),ncol=ncol(topics));
for(i in 1:(ncol(topics)-1)){
for(j in (i+1):ncol(topics)){
simMat[i,j]<-length(intersect(topics[,i],topics[,j]))/ length(unique(c(topics[,i],topics[,j])));
simMat[j,i]<-simMat[i,j];
}
}
colnames(simMat)<-paste("Network",1:ncol(topics),sep="")
rownames(simMat)<-paste("Network",1:ncol(topics),sep="")
diag(simMat) <- 1
p <- pheatmap(simMat)
invisible( list(plot=p, simMat = simMat) )
}
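## Example (not run): the similarity reported above is the Jaccard index between
## the edge sets of two inferred networks (shared entries / union of entries).
# sim <- .plot.TopicsSimilarity(topEdges(object))$simMat   # `object`: a ChromMaintainers result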
## Get the list of gene promoters near the given regions
.GetClusterInfo<-function(object){
if(!class(object) %in% "GRanges"){
stop("object should be of class GRanges");
}
requireNamespace("TxDb.Hsapiens.UCSC.hg19.knownGene")
requireNamespace("org.Hs.eg.db")
hg19.known<-TxDb.Hsapiens.UCSC.hg19.knownGene;
genesPromoters<-subsetByOverlaps(promoters(hg19.known,2500,2500),object)
ucscIDs<-elementMetadata(genesPromoters)$tx_name
ucsc2Entrez<-toTable(org.Hs.egUCSCKG);
pos<-match(ucscIDs,ucsc2Entrez$ucsc_id);
pos<-pos[!is.na(pos)];
EntrezIDs<-ucsc2Entrez$gene_id[pos];
#hgnc <- EntrezToHGNC(EntrezIDs)
res<- select(org.Hs.eg.db, EntrezIDs, c("GENENAME", "SYMBOL"))
res<-res[!duplicated(res),]
return(res);
#hgnc <- hgnc[!(duplicated),]
#return(hgnc)
}
## Creates a directory that contains the list of genes for each cluster
.get.ClusterInvolvedGenes<-function(hdaRes,data,path="ClustersGenes"){
message(paste("creating directory",path))
dir.create(path, showWarnings = FALSE)
clus<-sort(unique(getClusters(hdaRes)));
message("processing clusters ....")
for(i in clus){
clusRegions <- getRegionsIncluster(hdaRes,data, cluster= i)
clusInfo<-.GetClusterInfo(clusRegions);
fname<-file.path(path, paste( c("cluster",i,"_genes.txt"),collapse="") )
write.table(clusInfo,file=fname,row.names=FALSE,quote=FALSE, sep="\t")
}
}
.get.NetworksGenes<- function(hdaRes, data, path){
requireNamespace("ChIPpeakAnno")
data("TSS.human.GRCh37", package="ChIPpeakAnno", envir=environment())
TSS.human.GRCh37 <- get("TSS.human.GRCh37", envir= environment())
message(paste("creating directory",path))
dir.create(path, showWarnings = FALSE)
nets <-1:ncol(topEdges(hdaRes))
message("processing networks ....")
for(net in nets){
NetworkRegions <- getRegionsInNetwork(hdaRes,data,net)
if(length(NetworkRegions)>0){
#networkInfo<-.GetClusterInfo(NetworkRegions);
tmp.rd <- GRanges( gsub("chr","", as.character(seqnames(NetworkRegions))),
IRanges(start(NetworkRegions), end(NetworkRegions)))
##ensemble <- useMart("ensembl")
## hsp <- useDataset(mart = ensemble, dataset = "hsapiens_gene_ensembl")
tmp.anno <- annotatePeakInBatch(tmp.rd,
featureType = "TSS",
AnnotationData = TSS.human.GRCh37,
## mart= hsp,
select = "all",
PeakLocForDistance = "middle")
res <- with(tmp.anno, { subset(as.data.frame(tmp.anno),
abs(distancetoFeature) <= 2500 | insideFeature =="includeFeature" )})
if(nrow(res)>0){
conv <- EnsemblToHGNC(res$feature)
pos <- match(res$feature, conv$ensembl_gene_id)
res$name <- conv$hgnc_symbol[pos]
res$space <- paste("chr",as.character(res$space))
res <- res[,c("feature","name")]
fname<-file.path(path, paste( c("Network",net,"_genes.txt"),collapse="") )
write.table(res,file=fname,row.names=FALSE,quote=FALSE, sep="\t")
}
}
}
}
.buildNetFromEdges<-function(edgesList){
g<-graph.empty()
for(e in edgesList){
vertices <-unlist(strsplit(e,split="_"))
NotIn <- which(!vertices %in% V(g)$name)
#Add nodes
if(length(NotIn)>0){
for(n in NotIn){
g <- g+ vertex(vertices[n])
}
}
#Add edges
g[vertices[1],vertices[2],directed=FALSE]<-1
}
g<-as.undirected(g)
return(g)
}
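## Example (not run): edges are passed as "nodeA_nodeB" strings, exactly as they
## are stored in the topEdges matrix; the protein names here are illustrative.
# g <- .buildNetFromEdges(c("CTCF_MYC", "MYC_MAX", "CTCF_MAX"))
# igraph::ecount(g)   # 3 undirected edges over 3 vertices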
## To draw Bezier-like curved edges
## inspired from http://is-r.tumblr.com/post/38459242505/beautiful-network-diagrams-with-ggplot2
.edgeMaker <- function(whichRow, len = 100, curved = TRUE,adjacencyList,layoutCoordinates){
fromC <- as.matrix( layoutCoordinates[adjacencyList[whichRow, 1],1:2 ] ) # Origin
toC <- as.matrix( layoutCoordinates[adjacencyList[whichRow, 2],1:2 ] ) # Terminus
# Add curve:
graphCenter <- colMeans(layoutCoordinates[,1:2]) # Center of the overall graph
bezierMid <- unlist(c(fromC[1], toC[2])) # A midpoint, for bended edges
distance1 <- sum((graphCenter - bezierMid)^2)
if(distance1 < sum((graphCenter - unlist(c(toC[1], fromC[2])))^2)){
bezierMid <- c(toC[1], fromC[2])
} # To select the best Bezier midpoint
bezierMid <- (fromC + toC + bezierMid) / 3 # Moderate the Bezier midpoint
if(curved == FALSE){bezierMid <- (fromC + toC) / 2} # Remove the curve
edge <- data.frame(bezier(c(fromC[1], bezierMid[1], toC[1]), # Generate
c(fromC[2], bezierMid[2], toC[2]), # X & y
evaluation = len)) # Bezier path coordinates
edge$Sequence <- 1:len # For size and colour weighting in plot
edge$Group <- paste(adjacencyList[whichRow, 1:2], collapse = ">")
return(edge)
}
## inspired from http://is-r.tumblr.com/post/38459242505/beautiful-network-diagrams-with-ggplot2
.plotNetwork<-function(g,layout.fct=layout.kamada.kawai, title=""){
plotcord <- data.frame(layout.fct(g) )
petnet<-NULL;
if(nrow(get.edgelist(g))>0){
edglist <- melt(as.matrix(get.adjacency(g)))
edglist <- edglist[edglist$value > 0, ]
edglist[,1]<-factor(edglist[,1],levels=V(g)$name)
edglist[,2]<-factor(edglist[,2],levels=V(g)$name)
edges <- data.frame(plotcord[edglist[,1],], plotcord[edglist[,2],])
colnames(edges) <- c("X1","Y1","X2","Y2")
edges$midX <- (edges$X1 + edges$X2) / 2
edges$midY <- (edges$Y1 + edges$Y2) / 2
allEdges <- lapply(1:nrow(edglist), .edgeMaker, len = 500, curved = TRUE,
adjacencyList= edglist, layoutCoordinates= plotcord)
allEdges <- do.call(rbind, allEdges)
pnet <- with(allEdges,{ ggplot(allEdges) + geom_path(aes(x = x, y = y, group = Group,size = -Sequence, colour= Sequence))})
}
else{
pnet <- ggplot()
}
plotcord$type <- as.factor(V(g)$type)
plotcord$name <-V(g)$name;
pnet <- pnet + geom_point(aes_string(x='X1', y='X2'), data=plotcord,size = 10, pch=21, color="#e34a33", fill="#fdbb84")
pnet <- pnet + scale_colour_gradient(low = gray(0), high = gray(9/10), guide = "none")
pnet <- pnet + geom_text(data = plotcord, aes_string(x='X1',y='X2', label = 'name'),size=2,family="Courier", fontface="bold")
pnet <- pnet + scale_size(range = c(1/10, 1), guide = "none")
pnet <- pnet + theme(panel.background = element_blank()) # + theme(legend.position="none")
pnet <- pnet + theme(axis.title.x = element_blank(), axis.title.y = element_blank())
pnet <- pnet + theme( legend.background = element_rect(colour = NA))
pnet <- pnet + theme(panel.background = element_rect(fill = "white", colour = "black"))
pnet <- pnet + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank())
pnet <- pnet + ggtitle(title)
return(pnet);
}
.plotAllNet <- function(networks,layoutfct=layout.kamada.kawai, file="AllGraphs.pdf"){
if(length(networks) == 0 || !all(sapply(networks, is.igraph)))
stop("the networks member should a list of igraph object")
message("plotting networks")
plots <- list()
if(file != ""){
message(paste("plots will be also available on file", file))
pdf(file, width=14, height=7)
}
for(i in 1:length(networks)){
subplot<-.plotNetwork(networks[[i]],layoutfct, paste("Network",i));
#print(subplot, vp=vplayout( ceiling(i/netPerRow), ifelse(i %% netPerRow==0,netPerRow,i %% netPerRow)) );
if(file != "") plot(subplot)
plots[[i]] <- subplot
}
if(file != ""){
dev.off()
}
return(plots)
}
.annotateNodesExpression<-function(graphs,RPKMS){
if(length(graphs) == 0)
stop("Please generate the igraph objects first. Check the GenerateNetworks method.")
for(i in 1:length(graphs)){
g<-graphs[[i]]
pos<- match(V(g)$name,RPKMS[,1])
V(g)[!is.na(pos)]$RPKM<-RPKMS[pos[!is.na(pos)],2]
graphs[[i]]<-g
}
return(graphs)
}
## Copied from the clValid package with some minor modifications.
.plot.sota <- function(x, cl=0, ...){
op <- par(no.readonly=TRUE)
on.exit(par(op))
if(cl!=0)
par(mfrow=c(1,1)) else
{
pdim <- c(0,0)
for(i in 1:100){
j <- i
if(length(x$totals) > i*j)
j <- j+1
else{
pdim <- c(i,j)
break}
if(length(x$totals) > i*j)
i <- i+1
else{
pdim <- c(i,j)
break}
}
par(mfrow=pdim)
}
ylim = c(min(x$data), max(x$data))
pr <- 4:ncol(x$tree)
if(cl==0)
cl.to.print <- 1:length(table(x$clust)) else ## changed
cl.to.print <- cl
cl.id <- sort(unique(x$clust)) ## changed
for(i in cl.to.print){
plot(1:ncol(x$data), x$tree[i, pr], col="red", type="l",
ylim=ylim, xlab=paste("Cluster ",i), ylab="Expr. Level", ...)
legend("topleft", legend=paste(x$totals[i], " Elements"), cex=.7,
text.col="navy", bty="n")
cl <- x$data[x$clust==cl.id[i],] ## changed
if(is.vector(cl))
cl <- matrix(cl, nrow=1)
for(j in 1:x$totals[i])
lines(1:ncol(x$data), cl[j,], col="grey")
lines(1:ncol(x$data), x$tree[i, pr], col="red", ...)
}
}
###################################################################################
##
## ChromatinMaintainers-methods
##
####################################################################################
setMethod("clusterInteractions", signature = c(object="ChromMaintainers"),
function(object, method="sota", nbClus=20 ){
cat("clusterInteractions : checking\n")
if(is.null(object@maintainers@docPerTopic) || 0 %in% dim(object@maintainers@docPerTopic))
stop("The docPerTopic matrix should not be empty")
cat("clusterInteractions : reading args\n")
method <- match.arg(method)
if(nbClus <= 0 || is.null(nbClus)){
stop("nbClus should be a positive number")
}
cat("using clValid\n")
requireNamespace("clValid")
clusRes <- sota(object@maintainers@docPerTopic,maxCycles=nbClus-1)
object@clusRes <- clusRes;
message(paste("DNA interactions have been clustered into",length(unique(getClusters(object))), "cluster"))
return(object)
})
setMethod("plot3CPETRes", signature = c(object="ChromMaintainers"),
function(object, path="", W=14, H=7 , type=c("heatmap","clusters","curve","avgCurve","netSim", "networks"),
byEdge=TRUE, layoutfct=layout.kamada.kawai, ...){
type <- match.arg(type)
## we can do getClusters(object) but sometime we get
## some weired erros.
clusters<- getClusters(object) #@clusRes$mem
p<- NULL;
if(type== "heatmap"){
if(is.null(slot(object@maintainers,"docPerTopic")))
stop('No infered networks were found, please check the Method InferNetworks')
p <- .plot.clustOrderHeatmap(clusters, slot(object@maintainers,"docPerTopic"), path, W, H)
}
else{
if(type %in% c("clusters","curve","avgCurve") && is.null(object@clusRes))
stop("No clustering results found, please check method cluster")
par(mar = rep(2, 4))
if(type == "curve"){
if("sota" %in% class(object@clusRes))
p<- .plot.sota(object@clusRes)
else
p <- plotCurves(object@clusRes$y,object@clusRes$mem)
}
else
if(type == "avgCurve"){
if("sota" %in% class(object@clusRes)){
message("curves and avgCurves are only plotted for clues objects. function kept for legacy")
p <- .plot.sota(object@clusRes)
}
else
p <- plotAvgCurves(object@clusRes$y,object@clusRes$mem)
}
else{
if(type == "netSim"){
if(byEdge){
p <- .plot.TopicsSimilarity(slot(object,"topEdges"))
}
else
p <- .plot.TopicsSimilarity(slot(object,"topNodes"))
}
else{
if(type== "networks"){
library(ggplot2)
if(path == "") path = "AllGraphs.pdf"
p<- .plotAllNet(networks(object), layoutfct,path)
}
}
}
}
invisible(p)
})
setMethod("getClusters",signature= c(object= "ChromMaintainers"),
function(object){
if(is.null(object@clusRes))
return(NA)
clusters <- c();
if("clues" %in% class(object@clusRes)){
clusters <- object@clusRes$mem
}
else{
clusters <- object@clusRes$clust
}
return(clusters)
})
setMethod("getRegionsIncluster", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData",
cluster="numeric"),
function(hdaRes,data, cluster=1, ...){
if(is.null(hdaRes@clusRes))
stop("You need to do the clustering first, check the cluster method")
clusters <- hdaRes@clusRes$clust
clusElements<-which(clusters == cluster);
if(length(clusElements) <= 0){
warning("The provided cluster does not exist")
return(new("GRanges"))
}
petNames <-rownames(slot(hdaRes@maintainers,"docPerTopic"))
regionRoot <- paste("PET#",petNames[clusElements],sep="")
pos <- which( gsub("\\.\\d|PET#","",pet(data)$PET_ID) %in% petNames[clusElements])
return(pet(data)[pos])
})
setMethod("getRegionsInNetwork", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData",
net="numeric"),
function(hdaRes,data, net=1,thr=0.5, ...){
if(is.null(hdaRes@clusRes))
stop("You need to do the clustering first, check the cluster method")
if(ncol(slot(hdaRes@maintainers,"docPerTopic")) < net){
warning("The provided net does not exist")
return(new("GRanges"))
}
maxes <- apply(slot(hdaRes@maintainers,"docPerTopic"),1,function(x) which(x==max(x))[1])
topInter <- which(maxes == net)
#topInter <- which(slot(hdaRes@maintainers,"docPerTopic")[,net] >= thr)
petNames <-rownames(slot(hdaRes@maintainers,"docPerTopic"))[topInter]
regionRoot <- paste("PET#",petNames,sep="")
pos <- which( gsub("\\.\\d|PET#","",pet(data)$PET_ID) %in% petNames)
return(pet(data)[pos])
})
setMethod("outputGenesPerClusterToDir", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData"),
function(hdaRes,data,path="ClustersGenes", ...){
.get.ClusterInvolvedGenes(hdaRes,data,path)
})
setMethod("outputGenesPerNetworkToDir", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData"),
function(hdaRes, data, path="NetworksGenes", ...){
.get.NetworksGenes(hdaRes, data, path)
})
## TODO: Don't use a lot of packages
setMethod("visualizeCircos", signature = c(object= "ChromMaintainers", data= "ChiapetExperimentData", cluster="numeric"),
function(object, data, cluster = 1, chrLenghts = NULL){
requireNamespace("biovizBase")
requireNamespace("ggbio")
interactions <- getRegionsIncluster(object, data, cluster = cluster)
if(length(interactions) ==0)
return(NA)
if(is.null(chrLenghts)){
data("hg19Ideogram", package = "biovizBase", envir= environment())
hg19Ideogram <- get("hg19Ideogram", envir = environment())
hg19Ideogram <- hg19Ideogram[ as.character(seqnames(hg19Ideogram)) %in% seqlevels(interactions) ]
hg19Ideo <- keepSeqlevels(hg19Ideogram, seqlevels(interactions))
seqlengths(interactions) <- seqlengths(hg19Ideo)
}
else{
if(length(names(chrLenghts)) == 0 || !is.numeric(chrLenghts))
stop("chrLenghts should be a names numeric vector")
if(! all(seqlevels(interactions) %in% names(chrLenghts)) )
stop("some chromosomes are missing from chrLenghts")
pos <- match(seqlevels(interactions), names(chrLenghts))
seqlengths(interactions) <- chrLenghts[pos]
}
## get left-side interactions
leftID <- grep("PET#\\d+\\.1",interactions$PET_ID)
RightID <- grep("PET#\\d+\\.2",interactions$PET_ID)
circos <- interactions[leftID]
values(circos)$to.gr <- interactions[RightID]
p <- ggplot() + layout_circle(hg19Ideo, geom = "ideo", fill = "#9ecae1", color="#636363", radius = 30,trackWidth = 4)
p <- p + layout_circle(hg19Ideo, geom = "text", aes(label = seqnames),
vjust = 0,radius = 32, trackWidth = 7)
p <- p + layout_circle(circos, geom = "link", linked.to = "to.gr",radius = 29, trackWidth = 1, color="#f03b20")
p <- p + ggtitle(paste("Interactions in cluster", cluster)) +
theme(plot.title = element_text(lineheight=.8, face="bold"))
plot(p)
invisible(list(circos = circos,plot = p))
})
setMethod("topEdges", signature = c(object = "ChromMaintainers"),
function(object){
return(object@topEdges)
})
setMethod("topNodes", signature = c(object = "ChromMaintainers"),
function(object){
return(object@topNodes)
})
setMethod("networks", signature= c(object= "ChromMaintainers"),
function(object){
return(slot(object,"networks"))
})
setMethod("updateResults", signature=c(object="ChromMaintainers",nets="NetworkCollection", thr="numeric"),
function(object,nets,thr=0.5){
if(!is.null(nets)){
object@topEdges<- .print.topwords(object@maintainers@wordsPerTopic,as.matrix(TF(nets)),thr)
object@topNodes <- .getNodesList(object@topEdges)
## if the networks were previously generated then update them
if(length(object@networks) > 0){
object<- GenerateNetworks(object)
}
return(object)
}
else{
warning("a NetworkCollection object should be specified")
}
}
)
setMethod("GenerateNetworks", signature = c(object = "ChromMaintainers"),
function(object,...) {
## if one of the dimensions is zero we consider it as non valid
if(0 %in% dim(topEdges(object)) || is.null(topEdges(object)) )
stop("No topEdge reults found")
motifs <- colnames(wordsPerTopic(object@maintainers))
subgraphs<-list()
topics <- topEdges(object)
for(i in 1:ncol(topics)){
edgesList<-unique(topics[,i])
edgesList<- edgesList[edgesList!=""]
g <- .buildNetFromEdges(edgesList)
V(g)$type="co-factor";
tfs<-which(V(g)$name %in% motifs);
if(length(tfs)>0){
V(g)$type[tfs]<-"TF";
}
subgraphs[[i]]<-g;
}
names(subgraphs) <- paste("Network", 1:length(subgraphs),sep="")
object@networks <- subgraphs;
return(object)
})
setMethod("annotateExpression", signature= c(object = "ChromMaintainers", RPKMS = "data.frame"),
function(object, RPKMS){
if(ncol(RPKMS) <2)
stop("a data.frame with at least 2 columns should be provided")
if( all(is.na(as.numeric(RPKMS[,2]) )) )
stop("The second column should have numeric values")
object@networks <- .annotateNodesExpression(networks(object),RPKMS)
return(object)
})
setMethod("show", signature=c(object="ChromMaintainers"),
function(object){
cat("class:", class(object),"\n")
cat("HLDA Results:\n")
cat("------------\n")
print(object@maintainers)
})
setValidity("ChromMaintainers",.valid.ChromMaintainers)
## An S3 user-friendly constructor
ChromMaintainers<- function( maintainers,topEdges,topNodes, clusRes = NULL, networks = list()){
return( new("ChromMaintainers", maintainers = maintainers,topEdges = topEdges,
topNodes = topNodes, clusRes = clusRes, networks = networks) )
}
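## Example (not run). A ChromMaintainers object is normally produced by the
## package's InferNetworks() method (the argument shown is an assumption); the
## typical downstream calls then use the methods defined above:
# hlda <- InferNetworks(networkCollection)
# hlda <- GenerateNetworks(hlda)                  # build igraph objects per network
# hlda <- clusterInteractions(hlda, nbClus = 20)  # SOTA clustering of DNA interactions
# plot3CPETRes(hlda, type = "heatmap")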
|
/R/ChromMaintainers-methods.r
|
no_license
|
sirusb/R3CPET
|
R
| false | false | 24,878 |
r
|
.valid.ChromMaintainers <- function(x){
if(class(x@maintainers) != "HLDAResult")
return("the maintainers slot should an HLDAResult object");
if(!is.matrix(x@topEdges))
return("topEdges should be a matrix")
if(!is.matrix(x@topNodes))
return("topNodes should be a matrix")
if(ncol(x@topEdges) != ncol(x@topNodes))
return("topEdges and topNodes should have the same number of networks")
if(!is.list(x@networks)){
return("networks should be a list")
}
else{
## if there are some elements check if they are of class igraph
if(length(x@networks) >0){
alligraph <- all(sapply(x@networks, function(elem) class(elem) == "igraph"))
if(!alligraph)
return("the networks slot should be a list of igraph objects")
}
}
return(TRUE)
}
.ConvertToHDA<-function(Nets,tfspace){
#tenPercent<- floor(length(Nets)/10);
Documents<-list();
for(i in 1:length(Nets)){
termCount<-length(unique(Nets[[i]]));
if(termCount >0){
counts<-table(Nets[[i]]);
pos<-match(names(counts), tfspace);
ord<-order(pos);
netName<-names(Nets)[i];
if(is.null(netName)){ netName<-i; }
counts<-counts[ord];
names(counts)<-pos[ord];
res<-matrix(0,nrow=2,ncol=length(counts))
res[1,]<-as.numeric(names(counts))-1; #Should be 0 indexed
res[2,]<-counts
Documents[[netName]]<-res;
}
}
return(Documents);
}
.plot.clustOrderHeatmap<-function(cluster,data, path, W=2048, H=1024){
elementsOrder<-order(cluster);
data<-data[elementsOrder,];
cluster<-cluster[elementsOrder];
lbls<-paste("cluster",as.numeric(sort(unique(cluster))),sep="");
annot<-data.frame(Cluster=factor(cluster,labels = lbls));
rownames(annot)<-rownames(data);
Var1<- sample(colors()[100:700],length(lbls))
names(Var1)<-lbls;
ann_colors<-list(Cluster=Var1)
if(path != ""){
filename <- file.path(path,"ClusHeatmap.png");
png(filename,height=H,width=W)
}
p <- pheatmap(t(data), cluster_rows=FALSE,cluster_cols=FALSE,border_color=NA, show_colnames=FALSE,
color= colorRampPalette(c("#00007F", "blue", "#007FFF", "cyan","#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")) (200),
annotation= annot, annotation_colors=ann_colors)
p
if(path != ""){
dev.off()
}
return(p)
}
.plot.TopicsSimilarity <- function(topics){
simMat<-matrix(0,nrow=ncol(topics),ncol=ncol(topics));
for(i in 1:(ncol(topics)-1)){
for(j in (i+1):ncol(topics)){
simMat[i,j]<-length(intersect(topics[,i],topics[,j]))/ length(unique(c(topics[,i],topics[,j])));
simMat[j,i]<-simMat[i,j];
}
}
colnames(simMat)<-paste("Network",1:ncol(topics),sep="")
rownames(simMat)<-paste("Network",1:ncol(topics),sep="")
diag(simMat) <- 1
p <- pheatmap(simMat)
invisible( list(plot=p, simMat = simMat) )
}
## Get the the list of gene promoters near to the given regions
.GetClusterInfo<-function(object){
if(!class(object) %in% "GRanges"){
stop("object should be of class GRanges");
}
requireNamespace("TxDb.Hsapiens.UCSC.hg19.knownGene")
requireNamespace("org.Hs.eg.db")
hg19.known<-TxDb.Hsapiens.UCSC.hg19.knownGene;
genesPromoters<-subsetByOverlaps(promoters(hg19.known,2500,2500),object)
ucscIDs<-elementMetadata(genesPromoters)$tx_name
ucsc2Entrez<-toTable(org.Hs.egUCSCKG);
pos<-match(ucscIDs,ucsc2Entrez$ucsc_id);
pos<-pos[!is.na(pos)];
EntrezIDs<-ucsc2Entrez$gene_id[pos];
#hgnc <- EntrezToHGNC(EntrezIDs)
res<- select(org.Hs.eg.db, EntrezIDs, c("GENENAME", "SYMBOL"))
res<-res[!duplicated(res),]
return(res);
#hgnc <- hgnc[!(duplicated),]
#return(hgnc)
}
## Creates a directory that contains a the list of genes for each cluster
.get.ClusterInvolvedGenes<-function(hdaRes,data,path="ClustersGenes"){
message(paste("creating directory",path))
dir.create(path, showWarnings = FALSE)
clus<-sort(unique(getClusters(hdaRes)));
message("processing clusters ....")
for(i in clus){
clusRegions <- getRegionsIncluster(hdaRes,data, cluster= i)
clusInfo<-.GetClusterInfo(clusRegions);
fname<-file.path(path, paste( c("cluster",i,"_genes.txt"),collapse="") )
write.table(clusInfo,file=fname,row.names=FALSE,quote=FALSE, sep="\t")
}
}
.get.NetworksGenes<- function(hdaRes, data, path){
requireNamespace("ChIPpeakAnno")
data("TSS.human.GRCh37", package="ChIPpeakAnno", envir=environment())
TSS.human.GRCh37 <- get("TSS.human.GRCh37", envir= environment())
message(paste("creating directory",path))
dir.create(path, showWarnings = FALSE)
nets <-1:ncol(topEdges(hdaRes))
message("processing networks ....")
for(net in nets){
NetworkRegions <- getRegionsInNetwork(hdaRes,data,net)
if(length(NetworkRegions)>0){
#networkInfo<-.GetClusterInfo(NetworkRegions);
tmp.rd <- GRanges( gsub("chr","", as.character(seqnames(NetworkRegions))),
IRanges(start(NetworkRegions), end(NetworkRegions)))
##ensemble <- useMart("ensembl")
## hsp <- useDataset(mart = ensemble, dataset = "hsapiens_gene_ensembl")
tmp.anno <- annotatePeakInBatch(tmp.rd,
featureType = "TSS",
AnnotationData = TSS.human.GRCh37,
## mart= hsp,
select = "all",
PeakLocForDistance = "middle")
res <- with(tmp.anno, { subset(as.data.frame(tmp.anno),
abs(distancetoFeature) <= 2500 | insideFeature =="includeFeature" )})
if(nrow(res)>0){
conv <- EnsemblToHGNC(res$feature)
pos <- match(res$feature, conv$ensembl_gene_id)
res$name <- conv$hgnc_symbol[pos]
res$space <- paste("chr",as.character(res$space))
res <- res[,c("feature","name")]
fname<-file.path(path, paste( c("Network",net,"_genes.txt"),collapse="") )
write.table(res,file=fname,row.names=FALSE,quote=FALSE, sep="\t")
}
}
}
}
.buildNetFromEdges<-function(edgesList){
g<-graph.empty()
for(e in edgesList){
vertices <-unlist(strsplit(e,split="_"))
NotIn <- which(!vertices %in% V(g)$name)
#Add nodes
if(length(NotIn)>0){
for(n in NotIn){
g <- g+ vertex(vertices[n])
}
}
#Add edges
g[vertices[1],vertices[2],directed=FALSE]<-1
}
g<-as.undirected(g)
return(g)
}
## TO Draw basier like edges
## inspired from http://is-r.tumblr.com/post/38459242505/beautiful-network-diagrams-with-ggplot2
.edgeMaker <- function(whichRow, len = 100, curved = TRUE,adjacencyList,layoutCoordinates){
fromC <- as.matrix( layoutCoordinates[adjacencyList[whichRow, 1],1:2 ] ) # Origin
toC <- as.matrix( layoutCoordinates[adjacencyList[whichRow, 2],1:2 ] ) # Terminus
# Add curve:
graphCenter <- colMeans(layoutCoordinates[,1:2]) # Center of the overall graph
bezierMid <- unlist(c(fromC[1], toC[2])) # A midpoint, for bended edges
distance1 <- sum((graphCenter - bezierMid)^2)
if(distance1 < sum((graphCenter - unlist(c(toC[1], fromC[2])))^2)){
bezierMid <- c(toC[1], fromC[2])
} # To select the best Bezier midpoint
bezierMid <- (fromC + toC + bezierMid) / 3 # Moderate the Bezier midpoint
if(curved == FALSE){bezierMid <- (fromC + toC) / 2} # Remove the curve
edge <- data.frame(bezier(c(fromC[1], bezierMid[1], toC[1]), # Generate
c(fromC[2], bezierMid[2], toC[2]), # X & y
evaluation = len)) # Bezier path coordinates
edge$Sequence <- 1:len # For size and colour weighting in plot
edge$Group <- paste(adjacencyList[whichRow, 1:2], collapse = ">")
return(edge)
}
## inspired from http://is-r.tumblr.com/post/38459242505/beautiful-network-diagrams-with-ggplot2
.plotNetwork<-function(g,layout.fct=layout.kamada.kawai, title=""){
plotcord <- data.frame(layout.fct(g) )
petnet<-NULL;
if(nrow(get.edgelist(g))>0){
edglist <- melt(as.matrix(get.adjacency(g)))
edglist <- edglist[edglist$value > 0, ]
edglist[,1]<-factor(edglist[,1],levels=V(g)$name)
edglist[,2]<-factor(edglist[,2],levels=V(g)$name)
edges <- data.frame(plotcord[edglist[,1],], plotcord[edglist[,2],])
colnames(edges) <- c("X1","Y1","X2","Y2")
edges$midX <- (edges$X1 + edges$X2) / 2
edges$midY <- (edges$Y1 + edges$Y2) / 2
allEdges <- lapply(1:nrow(edglist), .edgeMaker, len = 500, curved = TRUE,
adjacencyList= edglist, layoutCoordinates= plotcord)
allEdges <- do.call(rbind, allEdges)
pnet <- with(allEdges,{ ggplot(allEdges) + geom_path(aes(x = x, y = y, group = Group,size = -Sequence, colour= Sequence))})
}
else{
pnet <- ggplot()
}
plotcord$type <- as.factor(V(g)$type)
plotcord$name <-V(g)$name;
pnet <- pnet + geom_point(aes_string(x='X1', y='X2'), data=plotcord,size = 10, pch=21, color="#e34a33", fill="#fdbb84")
pnet <- pnet + scale_colour_gradient(low = gray(0), high = gray(9/10), guide = "none")
pnet <- pnet + geom_text(data = plotcord, aes_string(x='X1',y='X2', label = 'name'),size=2,family="Courier", fontface="bold")
pnet <- pnet + scale_size(range = c(1/10, 1), guide = "none")
pnet <- pnet + theme(panel.background = element_blank()) # + theme(legend.position="none")
pnet <- pnet + theme(axis.title.x = element_blank(), axis.title.y = element_blank())
pnet <- pnet + theme( legend.background = element_rect(colour = NA))
pnet <- pnet + theme(panel.background = element_rect(fill = "white", colour = "black"))
pnet <- pnet + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank())
pnet <- pnet + ggtitle(title)
return(pnet);
}
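## Usage sketch (illustrative, not run): .plotNetwork expects an igraph object
## whose vertices carry a `type` attribute (set by GenerateNetworks below).
##   g <- .buildNetFromEdges(c("CTCF_RAD21")); V(g)$type <- "TF"
##   print(.plotNetwork(g, layout.kamada.kawai, title = "Example network"))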
.plotAllNet <- function(networks,layoutfct=layout.kamada.kawai, file="AllGraphs.pdf"){
if(length(networks) == 0 || !all(sapply(networks, is.igraph)))
stop("the networks member should a list of igraph object")
message("plotting networks")
plots <- list()
if(file != ""){
message(paste("plots will be also available on file", file))
pdf(file, width=14, height=7)
}
for(i in 1:length(networks)){
subplot<-.plotNetwork(networks[[i]],layoutfct, paste("Network",i));
#print(subplot, vp=vplayout( ceiling(i/netPerRow), ifelse(i %% netPerRow==0,netPerRow,i %% netPerRow)) );
if(file != "") plot(subplot)
plots[[i]] <- subplot
}
if(file != ""){
dev.off()
}
return(plots)
}
.annotateNodesExpression<-function(graphs,RPKMS){
if(length(graphs) == 0)
stop("Please generate the igraph objects first. Check the GenerateNetworks method.")
for(i in 1:length(graphs)){
g<-graphs[[i]]
pos<- match(V(g)$name,RPKMS[,1])
V(g)[!is.na(pos)]$RPKM<-RPKMS[pos[!is.na(pos)],2]
graphs[[i]]<-g
}
return(graphs)
}
## Copied from the clValid package with some minor modifications.
.plot.sota <- function(x, cl=0, ...){
op <- par(no.readonly=TRUE)
on.exit(par(op))
if(cl!=0)
par(mfrow=c(1,1)) else
{
pdim <- c(0,0)
for(i in 1:100){
j <- i
if(length(x$totals) > i*j)
j <- j+1
else{
pdim <- c(i,j)
break}
if(length(x$totals) > i*j)
i <- i+1
else{
pdim <- c(i,j)
break}
}
par(mfrow=pdim)
}
ylim = c(min(x$data), max(x$data))
pr <- 4:ncol(x$tree)
if(cl==0)
cl.to.print <- 1:length(table(x$clust)) else ## changed
cl.to.print <- cl
cl.id <- sort(unique(x$clust)) ## changed
for(i in cl.to.print){
plot(1:ncol(x$data), x$tree[i, pr], col="red", type="l",
ylim=ylim, xlab=paste("Cluster ",i), ylab="Expr. Level", ...)
legend("topleft", legend=paste(x$totals[i], " Elements"), cex=.7,
text.col="navy", bty="n")
cl <- x$data[x$clust==cl.id[i],] ## changed
if(is.vector(cl))
cl <- matrix(cl, nrow=1)
for(j in 1:x$totals[i])
lines(1:ncol(x$data), cl[j,], col="grey")
lines(1:ncol(x$data), x$tree[i, pr], col="red", ...)
}
}
###################################################################################
##
## ChromatinMaintainers-methods
##
####################################################################################
setMethod("clusterInteractions", signature = c(object="ChromMaintainers"),
function(object, method="sota", nbClus=20 ){
cat("clusterInteractions : checking\n")
if(is.null(object@maintainers@docPerTopic) || 0 %in% dim(object@maintainers@docPerTopic))
stop("The docPerTopic matrix should not be empty")
cat("clusterInteractions : reading args\n")
method <- match.arg(method)
if(nbClus <= 0 || is.null(nbClus)){
stop("nbClus should be a positive number")
}
cat("using clValid\n")
requireNamespace("clValid")
clusRes <- sota(object@maintainers@docPerTopic,maxCycles=nbClus-1)
object@clusRes <- clusRes;
message(paste("DNA interactions have been clustered into",length(unique(getClusters(object))), "cluster"))
return(object)
})
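## Usage sketch (illustrative, not run): `hlda` is assumed to be a
## ChromMaintainers object produced by the earlier inference step (see the
## InferNetworks method referenced in the error messages above).
##   hlda <- clusterInteractions(hlda, method = "sota", nbClus = 10)
##   table(getClusters(hlda))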
setMethod("plot3CPETRes", signature = c(object="ChromMaintainers"),
function(object, path="", W=14, H=7 , type=c("heatmap","clusters","curve","avgCurve","netSim", "networks"),
byEdge=TRUE, layoutfct=layout.kamada.kawai, ...){
type <- match.arg(type)
            ## we could use getClusters(object), but sometimes we get
            ## some weird errors.
clusters<- getClusters(object) #@clusRes$mem
p<- NULL;
if(type== "heatmap"){
if(is.null(slot(object@maintainers,"docPerTopic")))
                stop('No inferred networks were found, please check the method InferNetworks')
p <- .plot.clustOrderHeatmap(clusters, slot(object@maintainers,"docPerTopic"), path, W, H)
}
else{
if(type %in% c("clusters","curve","avgCurve") && is.null(object@clusRes))
stop("No clustering results found, please check method cluster")
par(mar = rep(2, 4))
if(type == "curve"){
if("sota" %in% class(object@clusRes))
p<- .plot.sota(object@clusRes)
else
p <- plotCurves(object@clusRes$y,object@clusRes$mem)
}
else
if(type == "avgCurve"){
if("sota" %in% class(object@clusRes)){
message("curves and avgCurves are only plotted for clues objects. function kept for legacy")
p <- .plot.sota(object@clusRes)
}
else
p <- plotAvgCurves(object@clusRes$y,object@clusRes$mem)
}
else{
if(type == "netSim"){
if(byEdge){
p <- .plot.TopicsSimilarity(slot(object,"topEdges"))
}
else
p <- .plot.TopicsSimilarity(slot(object,"topNodes"))
}
else{
if(type== "networks"){
library(ggplot2)
if(path == "") path = "AllGraphs.pdf"
p<- .plotAllNet(networks(object), layoutfct,path)
}
}
}
}
invisible(p)
})
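## Usage sketch (illustrative, not run), assuming `hlda` is a clustered
## ChromMaintainers object:
##   plot3CPETRes(hlda, type = "heatmap")
##   plot3CPETRes(hlda, path = "AllGraphs.pdf", type = "networks")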
setMethod("getClusters",signature= c(object= "ChromMaintainers"),
function(object){
if(is.null(object@clusRes))
return(NA)
clusters <- c();
if("clues" %in% class(object@clusRes)){
clusters <- object@clusRes$mem
}
else{
clusters <- object@clusRes$clust
}
return(clusters)
})
setMethod("getRegionsIncluster", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData",
cluster="numeric"),
function(hdaRes,data, cluster=1, ...){
if(is.null(hdaRes@clusRes))
stop("You need to do the clustering first, check the cluster method")
            clusters <- getClusters(hdaRes)  ## use the accessor so both clues and sota results are handled
clusElements<-which(clusters == cluster);
if(length(clusElements) <= 0){
warning("The provided cluster does not exist")
return(new("GRanges"))
}
petNames <-rownames(slot(hdaRes@maintainers,"docPerTopic"))
regionRoot <- paste("PET#",petNames[clusElements],sep="")
pos <- which( gsub("\\.\\d|PET#","",pet(data)$PET_ID) %in% petNames[clusElements])
return(pet(data)[pos])
})
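## Usage sketch (illustrative, not run): `x` is assumed to be the
## ChiapetExperimentData object that `hlda` was built from.
##   gr <- getRegionsIncluster(hlda, x, cluster = 2)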
setMethod("getRegionsInNetwork", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData",
net="numeric"),
function(hdaRes,data, net=1,thr=0.5, ...){
if(is.null(hdaRes@clusRes))
stop("You need to do the clustering first, check the cluster method")
if(ncol(slot(hdaRes@maintainers,"docPerTopic")) < net){
warning("The provided net does not exist")
return(new("GRanges"))
}
maxes <- apply(slot(hdaRes@maintainers,"docPerTopic"),1,function(x) which(x==max(x))[1])
topInter <- which(maxes == net)
#topInter <- which(slot(hdaRes@maintainers,"docPerTopic")[,net] >= thr)
petNames <-rownames(slot(hdaRes@maintainers,"docPerTopic"))[topInter]
regionRoot <- paste("PET#",petNames,sep="")
pos <- which( gsub("\\.\\d|PET#","",pet(data)$PET_ID) %in% petNames)
return(pet(data)[pos])
})
setMethod("outputGenesPerClusterToDir", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData"),
function(hdaRes,data,path="ClustersGenes", ...){
.get.ClusterInvolvedGenes(hdaRes,data,path)
})
setMethod("outputGenesPerNetworkToDir", signature=c(hdaRes="ChromMaintainers", data="ChiapetExperimentData"),
function(hdaRes, data, path="NetworksGenes", ...){
.get.NetworksGenes(hdaRes, data, path)
})
## TODO: reduce the number of package dependencies used here
setMethod("visualizeCircos", signature = c(object= "ChromMaintainers", data= "ChiapetExperimentData", cluster="numeric"),
function(object, data, cluster = 1, chrLenghts = NULL){
requireNamespace("biovizBase")
requireNamespace("ggbio")
interactions <- getRegionsIncluster(object, data, cluster = cluster)
if(length(interactions) ==0)
return(NA)
if(is.null(chrLenghts)){
data("hg19Ideogram", package = "biovizBase", envir= environment())
hg19Ideogram <- get("hg19Ideogram", envir = environment())
hg19Ideogram <- hg19Ideogram[ as.character(seqnames(hg19Ideogram)) %in% seqlevels(interactions) ]
hg19Ideo <- keepSeqlevels(hg19Ideogram, seqlevels(interactions))
seqlengths(interactions) <- seqlengths(hg19Ideo)
}
          else{
            if(length(names(chrLenghts)) == 0 || !is.numeric(chrLenghts))
              stop("chrLenghts should be a named numeric vector")
            if(! all(seqlevels(interactions) %in% names(chrLenghts)) )
              stop("some chromosomes are missing from chrLenghts")
            pos <- match(seqlevels(interactions), names(chrLenghts))
            seqlengths(interactions) <- chrLenghts[pos]
            ## build the ideogram track from the supplied lengths
            ## (otherwise hg19Ideo, used below, is only defined in the default branch)
            hg19Ideo <- GRanges(names(chrLenghts)[pos], IRanges(1, chrLenghts[pos]))
            seqlengths(hg19Ideo) <- chrLenghts[pos]
          }
## get left-side interactions
leftID <- grep("PET#\\d+\\.1",interactions$PET_ID)
RightID <- grep("PET#\\d+\\.2",interactions$PET_ID)
circos <- interactions[leftID]
values(circos)$to.gr <- interactions[RightID]
p <- ggplot() + layout_circle(hg19Ideo, geom = "ideo", fill = "#9ecae1", color="#636363", radius = 30,trackWidth = 4)
p <- p + layout_circle(hg19Ideo, geom = "text", aes(label = seqnames),
vjust = 0,radius = 32, trackWidth = 7)
p <- p + layout_circle(circos, geom = "link", linked.to = "to.gr",radius = 29, trackWidth = 1, color="#f03b20")
p <- p + ggtitle(paste("Interactions in cluster", cluster)) +
theme(plot.title = element_text(lineheight=.8, face="bold"))
plot(p)
invisible(list(circos = circos,plot = p))
})
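## Usage sketch (illustrative, not run), with the default hg19 ideogram:
##   res <- visualizeCircos(hlda, x, cluster = 1)
## or with user-supplied chromosome lengths (names must match the seqlevels
## of the interactions; the value below is the hg19 chr1 length):
##   visualizeCircos(hlda, x, cluster = 1, chrLenghts = c(chr1 = 249250621))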
setMethod("topEdges", signature = c(object = "ChromMaintainers"),
function(object){
return(object@topEdges)
})
setMethod("topNodes", signature = c(object = "ChromMaintainers"),
function(object){
return(object@topNodes)
})
setMethod("networks", signature= c(object= "ChromMaintainers"),
function(object){
return(slot(object,"networks"))
})
setMethod("updateResults", signature=c(object="ChromMaintainers",nets="NetworkCollection", thr="numeric"),
function(object,nets,thr=0.5){
if(!is.null(nets)){
object@topEdges<- .print.topwords(object@maintainers@wordsPerTopic,as.matrix(TF(nets)),thr)
object@topNodes <- .getNodesList(object@topEdges)
## if the networks were previously generated then update them
if(length(object@networks) > 0){
object<- GenerateNetworks(object)
}
return(object)
}
else{
warning("a NetworkCollection object should be specified")
}
}
)
setMethod("GenerateNetworks", signature = c(object = "ChromMaintainers"),
function(object,...) {
## if one of the dimensions is zero we consider it as non valid
if(0 %in% dim(topEdges(object)) || is.null(topEdges(object)) )
stop("No topEdge reults found")
motifs <- colnames(wordsPerTopic(object@maintainers))
subgraphs<-list()
topics <- topEdges(object)
for(i in 1:ncol(topics)){
edgesList<-unique(topics[,i])
edgesList<- edgesList[edgesList!=""]
g <- .buildNetFromEdges(edgesList)
V(g)$type="co-factor";
tfs<-which(V(g)$name %in% motifs);
if(length(tfs)>0){
V(g)$type[tfs]<-"TF";
}
subgraphs[[i]]<-g;
}
names(subgraphs) <- paste("Network", 1:length(subgraphs),sep="")
object@networks <- subgraphs;
return(object)
})
setMethod("annotateExpression", signature= c(object = "ChromMaintainers", RPKMS = "data.frame"),
function(object, RPKMS){
if(ncol(RPKMS) <2)
stop("a data.frame with at least 2 columns should be provided")
if( all(is.na(as.numeric(RPKMS[,2]) )) )
stop("The second column should have numeric values")
object@networks <- .annotateNodesExpression(networks(object),RPKMS)
return(object)
})
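## Usage sketch (illustrative, not run): RPKMS needs identifiers in column 1
## and numeric expression values in column 2 (toy values below).
##   rpkm <- data.frame(gene = c("CTCF", "RAD21"), RPKM = c(12.3, 8.1))
##   hlda <- annotateExpression(hlda, rpkm)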
setMethod("show", signature=c(object="ChromMaintainers"),
function(object){
cat("class:", class(object),"\n")
cat("HLDA Results:\n")
cat("------------\n")
print(object@maintainers)
})
setValidity("ChromMaintainers",.valid.ChromMaintainers)
## A user-friendly constructor for ChromMaintainers objects
ChromMaintainers<- function( maintainers,topEdges,topNodes, clusRes = NULL, networks = list()){
return( new("ChromMaintainers", maintainers = maintainers,topEdges = topEdges,
topNodes = topNodes, clusRes = clusRes, networks = networks) )
}
## ---- /docs/R/man_method_trend_plot.R (rociojoo/mov-eco-review, permissive) ----
library(tidyverse)
library(RColorBrewer)
# Arguments
path <- "./Data/ProcessedQueries/References/"
# path.plots <- "./Rocio/Plots/"
path_processed_dictionaries <- "./Data/Dictionary/Papers-Term/"
path_dictionary_info <- "./Data/Dictionary/"
source("R/methods_cat_analysis.R")
papers <- read.csv(file = paste0(path,"cleaned_papers_all_years_simple.csv"),stringsAsFactors = FALSE)
data_decade <- papers %>%
filter(pubyear > 2008 & pubyear < 2019)
dictionary <- "Methods"
load(paste0(path_processed_dictionaries,"paper-term-dictionary-",dictionary,".RData"))
# both matrices have the keywords ordered alphabetically:
total_useful_papers <- apply(matrix_CatTerm[2:ncol(matrix_CatTerm)],1,sum)
matrix_CatTerm <- matrix_CatTerm[total_useful_papers>0,]
keywords <- unlist(lapply(strsplit(colnames(matrix_CatTerm), " : ")[-1],"[[",2))
colnames(matrix_CatTerm) <- c("doi",keywords)
rownames(matrix_CatTerm) <- matrix_CatTerm$doi
matrix_CatTerm <-
matrix_CatTerm[,-1] %>%
select(sort(colnames(.))) # sort columns by alphabetical order
synonyms_keywords <- read_csv(paste0(path_dictionary_info,"Synonyms-Methods.csv"))
res <- methods_cat_analysis(matrix_CatTerm = matrix_CatTerm, synonyms_keywords, col_cat = 3, data_decade)
## print(res[[1]]) gives the output of the Table "Percentage of papers using each type of statistical method."
# Internally, filter out tests, model selection, simulation, likelihood-related, and other/NA keywords:
filter_out_methods <- unique(c(which(synonyms_keywords$meaning2 == "test" |
synonyms_keywords$meaning2 == "model selection" |
synonyms_keywords$meaning2 == "simulation" |
is.na(synonyms_keywords$meaning2) == TRUE |
synonyms_keywords$meaning3 == "other"),
grep("likelihood",synonyms_keywords$keyword)))
synonyms_keywords <- as.data.frame(synonyms_keywords[-filter_out_methods,])
col_cat=3
ext_subcat <- unique(synonyms_keywords[ , col_cat + 1])
matrix_Ext_SubCat <- matrix(0, ncol = length(ext_subcat), nrow = nrow(matrix_CatTerm))
colnames(matrix_Ext_SubCat) <- ext_subcat
rownames(matrix_Ext_SubCat) <- rownames(matrix_CatTerm)
for (i in 1:length(ext_subcat)){ # for each subcategory
terms_ext <- synonyms_keywords$keyword[which(synonyms_keywords[ , col_cat + 1] == ext_subcat[i])] # which keywords correspond to the subcategory
col_matrix <- which(colnames(matrix_CatTerm) %in% terms_ext) # which columns in matrix_CatTerm does that represent
  # now check which rows have at least one 1 among those keyword columns
  # (row sums should be >= 1)
  ind_matrix <- which(rowSums(matrix_CatTerm[, col_matrix, drop = FALSE]) > 0)
matrix_Ext_SubCat[ind_matrix,i] <- 1
}
total_ext <- sum(rowSums(matrix_Ext_SubCat)>0)
table_sub <- sort(round(apply(matrix_Ext_SubCat,2,sum)/total_ext*100,1),decreasing = TRUE)
# Now, join with year
df_Ext_SubCat <- as.data.frame(matrix_Ext_SubCat) %>%
mutate(doi = rownames(matrix_Ext_SubCat))
joined_df <- df_Ext_SubCat %>% left_join(data_decade, by = "doi") %>% select(ext_subcat,pubyear)
# And counting by category by year
joined_df_year <- joined_df %>% group_by(pubyear) %>% summarise_all(sum)
joined_df_nozero <- joined_df[apply(joined_df[,ext_subcat],1,sum) > 0,]
sum_year <- joined_df_nozero %>% group_by(pubyear) %>% tally()
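# The denominator is the number of papers per year that matched at least one
# methods subcategory, so each column of joined_df_prop_year is a within-year proportion.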
joined_df_prop_year <- joined_df_year[,ext_subcat]/matrix(rep(sum_year$n,each=length(ext_subcat)),ncol=length(ext_subcat),byrow=TRUE)
joined_df_prop_year <- joined_df_prop_year %>% mutate(year = joined_df_year$pubyear)
df_plot_prop <- joined_df_prop_year %>% gather(key = subcategory, value = prop_papers, -year)
plot_df <- df_plot_prop
head(plot_df)
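# Relabel subcategories with display names; this relies on gather() stacking the
# columns in ext_subcat order, with 10 rows (years 2009-2018) per subcategory.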
plot_df$subcategory <- rep(c('Generic','Spatial','Movement','Time-series','Social','Spatial-temporal'), each = 10)
# Run a quick linear model to measure which trend lines are positive or negative
# we'll reference this when we choose our colors
here <- by(plot_df, plot_df$subcategory, function(x)
lm(x$prop_papers ~ x$year)$coefficients[2]
)
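# `here` holds one fitted slope per subcategory (change in proportion per year);
# sort(here) orders subcategories from the steepest decline to the steepest increase.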
plot_df$subcategory <- factor(plot_df$subcategory, levels= names(sort(here)))
# Create a grouping variable based on this value
grouping <- data.frame(subcategory = c(names(here)[here<=0.003 & here>=(-0.003)],names(here)[here<(-0.003)],names(here)[here>0.003]))
grouping$group <- seq_along(grouping$subcategory)
plot_df <- merge(plot_df,grouping, by='subcategory')
# Now to make our aesthetic features which will be added with scale_*_manual()
# Colors
# Make a color ramp where the amount of 'grays' will determine the highlighted categories
Tol_muted <- c('#88CCEE', '#44AA99', '#117733', '#332288', '#DDCC77', '#999933','#CC6677', '#882255', '#AA4499', '#DDDDDD')
#Okabe_Ito <- c("#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#000000")
colorz <- Tol_muted[1:(length(here)) %% (length(Tol_muted))]
# change problematic colors to gray60
# gray 60 "#7f7f7f"
# black "#000000"
names(colorz) <- names(sort(here))
colorz
colorz[6] <- "#7f7f7f"
# line types
# just need to spread linetypes out enough so that the color and alpha can help distinguish as well
# manual 5 = dash, 3 = dotted, 1 = solid
linetypez <- c(5,3,3,3,3,1)
# or random
# linetypez <- rep(1:6,times=ceiling(length(levels(plot_df$Topic))/6))
# linetypez <- linetypez[seq_along(levels(plot_df$Topic))]
names(linetypez) <- names(sort(here))
# alpha
# Changing alpha will help to make the important categories pop.
# Create a gradient of alphas (1 -> .2 -> 1) so the less-important trend lines fade into the background.
nz <- length(here)
# automatically
#alphaz <- c((1*nz/2):(.2*nz/2)/nz*2,(.2*nz/2):(1*nz/2)/nz*2,ifelse(nz%%2==0,NULL,1))
# or manually
alphaz <- c(1,.7,.4,.4,.4,1)
names(alphaz) <- names(sort(here))
# line width
sizez <- c(2,1,1,1,1,2)
#names(sizez) <- names(here)
sizez
sizez <- rep(sizez, each = 10)
sizez
# You have to include color, linetype, and alpha in the mapping even if you're going to override them anyway.
p <- ggplot(
data = plot_df) +
geom_line(size=1.5,
mapping = aes(x = year, y = prop_papers, color = subcategory, group = group, linetype = subcategory, alpha = subcategory)
) +
scale_color_manual(name='Methods',values = colorz) +
scale_linetype_manual(name='Methods',values = linetypez) +
scale_alpha_manual(name='Methods',values = alphaz)+
theme_classic()+xlab("") + ylab("Proportion of articles in a year") +
theme(axis.text.x = element_text(angle = 15, hjust = 1,size=16),axis.text.y = element_text(size=16),
legend.position = "none", legend.justification = "right",legend.text=element_text(size=15),
axis.title.y = element_text(margin = margin(r=10),size=17),
axis.title.x = element_text(margin = margin(t=10)),
legend.key.size = unit(2,"line"),
legend.title=element_text(size=16))
start_pos <- plot_df %>% group_by(subcategory) %>% summarise(y = last(prop_papers)) %>% mutate(x = 2018)
start_pos$colorz <- colorz
start_pos
start_pos$x_new <- start_pos$x + 0.1
start_pos$y_new <- start_pos$y + c(0,0,0,0,0,0)
p + geom_text(data = start_pos, aes(x =x_new ,y=y_new, label = subcategory), color=colorz,hjust=0,size=5)+
coord_cartesian(xlim = c(2009, 2018),clip = 'off') +
theme(plot.margin = unit(c(1,10,1,1), "lines"))
# ggsave("Manuscript/Images/method_ts1.png", width=12,height=8)
## ---- /db_query.r (mengeln/PHAB-metrics, no_license) ----
library(RODBC)
connection <- odbcConnectAccess2007("P:/PartTimers/MarkEngeln/SWAMP_RM_112012.mdb")
# sqlQuery(connection, "SELECT Count(*) AS N
# FROM
# (SELECT DISTINCT StationCode FROM Query3_test) AS T")
projects <- sqlQuery(connection, "SELECT DISTINCT ProjectCode, ProtocolName FROM Query3_test WHERE ProtocolName IN
('CCAMP Field Sampling Protocol 2006', 'CCAMP Field Sampling Protocol 2012', 'DFG-ABL 2005 Wadeable Streams',
'EMAP 2001 Wadeable Streams', 'EMAP Coastal, MPSL-DFG_Field SOP_v1.0', 'SNARL_1996_WS, SNARL_2003_WS', 'SNARL_2005_WS, SNARL_2007_WS',
'SNARL_2008_WS, SWAMP 07 & EMAP 01 Wadeable Streams combination', 'SWAMP 2007 & SNARL 2007 Wadeable Streams', 'SWAMP_2007_WS')")
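# Note: entries such as 'SNARL_1996_WS, SNARL_2003_WS' are single strings that
# contain a comma; if these were meant to be separate protocol names, the quoting
# in the IN (...) list above should be split accordingly.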
projects.sub <- as.character(projects$ProjectCode[projects$ProtocolName != "SWAMP_2007_WS"])
test <- sqlQuery(connection, paste0("SELECT * FROM Query3_test WHERE ProjectCode = '", "RWB1_RuR_FY1011", "'"))
test$SampleID <- with(test, paste0(StationCode, ProjectCode, SampleDate))
# test2 <- subset(test, SampleID == test$SampleID[1])
#
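# phabMetrics() is not defined in this script; it is assumed to be provided by
# the PHAB-metrics functions loaded/sourced beforehand.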
phabMetrics(test)
phab_test2 <- lapply(projects.sub, function(p){
try({
data <- sqlQuery(connection, paste0("SELECT * FROM Query3_test WHERE ProjectCode = '", p, "'"))
print(p)
if(nrow(data)==0)NA else{
data$SampleID <- with(data, paste0(StationCode, SampleDate))
res <- phabMetrics(data)
gc()
res}
})
})
odbcClose(connection)
phab_result2 <- Filter(is.data.frame, phab_test2)
phab_NON_SWAMP_2007_WS <- Reduce(rbind, phab_result2)
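# phab_SWAMP_2007_WS is assumed to have been computed separately (for the
# SWAMP_2007_WS protocol) and to already be present in the workspace.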
full <- rbind(phab_NON_SWAMP_2007_WS, phab_SWAMP_2007_WS)
save(full, file="full.rdata")