fileName <- "household_power_consumption.txt"
fileErrPrmpt <- "Please copy the file to the directory and/or download file from https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
wd <- getwd()
## Check whether the file exists.
## Since the file is big (126 MB), the script does not try to download it, but prints a link for manual download.
## download.file() can be used if an automatic download is required (see the sketch below).
if(!file.exists(fileName)){
  stop("\nFile does not exist in ", wd, ".\n", fileErrPrmpt)
}
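## Illustrative sketch (not part of the original script): if an automatic download
## is preferred, the check above could be replaced by something like the following.
## The URL is the one from fileErrPrmpt; the zip file name and the assumption that
## the archive unpacks to household_power_consumption.txt are mine.
# if (!file.exists(fileName)) {
#   zipUrl  <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#   zipFile <- "household_power_consumption.zip"
#   download.file(zipUrl, destfile = zipFile, mode = "wb")
#   unzip(zipFile)
# }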
# Read the data file
raw_data <- read.csv(fileName, sep=";", na.strings="?", stringsAsFactors=FALSE)
# Keep only the observations from 1 and 2 February 2007 (dates are stored as d/m/Y)
dataset <- subset(raw_data, regexpr("^0?(1|2)/0?2/2007", raw_data$Date)>0)
datetime <- strptime(paste(dataset$Date,dataset$Time), "%d/%m/%Y %H:%M:%S", tz='UTC')
# Open the png device for plotting; type = "cairo" has been added so that the output matches the example plot
# The reference image in rdpeng's GitHub repository (from which this repository was forked) has a transparent background, so bg = "transparent" is used
png(file = "./plot2.png", width = 480, height = 480, units = "px", type="cairo", bg="transparent")
# Plot
plot(datetime,dataset$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)", main = "")
# Close device
dev.off()
path: /plot2.R | license: no_license | repo: sirujam/ExData_Plotting1 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,248 | extension: r
options(shiny.host="10.64.20.23")
options(shiny.port=7401)
library(shiny)
library(stringr)
library(shinyjs)
library(shinysky)
# returns string w/o leading or trailing whitespace
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
# Globally define a place where all users can share some reactive data.
vars <- reactiveValues(chat=NULL, users=NULL)
# Restore the chat log from the last session.
if (file.exists("chat.Rds")){
vars$chat <- readRDS("chat.Rds")
} else {
vars$chat <- "Welcome to SilentChat!"
}
#' Get the prefix for the line to be added to the chat window. Usually an HTML
#' line break (<br />) unless it's the first line.
linePrefix <- function(){
if (is.null(isolate(vars$chat))){
return("")
}
return("<br />")
}
shinyServer(function(input, output, session) {
# Create a spot for reactive variables specific to this particular session
sessionVars <- reactiveValues(username = "")
# Track whether or not this session has been initialized. We'll use this to
# assign a username to uninitialized sessions.
init <- FALSE
# When a session is ended, remove the user and note that they left the room.
session$onSessionEnded(function() {
isolate({
vars$users <- vars$users[vars$users != sessionVars$username]
vars$chat <- c(vars$chat, paste0(linePrefix(),
tags$span(class="user-exit",
sessionVars$username,
"left the room.")))
})
})
# Observer to handle changes to the username
observe({
# We want a reactive dependency on this variable, so we'll just list it here.
input$user
if (!init){
# Seed initial username
sessionVars$username <- paste0("User", round(runif(1, 10000, 99999)))
isolate({
vars$chat <<- c(vars$chat, paste0(linePrefix(),
tags$span(class="user-enter",
sessionVars$username,
"entered the room.")))
})
init <<- TRUE
} else{
# A previous username was already given
isolate({
if (input$user == sessionVars$username || input$user == ""){
# No change. Just return.
return()
}
# Updating username
# First, remove the old one
vars$users <- vars$users[vars$users != sessionVars$username]
# Note the change in the chat log
vars$chat <<- c(vars$chat, paste0(linePrefix(),
tags$span(class="user-change",
paste0("\"", sessionVars$username, "\""),
" -> ",
paste0("\"", input$user, "\""))))
# Now update with the new one
sessionVars$username <- input$user
})
}
# Add this user to the global list of users
isolate(vars$users <- c(vars$users, sessionVars$username))
})
# Keep the username updated with whatever sanitized/assigned username we have
observe({
updateTextInput(session, "user",
value=sessionVars$username)
})
# Keep the list of connected users updated
output$userList <- renderUI({
tagList(tags$ul( lapply(vars$users, function(user){
return(tags$li(user))
})))
})
# Listen for input$send changes (i.e. when the button is clicked)
observe({
if(input$send < 1){
# The code must be initializing, b/c the button hasn't been clicked yet.
return()
}
isolate({
# Add the current entry to the chat log.
if (trim(input$entry) != "") {
vars$chat <<- c(vars$chat,
paste0(linePrefix(),
tags$span(class="username",
tags$abbr(title=Sys.time(), sessionVars$username)
),
": ",
tagList(input$entry)))
if(grepl("tell me a secret", tolower(input$entry)) ) {
vars$chat <<- c(vars$chat,
paste0(linePrefix(),
tags$span(class="username",
tags$abbr(title=Sys.time(), "Admin")
),
": ",
tagList("Jacob got a girlfriend.")))
}
}
})
# Clear out the text entry field.
updateTextInput(session, "entry", value="")
})
# Listen for the input$clearChat (i.e. when the button is clicked)
observeEvent(input$clearChat, {
vars$chat <<- "Welcome to SilentChat!"
})
# handle css
output$selectCSS <- renderUI({
fileName = paste0("bootstrap_",input$css, ".css")
tags$head(tags$link(rel = "stylesheet", type = "text/css", href = fileName))
})
# Dynamically create the UI for the chat window.
output$chat <- renderUI({
if (length(vars$chat) > 500){
# Too long, use only the most recent 500 lines
vars$chat <- vars$chat[(length(vars$chat)-499):length(vars$chat)]
}
# Save the chat object so we can restore it later if needed.
if (input$mode_selection == "Log Mode"){
saveRDS(vars$chat, "chat.Rds")
} else if (input$mode_selection == "Silent Mode"){
saveRDS(vars$chat, gsub(":", "_", paste0("SilentLogs/chat_", Sys.time(), ".Rds")))
}
# Pass the chat log through as HTML
HTML(vars$chat)
})
})
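## The server definition above expects a companion ui.R that is not included in
## this dump. A minimal sketch inferred from the input/output IDs used in
## server.R (layout, labels, and the CSS theme choices are assumptions):
# library(shiny)
# shinyUI(fluidPage(
#   uiOutput("selectCSS"),          # injects bootstrap_<input$css>.css
#   textInput("user", "Username:"),
#   uiOutput("userList"),           # list of connected users
#   uiOutput("chat"),               # chat log rendered as HTML
#   textInput("entry", "Message:"),
#   actionButton("send", "Send"),
#   actionButton("clearChat", "Clear chat"),
#   selectInput("css", "Theme:", choices = c("default")),
#   selectInput("mode_selection", "Mode:", choices = c("Log Mode", "Silent Mode"))
# ))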
path: /server.R | license: no_license | repo: ASound18/SlientChat | language: R | is_vendor: false | is_generated: false | length_bytes: 5,843 | extension: r
#' Define target grid for interpolation
#'
#' Define the resolution and extent of a target grid for interpolation by SAGA modules based on (1) user-provided x/y coordinates, (2) an existing SAGA grid file, or (3) the header data of an ASCII grid. Intended to be used with RSAGA's interpolation functions.
#' @name rsaga.target
#' @param target character: method used for defining the target grid
#' @param user.cellsize Only for \code{target="user.defined"}: raster resolution (in the grid's map units)
#' @param user.x.extent See \code{user.y.extent}
#' @param user.y.extent Only for \code{target="user.defined"}: numeric vectors of length 2: minimum and maximum coordinates of grid cell center points
#' @param target.grid Only for \code{target="target.grid"}: character string giving the name of a SAGA grid file that specifies the extent and resolution of the target grid; this target grid file may be overwritten, depending on the specifics of the SAGA GIS module used.
#' @param header Only for \code{target="header"}: list: ASCII grid header (as returned e.g. by \code{\link{read.ascii.grid.header}}) or defined manually; must at least have components \code{ncols}, \code{nrows}, \code{cellsize}, and either \code{x/yllcorner} or \code{x/yllcenter}.
#' @param env A SAGA geoprocessing environment, see \code{\link{rsaga.env}}.
#' @note This function is to be used with RSAGA functions \code{\link{rsaga.inverse.distance}}, \code{\link{rsaga.nearest.neighbour}} and \code{\link{rsaga.modified.quadratic.shephard}}. Note that these are currently only compatible with SAGA GIS 2.0.5 and higher.
#' @seealso \code{\link{read.ascii.grid.header}}
#' @examples
#' \dontrun{
#' # IDW interpolation of attribute "z" from the point shapefile
#' # 'points.shp' to a grid with the same extent and resolution
#' # as the (pre-existing) geology grid:
#' rsaga.inverse.distance("points", "dem", field = "z", maxdist = 1000,
#' target = rsaga.target(target="target.grid",
#' target.grid = "geology"))
#' }
#' @keywords spatial interface
#' @export
rsaga.target = function(
target = c("user.defined", "target.grid", "header"),
user.cellsize = 100,
user.x.extent, user.y.extent,
target.grid, header, env = rsaga.env() )
{
if(env$version == "2.0.4")
stop("'rsaga.target' currently doesn't support SAGA GIS version 2.0.4\n")
target = match.arg.ext(target, base = 0, numeric = TRUE)
if (target == 2) {
stopifnot(missing(user.x.extent) & missing(user.y.extent) & missing(target.grid))
target = 0
user.cellsize = header$cellsize
if (!any(names(header) == "xllcenter"))
header$xllcenter = header$xllcorner + header$cellsize / 2
if (!any(names(header) == "yllcenter"))
header$yllcenter = header$yllcorner + header$cellsize / 2
user.x.extent = c(header$xllcenter, header$xllcenter + header$cellsize * (header$ncols-1))
user.y.extent = c(header$yllcenter, header$yllcenter + header$cellsize * (header$nrows-1))
}
param = list(TARGET = target)
if (target == 0) {
param = c(param,
USER_SIZE = user.cellsize,
USER_XMIN = min(user.x.extent),
USER_XMAX = max(user.x.extent),
USER_YMIN = min(user.y.extent),
USER_YMAX = max(user.y.extent))
} else if (target == 1) {
stopifnot(missing(user.x.extent) & missing(user.y.extent))
target.grid = default.file.extension(target.grid, ".sgrd")
param = c(param,
GRID_GRID = target.grid)
}
return(param)
}
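# Illustrative sketch (not from the original source; grid dimensions and coordinates
# are made up): define a target grid from a manually specified ASCII grid header,
# the third way of defining a target described above.
# rsaga.target(target = "header",
#              header = list(ncols = 200, nrows = 100, cellsize = 25,
#                            xllcorner = 500000, yllcorner = 5400000))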
######## Module io_grid_gdal ########
#' Import Grid Files to SAGA grid format using GDAL
#'
#' These functions provide simple interfaces for reading and writing grids from/to ASCII grids and Rd files. Grids are stored in matrices, their headers in lists.
#' @name rsaga.import.gdal
#' @param in.grid file name of a grid in a format supported by GDAL
#' @param out.grid output SAGA grid file name; defaults to \code{in.grid} with the file extension removed; the extension should not be specified, as it defaults to \code{.sgrd}
#' @param env RSAGA geoprocessing environment created by \code{\link{rsaga.env}}
#' @param ... additional arguments to be passed to \code{rsaga.geoprocessor}
#' @details The GDAL Raster Import module of SAGA imports grid data from various file formats using the Geospatial Data Abstraction Library (GDAL) by Frank Warmerdam.
#' GDAL Versions are specific to SAGA versions:
#' \itemize{
#' \item SAGA 2.0.7 - 2.0.8: GDAL v.1.8.0
#' \item SAGA 2.1.0 - 2.1.1: GDAL v.1.10.0
#' \item SAGA 2.1.2 - 2.2.0: GDAL v.1.11.0
#' \item SAGA 2.2.1 - 2.2.3: GDAL v.2.1.0 dev}
#' More information is available at \url{http://www.gdal.org/}.
#'
#' If \code{in.grid} has more than one band (e.g. RGB GEOTIFF), then output grids with file names of the form \eqn{in.grid{\_}01.sgrd}{in.grid_01.sgrd}, \eqn{in.grid{\_}02.sgrd}{in.grid_02.sgrd} etc. are written, one for each band.
#'
#' The following raster formats are currently supported. Last updated for SAGA GIS 2.2.3;
#' for the list supported by a specific SAGA GIS version, call \code{rsaga.html.help("io_gdal","GDAL: Import Raster", env = rsaga.env(path="SAGA_Version_to_Test"))}
#' \itemize{
#' \item BAG - Bathymetry Attributed Grid
#' \item ECW - ERDAS Compressed Wavelets (SDK 3.x)
#' \item JP2ECW - ERDAS JPEG2000 (SDK 3.x)
#' \item FITS - Flexible Image Transport System
#' \item GMT - GMT NetCDF Grid Format
#' \item HDF4 - Hierarchical Data Format Release 4
#' \item HDF4Image - HDF4 Dataset
#' \item HDF5 - Hierarchical Data Format Release 5
#' \item HDF5Image - HDF5 Dataset
#' \item KEA - KEA Image Format (.kea)
#' \item MG4Lidar - MrSID Generation 4 / Lidar (.sid)
#' \item MrSID - Multi-resolution Seamless Image Database (MrSID)
#' \item netCDF - Network Common Data Format
#' \item PostgreSQL - PostgreSQL/PostGIS
#' \item VRT - Virtual Raster
#' \item GTiff - GeoTIFF
#' \item NITF - National Imagery Transmission Format
#' \item RPFTOC - Raster Product Format TOC format
#' \item ECRGTOC - ECRG TOC format
#' \item HFA - Erdas Imagine Images (.img)
#' \item SAR_CEOS - CEOS SAR Image
#' \item CEOS - CEOS Image
#' \item JAXAPALSAR - JAXA PALSAR Product Reader (Level 1.1/1.5)
#' \item GFF - Ground-based SAR Applications Testbed File Format (.gff)
#' \item ELAS - ELAS
#' \item AIG - Arc/Info Binary Grid
#' \item AAIGrid - Arc/Info ASCII Grid
#' \item GRASSASCIIGrid - GRASS ASCII Grid
#' \item SDTS - SDTS Raster
#' \item DTED - DTED Elevation Raster
#' \item PNG - Portable Network Graphics
#' \item JPEG - JPEG JFIF
#' \item MEM - In Memory Raster
#' \item JDEM - Japanese DEM (.mem)
#' \item GIF - Graphics Interchange Format (.gif)
#' \item BIGGIF - Graphics Interchange Format (.gif)
#' \item ESAT - Envisat Image Format
#' \item BSB - Maptech BSB Nautical Charts
#' \item XPM - X11 PixMap Format
#' \item BMP - MS Windows Device Independent Bitmap
#' \item DIMAP - SPOT DIMAP
#' \item AirSAR - AirSAR Polarimetric Image
#' \item RS2 - RadarSat 2 XML Product
#' \item SAFE - Sentinel SAFE Product
#' \item PCIDSK - PCIDSK Database File
#' \item PCRaster - PCRaster Raster File
#' \item ILWIS - ILWIS Raster Map
#' \item SGI - SGI Image File Format 1.0
#' \item SRTMHGT - SRTMHGT File Format
#' \item Leveller - Leveller heightfield
#' \item Terragen - Terragen heightfield
#' \item ISIS3 - USGS Astrogeology ISIS cube (Version 3)
#' \item ISIS2 - USGS Astrogeology ISIS cube (Version 2)
#' \item PDS - NASA Planetary Data System
#' \item VICAR - MIPL VICAR file
#' \item TIL - EarthWatch .TIL
#' \item ERS - ERMapper .ers Labelled
#' \item JP2OpenJPEG - JPEG-2000 driver based on OpenJPEG library
#' \item L1B - NOAA Polar Orbiter Level 1b Data Set
#' \item FIT - FIT Image
#' \item GRIB - GRIdded Binary (.grb)
#' \item RMF - Raster Matrix Format
#' \item WCS - OGC Web Coverage Service
#' \item WMS - OGC Web Map Service
#' \item MSGN - EUMETSAT Archive native (.nat)
#' \item RST - Idrisi Raster A.1
#' \item INGR - Intergraph Raster
#' \item GSAG - Golden Software ASCII Grid (.grd)
#' \item GSBG - Golden Software Binary Grid (.grd)
#' \item GS7BG - Golden Software 7 Binary Grid (.grd)
#' \item COSAR - COSAR Annotated Binary Matrix (TerraSAR-X)
#' \item TSX - TerraSAR-X Product
#' \item COASP - DRDC COASP SAR Processor Raster
#' \item R - R Object Data Store
#' \item MAP - OziExplorer .MAP
#' \item PNM - Portable Pixmap Format (netpbm)
#' \item DOQ1 - USGS DOQ (Old Style)
#' \item DOQ2 - USGS DOQ (New Style)
#' \item ENVI - ENVI .hdr Labelled
#' \item EHdr - ESRI .hdr Labelled
#' \item GenBin - Generic Binary (.hdr Labelled)
#' \item PAux - PCI .aux Labelled
#' \item MFF - Vexcel MFF Raster
#' \item MFF2 - Vexcel MFF2 (HKV) Raster
#' \item FujiBAS - Fuji BAS Scanner Image
#' \item GSC - GSC Geogrid
#' \item FAST - EOSAT FAST Format
#' \item BT - VTP .bt (Binary Terrain) 1.3 Format
#' \item LAN - Erdas .LAN/.GIS
#' \item CPG - Convair PolGASP
#' \item IDA - Image Data and Analysis
#' \item NDF - NLAPS Data Format
#' \item EIR - Erdas Imagine Raw
#' \item DIPEx - DIPEx
#' \item LCP - FARSITE v.4 Landscape File (.lcp)
#' \item GTX - NOAA Vertical Datum .GTX
#' \item LOSLAS - NADCON .los/.las Datum Grid Shift
#' \item NTv2 - NTv2 Datum Grid Shift
#' \item CTable2 - CTable2 Datum Grid Shift
#' \item ACE2 - ACE2
#' \item SNODAS - Snow Data Assimilation System
#' \item KRO - KOLOR Raw
#' \item ROI_PAC - ROI_PAC raster
#' \item ISCE - ISCE raster
#' \item ARG - Azavea Raster Grid format
#' \item RIK - Swedish Grid RIK (.rik)
#' \item USGSDEM - USGS Optional ASCII DEM (and CDED)
#' \item GXF - GeoSoft Grid Exchange Format
#' \item NWT_GRD - Northwood Numeric Grid Format .grd/.tab
#' \item NWT_GRC - Northwood Classified Grid Format .grc/.tab
#' \item ADRG - ARC Digitized Raster Graphics
#' \item SRP - Standard Raster Product (ASRP/USRP)
#' \item BLX - Magellan topo (.blx)
#' \item Rasterlite - Rasterlite
#' \item PostGISRaster - PostGIS Raster driver
#' \item SAGA - SAGA GIS Binary Grid (.sdat)
#' \item KMLSUPEROVERLAY - Kml Super Overlay
#' \item XYZ - ASCII Gridded XYZ
#' \item HF2 - HF2/HFZ heightfield raster
#' \item PDF - Geospatial PDF
#' \item OZI - OziExplorer Image File
#' \item CTG - USGS LULC Composite Theme Grid
#' \item E00GRID - Arc/Info Export E00 GRID
#' \item ZMap - ZMap Plus Grid
#' \item NGSGEOID - NOAA NGS Geoid Height Grids
#' \item MBTiles - MBTiles
#' \item IRIS - IRIS data (.PPI, .CAPPi etc)
#' \item PLMOSAIC - Planet Labs Mosaic
#' \item CALS - CALS (Type 1)
#' \item WMTS - OGC Web Map Tile Service
#' \item ESRI Shapefile - ESRI Shapefile
#' \item MapInfo File - MapInfo File
#' \item UK .NTF - UK .NTF
#' \item OGD_SDTS - SDTS
#' \item S57 - IHO S-57 (ENC)
#' \item DGN - Microstation DGN
#' \item OGR_VRT - VRT - Virtual Datasource
#' \item REC - EPIInfo .REC
#' \item Memory - Memory
#' \item BNA - Atlas BNA
#' \item CSV - Comma Separated Value (.csv)
#' \item NAS - NAS - ALKIS
#' \item GML - Geography Markup Language
#' \item GPX - GPX
#' \item LIBKML - Keyhole Markup Language (LIBKML)
#' \item KML - Keyhole Markup Language (KML)
#' \item GeoJSON - GeoJSON
#' \item Interlis 1 - Interlis 1
#' \item Interlis 2 - Interlis 2
#' \item OGR_GMT - GMT ASCII Vectors (.gmt)
#' \item GPKG - GeoPackage
#' \item SQLite - SQLite / Spatialite
#' \item ODBC - ODBC
#' \item WAsP - WAsP .map format
#' \item PGeo - ESRI Personal GeoDatabase
#' \item MSSQLSpatial - Microsoft SQL Server Spatial Database
#' \item MySQL - MySQL
#' \item OpenFileGDB - ESRI FileGDB
#' \item XPlane - X-Plane/Flightgear aeronautical data
#' \item DXF - AutoCAD DXF
#' \item Geoconcept - Geoconcept
#' \item GeoRSS - GeoRSS
#' \item GPSTrackMaker - GPSTrackMaker
#' \item VFK - Czech Cadastral Exchange Data Format
#' \item PGDUMP - PostgreSQL SQL dump
#' \item OSM - OpenStreetMap XML and PBF
#' \item GPSBabel - GPSBabel
#' \item SUA - Tim Newport-Peace's Special Use Airspace Format
#' \item OpenAir - OpenAir
#' \item OGR_PDS - Planetary Data Systems TABLE
#' \item WFS - OGC WFS (Web Feature Service)
#' \item HTF - Hydrographic Transfer Vector
#' \item AeronavFAA - Aeronav FAA
#' \item Geomedia - Geomedia .mdb
#' \item EDIGEO - French EDIGEO exchange format
#' \item GFT - Google Fusion Tables
#' \item GME - Google Maps Engine
#' \item SVG - Scalable Vector Graphics
#' \item CouchDB - CouchDB / GeoCouch
#' \item Cloudant - Cloudant / CouchDB
#' \item Idrisi - Idrisi Vector (.vct)
#' \item ARCGEN - Arc/Info Generate
#' \item SEGUKOOA - SEG-P1 / UKOOA P1/90
#' \item SEG-Y - SEG-Y
#' \item ODS - Open Document/ LibreOffice / OpenOffice Spreadsheet
#' \item XLSX - MS Office Open XML spreadsheet
#' \item ElasticSearch - Elastic Search
#' \item Walk - Walk
#' \item CartoDB - CartoDB
#' \item SXF - Storage and eXchange Format
#' \item Selafin - Selafin
#' \item JML - OpenJUMP JML
#' \item PLSCENES - Planet Labs Scenes API
#' \item CSW - OGC CSW (Catalog Search for the Web)
#' \item IDF - INTREST Data Format
#' \item TIGER - U.S. Census TIGER/Line
#' \item AVCBin - Arc/Info Binary Coverage
#' \item AVCE00 - Arc/Info E00 (ASCII) Coverage
#' \item HTTP - HTTP Fetching Wrapper
#' }
#' @references GDAL website: \url{http://www.gdal.org/}
#' @author Alexander Brenning (R interface), Olaf Conrad / Andre Ringeler (SAGA module), Frank Warmerdam (GDAL)
#' @seealso \code{read.ascii.grid}, \code{rsaga.esri.to.sgrd}, \code{read.sgrd}, \code{read.Rd.grid}
#' @keywords spatial interface file
#' @export
rsaga.import.gdal = function( in.grid, out.grid, env = rsaga.env(), ... )
{
if (missing(out.grid)) {
out.grid = set.file.extension(in.grid, "")
out.grid = substr(out.grid, 1, nchar(out.grid) - 1)
}
if (env$version == "2.0.4") {
param = list( GRIDS = out.grid, FILE = in.grid )
} else {
param = list( GRIDS = out.grid, FILES = in.grid )
}
# Module name change with SAGA 2.2.3
module = "GDAL: Import Raster"
if (env$version == "2.2.3"){
module = "Import Raster"
}
rsaga.geoprocessor("io_gdal", module = module,
param = param, env = env, ...)
}
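# Illustrative sketch (not from the original source; file names are made up):
# import a GeoTIFF into SAGA grid format. A multi-band input would instead
# produce elevation_01.sgrd, elevation_02.sgrd, ... as described above.
# rsaga.import.gdal("elevation.tif", "elevation", env = rsaga.env())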
######## Module io_grid ########
#' Convert ESRI ASCII/binary grids to SAGA grids
#'
#' \code{rsaga.esri.to.sgrd} converts grid files from ESRI's ASCII (.asc) and binary (.flt) format to SAGA's (version 2) grid format (.sgrd).
#' @name rsaga.esri.to.sgrd
#' @param in.grids character vector of ESRI ASCII/binary grid files (default file extension: \code{.asc}); files should be located in folder \code{in.path}
#' @param out.sgrds character vector of output SAGA grid files; defaults to \code{in.grids} with the file extension replaced by \code{.sgrd}, which is also the default extension if file names without extension are specified; files will be placed in the current SAGA workspace (default: \code{\link{rsaga.env}()$workspace}, or \code{env$workspace} if an \code{env} argument is provided)
#' @param in.path folder with \code{in.grids}
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#'
#' If multiple \code{in.grids} are converted, the result will be a vector of numerical error codes of the same length, or the combination of the console outputs with \code{c()}.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module 1 from the SAGA library \code{io_grid}.
#' @seealso \code{\link{rsaga.esri.wrapper}} for an efficient way of applying RSAGA to ESRI ASCII/binary grids; \code{\link{rsaga.env}}
#' @keywords spatial interface file
#' @export
rsaga.esri.to.sgrd = function( in.grids,
out.sgrds=set.file.extension(in.grids,".sgrd"), in.path, ... )
{
in.grids = default.file.extension(in.grids,".asc")
out.sgrds = default.file.extension(out.sgrds,".sgrd")
if (!missing(in.path))
in.grids = file.path(in.path,in.grids)
if (length(in.grids) != length(out.sgrds))
stop("must have the same number of input and outpute grids")
res = c()
for (i in 1:length(in.grids))
res = c(res, rsaga.geoprocessor("io_grid", "Import ESRI Arc/Info Grid",
list(FILE=in.grids[i],GRID=out.sgrds[i]),...) )
invisible(res)
}
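# Illustrative sketch (not from the original source; file names are made up):
# convert two ESRI ASCII grids located in 'grids/' to SAGA format in the current
# SAGA workspace; output names default to dem.sgrd and slope.sgrd.
# rsaga.esri.to.sgrd(c("dem.asc", "slope.asc"), in.path = "grids")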
#' Convert SAGA grids to ESRI ASCII/binary grids
#'
#' \code{rsaga.sgrd.to.esri} converts grid files from SAGA's (version 2) grid format (.sgrd) to ESRI's ASCII (.asc) and binary (.flt) format.
#' @name rsaga.sgrd.to.esri
#' @param in.sgrds character vector of SAGA grid files (\code{.sgrd}) to be converted; files are expected to be found in folder \code{\link{rsaga.env}()$workspace}, or, if an optional \code{env} argument is provided, in \code{env$workspace}
#' @param out.grids character vector of ESRI ASCII/float output file names; defaults to \code{in.sgrds} with the file extension being replaced by \code{.asc} or \code{.flt}, depending on \code{format}. Files will be placed in folder \code{out.path}, existing files will be overwritten
#' @param out.path folder for \code{out.grids}
#' @param format output file format, either \code{"ascii"} (default; equivalent: \code{format=1}) for ASCII grids or \code{"binary"} (equivalent: \code{0}) for binary ESRI grids (\code{.flt}).
#' @param georef character: \code{"corner"} (equivalent numeric code: \code{0}) or \code{"center"} (default; equivalent: \code{1}). Determines whether the georeference will be related to the center or corner of its extreme lower left grid cell.
#' @param prec number of digits when writing floating point values to ASCII grid files; either a single number (to be replicated if necessary), or a numeric vector of length \code{length(in.grids)}
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module 0 from the SAGA library \code{io_grid}.
#' @seealso \code{\link{rsaga.esri.wrapper}} for an efficient way of applying RSAGA to ESRI ASCII/binary grids; \code{\link{rsaga.env}}
#' @keywords spatial interface file
#' @export
rsaga.sgrd.to.esri = function( in.sgrds, out.grids, out.path,
format="ascii", georef="corner", prec=5, ... )
{
in.sgrds = default.file.extension(in.sgrds,".sgrd")
format = match.arg.ext(format,choices=c("binary","ascii"),base=0,ignore.case=TRUE,numeric=TRUE)
georef = match.arg.ext(georef,choices=c("corner","center"),base=0,ignore.case=TRUE,numeric=TRUE)
if (missing(out.grids))
out.grids = set.file.extension(in.sgrds, c(".flt",".asc")[format+1])
out.grids = default.file.extension(out.grids, c(".flt",".asc")[format+1])
if (!missing(out.path))
out.grids = file.path(out.path,out.grids)
if (length(out.grids) != length(in.sgrds))
stop("must have the same number of input and outpute grids")
if ((length(prec)==1) & (length(in.sgrds)>1))
prec = rep(prec,length(in.sgrds))
if (length(prec) != length(in.sgrds))
stop("must have same number of in-/output grids and 'prec' parameters (or length(prec)==1)")
res = c()
for (i in 1:length(in.sgrds))
res = c(res, rsaga.geoprocessor("io_grid", "Export ESRI Arc/Info Grid",
list( GRID=in.sgrds[i], FILE=out.grids[i], FORMAT=format, GEOREF=georef, PREC=prec[i]),
...))
invisible(res)
}
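# Illustrative sketch (not from the original source; file names are made up):
# export a SAGA grid to an ESRI ASCII grid with 2 decimal places, writing the
# output to 'export/'; with format = "binary" a .flt file would be written instead.
# rsaga.sgrd.to.esri("dem.sgrd", out.path = "export", format = "ascii", prec = 2)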
#
######## Module ta_morphometry ########
#' Slope, Aspect, Curvature
#'
#' Calculates local morphometric terrain attributes (i.e. slope, aspect, and curvatures). Intended for use with SAGA v 2.1.1+. For older versions use \code{\link{rsaga.local.morphometry}}.
#' @name rsaga.slope.asp.curv
#' @param in.dem input: digital elevation model as SAGA grid file (\code{.sgrd})
#' @param out.slope optional output: slope
#' @param out.aspect optional output: aspect
#' @param out.cgene optional output: general curvature
#' @param out.cprof optional output: profile curvature (vertical curvature; degrees)
#' @param out.cplan optional output: plan curvature (horizontal curvature; degrees)
#' @param out.ctang optional output: tangential curvature (degrees)
#' @param out.clong optional output: longitudinal curvature (degrees); Zevenbergen & Thorne (1987) refer to this as profile curvature
#' @param out.ccros optional output: cross-sectional curvature (degrees); Zevenbergen & Thorne (1987) refer to this as plan curvature
#' @param out.cmini optional output: minimal curvature (degrees)
#' @param out.cmaxi optional output: maximal curvature (degrees)
#' @param out.ctota optional output: total curvature (degrees)
#' @param out.croto optional output: flow line curvature (degrees)
#' @param method character algorithm (see References):
#' \itemize{
#' \item [0] Maximum Slope - Travis et al. (1975) (\code{"maxslope"})
#' \item [1] Max. Triangle Slope - Tarboton (1997) (\code{"maxtriangleslope"})
#' \item [2] Least Squares Fit Plane - Costa-Cabral & Burgess (1996) (\code{"lsqfitplane"})
#' \item [3] Fit 2nd Degree Polynomial - Evans (1979) (\code{"poly2evans"})
#' \item [4] Fit 2nd Degree Polynomial - Heerdegen and Beran (1982) (\code{"poly2heerdegen"})
#' \item [5] Fit 2nd Degree Polynomial - Bauer et al. (1985) (\code{"poly2bauer"})
#' \item [6] default: Fit 2nd Degree Polynomial - Zevenbergen & Thorne (1987) (\code{"poly2zevenbergen"})
#' \item [7] Fit 3rd Degree Polynomial - Haralick (1983) (\code{"poly3haralick"})}
#' @param unit.slope character or numeric (default \code{"radians"}):
#' \itemize{
#' \item [0] \code{"radians"}
#' \item [1] \code{"degrees"}
#' \item [2] \code{"percent"}}
#' @param unit.aspect character or numeric (default is 0, or \code{"radians"}):
#' \itemize{
#' \item [0] \code{"radians"}
#' \item [1] \code{"degrees"}}
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @details Profile and plan curvature calculation (\code{out.cprof}, \code{out.cplan}) changed in SAGA GIS 2.1.1+ compared to earlier versions. See the following thread on sourceforge.net for an ongoing discussion: \url{http://sourceforge.net/p/saga-gis/discussion/354013/thread/e9d07075/#5727}
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @references General references:
#'
#' Jones KH (1998) A comparison of algorithms used to compute hill slope as a property of the DEM. Computers and Geosciences. 24 (4): 315-323.
#'
#' References on specific methods:
#'
#' Maximum Slope:
#'
#' Travis, M.R., Elsner, G.H., Iverson, W.D., Johnson, C.G. (1975): VIEWIT: computation of seen areas, slope, and aspect for land-use planning. USDA F.S. Gen. Tech. Rep. PSW-11/1975, 70 p. Berkeley, California, U.S.A.
#'
#' Maximum Triangle Slope:
#'
#' Tarboton, D.G. (1997): A new method for the determination of flow directions and upslope areas in grid digital elevation models. Water Resources Research, 33(2): 309-319.
#'
#' Least Squares or Best Fit Plane:
#'
#' Beasley, D.B., Huggins, L.F. (1982): ANSWERS: User's manual. U.S. EPA-905/9-82-001, Chicago, IL, 54 pp.
#'
#' Costa-Cabral, M., Burges, S.J. (1994): Digital Elevation Model Networks (DEMON): a model of flow over hillslopes for computation of contributing and dispersal areas. Water Resources Research, 30(6): 1681-1692.
#'
#' Fit 2nd Degree Polynomial:
#'
#' Evans, I.S. (1979): An integrated system of terrain analysis and slope mapping. Final Report on grant DA-ERO-591-73-G0040. University of Durham, England.
#'
#' Bauer, J., Rohdenburg, H., Bork, H.-R. (1985): Ein Digitales Reliefmodell als Vorraussetzung fuer ein deterministisches Modell der Wasser- und Stoff-Fluesse. Landschaftsgenese und Landschaftsoekologie, H. 10, Parameteraufbereitung fuer deterministische Gebiets-Wassermodelle, Grundlagenarbeiten zur Analyse von Agrar-Oekosystemen, eds.: Bork, H.-R., Rohdenburg, H., p. 1-15.
#'
#' Heerdegen, R.G., Beran, M.A. (1982): Quantifying source areas through land surface curvature. Journal of Hydrology, 57.
#'
#' Zevenbergen, L.W., Thorne, C.R. (1987): Quantitative analysis of land surface topography. Earth Surface Processes and Landforms, 12: 47-56.
#'
#' Fit 3rd Degree Polynomial:
#'
#' Haralick, R.M. (1983): Ridge and valley detection on digital images. Computer Vision, Graphics and Image Processing, 22(1): 28-38.
#'
#' For a discussion on the calculation of slope by ArcGIS check these links:
#'
#' \url{http://forums.esri.com/Thread.asp?c=93&f=1734&t=239914}
#'
#' \url{http://webhelp.esri.com/arcgisdesktop/9.2/index.cfm?topicname=how_slope_works}
#' @author Alexander Brenning and Donovan Bangs (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.local.morphometry}}, \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # Simple slope, aspect, and general curvature in degrees:
#' rsaga.slope.asp.curv("lican.sgrd", "slope", "aspect", "curvature",
#' method = "maxslope", unit.slope = "degrees", unit.aspect = "degrees")
#' # same for ASCII grids (default extension .asc):
#' rsaga.esri.wrapper(rsaga.slope.asp.curv,
#' in.dem="lican", out.slope="slope",
#' out.aspect = "aspect", out.cgene = "curvature",
#' method="maxslope", unit.slope = "degrees", unit.aspect = "degrees")
#' }
#' @keywords spatial interface
#' @export
rsaga.slope.asp.curv = function(in.dem,
out.slope, out.aspect, out.cgene,
out.cprof, out.cplan, out.ctang,
out.clong, out.ccros, out.cmini,
out.cmaxi, out.ctota, out.croto,
method = "poly2zevenbergen",
unit.slope = "radians", unit.aspect = "radians",
env = rsaga.env(), ...) {
if(env$version != "2.1.1" & env$version != "2.1.2" &
env$version != "2.1.3" & env$version != "2.1.4" &
env$version != "2.2.0" & env$version != "2.2.1" &
env$version != "2.2.2" & env$version != "2.2.3") {
stop("rsaga.slope.asp.curv only for SAGA GIS 2.1.1+;\n",
"use rsaga.local.morphometry for older versions of SAGA GIS")
}
in.dem = default.file.extension(in.dem, ".sgrd")
method.choices = c("maxslope","maxtriangleslope","lsqfitplane", "poly2evans",
"poly2bauer","poly2heerdegen","poly2zevenbergen","poly3haralick")
if(is.numeric(method) == TRUE)
stop("Numeric 'method' argument not supported with SAGA GIS 2.1.1+;\n",
"Use character name of methods - see help(rsaga.slope.asp.curv) for options")
method = match.arg.ext(method, method.choices, numeric=TRUE, base=0)
unit.slope.choices = c("radians", "degrees", "percent")
unit.slope = match.arg.ext(unit.slope, unit.slope.choices, numeric=TRUE, base=0)
unit.aspect.choices = c("radians", "degrees")
unit.aspect = match.arg.ext(unit.aspect, unit.aspect.choices, numeric=TRUE, base=0)
if (missing(out.aspect)) {
out.aspect = tempfile()
on.exit(unlink(paste(out.aspect,".*",sep="")), add = TRUE)
}
if (missing(out.slope)) {
out.slope = tempfile()
on.exit(unlink(paste(out.slope,".*",sep="")), add = TRUE)
}
param = list(ELEVATION=in.dem, SLOPE=out.slope, ASPECT = out.aspect)
if(!missing(out.cgene))
param = c(param, C_GENE = out.cgene)
if(!missing(out.cprof))
param = c(param, C_PROF = out.cprof)
if(!missing(out.cplan))
param =c(param, C_PLAN = out.cplan)
if(!missing(out.ctang))
param = c(param, C_TANG = out.ctang)
if(!missing(out.clong))
param = c(param, C_LONG = out.clong)
if(!missing(out.ccros))
param = c(param, C_CROS = out.ccros)
if(!missing(out.cmini))
param = c(param, C_MINI = out.cmini)
if(!missing(out.cmaxi))
param = c(param, C_MAXI = out.cmaxi)
if(!missing(out.ctota))
param = c(param, C_TOTA = out.ctota)
if(!missing(out.croto))
param = c(param, C_ROTO = out.croto)
param = c(param, METHOD=method, UNIT_SLOPE=unit.slope, UNIT_ASPECT=unit.aspect)
module = "Slope, Aspect, Curvature"
rsaga.geoprocessor("ta_morphometry", module, param, env = env, ...)
if (!missing(out.cprof) | !missing(out.cplan))
warning("Plan and profile curvature calculations have changed with SAGA 2.1.1+\n",
"See help(rsaga.slope.asp.curv) for more information")
}
#' Local Morphometry
#'
#' Calculates local morphometric terrain attributes (i.e. slope, aspect and curvatures). Intended for use with SAGA versions 2.1.0 and older. Use \code{\link{rsaga.slope.asp.curv}} for SAGA 2.1.1+
#' @name rsaga.local.morphometry
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.slope optional output: slope (in radians)
#' @param out.aspect optional output: aspect (in radians; north=0, clockwise angles)
#' @param out.curv optional output: curvature
#' @param out.hcurv optional output: horizontal curvature (plan curvature)
#' @param out.vcurv optional output: vertical curvature (profile curvature)
#' @param method character (or numeric): algorithm (see References):
#' \itemize{
#' \item [0] Maximum Slope - Travis et al. (1975) (\code{"maxslope"}, or 0)
#' \item [1] Max. Triangle Slope - Tarboton (1997) (\code{"maxtriangleslope"}, or 1)
#' \item [2] Least Squares Fit Plane - Costa-Cabral and Burgess (1996) (\code{"lsqfitplane"}, or 2)
#' \item [3] Fit 2nd Degree Polynomial - Bauer et al. (1985) (\code{"poly2bauer"}, or 3)
#' \item [4] Fit 2nd Degree Polynomial - Heerdegen and Beran (1982) (\code{"poly2heerdegen"}, or 4)
#' \item [5] default: Fit 2nd Degree Polynomial - Zevenbergen and Thorne (1987) (\code{"poly2zevenbergen"}, or 5)
#' \item [6] Fit 3rd Degree Polynomial - Haralick (1983) (\code{"poly3haralick"}, or 6).}
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @references For references and algorithm changes in SAGA GIS 2.1.1+ see \code{\link{rsaga.slope.asp.curv}}.
#' @author Alexander Brenning and Donovan Bangs (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.slope.asp.curv}}, \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # a simple slope algorithm:
#' rsaga.slope("lican.sgrd","slope","maxslope")
#' # same for ASCII grids (default extension .asc):
#' rsaga.esri.wrapper(rsaga.slope,in.dem="lican",out.slope="slope",method="maxslope")
#' }
#' @keywords spatial interface
#' @export
rsaga.local.morphometry = function( in.dem,
out.slope, out.aspect, out.curv, out.hcurv, out.vcurv,
method = "poly2zevenbergen", env = rsaga.env(), ...)
{
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.slope=out.slope, out.aspect=out.aspect,
out.cgene=out.curv, out.cplan=out.hcurv, out.cprof=out.vcurv,
method=method, env=env, ... )
warning("rsaga.local.morphometry specific to SAGA versions < 2.1.1\n",
"Translating provided arguments and using rsaga.slope.asp.curv\n",
"Note: order of numeric methods have changed with SAGA 2.1.1+")
} else {
in.dem = default.file.extension(in.dem,".sgrd")
choices = c("maxslope","maxtriangleslope","lsqfitplane",
"poly2bauer","poly2heerdegen","poly2zevenbergen","poly3haralick")
method = match.arg.ext(method,choices,numeric=TRUE,base=0)
if (missing(out.aspect)) {
out.aspect = tempfile()
on.exit(unlink(paste(out.aspect,".*",sep="")), add = TRUE)
}
if (missing(out.slope)) {
out.slope = tempfile()
on.exit(unlink(paste(out.slope,".*",sep="")), add = TRUE)
}
param = list(ELEVATION=in.dem, SLOPE=out.slope, ASPECT=out.aspect)
if (!missing(out.curv))
param = c(param, CURV=out.curv)
if (!missing(out.hcurv))
param = c(param, HCURV=out.hcurv)
if (!missing(out.vcurv))
param = c(param, VCURV=out.vcurv)
param = c(param, METHOD=method)
module = "Slope, Aspect, Curvature"
if (any(c("2.0.4","2.0.5","2.0.6") == env$version)) module = "Local Morphometry"
rsaga.geoprocessor("ta_morphometry", module, param, env = env, ...)
}
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))){
if (!missing(out.hcurv) | !missing(out.vcurv))
warning("Plan and profile curvature calculations have changed with SAGA 2.1.1+\n",
"See help(rsaga.slope.asp.curv) for more information")
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.slope
#' @export
rsaga.slope = function( in.dem, out.slope, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.slope))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.slope=out.slope, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.slope=out.slope, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.aspect
#' @export
rsaga.aspect = function( in.dem, out.aspect, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.aspect))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.aspect=out.aspect, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.aspect=out.aspect, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.curvature
#' @export
rsaga.curvature = function( in.dem, out.curv, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.curv))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.cgene=out.curv, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.curv=out.curv, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.plan.curvature
#' @export
rsaga.plan.curvature = function( in.dem, out.hcurv, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.hcurv))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.cplan=out.hcurv, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.hcurv=out.hcurv, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.profile.curvature
#' @export
rsaga.profile.curvature = function( in.dem, out.vcurv, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.vcurv))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.cprof=out.vcurv, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.vcurv=out.vcurv, method=method, env = env, ... )
}
}
######## Module ta_preprocessor ########
#' Fill Sinks
#'
#' Several methods for filling closed depressions in digital elevation models that would affect hydrological modeling.
#' @name rsaga.fill.sinks
#' @param in.dem Input: digital elevation model (DEM) as SAGA grid file (default extension: \code{.sgrd}).
#' @param out.dem Output: filled, depression-free DEM (SAGA grid file). Existing files will be overwritten!
#' @param method The depression filling algorithm to be used (character). One of \code{"planchon.darboux.2001"} (default), \code{"wang.liu.2006"}, or \code{"xxl.wang.liu.2006"}.
#' @param out.flowdir (only for \code{"wang.liu.2006"}): Optional output grid file for computed flow directions (see Notes).
#' @param out.wshed (only for \code{"wang.liu.2006"}): Optional output grid file for watershed basins.
#' @param minslope Minimum slope angle (in degree) preserved between adjacent grid cells (default value of \code{0.01} only for \code{method="planchon.darboux.2001"}, otherwise no default).
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details This function bundles three SAGA modules for filling sinks using three different algorithms (\code{method} argument).
#'
#' \code{"planchon.darboux.2001"}: The algorithm of Planchon and Darboux (2001) consists of increasing the elevation of pixels in closed depressions until the sink disappears and a mininum slope angle of \code{minslope} (default: \code{0.01} degree) is established.
#'
#' \code{"wang.liu.2006"}: This module uses an algorithm proposed by Wang and Liu (2006) to identify and fill surface depressions in DEMs. The method was enhanced to allow the creation of hydrologically sound elevation models, i.e. not only to fill the depressions but also to preserve a downward slope along the flow path. If desired, this is accomplished by preserving a minimum slope gradient (and thus elevation difference) between cells. This is the fully featured version of the module creating a depression-free DEM, a flow path grid and a grid with watershed basins. If you encounter problems processing large data sets (e.g. LIDAR data) with this module try the basic version (\code{xxl.wang.lui.2006}).
#'
#' \code{"xxl.wang.liu.2006"}: This modified algorithm after Wang and Liu (2006) is designed to work on large data sets.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#'
#' The function writes SAGA grid files containing the depression-free preprocessed DEM and, optionally, the flow directions and watershed basins.
#' @references Planchon, O., and F. Darboux (2001): A fast, simple and versatile algorithm to fill the depressions of digital elevation models. Catena 46: 159-176.
#'
#' Wang, L. & H. Liu (2006): An efficient method for identifying and filling surface depressions in digital elevation models for hydrologic analysis and modelling. International Journal of Geographical Information Science, Vol. 20, No. 2: 193-213.
#' @author Alexander Brenning (R interface), Volker Wichmann (SAGA module)
#' @note The flow directions are coded as 0 = north, 1 = northeast, 2 = east, ..., 7 = northwest.
#'
#' If \code{minslope=0}, depressions will only be filled until a horizontal surface is established, which may not be helpful for hydrological modeling.
#' @seealso \code{\link{rsaga.sink.removal}}, \code{\link{rsaga.sink.route}}.
#' @keywords spatial interface
#' @export
rsaga.fill.sinks = function(in.dem,out.dem,
method="planchon.darboux.2001", out.flowdir, out.wshed, minslope, ...)
{
stopifnot(is.character(method))
method = match.arg.ext(method, ignore.case=TRUE, numeric=TRUE, base=2,
choices=c("planchon.darboux.2001","wang.liu.2006","xxl.wang.liu.2006"))
in.dem = default.file.extension(in.dem,".sgrd")
stopifnot(!missing(out.dem))
if (missing(minslope)) minslope = NULL
if (method==2) {
param = list( DEM=in.dem, RESULT=out.dem )
if (is.null(minslope)) minslope = 0.01  # minslope was set to NULL above if not supplied
minslope = as.numeric(minslope)
method = "Fill Sinks (Planchon/Darboux, 2001)"
} else if (method==3) {
if (missing(out.flowdir)) {
out.flowdir = tempfile()
on.exit(unlink(paste(out.flowdir,".*",sep="")), add = TRUE)
}
if (missing(out.wshed)) {
out.wshed = tempfile()
on.exit(unlink(paste(out.wshed,".*",sep="")), add = TRUE)
}
param = list(ELEV=in.dem, FILLED=out.dem, FDIR=out.flowdir, WSHED=out.wshed)
method = "Fill Sinks (Wang & Liu)"
} else if (method==4) {
param = list(ELEV=in.dem, FILLED=out.dem)
method = "Fill Sinks XXL (Wang & Liu)"
}
if (!is.null(minslope)) param = c( param, MINSLOPE=minslope )
rsaga.geoprocessor("ta_preprocessor", method, param, ...)
}
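# Illustrative sketch (not from the original source; file names are made up):
# fill depressions with the Wang & Liu (2006) algorithm, preserving a minimum
# downward slope of 0.1 degree and keeping the computed flow directions.
# rsaga.fill.sinks("dem.sgrd", "dem_filled.sgrd", method = "wang.liu.2006",
#                  out.flowdir = "flowdir.sgrd", minslope = 0.1)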
#' Sink Drainage Route Detection
#'
#' Sink drainage route detection.
#' @name rsaga.sink.route
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.sinkroute output: sink route grid file: non-sinks obtain a value of 0, sinks are assigned an integer between 0 and 8 indicating the direction to which flow from this sink should be routed
#' @param threshold logical: use a threshold value?
#' @param thrsheight numeric: threshold value (default: \code{100})
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note I assume that flow directions are coded as 0 = north, 1 = northeast, 2 = east, ..., 7 = northwest, as in \code{\link{rsaga.fill.sinks}}.
#' @seealso \code{\link{rsaga.sink.removal}}
#' @examples
#' \dontrun{rsaga.sink.route("dem","sinkroute")
#' rsaga.sink.removal("dem","sinkroute","dem-preproc",method="deepen")}
#' @keywords spatial interface
#' @export
rsaga.sink.route = function(in.dem, out.sinkroute,
threshold, thrsheight = 100, ...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( ELEVATION=in.dem, SINKROUTE=out.sinkroute )
if (!missing(threshold)) {
if (threshold) param = c( param, THRESHOLD="" )
}
# I guess thrsheight is redundant if threshold is missing/false:
param = c( param, THRSHEIGHT=as.numeric(thrsheight) )
rsaga.geoprocessor("ta_preprocessor", "Sink Drainage Route Detection", param, ...)
# was: module = 0
}
#' Sink Removal
#'
#' Remove sinks from a digital elevation model by deepening drainage routes or filling sinks.
#' @name rsaga.sink.removal
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param in.sinkroute optional input: sink route grid file
#' @param out.dem output: modified DEM
#' @param method character string or numeric value specifying the algorithm (partial string matching will be applied): \code{"deepen drainage routes"} (or 0): reduce the elevation of pixels in order to achieve drainage out of the former sinks; \code{"fill sinks"} (or 1): fill sinks until none are left
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module 1 from SAGA library \code{ta_preprocessor}.
#' @seealso \code{\link{rsaga.sink.route}}, \code{\link{rsaga.fill.sinks}}
#' @examples
#' \dontrun{rsaga.sink.route("dem","sinkroute")
#' rsaga.sink.removal("dem","sinkroute","dem-preproc",method="deepen")}
#' @keywords spatial interface
#' @export
rsaga.sink.removal = function(in.dem,in.sinkroute,out.dem,method="fill",...)
{
in.dem = default.file.extension(in.dem,".sgrd")
method = match.arg.ext(method,c("deepen drainage routes","fill sinks"),ignore.case=TRUE,numeric=TRUE)
param = list( DEM=in.dem )
if (!missing(in.sinkroute)) {
in.sinkroute = default.file.extension(in.sinkroute,".sgrd")
param = c(param, SINKROUTE=in.sinkroute)
}
param = c( param, DEM_PREPROC=out.dem, METHOD=method )
rsaga.geoprocessor("ta_preprocessor", "Sink Removal", param, ...)
}
######## Module grid_tools ########
#' SAGA Modules Close Gaps and Close One Cell Gaps
#'
#' Close (Interpolate) Gaps
#' @name rsaga.close.gaps
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.dem output: DEM grid file without no-data values (gaps). Existing files will be overwritten!
#' @param threshold tension threshold for adjusting the interpolator (default: 0.1)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @details \code{rsaga.close.one.cell.gaps} only fills gaps whose neighbor grid cells have non-missing data.
#'
#' In \code{rsaga.close.gaps}, larger tension thresholds can be used to reduce overshoots and undershoots in the surfaces used to fill (interpolate) the gaps.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses modules 7 (\code{rsaga.close.gaps}) and 6 (\code{rsaga.close.one.cell.gaps}) from the SAGA library \code{grid_tools}.
#'
#' SAGA GIS 2.0.5+ has a new additional module \code{Close Gaps with Spline}, which
#' can be accessed using \code{\link{rsaga.geoprocessor}} (currently no R wrapper
#' available). See \code{rsaga.get.usage("grid_tools","Close Gaps with Spline")}
#' or in version 2.1.0+ call \code{rsaga.html.help("grid_tools","Close Gaps with Spline")}.
#' @seealso \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # using SAGA grids:
#' rsaga.close.gaps("rawdem.sgrd","dem.sgrd")
#' # using ASCII grids:
#' rsaga.esri.wrapper(rsaga.close.gaps,in.dem="rawdem",out.dem="dem")
#' }
#' @keywords spatial interface
#' @export
rsaga.close.gaps = function(in.dem,out.dem,threshold=0.1,...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( INPUT=in.dem, RESULT=out.dem, THRESHOLD=as.numeric(threshold) )
rsaga.geoprocessor("grid_tools", "Close Gaps", param, ...)
}
#' @rdname rsaga.close.gaps
#' @name rsaga.close.one.cell.gaps
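#' @examples
#' \dontrun{
#' # minimal sketch (grid file names are placeholders):
#' # fill isolated single-cell gaps in a DEM
#' rsaga.close.one.cell.gaps("rawdem.sgrd", "dem-filled.sgrd")
#' }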
#' @keywords spatial interface
#' @export
rsaga.close.one.cell.gaps = function(in.dem,out.dem,...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( INPUT = in.dem, RESULT = out.dem )
rsaga.geoprocessor("grid_tools", "Close One Cell Gaps",
param, ...)
}
######## Module ta_lighting ########
#' Analytical hillshading
#'
#' Analytical hillshading calculation.
#' @name rsaga.hillshade
#' @param in.dem Input digital elevation model (DEM) as SAGA grid file (default extension: \code{.sgrd}).
#' @param out.grid Output hillshading grid (SAGA grid file). Existing files will be overwritten!
#' @param method Available choices (character or numeric): \code{"standard"} (or \code{0} - default), \code{"max90deg.standard"} (\code{1}), \code{"combined.shading"} (\code{2}), \code{"ray.tracing"} (\code{3}). See Details.
#' @param azimuth Direction of the light source, measured in degree clockwise from the north direction; default 315, i.e. northwest.
#' @param declination Declination of the light source, measured in degree above the horizon (default 45).
#' @param exaggeration Vertical exaggeration of elevation (default: 4). The terrain exaggeration factor makes it possible to increase the shading contrast in flat areas.
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details The Analytical Hillshading algorithm is based on the angle between the surface and the incoming light beams, measured in radians.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note While the default azimuth of 315 degree (northwest) is not physically meaningful in the northern hemisphere, a northwesterly light source is required to properly depict relief in hillshading images. A physically correct southerly light source results in a hillshade that most people would perceive as inverted: hills look like depressions, mountain chains like troughs.
#' @seealso \code{\link{rsaga.solar.radiation}}, \code{\link{rsaga.insolation}}
#' @examples
#' \dontrun{rsaga.hillshade("dem.sgrd","hillshade")}
#' @keywords spatial interface
#' @export
rsaga.hillshade = function(in.dem, out.grid,
method="standard", azimuth=315, declination=45, exaggeration=4, ...)
{
in.dem = default.file.extension(in.dem,".sgrd")
out.grid = default.file.extension(out.grid,".sgrd")
method = match.arg.ext(method, numeric=TRUE, ignore.case=TRUE, base=0,
choices=c("standard","max90deg.standard","combined.shading","ray.tracing"))
param = list(ELEVATION=in.dem, SHADE=out.grid, METHOD=method,
AZIMUTH=azimuth, DECLINATION=declination, EXAGGERATION=exaggeration)
rsaga.geoprocessor("ta_lighting", "Analytical Hillshading", param, ...)
# was: module = 0
}
#' Potential incoming solar radiation
#'
#' This function calculates the potential incoming solar radiation in an area using different atmospheric models; module available in SAGA GIS 2.0.6+.
#' @name rsaga.pisr
#' @param in.dem name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param in.svf.grid Optional input grid in SAGA format: Sky View Factor; see also \code{local.svf}
#' @param in.vapour.grid Optional input grid in SAGA format: Water vapour pressure (mbar); see also argument \code{hgt.water.vapour.pressure}
#' @param in.latitude.grid Optional input grid in SAGA format: Latitude (degree) of each grid cell
#' @param in.longitude.grid see \code{in.latitude.grid}
#' @param out.direct.grid Output grid: Direct insolation (unit selected by \code{unit} argument)
#' @param out.diffuse.grid Output grid: Diffuse insolation
#' @param out.total.grid Optional output grid: Total insolation, i.e. sum of direct and diffuse incoming solar radiation
#' @param out.ratio.grid Optional output grid: Direct to diffuse ratio
#' @param out.duration Optional output grid: Duration of insolation
#' @param out.sunrise Optional output grid: time of sunrise; only calculated if time span is set to single day
#' @param out.sunset Time of sunset; see \code{out.sunrise}
#' @param local.svf logical (default: \code{TRUE}); if \code{TRUE}, use a sky view factor based on local slope (after Oke, 1988) if no sky view factor grid is provided in \code{in.svf.grid}
#' @param latitude Geographical latitude in degree North (negative values indicate southern hemisphere)
#' @param unit unit of insolation output grids: \code{"kWh/m2"} (default) \code{"kJ/m2"}, or \code{"J/cm2"}
#' @param solconst solar constant, defaults to 1367 W/m2
#' @param enable.bending logical (default: \code{FALSE}): incorporate effects of planetary bending?
#' @param bending.radius Planetary radius, default \code{6366737.96}
#' @param bending.lat.offset if bending is enabled: latitudinal reference is \code{"user"}-defined (default), or relative to \code{"top"}, \code{"center"} or \code{"bottom"} of grid?
#' @param bending.lat.ref.user user-defined lat. reference for bending, see \code{bending.lat.offset}
#' @param bending.lon.offset longitudinal reference, i.e. local time, is \code{"user"}-defined, or relative to \code{"top"}, \code{"center"} (default) or \code{"bottom"} of grid?
#' @param bending.lon.ref.user user-defined reference for local time (Details??)
#' @param method specifies how the atmospheric components should be accounted for: either based on the height of atmosphere and vapour pressure (\code{"height"}, or numeric code 0), or air pressure, water and dust content (\code{"components"}, code 1), or lumped atmospheric transmittance (\code{"lumped"}, code \code{2})
#' @param hgt.atmosphere Height of atmosphere (in m); default 12000 m
#' @param hgt.water.vapour.pressure Water vapour pressure in mbar (default 10 mbar); This value is used if no vapour pressure grid is given in argument \code{in.vapour.grid}
#' @param cmp.pressure atmospheric pressure in mbar, defaults to 1013 mbar
#' @param cmp.water.content water content of a vertical slice of the atmosphere in cm: between 1.5 and 1.7cm, average 1.68cm (default)
#' @param cmp.dust dust factor in ppm; defaults to 100 ppm
#' @param lmp.transmittance transmittance of the atmosphere in percent; usually between 60 (humid areas) and 80 percent (deserts)
#' @param time.range numeric vector of length 2: time span (hours of the day) for numerical integration
#' @param time.step time step in hours for numerical integration
#' @param start.date list of length two, giving the start date in \code{day} and \code{month} components as numbers; these numbers are one-based (SAGA_CMD uses zero-based numbers internally), i.e. Jan. 1st is \code{list(day=1,month=1)}
#' @param end.date see \code{start.date}
#' @param day.step if \code{days} indicates a range of days, this specifies the time step (number of days) for calculating the incoming solar radiation
#' @param env RSAGA geoprocessing environment obtained with \code{\link{rsaga.env}}; this argument is required for version control (see Note)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details According to SAGA GIS 2.0.7 documentation, "Most options should do well, but TAPES-G based diffuse irradiance calculation ("Atmospheric Effects" methods 2 and 3) needs further revision!" I.e. be careful with \code{method = "components"} and \code{method = "lumped"}.
#' @references
#' Boehner, J., Antonic, O. (2009): Land surface parameters specific to topo-climatology. In: Hengl, T. and Reuter, H. I. (eds.): Geomorphometry - Concepts, Software, Applications. Elsevier.
#'
#' Oke, T.R. (1988): Boundary layer climates. London, Taylor and Francis.
#'
#' Wilson, J.P., Gallant, J.C. (eds.), 2000: Terrain analysis - principles and applications. New York, John Wiley and Sons.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This module is computationally very intensive (depending on the size of the grid and the time resolution, of course). The performance seems to have improved considerably in SAGA GIS 2.1.0, which by default runs this module in multicore mode (at least the release candidate 1 for Windows does).
#'
#' SAGA_CMD uses zero-based days and months, but this R function uses the standard one-based days and months (e.g. day 1 is the first day of the month, month 1 is January) and translates to the SAGA system.
#'
#' This function uses module Potential Incoming Solar Radiation from SAGA library \code{ta_lighting} in SAGA version 2.0.6+.
#' @seealso \code{\link{rsaga.hillshade}}; for similar modules in older SAGA versions (pre-2.0.6) see \code{\link{rsaga.solar.radiation}} and \code{\link{rsaga.insolation}}
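#' @examples
#' \dontrun{
#' # Minimal sketch; "dem.sgrd" and the output grid names are placeholders.
#' # Potential incoming solar radiation on March 21, integrated in half-hour
#' # steps, using the lumped atmospheric transmittance model:
#' rsaga.pisr(in.dem = "dem.sgrd",
#'    out.direct.grid = "direct", out.diffuse.grid = "diffuse",
#'    out.total.grid = "total", latitude = 43,
#'    method = "lumped", lmp.transmittance = 70,
#'    start.date = list(day = 21, month = 3), time.step = 0.5)
#' }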
#' @keywords spatial interface
#' @export
rsaga.pisr = function(in.dem, in.svf.grid = NULL, in.vapour.grid = NULL,
in.latitude.grid = NULL, in.longitude.grid = NULL,
out.direct.grid, out.diffuse.grid, out.total.grid = NULL,
out.ratio.grid = NULL, out.duration, out.sunrise, out.sunset,
local.svf = TRUE, latitude,
unit=c("kWh/m2","kJ/m2","J/cm2"), solconst=1367.0,
enable.bending = FALSE, bending.radius = 6366737.96,
bending.lat.offset = "user", bending.lat.ref.user = 0,
bending.lon.offset = "center", bending.lon.ref.user = 0,
method = c("height","components","lumped"),
hgt.atmosphere = 12000, hgt.water.vapour.pressure = 10,
cmp.pressure = 1013, cmp.water.content = 1.68, cmp.dust = 100,
lmp.transmittance = 70,
time.range = c(0,24), time.step = 0.5,
start.date = list(day=21, month=3), end.date = NULL, day.step = 5,
env = rsaga.env(), ...)
{
if ( (env$version == "2.0.4" | env$version == "2.0.5") ) {
stop("rsaga.pisr only for SAGA GIS 2.0.6 - 2.2.1;\n",
" use rsaga.solar.radiation for older versions of SAGA GIS")
}
if ( (env$version == "2.2.2" | env$version == "2.2.3") ) {
stop("rsaga.pisr only for SAGA GIS 2.0.6 - 2.2.1:\n",
" use rsaga.pisr2 for newer versions of SAGA GIS")
}
in.dem = default.file.extension(in.dem,".sgrd")
if (!is.null(in.svf.grid)) in.svf.grid = default.file.extension(in.svf.grid,".sgrd")
if (!is.null(in.vapour.grid)) in.vapour.grid = default.file.extension(in.vapour.grid,".sgrd")
if (!is.null(in.latitude.grid)) in.latitude.grid = default.file.extension(in.latitude.grid,".sgrd")
if (!is.null(in.longitude.grid)) in.longitude.grid = default.file.extension(in.longitude.grid,".sgrd")
if (missing(out.direct.grid)) {
out.direct.grid = tempfile()
on.exit(unlink(paste(out.direct.grid,".*",sep="")), add = TRUE)
}
if (missing(out.diffuse.grid)) {
out.diffuse.grid = tempfile()
on.exit(unlink(paste(out.diffuse.grid,".*",sep="")), add = TRUE)
}
if (missing(out.total.grid)) {
out.total.grid = tempfile()
on.exit(unlink(paste(out.total.grid,".*",sep="")), add = TRUE)
}
if (missing(out.ratio.grid)) {
out.ratio.grid = tempfile()
on.exit(unlink(paste(out.ratio.grid,".*",sep="")), add = TRUE)
}
if (missing(out.duration)) {
out.duration = tempfile()
on.exit(unlink(paste(out.duration,".*",sep="")), add = TRUE)
}
if (missing(out.sunrise)) {
out.sunrise = tempfile()
on.exit(unlink(paste(out.sunrise,".*",sep="")), add = TRUE)
}
if (missing(out.sunset)) {
out.sunset = tempfile()
on.exit(unlink(paste(out.sunset,".*",sep="")), add = TRUE)
}
unit = match.arg.ext(unit,numeric=TRUE,ignore.case=TRUE,base=0)
method = match.arg.ext(method, numeric = TRUE, ignore.case = TRUE, base = 0)
bending.lat.offset = match.arg.ext(bending.lat.offset, c("bottom","center","top","user"),
numeric = TRUE, ignore.case = TRUE, base = 0)
bending.lon.offset = match.arg.ext(bending.lon.offset, c("left","center","right","user"),
numeric = TRUE, ignore.case = TRUE, base = 0)
if (!is.null(latitude))
stopifnot( (latitude>=-90) & (latitude<=90) )
stopifnot( length(time.range)==2 )
stopifnot( all(time.range>=0) & all(time.range<=24) & (time.range[1]<time.range[2]) )
stopifnot( (time.step>0) & (time.step<=12) )
stopifnot( (day.step>0) & (day.step<=100) )
stopifnot( is.logical(local.svf) )
stopifnot( is.logical(enable.bending) )
param = list( GRD_DEM=in.dem,
GRD_DIRECT = out.direct.grid, GRD_DIFFUS = out.diffuse.grid,
GRD_TOTAL = out.total.grid, GRD_RATIO = out.ratio.grid,
DURATION = out.duration,
SUNRISE = out.sunrise, SUNSET = out.sunset,
UNITS = unit, SOLARCONST = as.numeric(solconst), LOCALSVF = local.svf,
BENDING_BENDING = enable.bending,
METHOD = method,
#LATITUDE = as.numeric(latitude), # removed 27 Dec 2011
DHOUR = time.step )
# Added 27 Dec 2011:
if (!is.null(latitude)) {
stopifnot((latitude >= -90) & (latitude <= 90))
param = c(param, LATITUDE = as.numeric(latitude))
}
if (!is.null(in.svf.grid)) param = c( param, GRD_SVF=in.svf.grid )
if (!is.null(in.vapour.grid)) param = c( param, GRD_VAPOUR=in.vapour.grid )
stopifnot( !is.null(latitude) | !is.null(in.latitude.grid) ) # added 27 Dec 2011
if (!is.null(in.latitude.grid)) param = c( param, GRD_LAT=in.latitude.grid )
if (!is.null(in.longitude.grid)) param = c( param, GRD_LON=in.longitude.grid )
if (enable.bending) {
param = c( param,
BENDING_RADIUS = bending.radius,
BENDING_LAT_OFFSET = bending.lat.offset,
BENDING_LAT_REF_USER = bending.lat.ref.user,
BENDING_LON_OFFSET = bending.lon.offset,
BENDING_LON_REF_USER = bending.lon.ref.user )
}
if (method == 0) {
param = c(param, ATMOSPHERE = as.numeric(hgt.atmosphere),
VAPOUR = as.numeric(hgt.water.vapour.pressure))
} else if (method == 1) {
param = c(param, PRESSURE = as.numeric(cmp.pressure),
WATER = as.numeric(cmp.water.content), DUST = as.numeric(cmp.dust))
} else if (method == 2) {
stopifnot( (lmp.transmittance>=0) & (lmp.transmittance<=100) )
param = c(param, LUMPED = as.numeric(lmp.transmittance))
} else stopifnot( method %in% c(0:2) )
if (is.null(start.date)) { # one year
stopifnot( is.null(end.date) )
param = c( param, PERIOD = 2, DAY_A = 0, MONTH_A = 0,
DAY_B = 30, MONTH_B = 11 )
} else {
if (is.null(end.date)) {
param = c( param, PERIOD = 1 ) # single day ... or moment (later)
} else param = c( param, PERIOD = 2 )
stopifnot(is.list(start.date))
stopifnot(length(start.date) == 2)
    stopifnot(all(names(start.date) %in% c("day","month")))
stopifnot( (start.date$day>=1) & (start.date$day<=31) )
stopifnot( (start.date$month>=1) & (start.date$month<=12) )
param = c( param, DAY_A = start.date$day - 1,
MON_A = start.date$month - 1 )
if (is.null(end.date)) {
# check if moment:
stopifnot(length(time.range) <= 2)
if (length(time.range) == 2) {
if (time.range[2] == time.range[1])
time.range = time.range[1]
}
if (length(time.range) == 1) {
# moment
param$PERIOD = 0
stopifnot(time.range >= 0 & time.range <= 24)
param = c(param, MOMENT = round(time.range,3))
} else {
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
} else {
# range of days:
stopifnot(is.list(end.date))
stopifnot(length(end.date) == 2)
    stopifnot(all(names(end.date) %in% c("day","month")))
stopifnot( (end.date$day>=1) & (end.date$day<=31) )
stopifnot( (end.date$month>=1) & (end.date$month<=12) )
param = c( param, DAY_B = end.date$day - 1,
MON_B = end.date$month - 1,
DDAYS = day.step )
if (is.null(time.range)) time.range = c(0,24)
stopifnot(length(time.range) == 2)
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Potential Incoming Solar Radiation", # = 2
param = param, env = env, ...)
}
#' Potential incoming solar radiation SAGA 2.2.2+
#'
#' This function calculates the potential incoming solar radiation in an area using different atmospheric models; This function reflects changes to the module with SAGA 2.2.2+.
#' For SAGA versions 2.0.6 to 2.2.1 please see \code{\link{rsaga.pisr}}.
#' @name rsaga.pisr2
#' @param in.dem name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param in.svf.grid Optional input grid in SAGA format: Sky View Factor; see also \code{local.svf}
#' @param in.vapour.grid Optional input grid in SAGA format: Water vapour pressure (mbar), for use with \code{method = "height"}; default 10 mbar
#' @param in.linke.grid Optional input grid in SAGA format: Linke turbidity coefficient, for use with \code{method = "hofierka"}; default 3.0
#' @param out.direct.grid Output grid: Direct insolation (unit selected by \code{unit} argument)
#' @param out.diffuse.grid Output grid: Diffuse insolation
#' @param out.total.grid Optional output grid: Total insolation, i.e. sum of direct and diffuse incoming solar radiation
#' @param out.ratio.grid Optional output grid: Direct to diffuse ratio
#' @param out.duration Optional output grid: Duration of insolation
#' @param out.sunrise Optional output grid: time of sunrise; only calculated if time span is set to single day
#' @param out.sunset Time of sunset; see \code{out.sunrise}
#' @param local.svf logical (default: \code{TRUE}); if \code{TRUE}, use a sky view factor based on local slope (after Oke, 1988) if no sky view factor grid is provided in \code{in.svf.grid}
#' @param location specifies whether to use the constant latitude supplied by \code{latitude} below (\code{"latitude"} or code \code{0}; default) or latitudes calculated from the grid system (\code{"grid"} or code \code{1})
#' @param latitude Geographical latitude in degree North (negative values indicate southern hemisphere)
#' @param unit unit of insolation output grids: \code{"kWh/m2"} (default) \code{"kJ/m2"}, or \code{"J/cm2"}
#' @param solconst solar constant, defaults to 1367 W/m2
#' @param method specifies how the atmospheric components should be accounted for: either based on the height of atmosphere and vapour pressure (\code{"height"}, or numeric code 0), or air pressure, water and dust content (\code{"components"}, code 1), or lumped atmospheric transmittance (\code{"lumped"}, code \code{2}), or by the method of Hofierka and Suri, 2009 (\code{"hofierka"}, code \code{3}). Default: \code{"lumped"}.
#' @param hgt.atmosphere Height of atmosphere (in m); default 12000 m. For use with \code{method = "height"}
#' @param cmp.pressure atmospheric pressure in mbar, defaults to 1013 mbar. For use with \code{method = "components"}
#' @param cmp.water.content water content of a vertical slice of the atmosphere in cm: between 1.5 and 1.7cm, average 1.68cm (default). For use with \code{method = "components"}
#' @param cmp.dust dust factor in ppm; defaults to 100 ppm. For use with \code{method = "components"}
#' @param lmp.transmittance transmittance of the atmosphere in percent; usually between 60 (humid areas) and 80 percent (deserts)
#' @param time.range numeric vector of length 2: time span (hours of the day) for numerical integration
#' @param time.step time step in hours for numerical integration
#' @param start.date list of length three, giving the start date in \code{day}, \code{month}, and \code{year} components as numbers; month is one-based (SAGA_CMD uses zero-based numbers internally), i.e. Jan. 1st 2015 is \code{list(day=1,month=1,year=2015)}
#' @param end.date see \code{start.date}
#' @param day.step if \code{days} indicates a range of days, this specifies the time step (number of days) for calculating the incoming solar radiation
#' @param env RSAGA geoprocessing environment obtained with \code{\link{rsaga.env}}; this argument is required for version control (see Note)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details According to SAGA GIS 2.0.7 documentation, "Most options should do well, but TAPES-G based diffuse irradiance calculation ("Atmospheric Effects" methods 2 and 3) needs further revision!" I.e. be careful with \code{method = "components"} and \code{method = "lumped"}.
#' @references
#' Boehner, J., Antonic, O. (2009): Land surface parameters specific to topo-climatology. In: Hengl, T. and Reuter, H. I. (eds.): Geomorphometry - Concepts, Software, Applications. Elsevier.
#'
#' Oke, T.R. (1988): Boundary layer climates. London, Taylor and Francis.
#'
#' Wilson, J.P., Gallant, J.C. (eds.), 2000: Terrain analysis - principles and applications. New York, John Wiley and Sons.
#'
#' Hofierka, J., Suri, M. (2002): The solar radiation model for Open source GIS: implementation and applications. International GRASS users conference in Trento, Italy, September 2002
#' @author Alexander Brenning & Donovan Bangs (R interface), Olaf Conrad (SAGA module)
#' @note
#' SAGA_CMD uses zero-based months, but this R function uses the standard one-based months (e.g. day 1 is the first day of the month, month 1 is January) and translates to the SAGA system.
#'
#' This function uses module Potential Incoming Solar Radiation from SAGA library \code{ta_lighting} in SAGA version 2.0.6+.
#' Changes to the module with SAGA 2.2.2+ include adding \code{year} to the \code{*.date} arguments to allow calculation across years.
#' The method of Hofierka and Suri (2009) is added, which uses the Linke turbidity coefficient.
#' Duration of insolation (\code{"out.duration"}) is only calculated when the time period is set to a single day.
#' @seealso \code{\link{rsaga.pisr}}; for similar modules in older SAGA versions (pre-2.0.6) see \code{\link{rsaga.solar.radiation}} and \code{\link{rsaga.insolation}}; \code{\link{rsaga.hillshade}}
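#' @examples
#' \dontrun{
#' # Minimal sketch; grid file names are placeholders.
#' # Total insolation on July 1, 2015 at 53 degrees North, integrated in
#' # half-hour steps, using the lumped atmospheric transmittance model:
#' rsaga.pisr2(in.dem = "dem.sgrd",
#'    out.direct.grid = "direct", out.diffuse.grid = "diffuse",
#'    out.total.grid = "total", latitude = 53,
#'    method = "lumped", lmp.transmittance = 70,
#'    start.date = list(day = 1, month = 7, year = 2015), time.step = 0.5)
#' }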
#' @keywords spatial interface
#' @export
rsaga.pisr2 = function(in.dem, in.svf.grid = NULL, in.vapour.grid = NULL,
in.linke.grid = NULL,
out.direct.grid, out.diffuse.grid, out.total.grid = NULL,
out.ratio.grid = NULL, out.duration, out.sunrise, out.sunset,
local.svf = TRUE, location = c("latitude", "grid"), latitude = 53,
unit=c("kWh/m2","kJ/m2","J/cm2"), solconst=1367.0,
method = c("height","components","lumped","hofierka"),
hgt.atmosphere = 12000,
cmp.pressure = 1013, cmp.water.content = 1.68, cmp.dust = 100,
lmp.transmittance = 70,
time.range = c(0,24), time.step = 0.5,
start.date = list(day=31, month=10, year=2015), end.date = NULL, day.step = 5,
env = rsaga.env(), ...)
{
if ( env$version != "2.2.2" & env$version != "2.2.3" ) {
stop("rsaga.pisr2 only for SAGA GIS 2.2.2+;\n",
" use rsaga.pisr or rsaga.solar.radiation for older versions of SAGA GIS")
}
in.dem = default.file.extension(in.dem,".sgrd")
if (!is.null(in.svf.grid)) in.svf.grid = default.file.extension(in.svf.grid,".sgrd")
if (!is.null(in.vapour.grid)) in.vapour.grid = default.file.extension(in.vapour.grid,".sgrd")
if (!is.null(in.linke.grid)) in.linke.grid = default.file.extension(in.linke.grid,".sgrd")
if (missing(out.direct.grid)) {
out.direct.grid = tempfile()
on.exit(unlink(paste(out.direct.grid,".*",sep="")), add = TRUE)
}
if (missing(out.diffuse.grid)) {
out.diffuse.grid = tempfile()
on.exit(unlink(paste(out.diffuse.grid,".*",sep="")), add = TRUE)
}
if (missing(out.total.grid)) {
out.total.grid = tempfile()
on.exit(unlink(paste(out.total.grid,".*",sep="")), add = TRUE)
}
if (missing(out.ratio.grid)) {
out.ratio.grid = tempfile()
on.exit(unlink(paste(out.ratio.grid,".*",sep="")), add = TRUE)
}
if (missing(out.duration)) {
out.duration = tempfile()
on.exit(unlink(paste(out.duration,".*",sep="")), add = TRUE)
}
if (missing(out.sunrise)) {
out.sunrise = tempfile()
on.exit(unlink(paste(out.sunrise,".*",sep="")), add = TRUE)
}
if (missing(out.sunset)) {
out.sunset = tempfile()
on.exit(unlink(paste(out.sunset,".*",sep="")), add = TRUE)
}
unit = match.arg.ext(unit,numeric=TRUE,ignore.case=TRUE,base=0)
method = match.arg.ext(method, numeric = TRUE, ignore.case = TRUE, base = 0)
location = match.arg.ext(location, numeric = TRUE, ignore.case = TRUE, base = 0)
if (!is.null(latitude))
stopifnot( (latitude>=-90) & (latitude<=90) )
stopifnot( length(time.range)==2 )
stopifnot( all(time.range>=0) & all(time.range<=24) & (time.range[1]<time.range[2]) )
stopifnot( (time.step>0) & (time.step<=12) )
stopifnot( (day.step>0) & (day.step<=100) )
stopifnot( is.logical(local.svf) )
param = list( GRD_DEM=in.dem,
GRD_DIRECT = out.direct.grid, GRD_DIFFUS = out.diffuse.grid,
GRD_TOTAL = out.total.grid, GRD_RATIO = out.ratio.grid,
GRD_DURATION = out.duration,
GRD_SUNRISE = out.sunrise, GRD_SUNSET = out.sunset,
UNITS = unit, SOLARCONST = as.numeric(solconst), LOCALSVF = local.svf,
METHOD = method,
HOUR_STEP = time.step )
if (location == 0) {
if (!is.null(latitude)) {
stopifnot((latitude >= -90) & (latitude <= 90))
param = c(param, LATITUDE = as.numeric(latitude))
}
} else {
param = c(param, LOCATION = as.numeric(location))
}
if (!is.null(in.svf.grid)) param = c( param, GRD_SVF=in.svf.grid )
if (!is.null(in.vapour.grid)) param = c( param, GRD_VAPOUR=in.vapour.grid )
if (!is.null(in.linke.grid)) param = c( param, GRD_LINKE=in.linke.grid )
if (method == 0) {
param = c(param, ATMOSPHERE = as.numeric(hgt.atmosphere))
} else if (method == 1) {
param = c(param, PRESSURE = as.numeric(cmp.pressure),
WATER = as.numeric(cmp.water.content), DUST = as.numeric(cmp.dust))
} else if (method == 2) {
stopifnot( (lmp.transmittance>=0) & (lmp.transmittance<=100) )
param = c(param, LUMPED = as.numeric(lmp.transmittance))
} else if (method == 3) {
    # method "hofierka": no additional atmospheric parameters are needed here;
    # the optional Linke turbidity grid is passed via in.linke.grid above
} else stopifnot( method %in% c(0:3) )
if (is.null(start.date)) { # one year
stopifnot( is.null(end.date) )
param = c( param, PERIOD = 2, DAY_A = 0, MONTH_A = 0,
DAY_B = 30, MONTH_B = 11 )
} else {
if (is.null(end.date)) {
param = c( param, PERIOD = 1 ) # single day ... or moment (later)
} else param = c( param, PERIOD = 2 )
stopifnot(is.list(start.date))
stopifnot(length(start.date) == 3)
    stopifnot(all(names(start.date) %in% c("day","month","year")))
stopifnot( (start.date$day>=1) & (start.date$day<=31) )
stopifnot( (start.date$month>=1) & (start.date$month<=12) )
param = c( param, DAY_A = start.date$day ,
MON_A = start.date$month - 1,
YEAR_A = start.date$year )
if (is.null(end.date)) {
# check if moment:
stopifnot(length(time.range) <= 2)
if (length(time.range) == 2) {
if (time.range[2] == time.range[1])
time.range = time.range[1]
}
if (length(time.range) == 1) {
# moment
param$PERIOD = 0
stopifnot(time.range >= 0 & time.range <= 24)
param = c(param, MOMENT = round(time.range,3))
} else {
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
} else {
# range of days:
stopifnot(is.list(end.date))
stopifnot(length(end.date) == 3)
    stopifnot(all(names(end.date) %in% c("day","month","year")))
stopifnot( (end.date$day>=1) & (end.date$day<=31) )
stopifnot( (end.date$month>=1) & (end.date$month<=12) )
param = c( param, DAY_B = end.date$day,
MON_B = end.date$month - 1,
YEAR_B = end.date$year,
DAYS_STEP = day.step )
if (is.null(time.range)) time.range = c(0,24)
stopifnot(length(time.range) == 2)
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Potential Incoming Solar Radiation", # = 2
param = param, env = env, ...)
}
#' Potential incoming solar radiation
#'
#' This function calculates the potential incoming solar radiation in an area either using a lumped atmospheric transmittance model or estimating it based on water and dust content. Use \code{\link{rsaga.pisr}} instead with SAGA GIS 2.0.6+.
#' @name rsaga.solar.radiation
#' @param in.dem name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param out.grid output grid file for potential incoming solar radiation sums
#' @param out.duration Optional output grid file for duration of insolation
#' @param latitude Geographical latitude in degree North (negative values indicate southern hemisphere)
#' @param unit unit of the \code{out.grid} output: \code{"kWh/m2"} (default) or \code{"J/m2"}
#' @param solconst solar constant, defaults to 1367 W/m2
#' @param method specifies how the atmospheric components should be accounted for: either based on a lumped atmospheric transmittance as specified by argument \code{transmittance} (\code{"lumped"}, or numeric code \code{0}; default); or by calculating the components corresponding to water and dust (\code{"components"}, code \code{1})
#' @param transmittance transmittance of the atmosphere in percent; usually between 60 (humid areas) and 80 percent (deserts)
#' @param pressure atmospheric pressure in mbar
#' @param water.content water content of a vertical slice of the atmosphere in cm: between 1.5 and 1.7cm, average 1.68cm (default)
#' @param dust dust factor in ppm; defaults to 100 ppm
#' @param time.range numeric vector of length 2: time span (hours of the day) for numerical integration
#' @param time.step time step in hours for numerical integration
#' @param days either a list with components \code{day} and \code{month} specifying a single day of the year for radiation modeling; OR a numeric vector of length 2 specifying the start and end date (see Note below)
#' @param day.step if \code{days} indicates a range of days, this specifies the time step (number of days) for calculating the incoming solar radiation
#' @param env RSAGA geoprocessing environment obtained with \code{\link{rsaga.env}}; this argument is required for version control (see Note)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @references Wilson, J.P., Gallant, J.C. (eds.), 2000: Terrain analysis - principles and applications. New York, John Wiley & Sons.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This module ceased to exist under SAGA GIS 2.0.6+, which has a similar (but more flexible) module Potential Solar Radiation that is interfaced by \code{\link{rsaga.pisr}}.
#'
#' SAGA_CMD uses zero-based days and months, but this R function uses the standard one-based days and months (e.g. day 1 is the first day of the month, month 1 is January) and translates to the SAGA system.
#'
#' In SAGA 2.0.2, solar radiation sums calculated for a range of days, say \code{days=c(a,b)} actually calculate radiation only for days \code{a,...,b-1} (in steps of \code{day.step} - I used \code{day.step=1} in this example). The setting \code{a=b} however gives the same result as \code{b=a+1}, and indeed \code{b=a+2} gives twice the radiation sums and potential sunshine duration that \code{a=b} and \code{b=a+1} both give.
#'
#' The solar radiation module of SAGA 2.0.1 had a bug that made it impossible to pass a range of \code{days} of the year or a range of hours of the day (\code{time.range}) to SAGA. These options work in SAGA 2.0.2.
#'
#' This function uses module Incoming Solar Radiation from SAGA GIS library \code{ta_lighting}.
#' @seealso \code{\link{rsaga.hillshade}}, \code{\link{rsaga.insolation}}
#' @examples
#' \dontrun{
#' # potential solar radiation on Nov 7 in Southern Ontario...
#' rsaga.solar.radiation("dem","solrad","soldur",latitude=43,
#' days=list(day=7,month=11),time.step=0.5)
#' }
#' @keywords spatial interface
#' @export
rsaga.solar.radiation = function(in.dem, out.grid, out.duration, latitude,
unit=c("kWh/m2","J/m2"), solconst=1367.0, method=c("lumped","components"),
transmittance=70, pressure=1013, water.content=1.68, dust=100,
time.range=c(0,24), time.step=1,
days=list(day=21,month=3), day.step=5,
env = rsaga.env(), ...)
{
if ( !(env$version == "2.0.4" | env$version == "2.0.5") ) {
stop("rsaga.solar.radiation only for SAGA GIS 2.0.4 / 2.0.5;\n",
" use rsaga.pisr for SAGA GIS 2.0.6+")
}
in.dem = default.file.extension(in.dem,".sgrd")
if (missing(out.duration)) {
out.duration = tempfile()
on.exit(unlink(paste(out.duration,".*",sep="")), add = TRUE)
}
unit = match.arg.ext(unit,numeric=TRUE,ignore.case=TRUE,base=0)
method = match.arg.ext(method,numeric=TRUE,ignore.case=TRUE,base=0)
stopifnot( (transmittance>=0) & (transmittance<=100) )
stopifnot( (latitude>=-90) & (latitude<=90) )
stopifnot( length(time.range)==2 )
stopifnot( all(time.range>=0) & all(time.range<=24) & (time.range[1]<time.range[2]) )
stopifnot( (time.step>0) & (time.step<=12) )
stopifnot( (day.step>0) & (day.step<=100) )
param = list( ELEVATION=in.dem, INSOLAT=out.grid, DURATION=out.duration,
UNIT=unit, SOLCONST=as.numeric(solconst), METHOD=method,
TRANSMITT=as.numeric(transmittance), PRESSURE=as.numeric(pressure),
WATER=as.numeric(water.content), DUST=as.numeric(dust),
LATITUDE=as.numeric(latitude),
HOUR_RANGE_MIN=time.range[1], HOUR_RANGE_MAX=time.range[2],
HOUR_STEP=time.step )
if (is.null(days)) { # one year
param = c( param, TIMESPAN=2 )
} else if (is.list(days)) { # single day
stopifnot(length(days)==2)
stopifnot( (days$day>=1) & (days$day<=31) )
stopifnot( (days$month>=1) & (days$month<=12) )
param = c( param, TIMESPAN=0,
SINGLE_DAY_DAY=days$day-1, SINGLE_DAY_MONTH=days$month-1 )
} else if (is.numeric(days)) { # range of days
stopifnot(length(days)==2)
stopifnot( days[1] <= days[2] )
stopifnot( (days[1]>=1) & (days[2]<=366) )
param = c( param, TIMESPAN=1,
DAY_RANGE_MIN=days[1], DAY_RANGE_MAX=days[2],
DAY_STEP=day.step )
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Incoming Solar Radiation", # = 2
param = param, env = env, ...)
}
#' Incoming Solar Radiation (Insolation)
#'
#' This function calculates the amount of incoming solar radiation (insolation) depending on slope, aspect, and atmospheric properties. Module not available in SAGA GIS 2.0.6 and 2.0.7.
#' @name rsaga.insolation
#' @param in.dem Name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param in.vapour Optional input: SAGA grid file giving the water vapour pressure in mbar
#' @param in.latitude Optional input: SAGA grid file giving for each pixel the latitude in degree
#' @param in.longitude Optional input: SAGA grid file giving for each pixel the longitude in degree
#' @param out.direct Optional output grid file for direct insolation
#' @param out.diffuse Optional output grid file for diffuse insolation
#' @param out.total Optional output grid file for total insolation, i.e. the sum of direct and diffuse insolation
#' @param horizontal logical; project radiation onto a horizontal surface? (default: \code{FALSE}, i.e. use the actual inclined surface as a reference area)
#' @param solconst solar constant in Joule; default: 8.164 J/cm2/min (=1360.7 W/m2; the more commonly used solar constant of 1367 W/m2 corresponds to 8.202 J/cm2/min)
#' @param atmosphere height of atmosphere in m; default: 12000m
#' @param water.vapour.pressure if no water vapour grid is given, this argument specifies a constant water vapour pressure that is uniform in space; in mbar, default 10 mbar
#' @param type type of time period: \code{"moment"} (equivalent: \code{0}) for a single instant, \code{"day"} (or \code{1}) for a single day, \code{"range.of.days"} (or \code{2}), or \code{"same.moment.range.of.days"} (or \code{3}) for the same moment in a range of days; default: \code{"moment"}
#' @param time.step time resolution in hours for discretization within a day
#' @param day.step time resolution in days for a range of days
#' @param days numeric vector of length 2, specifying the first and last day of a range of days (for \code{type}s 2 and 3)
#' @param moment if \code{type="moment"} or \code{"same.moment.range.of.days"}, \code{moment} specifies the time of the day (hour between 0 and 24) for which the insolation is to be calculated
#' @param latitude if no \code{in.latitude} grid is given, this will specify a fixed geographical latitude for the entire grid
#' @param bending should planetary bending be modeled? (default: \code{FALSE})
#' @param radius planetary radius
#' @param lat.offset \code{latitude} relates to the grid's \code{"bottom"} (equivalent code: \code{0}), \code{"center"} (1), \code{"top"} (2), or a \code{"user"}-defined reference (default: \code{"user"}); in the latter case, \code{lat.ref.user} defines the reference
#' @param lat.ref.user if \code{in.latitude} is missing and \code{lat.offset="user"}, then this numeric value defines the latitudinal reference (details??)
#' @param lon.offset local time refers to grid's \code{"left"} edge (code 0), \code{"center"} (1), \code{"right"} edge (2), or a \code{"user"}-defined reference.
#' @param lon.ref.user if \code{in.longitude} is missing and \code{lon.offset="user"}, then this numeric value defines the reference of the local time (details??)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @details Calculation of incoming solar radiation (insolation). Based on the SADO (System for the Analysis of Discrete Surfaces) routines developed by Boehner & Trachinow.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module \code{Insolation} (code: 3) from SAGA library \code{ta_lighting}. It is available in SAGA GIS 2.0.4 and 2.0.5 but not 2.0.6 and 2.0.7; see \code{\link{rsaga.pisr}}.
#' @seealso \code{\link{rsaga.solar.radiation}}, \code{\link{rsaga.pisr}}, \code{\link{rsaga.hillshade}}
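#' @examples
#' \dontrun{
#' # Minimal sketch (SAGA GIS 2.0.4/2.0.5 only); file names are placeholders.
#' # Total insolation at noon on day 180 of the year at 43 degrees North:
#' rsaga.insolation(in.dem = "dem.sgrd", out.total = "insol-total",
#'    type = "moment", days = 180, moment = 12, latitude = 43)
#' }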
#' @keywords spatial interface
#' @export
rsaga.insolation = function(in.dem, in.vapour, in.latitude, in.longitude,
out.direct, out.diffuse, out.total,
horizontal=FALSE, solconst=8.1640, atmosphere=12000, water.vapour.pressure=10.0,
type=c("moment","day","range.of.days","same.moment.range.of.days"),
time.step=1, day.step=5, days, moment, latitude, bending=FALSE,
radius=6366737.96,
lat.offset="user", lat.ref.user=0,
lon.offset="center", lon.ref.user=0,
...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( GRD_DEM=in.dem )
type = match.arg.ext(type,numeric=TRUE,ignore.case=TRUE,base=0)
stopifnot( (!missing(out.direct)) | (!missing(out.diffuse)) | (!missing(out.total)) )
stopifnot( !missing(latitude) )
if (!missing(moment)) {
if (!(type==0 | type==3)) {
warning("'moment' argument only relevant for 'type=\"moment\"'\n",
"or 'type=\"same.moment.range.of.days\"' -\n",
"ignoring the 'moment' argument")
}
}
if (!missing(in.vapour)) {
in.vapour = default.file.extension(in.vapour,".sgrd")
param = c(param, GRD_VAPOUR=in.vapour)
}
if (!missing(in.latitude)) {
in.latitude = default.file.extension(in.latitude,".sgrd")
param = c(param, GRD_LAT=in.latitude)
}
if (!missing(in.longitude)) {
in.longitude = default.file.extension(in.longitude,".sgrd")
param = c(param, GRD_LON=in.longitude)
}
if (!missing(out.direct)) param = c(param, GRD_DIRECT=out.direct)
if (!missing(out.diffuse)) param = c(param, GRD_DIFFUS=out.diffuse)
if (!missing(out.total)) param = c(param, GRD_TOTAL=out.total)
stopifnot( (days[1]>=0) & (days[1]<=366) )
param = c(param, BHORIZON=horizontal, SOLARCONST=solconst,
ATMOSPHERE=atmosphere, VAPOUR=water.vapour.pressure,
PERIOD=type, DHOUR=time.step, DDAYS=day.step,
DAY_A=days[1])
if (type>=2) { # range of days / same moment in a range of days
stopifnot( (days[2]>=days[1]) & (days[2]<=366) )
param = c(param, DAY_B=days[2])
}
if ((type==0) | (type==3)) {
stopifnot( (moment>=0) & (moment<=24) )
param = c(param, MOMENT=moment)
}
param = c(param, LATITUDE=latitude, BENDING=bending, RADIUS=radius)
lat.offset = match.arg.ext(lat.offset, c("bottom","center","top","user"),
numeric=TRUE, ignore.case=TRUE, base=0)
lon.offset = match.arg.ext(lon.offset, c("left","center","right","user"),
numeric=TRUE, ignore.case=TRUE, base=0)
param = c(param, LAT_OFFSET=lat.offset)
if (lat.offset==3) { # user-defined
#stopifnot(!missing(lat.ref.user))
param = c(param, LAT_REF_USER=as.numeric(lat.ref.user))
}
param = c(param, LON_OFFSET=lon.offset)
if (lon.offset==3) { # user-defined
#stopifnot(!missing(lon.ref.user))
param = c(param, LON_REF_USER=as.numeric(lon.ref.user))
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Insolation", # = 3
param = param, ...)
}
######## Module grid_filter ########
#' Simple Filters
#'
#' Apply a smoothing, sharpening or edge filter to a SAGA grid.
#' @name rsaga.filter.simple
#' @param in.grid input: SAGA grid file (default file extension: \code{.sgrd})
#' @param out.grid output: SAGA grid file
#' @param mode character or numeric: shape of moving window, either \code{"square"} (=0) or \code{"circle"} (=1, default)
#' @param method character or numeric: \code{"smooth"} (=0), \code{"sharpen"} (=1), or \code{"edge"} (=2)
#' @param radius positive integer: radius of moving window
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.filter.gauss}}
#' @examples \dontrun{rsaga.filter.simple("dem","dem-smooth",radius=4)}
#' @keywords spatial interface
#' @export
rsaga.filter.simple = function(in.grid, out.grid, mode="circle",
method=c("smooth","sharpen","edge"), radius,...)
{
in.grid = default.file.extension(in.grid,".sgrd")
mode = match.arg.ext(mode,choices=c("square","circle"),
numeric=TRUE,base=0,ignore.case=TRUE)
method = match.arg.ext(method,numeric=TRUE,base=0,ignore.case=TRUE)
if (missing(radius)) stop("the search 'radius' argument (in # pixels) must be specified")
if (round(radius) != radius) {
warning("'radius' must be an integer >=1 (# pixels); rounding it...")
radius = round(radius)
}
if (radius<1) {
warning("'radius' must be an integer >=1 (# pixels); setting 'radius=1'...")
radius = 1
}
param = list(INPUT=in.grid, RESULT=out.grid, MODE=mode,
METHOD=method, RADIUS=radius)
rsaga.geoprocessor(lib = "grid_filter",
module = "Simple Filter",
param = param, ...)
}
#' Gauss Filter
#'
#' Smooth a grid using a Gauss filter.
#' @name rsaga.filter.gauss
#' @param in.grid input: SAGA GIS grid file (default file extension: \code{.sgrd})
#' @param out.grid output: SAGA GIS grid file
#' @param sigma numeric, >0.0001: standard deviation parameter of Gauss filter
#' @param radius positive integer: radius of moving window
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.filter.simple}}
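#' @examples
#' \dontrun{
#' # minimal sketch (grid file names are placeholders):
#' # smooth a DEM with a Gauss filter; sigma and radius are in grid cells
#' rsaga.filter.gauss("dem.sgrd", "dem-gauss.sgrd", sigma = 2, radius = 5)
#' }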
#' @keywords spatial interface
#' @export
rsaga.filter.gauss = function(in.grid, out.grid, sigma,
radius=ceiling(2*sigma),...)
{
in.grid = default.file.extension(in.grid,".sgrd")
if (missing(sigma)) stop("the 'sigma' standard deviation argument (in # pixels) must be specified")
stopifnot(sigma>0.0001)
if (round(radius) != radius) stop("'radius' must be an integer (# pixels)")
stopifnot(radius>=1)
param = list(INPUT=in.grid, RESULT=out.grid, SIGMA=sigma, RADIUS=radius)
rsaga.geoprocessor(lib = "grid_filter",
module = "Gaussian Filter", # = 1,
param, ...)
}
######## Module ta_hydrology ########
#' Parallel Processing
#'
#' Calculate the size of the local catchment area (contributing area), the catchment height, catchment slope and aspect, and flow path length, using parallel processing algorithms including the recommended multiple flow direction algorithm. This set of algorithms processes a digital elevation model (DEM) downwards from the highest to the lowest cell.\cr No longer supported with SAGA GIS 2.1.3+. See \code{\link{rsaga.topdown.processing}}.
#' @name rsaga.parallel.processing
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param in.sinkroute optional input: SAGA grid with sink routes
#' @param in.weight optional input: SAGA grid with weights
#' @param out.carea output: catchment area grid
#' @param out.cheight optional output: catchment height grid
#' @param out.cslope optional output: catchment slope grid
#' @param out.caspect optional output: catchment aspect grid
#' @param out.flowpath optional output: flow path length grid
#' @param step integer >=1: step parameter
#' @param method character or numeric: choice of processing algorithm: Deterministic 8 (\code{"d8"} or 0), Rho 8 (\code{"rho8"} or 1), Braunschweiger Reliefmodell (\code{"braunschweig"} or 2), Deterministic Infinity (\code{"dinf"} or 3), Multiple Flow Direction (\code{"mfd"} or 4, the default), Multiple Triangular Flow Direction (\code{"mtfd"}, or 5).
#' @param linear.threshold numeric (number of grid cells): threshold above which linear flow (i.e. the Deterministic 8 algorithm) will be used; linear flow is disabled for \code{linear.threshold=Inf} (the default)
#' @param convergence numeric >=0: a parameter for tuning convergent/divergent flow; default value of \code{1.1} gives realistic results and should not be changed
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @details Refer to the references for details on the available algorithms.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @references
#' Deterministic 8:
#'
#' O'Callaghan, J.F., Mark, D.M. (1984): The extraction of drainage networks from digital elevation data. Computer Vision, Graphics and Image Processing, 28: 323-344.
#'
#' Rho 8:
#'
#' Fairfield, J., Leymarie, P. (1991): Drainage networks from grid digital elevation models. Water Resources Research, 27: 709-717.
#'
#' Braunschweiger Reliefmodell:
#'
#' Bauer, J., Rohdenburg, H., Bork, H.-R. (1985): Ein Digitales Reliefmodell als Vorraussetzung fuer ein deterministisches Modell der Wasser- und Stoff-Fluesse. Landschaftsgenese und Landschaftsoekologie, H. 10, Parameteraufbereitung fuer deterministische Gebiets-Wassermodelle, Grundlagenarbeiten zu Analyse von Agrar-Oekosystemen, eds.: Bork, H.-R., Rohdenburg, H., p. 1-15.
#'
#' Deterministic Infinity:
#'
#' Tarboton, D.G. (1997): A new method for the determination of flow directions and upslope areas in grid digital elevation models. Water Resources Research, 33(2): 309-319.
#'
#' Multiple Flow Direction:
#'
#' Freeman, G.T. (1991): Calculating catchment area with divergent flow based on a regular grid. Computers and Geosciences, 17: 413-22.
#'
#' Quinn, P.F., Beven, K.J., Chevallier, P., Planchon, O. (1991): The prediction of hillslope flow paths for distributed hydrological modelling using digital terrain models. Hydrological Processes, 5: 59-79.
#'
#' Multiple Triangular Flow Direction:
#'
#' Seibert, J., McGlynn, B. (2007): A new triangular multiple flow direction algorithm for computing upslope areas from gridded digital elevation models. Water Resources Research, 43, W04501.
#'
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module), Thomas Grabs (MTFD algorithm)
#' @note This function uses module \code{Parallel Processing} (version 2.0.7+: \code{Catchment Area (Parallel)}) from SAGA library \code{ta_hydrology}.
#'
#' The SAGA GIS 2.0.6+ version of the module adds more (optional) input and
#' output grids that are currently not supported by this wrapper function.
#' Use \code{\link{rsaga.geoprocessor}} for access to these options,
#' and see \code{rsaga.get.usage("ta_hydrology","Catchment Area (Parallel)")}
#' for information on new arguments.
#' @seealso \code{\link{rsaga.topdown.processing}}, \code{\link{rsaga.wetness.index}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # SAGA GIS 2.0.6+:
#' rsaga.get.usage("ta_hydrology","Catchment Area (Parallel)")
#' # earlier versions of SAGA GIS:
#' #rsaga.get.usage("ta_hydrology","Parallel Processing")
#' # execute model with typical settings:
#' rsaga.parallel.processing(in.dem = "dem", out.carea = "carea", out.cslope = "cslope")
#' # cslope is in radians - convert to degree:
#' fac = round(180/pi, 4)
#' formula = paste(fac, "*a", sep = "")
#' rsaga.grid.calculus("cslope", "cslopedeg", formula)
#' }
#' @keywords spatial interface
#' @export
rsaga.parallel.processing = function(in.dem, in.sinkroute, in.weight,
out.carea, out.cheight, out.cslope, out.caspect, out.flowpath,
step, method="mfd", linear.threshold=Inf, convergence=1.1,
env = rsaga.env(), ...)
{
## Version Stop - tool no longer supported SAGA 2.1.3
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" | env$version == "2.2.1" |
env$version == "2.2.2" | env$version == "2.2.3") {
stop("Parallel processing not supported with SAGA GIS 2.1.3 and higher;\n",
"See help(rsaga.topdown.processing) for similar function with SAGA 2.1.3+")
}
in.dem = default.file.extension(in.dem,".sgrd")
pp.choices = c("d8","rho8","braunschweig","dinf","mfd", "mtfd")
method = match.arg.ext(method, choices=pp.choices,
numeric=TRUE, ignore.case=TRUE, base=0)
param = list( ELEVATION=in.dem )
if (!missing(in.sinkroute)) {
in.sinkroute = default.file.extension(in.sinkroute,".sgrd")
param = c(param, SINKROUTE=in.sinkroute)
}
if (!missing(in.weight)) {
in.weight = default.file.extension(in.weight,".sgrd")
param = c(param, SINKROUTE=in.weight)
}
if (!missing(out.carea))
param = c(param, CAREA=out.carea)
if (!missing(out.cheight))
param = c(param, CHEIGHT=out.cheight)
if (!missing(out.cslope))
param = c(param, CSLOPE=out.cslope)
if (!missing(step))
param = c(param, STEP=step)
if (!missing(out.caspect))
param = c(param, CASPECT=out.caspect)
if (!missing(out.flowpath))
param = c(param, FLWPATH=out.flowpath)
param = c(param, Method=method)
if (is.finite(linear.threshold)) {
param = c(param, DOLINEAR=TRUE, LINEARTHRS=linear.threshold)
} else param = c(param, DOLINEAR=FALSE)
param = c(param, CONVERGENCE=convergence)
module = "Catchment Area (Parallel)"
if (env$version == "2.0.4" | env$version == "2.0.5" | env$version == "2.0.6")
module = "Parallel Processing"
rsaga.geoprocessor(lib = "ta_hydrology", module = module, param, env = env, ...)
}
#' Top-Down Processing
#'
#' Calculate the size of the local catchment area (contributing area), accumulated material, and flow path length, using top-down processing algorithms from the highest to the lowest cell. \cr Top-Down Processing is new with SAGA GIS 2.1.3. See \code{\link{rsaga.parallel.processing}} with older versions.
#' @name rsaga.topdown.processing
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param in.sinkroute optional input: SAGA grid with sink routes
#' @param in.weight optional input: SAGA grid with weights
#' @param in.mean optional input: SAGA grid for mean over catchment calculation
#' @param in.material optional input: SAGA grid with material
#' @param in.target optional input: SAGA grid of accumulation target
#' @param in.lin.val optional input: SAGA grid providing values to be compared with linear flow threshold instead of catchment area
#' @param in.lin.dir optional input: SAGA grid to be used for linear flow routing, if the value is a valid direction (0-7 = N, NE, E, SE, S, SW, W, NW)
#' @param out.carea output: catchment area grid
#' @param out.mean optional output: mean over catchment grid
#' @param out.tot.mat optional output: total accumulated material grid
#' @param out.acc.left optional output: accumulated material from left side grid
#' @param out.acc.right optional output: accumulated material from right side grid
#' @param out.flowpath optional output: flow path length grid
#' @param step integer >=1: step parameter
#' @param method character or numeric: choice of processing algorithm (default \code{"mfd"}, or 4):
#' \itemize{
#' \item [0] Deterministic 8 (\code{"d8"} or 0)
#' \item [1] Rho 8 (\code{"rho8"}, or 1)
#' \item [2] Braunschweiger Reliefmodell (\code{"braunschweig"} or 2)
#' \item [3] Deterministic Infinity (\code{"dinf"} or 3)
#' \item [4] Multiple Flow Direction (\code{"mfd"} or 4)
#' \item [5] Multiple Triangular Flow Direction (\code{"mtfd"}, or 5)
#' \item [6] Multiple Maximum Gradient Based Flow Direction (\code{"mdg"}, or 6)}
#' @param linear.threshold numeric (number of grid cells): threshold above which linear flow (i.e. the Deterministic 8 algorithm) will be used; linear flow is disabled for \code{linear.threshold=Inf} (the default)
#' @param convergence numeric >=0: a parameter for tuning convergent/divergent flow; default value of \code{1.1} gives realistic results and should not be changed
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @details Refer to the references for details on the available algorithms.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @references
#' Deterministic 8:
#'
#' O'Callaghan, J.F., Mark, D.M. (1984): The extraction of drainage networks from digital elevation data. Computer Vision, Graphics and Image Processing, 28: 323-344.
#'
#' Rho 8:
#'
#' Fairfield, J., Leymarie, P. (1991): Drainage networks from grid digital elevation models. Water Resources Research, 27: 709-717.
#'
#' Braunschweiger Reliefmodell:
#'
#' Bauer, J., Rohdenburg, H., Bork, H.-R. (1985): Ein Digitales Reliefmodell als Vorraussetzung fuer ein deterministisches Modell der Wasser- und Stoff-Fluesse. Landschaftsgenese und Landschaftsoekologie, H. 10, Parameteraufbereitung fuer deterministische Gebiets-Wassermodelle, Grundlagenarbeiten zu Analyse von Agrar-Oekosystemen, eds.: Bork, H.-R., Rohdenburg, H., p. 1-15.
#'
#' Deterministic Infinity:
#'
#' Tarboton, D.G. (1997): A new method for the determination of flow directions and upslope areas in grid digital elevation models. Water Resources Research, 33(2): 309-319.
#'
#' Multiple Flow Direction:
#'
#' Freeman, G.T. (1991): Calculating catchment area with divergent flow based on a regular grid. Computers and Geosciences, 17: 413-22.
#'
#' Quinn, P.F., Beven, K.J., Chevallier, P., Planchon, O. (1991): The prediction of hillslope flow paths for distributed hydrological modelling using digital terrain models. Hydrological Processes, 5: 59-79.
#'
#' Multiple Triangular Flow Direction:
#'
#' Seibert, J., McGlynn, B. (2007): A new triangular multiple flow direction algorithm for computing upslope areas from gridded digital elevation models. Water Resources Research, 43, W04501.
#'
#' Multiple Flow Direction Based on Maximum Downslope Gradient:
#'
#' Qin, C.Z., Zhu, A-X., Pei, T., Li, B.L., Scholten, T., Zhou, C.H. (2011): An approach to computing topographic wetness index based on maximum downslope gradient. Precision Agriculture, 12(1): 32-43.
#'
#' @author Alexander Brenning and Donovan Bangs (R interface), Olaf Conrad (SAGA module), Thomas Grabs (MTFD algorithm)
#' @examples
#' \dontrun{
#' # Calculation of contributing area with default settings:
#' rsaga.topdown.processing(in.dem = "dem", out.carea = "carea")
#' # Calculation of contributing area by maximum downslope gradient:
#' rsaga.topdown.processing(in.dem = "dem", out.carea = "carea",
#' method = "mdg")
#' }
#' @seealso \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.wetness.index}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @keywords spatial interface
#' @export
rsaga.topdown.processing = function(in.dem, in.sinkroute, in.weight, in.mean, in.material, in.target,
in.lin.val, in.lin.dir,
out.carea, out.mean, out.tot.mat, out.acc.left, out.acc.right,
out.flowpath, step, method = "mfd", linear.threshold = Inf, convergence = 1.1,
env = rsaga.env(), ...) {
## Version Stop - SAGA GIS Version < 2.1.3
if (env$version != "2.1.3" & env$version != "2.1.4" & env$version != "2.2.0" & env$version != "2.2.1" &
env$version != "2.2.2" & env$version != "2.2.3") {
stop("rsaga.topdown.processing requires SAGA GIS 2.1.3 or higher;\n",
"see help(rsaga.parallel.processing) for similar function in earlier versions")
}
in.dem = default.file.extension(in.dem,".sgrd")
pp.choices = c("d8","rho8","braunschweig","dinf","mfd", "mtfd", "mdg")
method = match.arg.ext(method, choices=pp.choices,
numeric=TRUE, ignore.case=TRUE, base=0)
param = list( ELEVATION=in.dem )
if (!missing(in.sinkroute)) {
in.sinkroute = default.file.extension(in.sinkroute,".sgrd")
param = c(param, SINKROUTE=in.sinkroute)
}
if (!missing(in.weight)) {
in.weight = default.file.extension(in.weight,".sgrd")
param = c(param, SINKROUTE=in.weight)
}
if (!missing(in.mean)) {
in.mean = default.file.extension(in.mean, ".sgrd")
param = c(param,VAL_INPUT=in.mean)
}
if (!missing(in.material)) {
in.material = default.file.extension(in.material, ".sgrd")
param = c(param, MATERIAL=in.material)
}
if (!missing(in.target)) {
in.target = default.file.extension(in.target, ".sgrd")
param = c(param, TARGET=in.target)
}
if (!missing(in.lin.val)) {
in.lin.val = default.file.extension(in.lin.val, ".sgrd")
param = c(param, LINEAR_VAL=in.lin.val)
}
if (!missing(in.lin.dir)){
in.lin.dir = default.file.extension(in.lin.dir, ".sgrd")
param = c(param, LINEAR_DIR=in.lin.dir)
}
if (!missing(out.carea))
param = c(param, CAREA=out.carea)
if (!missing(out.mean))
param = c(param, VAL_MEAN=out.mean)
if (!missing(out.tot.mat))
param = c(param, ACCU_TOT=out.tot.mat)
if (!missing(out.acc.left))
param = c(param, ACCU_LEFT=out.acc.left)
if (!missing(out.acc.right))
param = c(param, ACCU_RIGHT=out.acc.right)
if (!missing(out.flowpath))
param = c(param, FLOWLEN=out.flowpath)
param = c(param, METHOD=method)
if (is.finite(linear.threshold)) {
param = c(param, LINEAR_DO=TRUE, LINEAR_MIN=linear.threshold)
} else param = c(param, LINEAR_DO=FALSE)
param = c(param, CONVERGENCE=convergence)
module = "Catchment Area (Top-Down)"
if (env$version == "2.2.0" | env$version == "2.2.1" | env$version == "2.2.2" |
env$version == "2.2.3") {
module = "Flow Accumulation (Top-Down)"
}
rsaga.geoprocessor(lib = "ta_hydrology", module = module, param, env = env, ...)
}
#' SAGA Modules SAGA Wetness Index
#'
#' Calculate the SAGA Wetness Index (SWI), a modified topographic wetness index (TWI)
#' @name rsaga.wetness.index
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.wetness.index output file (optional): wetness index grid file name. Existing files of the same name will be overwritten!
#' @param out.carea output file (optional): catchment area grid file name
#' @param out.cslope output file (optional): catchment slope grid file name
#' @param out.mod.carea output file (optional): file name of modified catchment area grid
#' @param suction SAGA GIS 2.1.0+: positive numeric value (optional): the lower this value, the stronger the suction effect; defaults to a value of 10 (more detailed information is currently not available in the SAGA GIS documentation)
#' @param area.type character or numeric (optional): type of area: \code{"absolute"} (or numeric code 0): absolute catchment area; \code{"square root"} (code 1; the default): square root of catchment area; \code{"specific"} (code 2): specific catchment area
#' @param slope.type character or numeric (optional): type of slope: \code{"local"} (or numeric code 0): local slope; \code{"catchment"} (or code 1; the default): catchment slope.
#' @param slope.min numeric (optional): minimum slope; default: 0
#' @param slope.offset numeric (optional): offset slope; default: 0.1
#' @param slope.weight numeric (optional): weighting factor for slope in index calculation; default: 1
#' @param t.param SAGA GIS up to version 2.0.8: positive numeric value (optional): undocumented
#' @param env A SAGA geoprocessing environment, see \code{\link{rsaga.env}}.
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details The SAGA Wetness Index is similar to the Topographic Wetness Index (TWI), but it is based on a modified catchment area calculation (\code{out.mod.carea}), which does not treat the flow as a thin film as done in the calculation of catchment areas in conventional algorithms. As a result, the SWI tends to assign a more realistic, higher potential soil wetness than the TWI to grid cells situated in valley floors with a small vertical distance to a channel.
#'
#' This module and its arguments changed substantially from SAGA GIS 2.0.8 to version 2.1.0. It appears to me that the new algorithm is similar (but not identical) to the old one when using \code{area.type="absolute"} and \code{slope.type="local"} but I haven't tried out all possible options. This help file will be updated as soon as additional documentation becomes available.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @references Boehner, J., Koethe, R. Conrad, O., Gross, J., Ringeler, A., Selige, T. (2002): Soil Regionalisation by Means of Terrain Analysis and Process Parameterisation. In: Micheli, E., Nachtergaele, F., Montanarella, L. (ed.): Soil Classification 2001. European Soil Bureau, Research Report No. 7, EUR 20398 EN, Luxembourg. pp.213-222.
#'
#' Boehner, J. and Selige, T. (2006): Spatial prediction of soil attributes using terrain analysis and climate regionalisation. In: Boehner, J., McCloy, K.R., Strobl, J. [Ed.]: SAGA - Analysis and Modelling Applications, Goettinger Geographische Abhandlungen, Goettingen: 13-28.
#' @author Alexander Brenning (R interface), Juergen Boehner and Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # using SAGA grids:
#' rsaga.wetness.index("dem.sgrd","swi.sgrd")
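#' # Illustrative sketch of the SAGA GIS 2.1.0+ arguments; the grid names
#' # are placeholders and the parameter values are examples only:
#' rsaga.wetness.index("dem.sgrd", "swi.sgrd", suction = 10,
#'                     area.type = "square root", slope.type = "catchment")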
#' }
#' @keywords spatial interface
#' @export
rsaga.wetness.index = function( in.dem,
out.wetness.index, out.carea, out.cslope,
out.mod.carea,
# since SAGA GIS 2.1.0:
suction, area.type, slope.type, slope.min, slope.offset, slope.weight,
# up to SAGA GIS 2.0.8:
t.param,
env = rsaga.env(), ...)
{
in.dem = default.file.extension(in.dem,".sgrd")
if (missing(out.carea)) {
out.carea = tempfile()
on.exit(unlink(paste(out.carea,".*",sep="")), add = TRUE)
}
if (missing(out.cslope)) {
out.cslope = tempfile()
on.exit(unlink(paste(out.cslope,".*",sep="")), add=TRUE)
}
if (missing(out.mod.carea)) {
out.mod.carea = tempfile()
on.exit(unlink(paste(out.mod.carea,".*",sep="")), add=TRUE)
}
if (env$version == "2.1.0" | env$version == "2.1.1" | env$version == "2.1.2" |
env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
param = list(DEM=in.dem, AREA=out.carea, SLOPE=out.cslope,
AREA_MOD=out.mod.carea, TWI=out.wetness.index)
if (!missing(suction)) {
suction = as.numeric(suction)
if (suction <= 0) stop("'suction' argument must be >0")
param = c(param, SUCTION=suction)
}
if (!missing(area.type)) {
area.type = match.arg.ext(area.type,choices=c("absolute","square root","specific"),base=0,ignore.case=TRUE,numeric=TRUE)
param = c(param, AREA_TYPE=area.type)
}
if (!missing(slope.type)) {
slope.type = match.arg.ext(slope.type,choices=c("local","catchment"),base=0,ignore.case=TRUE,numeric=TRUE)
param = c(param, SLOPE_TYPE=slope.type)
}
if (!missing(slope.min)) {
slope.min = as.numeric(slope.min)
if (slope.min < 0) stop("'slope.min' argument must be >=0")
      param = c(param, SLOPE_MIN=slope.min)
}
if (!missing(slope.offset)) {
slope.offset = as.numeric(slope.offset)
if (slope.offset < 0) stop("'slope.offset' argument must be >=0")
      param = c(param, SLOPE_OFF=slope.offset)
}
if (!missing(slope.weight)) {
slope.weight = as.numeric(slope.weight)
if (slope.weight < 0) stop("'slope.weight' argument must be >=0")
      param = c(param, SLOPE_WEIGHT=slope.weight)
}
if (!missing(t.param))
warning("argument 't.param' (in saga_cmd: T) supported only up to SAGA GIS 2.0.8")
} else {
param = list(DEM=in.dem, C=out.carea, GN=out.cslope,
CS=out.mod.carea, SB=out.wetness.index)
if (!missing(t.param))
param = c(param, T=as.numeric(t.param))
if (!missing(suction) | !missing(area.type) | !missing(slope.type) | !missing(slope.min) | !missing(slope.offset) | !missing(slope.weight))
warning("arguments 'suction', 'area.type', 'slope.min', 'slope.type', 'slope.offset'\n",
"and 'slope.weight' not supported prior to SAGA GIS 2.1.0")
}
rsaga.geoprocessor(lib = "ta_hydrology",
module = "SAGA Wetness Index",
param, ..., env = env)
}
######## Module grid_calculus ########
#' SAGA Module Grid Calculus
#'
#' Perform Arithmetic Operations on Grids
#' @name rsaga.grid.calculus
#' @param in.grids input character vector: SAGA grid files (default file extension: \code{.sgrd})
#' @param out.grid output: grid file resulting from the cell-by-cell application of 'formula' to the grids. Existing files will be overwritten!
#' @param formula character string of formula specifying the arithmetic operation to be performed on the \code{in.grids} (see Details); if this is a formula, only the right hand side will be used.
#' @param coef numeric: coefficient vector to be used for the linear combination of the \code{in.grids}. If \code{coef} has one more element than \code{in.grids}, the first one will be interpreted as an intercept.
#' @param cf.digits integer: number of digits used when converting the \code{coef}ficients to character strings (trailing zeros will be removed)
#' @param remove.zeros logical: if \code{TRUE}, terms (grids) with coefficient (numerically) equal to zero (after rounding to \code{cf.digits} digits) will be removed from the formula
#' @param remove.ones logical: if \code{TRUE} (the default), factors equal to 1 (after rounding to \code{cf.digits} digits) will be removed from the formula
#' @param env RSAGA geoprocessing environment, generated by a call to \code{\link{rsaga.env}}
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details The \code{in.grids} are represented in the \code{formula} by the letters \code{a} (for \code{in.grids[1]}), \code{b} etc. Thus, if \code{in.grids[1]} is Landsat TM channel 3 and \code{in.grids[2]} is channel 4, the NDVI formula (TM3-TM4)/(TM3+TM4) can be represented by the character string \code{"(a-b)/(a+b)"} (any spaces are removed) or the formula \code{~(a-b)/(a+b)} in the \code{formula} argument.
#'
#' In addition to +, -, *, and /, the following operators and functions are available for the \code{formula} definition:
#' \itemize{
#' \item \eqn{\hat{\ }}{^} power
#' \item \code{sin(a)} sine
#' \item \code{cos(a)} cosine
#' \item \code{tan(a)} tangent
#' \item \code{asin(a)} arc sine
#' \item \code{acos(a)} arc cosine
#' \item \code{atan(a)} arc tangent
#' \item \code{atan2(a,b)} arc tangent of b/a
#' \item \code{abs(a)} absolute value
#' \item \code{int(a)} convert to integer
#' \item \code{sqr(a)} square
#' \item \code{sqrt(a)} square root
#' \item \code{ln(a)} natural logarithm
#' \item \code{log(a)} base 10 logarithm
#' \item \code{mod(a,b)} modulo
#' \item \code{gt(a, b)} returns 1 if a greater b
#' \item \code{lt(a, b)} returns 1 if a lower b
#' \item \code{eq(a, b)} returns 1 if a equal b
#' \item \code{ifelse(switch, x, y)} returns x if switch equals 1 else y
#' }
#'
#' Using \code{remove.zeros=FALSE} might have the side effect that nodata areas in grids with a coefficient of 0 are passed on to the results grid. (To be confirmed.)
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{local.function}}, \code{\link{focal.function}}, and \code{\link{multi.focal.function}} for a more flexible framework for combining grids or applying local and focal functions; \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # using SAGA grids:
#' # calculate the NDVI from Landsat TM bands 3 and 4:
#' rsaga.grid.calculus(c("tm3.sgrd","tm4.sgrd"), "ndvi.sgrd", ~(a-b)/(a+b))
#' # apply a linear regression equation to grids:
#' coefs = c(20,-0.6)
#' # maybe from a linear regression of mean annual air temperature (MAAT)
#' # against elevation - something like:
#' # coefs = coef( lm( maat ~ elevation ) )
#' rsaga.linear.combination("elevation.sgrd", "maat.sgrd", coefs)
#' # equivalent:
#' rsaga.grid.calculus("elevation.sgrd", "maat.sgrd", "20 - 0.6*a")
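#' # illustrative sketch using one of the logical operators (gt);
#' # file names and the 1000 m threshold are placeholders:
#' # flag cells above 1000 m elevation with 1, all others with 0:
#' rsaga.grid.calculus("elevation.sgrd", "above1000.sgrd", "gt(a, 1000)")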
#' }
#' @keywords spatial interface
#' @export
rsaga.grid.calculus = function(in.grids, out.grid, formula,
env = rsaga.env(), ...)
{
in.grids = default.file.extension(in.grids, ".sgrd")
in.grids = paste(in.grids, collapse = ";")
if (any(class(formula) == "formula"))
formula = rev( as.character(formula) )[1]
formula = gsub(" ", "", formula)
if (env$version == "2.0.4") {
param = list( INPUT = in.grids, RESULT = out.grid,
FORMUL = formula )
} else {
param = list( GRIDS = in.grids, RESULT = out.grid,
FORMULA = formula )
}
rsaga.geoprocessor(lib = "grid_calculus",
module = "Grid Calculator", # was = 1
param = param, env = env, ...)
}
#' @rdname rsaga.grid.calculus
#' @name rsaga.linear.combination
#' @export
rsaga.linear.combination = function(in.grids, out.grid, coef,
cf.digits = 16, remove.zeros = FALSE, remove.ones = TRUE,
env = rsaga.env(), ...)
{
fmt = paste("%.", cf.digits, "f", sep = "")
coef = sprintf(fmt, coef)
zero = sprintf(fmt, 0)
omit = rep(FALSE, length(coef))
if (length(coef) == length(in.grids)) { # no intercept provided
coef = c(NA, coef)
omit = c(TRUE, omit)
}
nvars = length(coef)
if (nvars != length(in.grids) + 1)
stop("'coef' must have length 'length(in.grids)' or 'length(in.grids)+1'")
# Simplify the formula by removing terms that are zero
# (after rounding to the specified number of digits):
if (remove.zeros)
omit = omit | (coef == zero)
# Zero intercept is always removed:
omit[1] = omit[1] | (coef[1] == zero)
# Remove zeros at the end of the coefficients:
for (i in 1:nvars) {
if (omit[i]) next
# Are there any digits at all?
if (length(grep(".", coef[i], fixed = TRUE)) == 0) next
nc = nchar(coef[i])
# Remove all trailing zeros:
while (substr(coef[i], nc, nc) == "0") {
coef[i] = substr(coef[i], 1, nc - 1)
nc = nchar(coef[i])
}
# Remove trailing decimal point:
if (substr(coef[i], nc, nc) == ".")
coef[i] = substr(coef[i], 1, nc - 1)
}
# Set up the formula:
ltrs = letters[ 1 : sum(!omit[-1]) ]
if (!omit[1]) ltrs = c("intercept", ltrs)
formula = paste(coef[ !omit ], ltrs,
collapse = "+", sep = "*")
formula = gsub("*intercept", "", formula, fixed = TRUE)
formula = gsub("+-", "-", formula, fixed = TRUE)
if (remove.ones) {
formula = gsub("-1*", "-", formula, fixed = TRUE)
formula = gsub("+1*", "+", formula, fixed = TRUE)
}
rsaga.grid.calculus(in.grids = in.grids[!omit[-1]], out.grid = out.grid,
formula = formula, env = env, ...)
}
######## Module shapes_grid ########
#' Contour Lines from a Grid
#'
#' Creates a contour lines shapefile from a grid file in SAGA grid format.
#' @name rsaga.contour
#' @param in.grid input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.shapefile output: contour line shapefile. Existing files will be overwritten!
#' @param zstep,zmin,zmax lower limit, upper limit, and equidistance of contour lines
#' @param vertex optional parameter: vertex type for resulting contours. Default \code{"xy"} (or 0). Only available with SAGA GIS 2.1.3+. \itemize{
#' \item [0] \code{"xy"}
#' \item [1] \code{"xyz"}}
#' @param env A SAGA geoprocessing environment, see \code{\link{rsaga.env}}
#' @param ... arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.geoprocessor}}
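#' @examples
#' \dontrun{
#' # Illustrative sketch (file names are placeholders):
#' # contour lines every 10 map units between 100 and 500:
#' rsaga.contour("dem.sgrd", "contours.shp", zstep = 10, zmin = 100, zmax = 500)
#' }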
#' @keywords spatial interface
#' @export
rsaga.contour = function(in.grid,out.shapefile,zstep,zmin,zmax,vertex="xy",env=rsaga.env(),...) {
in.grid = default.file.extension(in.grid,".sgrd")
# 'INPUT' changed to 'GRID' with SAGA 2.1.3
if(env$version != "2.1.3" & env$version != "2.1.4" & env$version != "2.2.0" & env$version != "2.2.1" &
env$version != "2.2.2" & env$version != "2.2.3"){
param = list(INPUT=in.grid,CONTOUR=out.shapefile)
} else {
param = list(GRID=in.grid,CONTOUR=out.shapefile)
}
if (!missing(zmin)) param = c(param, ZMIN=as.numeric(zmin))
if (!missing(zmax)) param = c(param, ZMAX=as.numeric(zmax))
if (!missing(zstep)) {
stopifnot(as.numeric(zstep)>0)
param = c(param, ZSTEP=as.numeric(zstep))
}
v.choices = c("xy", "xyz")
vertex = match.arg.ext(vertex, choices=v.choices,
numeric=TRUE, ignore.case=TRUE, base=0)
if (!missing(vertex)) {
if (env$version == "2.1.3" | env$version == "2.1.4") {
param = c(param, VERTEX=vertex)
}
}
rsaga.geoprocessor(lib = "shapes_grid",
module = "Contour Lines from Grid",
param, env = env,...)
}
#' Add Grid Values to Point Shapefile
#'
#' Pick values from SAGA grids and attach them as a new variables to a point shapefile.
#' @name rsaga.add.grid.values.to.points
#' @param in.grids Input: character vector with names of (one or more) SAGA GIS grid files to be converted into a point shapefile.
#' @param in.shapefile Input point shapefile (default extension: \code{.shp}).
#' @param out.shapefile Output point shapefile (default extension: \code{.shp}).
#' @param method interpolation method to be used; choices: nearest neighbour interpolation (default), bilinear interpolation, inverse distance weighting, bicubic spline interpolation, B-splines.
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details Retrieves information from the selected grids at the positions of the points of the selected points layer and adds it to the resulting layer.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA modules)
#' @note This function uses module \code{Add Grid Values to Points} in SAGA GIS library \code{shapes_grid}.
#' @seealso \code{\link{pick.from.points}}, \code{\link{pick.from.ascii.grid}}, \code{\link{pick.from.saga.grid}}, \code{\link{rsaga.grid.to.points}}
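#' @examples
#' \dontrun{
#' # Illustrative sketch (file names are placeholders): attach the values
#' # of two grids to a point shapefile using bilinear interpolation:
#' rsaga.add.grid.values.to.points("sites.shp", c("dem.sgrd", "slope.sgrd"),
#'                                 "sites_with_values.shp", method = "bilinear")
#' }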
#' @keywords spatial interface
#' @export
rsaga.add.grid.values.to.points = function(in.shapefile,
in.grids, out.shapefile,
method = c("nearest.neighbour", "bilinear",
"idw", "bicubic.spline", "b.spline"), ...)
{
in.grids = default.file.extension(in.grids,".sgrd")
in.grids = paste(in.grids, collapse = ";")
# check if this is SAGA version dependent:
in.shapefile = default.file.extension(in.shapefile,".shp")
out.shapefile = default.file.extension(out.shapefile,".shp")
method = match.arg.ext(method, base = 0, ignore.case = TRUE, numeric = TRUE)
param = list(SHAPES = in.shapefile, GRIDS = in.grids,
RESULT = out.shapefile, INTERPOL = method)
rsaga.geoprocessor(lib = "shapes_grid",
module = "Add Grid Values to Points", # was: = 0
param, ...)
}
#' Convert SAGA grid file to point shapefile
#'
#' Convert SAGA grid file to point (or polygon) shapefile - either completely or only a random sample of grid cells.
#' @name rsaga.grid.to.points
#' @param in.grids Input: names of (possibly several) SAGA GIS grid files to be converted into a point shapefile.
#' @param in.grid Input: SAGA grid file from which to sample.
#' @param out.shapefile Output: point shapefile (default extension: \code{.shp}). Existing files will be overwritten!
#' @param in.clip.polygons optional polygon shapefile to be used for clipping/masking an area
#' @param exclude.nodata logical (default: \code{TRUE}): skip 'nodata' grid cells?
#' @param type character string: \code{"nodes"}: create point shapefile of grid center points; \code{"cells"} (only supported by SAGA GIS 2.0.6+): create polygon shapefile with grid cell boundaries
#' @param freq integer >=1: sampling frequency: on average 1 out of 'freq' grid cells are selected
#' @param env RSAGA geoprocessing environment created by \code{\link{rsaga.env}}; required by \code{rsaga.grid.to.points} to determine version-dependent SAGA module name and arguments
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA modules)
#' @note These functions use modules \code{Grid Values to Shapes} (pre-2.0.6 name: \code{Grid Values to Points}) and \code{Grid Values to Points (randomly)} in SAGA library \code{shapes_grid}.
#'
#' The SAGA 2.0.6+ module \code{Grid Values to Shapes} is more flexible than the earlier versions as it allows creating grid cell polygons instead of center points (see argument \code{type}).
#' @seealso \code{\link{rsaga.add.grid.values.to.points}}
#' @examples
#' \dontrun{
#' # one point per grid cell, exclude nodata areas:
#' rsaga.grid.to.points("dem", "dempoints")
#' # take only every 20th point, but do not exclude nodata areas:
#' rsaga.grid.to.points.randomly("dem", "dempoints20", freq = 20)
#' }
#' @keywords spatial interface
#' @export
rsaga.grid.to.points = function(in.grids, out.shapefile,
in.clip.polygons, exclude.nodata = TRUE,
type = "nodes", env = rsaga.env(), ...)
{
in.grids = default.file.extension(in.grids,".sgrd")
in.grids = paste(in.grids, collapse = ";")
type = match.arg.ext(type, numeric=TRUE, ignore.case=TRUE, base=0,
choices=c("nodes","cells"))
if (type == 1 & (env$version == "2.0.4" | env$version == "2.0.5")) {
type = 0
warning("type == 'cells' not supported by SAGA 2.0.4 and 2.0.5; using type = 'nodes'")
}
param = list(GRIDS = in.grids)
if (env$version == "2.0.4" | env$version == "2.0.5") {
param = c(param, POINTS = out.shapefile)
} else param = c(param, SHAPES = out.shapefile)
param = c(param, NODATA = exclude.nodata)
if (!missing(in.clip.polygons))
param = c(param, POLYGONS = in.clip.polygons)
if (!(env$version == "2.0.4" | env$version == "2.0.5"))
param = c(param, TYPE = type)
module = "Grid Values to Shapes"
if (!rsaga.module.exists("shapes_grid",module,env=env))
#if (env$version == "2.0.4" | env$version == "2.0.5")
module = "Grid Values to Points"
rsaga.geoprocessor(lib = "shapes_grid",
module = module, # was: = 3
param, env = env, ...)
}
#' @rdname rsaga.grid.to.points
#' @name rsaga.grid.to.points.randomly
#' @export
rsaga.grid.to.points.randomly = function(in.grid,
out.shapefile, freq, ...)
{
in.grid = default.file.extension(in.grid, ".sgrd")
out.shapefile = default.file.extension(out.shapefile, ".shp")
if (freq < 1) stop("'freq' must be an integer >=1")
param = list(GRID = in.grid, FREQ = freq, POINTS = out.shapefile)
rsaga.geoprocessor(lib = "shapes_grid",
module = "Grid Values to Points (randomly)", # was: = 4
param, ...)
}
#' Spatial Interpolation Methods
#'
#' Spatial interpolation of point data using inverse distance to a power (inverse distance weighting, IDW), nearest neighbors, or the modified quadratic Shepard method.
#' @name rsaga.inverse.distance
#' @param in.shapefile Input: point shapefile (default extension: \code{.shp}).
#' @param out.grid Output: filename for interpolated grid (SAGA grid file). Existing files will be overwritten!
#' @param field numeric or character: number or name of attribute in the shapefile's attribute table to be interpolated; the first attribute is represented by a zero.
#' @param power numeric (>0): exponent used in inverse distance weighting (usually 1 or 2)
#' @param maxdist numeric: maximum distance of points to be used for inverse distance interpolation (search radius); no search radius is applied when this argument is missing or equals \code{Inf}
#' @param nmax Maximum number of nearest points to be used for interpolation; \code{nmax=Inf} is a valid value (no upper limit)
#' @param quadratic.neighbors integer >=5; default 13.
#' @param weighting.neighbors integer >=3; default 19.
#' @param target required argument of type list: parameters identifying the target area, e.g. the x/y extent and cellsize, or name of a reference grid; see \code{\link{rsaga.target}}.
#' @param env RSAGA geoprocessing environment created by \code{\link{rsaga.env}}, required because module(s) depend(s) on SAGA version
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details These functions use modules from the \code{grid_gridding} SAGA GIS library. They do not support SAGA GIS 2.0.4, which differs in some argument names and parameterizations. Target grid parameterization by grid file name currently doesn't work with SAGA GIS 2.1.0 Release Candidate 1 (see also \code{\link{rsaga.target}}); stay tuned for future updates and fixes.
#' @references QSHEP2D: Fortran routines implementing the Quadratic Shepard method for bivariate interpolation of scattered data (see R. J. Renka, ACM TOMS 14 (1988) pp.149-150). Classes: E2b. Interpolation of scattered, non-gridded multivariate data.
#' @author Alexander Brenning (R interface), Andre Ringeler and Olaf Conrad (SAGA modules)
#' @note The 'Inverse Distance Weighted' module of SAGA GIS not only supports inverse-distance weighted interpolation, but also exponential and other weighting schemes (command line argument WEIGHTING); these are, however, not accessible through this function, but only through the \code{rsaga.geoprocessor}, if needed. See \code{rsaga.get.usage("grid_gridding","Inverse Distance Weighted")} for details.
#'
#' See the example section in the help file for \code{\link[shapefiles]{write.shapefile}} in package \code{shapefiles} to learn how to apply these interpolation functions to a shapefile exported from a data.frame.
#'
#' Modified Quadratic Shephard method: based on module 660 in TOMS (see references).
#' @seealso \code{\link{rsaga.target}}; \code{\link[gstat]{idw}} in package \code{gstat}.
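#' @examples
#' \dontrun{
#' # Illustrative sketch (file names are placeholders): IDW interpolation
#' # of the first attribute (field = 0) to the extent and resolution of an
#' # existing reference grid:
#' rsaga.inverse.distance("points.shp", "z_idw.sgrd", field = 0,
#'                        power = 2, maxdist = 1000,
#'                        target = rsaga.target(target = "target.grid",
#'                                              target.grid = "dem.sgrd"))
#' }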
#' @keywords spatial interface
#' @export
rsaga.inverse.distance = function(in.shapefile, out.grid, field,
power = 1, maxdist, nmax = 100,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.inverse.distance doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (power <= 0) stop("'power' must be >0")
if (field < 0) stop("'field' must be an integer >=0")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
module = "Inverse Distance Weighted"
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field,
WEIGHTING = 0, # IDW
MODE = 0, # search mode: all directions
POWER = power)
is.global = (missing(maxdist))
if (!missing(maxdist)) {
if (maxdist <= 0) stop("'maxdist' must be >0")
if (maxdist == Inf) is.global = TRUE
}
if (is.global) {
param = c(param, list(RANGE = 1))
} else
param = c(param, list(RANGE = 0, RADIUS = maxdist))
#use.all = (missing(nmax))
#if (!missing(nmax)) {
if (nmax <= 0) stop("'nmax' must be an integer >0, or Inf")
use.all = (nmax == Inf)
#}
if (use.all) {
param = c(param, list(POINTS = 1))
} else
param = c(param, list(POINTS = 0, NPOINTS = nmax))
param = c(param, target)
# Translate some argument names for SAGA GIS 2.1.0+:
if (substr(env$version,1,4) != "2.0.") {
nm = names(param)
nm[ nm == "RANGE" ] = "SEARCH_RANGE"
nm[ nm == "RADIUS" ] = "SEARCH_RADIUS"
nm[ nm == "POINTS" ] = "SEARCH_POINTS_ALL"
nm[ nm == "NPOINTS" ] = "SEARCH_POINTS_MAX"
nm[ nm == "MODE" ] = "SEARCH_DIRECTION"
nm[ nm == "POWER" ] = "WEIGHT_POWER"
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
}
names(param) = nm
# Translate some argument names for SAGA 2.2.0
if (substr(env$version,1,4) == "2.2."){
nm = names(param)
nm[ nm == "WEIGHTING" ] = "DW_WEIGHTING"
nm[ nm == "WEIGHT_POWER" ] = "DW_IDW_POWER"
nm[ nm == "WEIGHT_BANDWIDTH" ] = "DW_BANDWIDTH"
}
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = module,
param = param, env = env, ...)
}
#' @rdname rsaga.inverse.distance
#' @name rsaga.nearest.neighbour
#' @export
rsaga.nearest.neighbour = function(in.shapefile, out.grid, field,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.nearest.neighbour doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (field < 0)
stop("'field' must be an integer >=0")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field)
param = c(param, target)
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm = names(param)
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = "Nearest Neighbour", # was: = 2 (=1 in earlier SAGA version)
param, env = env, ...)
}
#' @rdname rsaga.inverse.distance
#' @name rsaga.modified.quadratic.shephard
#' @export
rsaga.modified.quadratic.shephard = function(in.shapefile, out.grid, field,
quadratic.neighbors = 13, weighting.neighbors = 19,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.modified.quadratic.shephard doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (field < 0)
stop("'field' must be an integer >=0")
if (quadratic.neighbors < 5)
stop("'quadratic.neighbors' must be an integer >=5")
    if (weighting.neighbors < 3)
stop("'weighting.neighbors' must be an integer >=3")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field,
QUADRATIC_NEIGHBORS = quadratic.neighbors,
WEIGHTING_NEIGHBORS = weighting.neighbors)
param = c(param, target)
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm = names(param)
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = "Modifed Quadratic Shepard", # = 4 (earlier SAGA versions: =2)
param, env = env, ...)
}
#' @rdname rsaga.inverse.distance
#' @name rsaga.triangulation
#' @export
rsaga.triangulation = function(in.shapefile, out.grid, field,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.triangulation doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (field < 0)
stop("'field' must be an integer >=0")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field)
param = c(param, target)
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm = names(param)
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = "Triangulation",
param, env = env, ...)
}
#' Define target grid for interpolation
#'
#' Define the resolution and extent of a target grid for interpolation by SAGA modules based on (1) user-provided x/y coordinates, (2) an existing SAGA grid file, or (3) the header data of an ASCII grid. Intended to be used with RSAGA's interpolation functions.
#' @name rsaga.target
#' @param target character: method used for defining the target grid
#' @param user.cellsize Only for \code{target="user.defined"}: raster resolution (in the grid's map units)
#' @param user.x.extent See \code{user.y.extent}
#' @param user.y.extent Only for \code{target="user.defined"}: numeric vectors of length 2: minimum and maximum coordinates of grid cell center points
#' @param target.grid Only for \code{target="target.grid"}: character string giving the name of a SAGA grid file that specifies the extent and resolution of the target grid; this target grid file may be overwritten, depending on the specifics of the SAGA GIS module used.
#' @param header Only for \code{target="header"}: list: ASCII grid header (as returned e.g. by \code{\link{read.ascii.grid.header}}) or defined manually; must at least have components \code{ncols}, \code{nrows}, \code{cellsize}, and either \code{x/yllcorner} or \code{x/yllcenter}.
#' @param env A SAGA geoprocessing environment, see \code{\link{rsaga.env}}.
#' @note This function is to be used with RSAGA functions \code{\link{rsaga.inverse.distance}}, \code{\link{rsaga.nearest.neighbour}} and \code{\link{rsaga.modified.quadratic.shephard}}. Note that these are currently only compatible with SAGA GIS 2.0.5 and higher.
#' @seealso \code{\link{read.ascii.grid.header}}
#' @examples
#' \dontrun{
#' # IDW interpolation of attribute "z" from the point shapefile
#' # 'points.shp' to a grid with the same extent and resolution
#' # as the (pre-existing) geology grid:
#' rsaga.inverse.distance("points", "dem", field = "z", maxdist = 1000,
#' target = rsaga.target(target="target.grid",
#' target.grid = "geology"))
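#' # Alternatively, an illustrative user-defined target grid with 100 m
#' # resolution (the extent coordinates are placeholders):
#' rsaga.target(target = "user.defined", user.cellsize = 100,
#'              user.x.extent = c(500000, 510000),
#'              user.y.extent = c(4700000, 4710000))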
#' }
#' @keywords spatial interface
#' @export
rsaga.target = function(
target = c("user.defined", "target.grid", "header"),
user.cellsize = 100,
user.x.extent, user.y.extent,
target.grid, header, env = rsaga.env() )
{
if(env$version == "2.0.4")
stop("'rsaga.target' currently doesn't support SAGA GIS version 2.0.4\n")
target = match.arg.ext(target, base = 0, numeric = TRUE)
if (target == 2) {
stopifnot(missing(user.x.extent) & missing(user.y.extent) & missing(target.grid))
target = 0
user.cellsize = header$cellsize
if (!any(names(header) == "xllcenter"))
header$xllcenter = header$xllcorner + header$cellsize / 2
if (!any(names(header) == "yllcenter"))
header$yllcenter = header$yllcorner + header$cellsize / 2
user.x.extent = c(header$xllcenter, header$xllcenter + header$cellsize * (header$ncols-1))
user.y.extent = c(header$yllcenter, header$yllcenter + header$cellsize * (header$nrows-1))
}
param = list(TARGET = target)
if (target == 0) {
param = c(param,
USER_SIZE = user.cellsize,
USER_XMIN = min(user.x.extent),
USER_XMAX = max(user.x.extent),
USER_YMIN = min(user.y.extent),
USER_YMAX = max(user.y.extent))
} else if (target == 1) {
stopifnot(missing(user.x.extent) & missing(user.y.extent))
target.grid = default.file.extension(target.grid, ".sgrd")
param = c(param,
GRID_GRID = target.grid)
}
return(param)
}
######## Module io_grid_gdal ########
#' Import Grid Files to SAGA grid format using GDAL
#'
#' These functions provide simple interfaces for reading and writing grids from/to ASCII grids and Rd files. Grids are stored in matrices, their headers in lists.
#' @name rsaga.import.gdal
#' @param in.grid file name of a grid in a format supported by GDAL
#' @param out.grid output SAGA grid file name; defaults to \code{in.grid} with the file extension being removed; file extension should not be specified, it defaults to \code{.sgrd}
#' @param env RSAGA geoprocessing environment created by \code{\link{rsaga.env}}
#' @param ... additional arguments to be passed to \code{rsaga.geoprocessor}
#' @details The GDAL Raster Import module of SAGA imports grid data from various file formats using the Geospatial Data Abstraction Library (GDAL) by Frank Warmerdam.
#' GDAL Versions are specific to SAGA versions:
#' \itemize{
#' \item SAGA 2.0.7 - 2.0.8: GDAL v.1.8.0
#' \item SAGA 2.1.0 - 2.1.1: GDAL v.1.10.0
#' \item SAGA 2.1.2 - 2.2.0: GDAL v.1.11.0
#' \item SAGA 2.2.1 - 2.2.3: GDAL v.2.1.0 dev}
#' More information is available at \url{http://www.gdal.org/}.
#'
#' If \code{in.grid} has more than one band (e.g. RGB GEOTIFF), then output grids with file names of the form \eqn{in.grid{\_}01.sgrd}{in.grid_01.sgrd}, \eqn{in.grid{\_}02.sgrd}{in.grid_02.sgrd} etc. are written, one for each band.
#'
#' The following raster formats are currently supported. Last updated for SAGA GIS 2.2.3;
#' for a list specific to a particular SAGA GIS version, call \code{rsaga.html.help("io_gdal","GDAL: Import Raster", env = rsaga.env(path="SAGA_Version_to_Test"))}
#' \itemize{
#' \item BAG - Bathymetry Attributed Grid
#' \item ECW - ERDAS Compressed Wavelets (SDK 3.x)
#' \item JP2ECW - ERDAS JPEG2000 (SDK 3.x)
#' \item FITS - Flexible Image Transport System
#' \item GMT - GMT NetCDF Grid Format
#' \item HDF4 - Hierarchical Data Format Release 4
#' \item HDF4Image - HDF4 Dataset
#' \item HDF5 - Hierarchical Data Format Release 5
#' \item HDF5Image - HDF5 Dataset
#' \item KEA - KEA Image Format (.kea)
#' \item MG4Lidar - MrSID Generation 4 / Lidar (.sid)
#' \item MrSID - Multi-resolution Seamless Image Database (MrSID)
#' \item netCDF - Network Common Data Format
#' \item PostgreSQL - PostgreSQL/PostGIS
#' \item VRT - Virtual Raster
#' \item GTiff - GeoTIFF
#' \item NITF - National Imagery Transmission Format
#' \item RPFTOC - Raster Product Format TOC format
#' \item ECRGTOC - ECRG TOC format
#' \item HFA - Erdas Imagine Images (.img)
#' \item SAR_CEOS - CEOS SAR Image
#' \item CEOS - CEOS Image
#' \item JAXAPALSAR - JAXA PALSAR Product Reader (Level 1.1/1.5)
#' \item GFF - Ground-based SAR Applications Testbed File Format (.gff)
#' \item ELAS - ELAS
#' \item AIG - Arc/Info Binary Grid
#' \item AAIGrid - Arc/Info ASCII Grid
#' \item GRASSASCIIGrid - GRASS ASCII Grid
#' \item SDTS - SDTS Raster
#' \item DTED - DTED Elevation Raster
#' \item PNG - Portable Network Graphics
#' \item JPEG - JPEG JFIF
#' \item MEM - In Memory Raster
#' \item JDEM - Japanese DEM (.mem)
#' \item GIF - Graphics Interchange Format (.gif)
#' \item BIGGIF - Graphics Interchange Format (.gif)
#' \item ESAT - Envisat Image Format
#' \item BSB - Maptech BSB Nautical Charts
#' \item XPM - X11 PixMap Format
#' \item BMP - MS Windows Device Independent Bitmap
#' \item DIMAP - SPOT DIMAP
#' \item AirSAR - AirSAR Polarimetric Image
#' \item RS2 - RadarSat 2 XML Product
#' \item SAFE - Sentinel SAFE Product
#' \item PCIDSK - PCIDSK Database File
#' \item PCRaster - PCRaster Raster File
#' \item ILWIS - ILWIS Raster Map
#' \item SGI - SGI Image File Format 1.0
#' \item SRTMHGT - SRTMHGT File Format
#' \item Leveller - Leveller heightfield
#' \item Terragen - Terragen heightfield
#' \item ISIS3 - USGS Astrogeology ISIS cube (Version 3)
#' \item ISIS2 - USGS Astrogeology ISIS cube (Version 2)
#' \item PDS - NASA Planetary Data System
#' \item VICAR - MIPL VICAR file
#' \item TIL - EarthWatch .TIL
#' \item ERS - ERMapper .ers Labelled
#' \item JP2OpenJPEG - JPEG-2000 driver based on OpenJPEG library
#' \item L1B - NOAA Polar Orbiter Level 1b Data Set
#' \item FIT - FIT Image
#' \item GRIB - GRIdded Binary (.grb)
#' \item RMF - Raster Matrix Format
#' \item WCS - OGC Web Coverage Service
#' \item WMS - OGC Web Map Service
#' \item MSGN - EUMETSAT Archive native (.nat)
#' \item RST - Idrisi Raster A.1
#' \item INGR - Intergraph Raster
#' \item GSAG - Golden Software ASCII Grid (.grd)
#' \item GSBG - Golden Software Binary Grid (.grd)
#' \item GS7BG - Golden Software 7 Binary Grid (.grd)
#' \item COSAR - COSAR Annotated Binary Matrix (TerraSAR-X)
#' \item TSX - TerraSAR-X Product
#' \item COASP - DRDC COASP SAR Processor Raster
#' \item R - R Object Data Store
#' \item MAP - OziExplorer .MAP
#' \item PNM - Portable Pixmap Format (netpbm)
#' \item DOQ1 - USGS DOQ (Old Style)
#' \item DOQ2 - USGS DOQ (New Style)
#' \item ENVI - ENVI .hdr Labelled
#' \item EHdr - ESRI .hdr Labelled
#' \item GenBin - Generic Binary (.hdr Labelled)
#' \item PAux - PCI .aux Labelled
#' \item MFF - Vexcel MFF Raster
#' \item MFF2 - Vexcel MFF2 (HKV) Raster
#' \item FujiBAS - Fuji BAS Scanner Image
#' \item GSC - GSC Geogrid
#' \item FAST - EOSAT FAST Format
#' \item BT - VTP .bt (Binary Terrain) 1.3 Format
#' \item LAN - Erdas .LAN/.GIS
#' \item CPG - Convair PolGASP
#' \item IDA - Image Data and Analysis
#' \item NDF - NLAPS Data Format
#' \item EIR - Erdas Imagine Raw
#' \item DIPEx - DIPEx
#' \item LCP - FARSITE v.4 Landscape File (.lcp)
#' \item GTX - NOAA Vertical Datum .GTX
#' \item LOSLAS - NADCON .los/.las Datum Grid Shift
#' \item NTv2 - NTv2 Datum Grid Shift
#' \item CTable2 - CTable2 Datum Grid Shift
#' \item ACE2 - ACE2
#' \item SNODAS - Snow Data Assimilation System
#' \item KRO - KOLOR Raw
#' \item ROI_PAC - ROI_PAC raster
#' \item ISCE - ISCE raster
#' \item ARG - Azavea Raster Grid format
#' \item RIK - Swedish Grid RIK (.rik)
#' \item USGSDEM - USGS Optional ASCII DEM (and CDED)
#' \item GXF - GeoSoft Grid Exchange Format
#' \item NWT_GRD - Northwood Numeric Grid Format .grd/.tab
#' \item NWT_GRC - Northwood Classified Grid Format .grc/.tab
#' \item ADRG - ARC Digitized Raster Graphics
#' \item SRP - Standard Raster Product (ASRP/USRP)
#' \item BLX - Magellan topo (.blx)
#' \item Rasterlite - Rasterlite
#' \item PostGISRaster - PostGIS Raster driver
#' \item SAGA - SAGA GIS Binary Grid (.sdat)
#' \item KMLSUPEROVERLAY - Kml Super Overlay
#' \item XYZ - ASCII Gridded XYZ
#' \item HF2 - HF2/HFZ heightfield raster
#' \item PDF - Geospatial PDF
#' \item OZI - OziExplorer Image File
#' \item CTG - USGS LULC Composite Theme Grid
#' \item E00GRID - Arc/Info Export E00 GRID
#' \item ZMap - ZMap Plus Grid
#' \item NGSGEOID - NOAA NGS Geoid Height Grids
#' \item MBTiles - MBTiles
#' \item IRIS - IRIS data (.PPI, .CAPPi etc)
#' \item PLMOSAIC - Planet Labs Mosaic
#' \item CALS - CALS (Type 1)
#' \item WMTS - OGC Web Map Tile Service
#' \item ESRI Shapefile - ESRI Shapefile
#' \item MapInfo File - MapInfo File
#' \item UK .NTF - UK .NTF
#' \item OGD_SDTS - SDTS
#' \item S57 - IHO S-57 (ENC)
#' \item DGN - Microstation DGN
#' \item OGR_VRT - VRT - Virtual Datasource
#' \item REC - EPIInfo .REC
#' \item Memory - Memory
#' \item BNA - Atlas BNA
#' \item CSV - Comma Separated Value (.csv)
#' \item NAS - NAS - ALKIS
#' \item GML - Geography Markup Language
#' \item GPX - GPX
#' \item LIBKML - Keyhole Markup Language (LIBKML)
#' \item KML - Keyhole Markup Language (KML)
#' \item GeoJSON - GeoJSON
#' \item Interlis 1 - Interlis 1
#' \item Interlis 2 - Interlis 2
#' \item OGR_GMT - GMT ASCII Vectors (.gmt)
#' \item GPKG - GeoPackage
#' \item SQLite - SQLite / Spatialite
#' \item ODBC - ODBC
#' \item WAsP - WAsP .map format
#' \item PGeo - ESRI Personal GeoDatabase
#' \item MSSQLSpatial - Microsoft SQL Server Spatial Database
#' \item MySQL - MySQL
#' \item OpenFileGDB - ESRI FileGDB
#' \item XPlane - X-Plane/Flightgear aeronautical data
#' \item DXF - AutoCAD DXF
#' \item Geoconcept - Geoconcept
#' \item GeoRSS - GeoRSS
#' \item GPSTrackMaker - GPSTrackMaker
#' \item VFK - Czech Cadastral Exchange Data Format
#' \item PGDUMP - PostgreSQL SQL dump
#' \item OSM - OpenStreetMap XML and PBF
#' \item GPSBabel - GPSBabel
#' \item SUA - Tim Newport-Peace's Special Use Airspace Format
#' \item OpenAir - OpenAir
#' \item OGR_PDS - Planetary Data Systems TABLE
#' \item WFS - OGC WFS (Web Feature Service)
#' \item HTF - Hydrographic Transfer Vector
#' \item AeronavFAA - Aeronav FAA
#' \item Geomedia - Geomedia .mdb
#' \item EDIGEO - French EDIGEO exchange format
#' \item GFT - Google Fusion Tables
#' \item GME - Google Maps Engine
#' \item SVG - Scalable Vector Graphics
#' \item CouchDB - CouchDB / GeoCouch
#' \item Cloudant - Cloudant / CouchDB
#' \item Idrisi - Idrisi Vector (.vct)
#' \item ARCGEN - Arc/Info Generate
#' \item SEGUKOOA - SEG-P1 / UKOOA P1/90
#' \item SEG-Y - SEG-Y
#' \item ODS - Open Document/ LibreOffice / OpenOffice Spreadsheet
#' \item XLSX - MS Office Open XML spreadsheet
#' \item ElasticSearch - Elastic Search
#' \item Walk - Walk
#' \item CartoDB - CartoDB
#' \item SXF - Storage and eXchange Format
#' \item Selafin - Selafin
#' \item JML - OpenJUMP JML
#' \item PLSCENES - Planet Labs Scenes API
#' \item CSW - OGC CSW (Catalog Search for the Web)
#' \item IDF - INTREST Data Format
#' \item TIGER - U.S. Census TIGER/Line
#' \item AVCBin - Arc/Info Binary Coverage
#' \item AVCE00 - Arc/Info E00 (ASCII) Coverage
#' \item HTTP - HTTP Fetching Wrapper
#' }
#' @references GDAL website: \url{http://www.gdal.org/}
#' @author Alexander Brenning (R interface), Olaf Conrad / Andre Ringeler (SAGA module), Frank Warmerdam (GDAL)
#' @seealso \code{read.ascii.grid}, \code{rsaga.esri.to.sgrd}, \code{read.sgrd}, \code{read.Rd.grid}
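#' @examples
#' \dontrun{
#' # Illustrative sketch (file names are placeholders):
#' # import a GeoTIFF into SAGA grid format, writing dem.sgrd:
#' rsaga.import.gdal("dem.tif", "dem")
#' }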
#' @keywords spatial interface file
#' @export
rsaga.import.gdal = function( in.grid, out.grid, env = rsaga.env(), ... )
{
if (missing(out.grid)) {
out.grid = set.file.extension(in.grid, "")
out.grid = substr(out.grid, 1, nchar(out.grid) - 1)
}
if (env$version == "2.0.4") {
param = list( GRIDS = out.grid, FILE = in.grid )
} else {
param = list( GRIDS = out.grid, FILES = in.grid )
}
# Module name change with SAGA 2.2.3
module = "GDAL: Import Raster"
if (env$version == "2.2.3"){
module = "Import Raster"
}
rsaga.geoprocessor("io_gdal", module = module,
param = param, env = env, ...)
}
######## Module io_grid ########
#' Convert ESRI ASCII/binary grids to SAGA grids
#'
#' \code{rsaga.esri.to.sgrd} converts grid files from ESRI's ASCII (.asc) and binary (.flt) format to SAGA's (version 2) grid format (.sgrd).
#' @name rsaga.esri.to.sgrd
#' @param in.grids character vector of ESRI ASCII/binary grid files (default file extension: \code{.asc}); files should be located in folder \code{in.path}
#' @param out.sgrds character vector of output SAGA grid files; defaults to \code{in.grids} with file extension being replaced by \code{.sgrd}, which is also the default extension if file names without extension are specified; files will be placed in the current SAGA workspace (default: \code{\link{rsaga.env}()$workspace}, or \code{env$workspace} if an \code{env} argument is provided)
#' @param in.path folder with \code{in.grids}
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#'
#' If multiple \code{in.grids} are converted, the result will be a vector of numerical error codes of the same length, or the combination of the console outputs with \code{c()}.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module 1 from the SAGA library \code{io_grid}.
#' @seealso \code{\link{rsaga.esri.wrapper}} for an efficient way of applying RSAGA to ESRI ASCII/binary grids; \code{\link{rsaga.env}}
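#' @examples
#' \dontrun{
#' # Illustrative sketch (file names are placeholders):
#' rsaga.esri.to.sgrd("dem.asc", "dem.sgrd")
#' }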
#' @keywords spatial interface file
#' @export
rsaga.esri.to.sgrd = function( in.grids,
out.sgrds=set.file.extension(in.grids,".sgrd"), in.path, ... )
{
in.grids = default.file.extension(in.grids,".asc")
out.sgrds = default.file.extension(out.sgrds,".sgrd")
if (!missing(in.path))
in.grids = file.path(in.path,in.grids)
if (length(in.grids) != length(out.sgrds))
    stop("must have the same number of input and output grids")
res = c()
for (i in 1:length(in.grids))
res = c(res, rsaga.geoprocessor("io_grid", "Import ESRI Arc/Info Grid",
list(FILE=in.grids[i],GRID=out.sgrds[i]),...) )
invisible(res)
}
#' Convert SAGA grids to ESRI ASCII/binary grids
#'
#' \code{rsaga.sgrd.to.esri} converts grid files from SAGA's (version 2) grid format (.sgrd) to ESRI's ASCII (.asc) and binary (.flt) format.
#' @name rsaga.sgrd.to.esri
#' @param in.sgrds character vector of SAGA grid files (\code{.sgrd}) to be converted; files are expected to be found in folder \code{\link{rsaga.env}()$workspace}, or, if an optional \code{env} argument is provided, in \code{env$workspace}
#' @param out.grids character vector of ESRI ASCII/float output file names; defaults to \code{in.sgrds} with the file extension being replaced by \code{.asc} or \code{.flt}, depending on \code{format}. Files will be placed in folder \code{out.path}, existing files will be overwritten
#' @param out.path folder for \code{out.grids}
#' @param format output file format, either \code{"ascii"} (default; equivalent: \code{format=1}) for ASCII grids or \code{"binary"} (equivalent: \code{0}) for binary ESRI grids (\code{.flt}).
#' @param georef character: \code{"corner"} (equivalent numeric code: \code{0}) or \code{"center"} (default; equivalent: \code{1}). Determines whether the georeference will be related to the center or corner of its extreme lower left grid cell.
#' @param prec number of digits when writing floating point values to ASCII grid files; either a single number (to be replicated if necessary), or a numeric vector of length \code{length(in.grids)}
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module 0 from the SAGA library \code{io_grid}.
#' @seealso \code{\link{rsaga.esri.wrapper}} for an efficient way of applying RSAGA to ESRI ASCII/binary grids; \code{\link{rsaga.env}}
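#' @examples
#' \dontrun{
#' # Illustrative sketch (file names are placeholders): export a SAGA grid
#' # to an ESRI ASCII grid with 3 decimal places:
#' rsaga.sgrd.to.esri("dem.sgrd", "dem.asc", format = "ascii", prec = 3)
#' }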
#' @keywords spatial interface file
#' @export
rsaga.sgrd.to.esri = function( in.sgrds, out.grids, out.path,
format="ascii", georef="corner", prec=5, ... )
{
in.sgrds = default.file.extension(in.sgrds,".sgrd")
format = match.arg.ext(format,choices=c("binary","ascii"),base=0,ignore.case=TRUE,numeric=TRUE)
georef = match.arg.ext(georef,choices=c("corner","center"),base=0,ignore.case=TRUE,numeric=TRUE)
if (missing(out.grids))
out.grids = set.file.extension(in.sgrds, c(".flt",".asc")[format+1])
out.grids = default.file.extension(out.grids, c(".flt",".asc")[format+1])
if (!missing(out.path))
out.grids = file.path(out.path,out.grids)
if (length(out.grids) != length(in.sgrds))
    stop("must have the same number of input and output grids")
if ((length(prec)==1) & (length(in.sgrds)>1))
prec = rep(prec,length(in.sgrds))
if (length(prec) != length(in.sgrds))
stop("must have same number of in-/output grids and 'prec' parameters (or length(prec)==1)")
res = c()
for (i in 1:length(in.sgrds))
res = c(res, rsaga.geoprocessor("io_grid", "Export ESRI Arc/Info Grid",
list( GRID=in.sgrds[i], FILE=out.grids[i], FORMAT=format, GEOREF=georef, PREC=prec[i]),
...))
invisible(res)
}
#
######## Module ta_morphometry ########
#' Slope, Aspect, Curvature
#'
#' Calculates local morphometric terrain attributes (i.e. slope, aspect, and curvatures). Intended for use with SAGA v 2.1.1+. For older versions use \code{\link{rsaga.local.morphometry}}.
#' @name rsaga.slope.asp.curv
#' @param in.dem input: digital elevation model as SAGA grid file (\code{.sgrd})
#' @param out.slope optional output: slope
#' @param out.aspect optional output: aspect
#' @param out.cgene optional output: general curvature
#' @param out.cprof optional output: profile curvature (vertical curvature; degrees)
#' @param out.cplan optional output: plan curvature (horizontal curvature; degrees)
#' @param out.ctang optional output: tangential curvature (degrees)
#' @param out.clong optional output: longitudinal curvature (degrees); Zevenbergen & Thorne (1987) refer to this as profile curvature
#' @param out.ccros optional output: cross-sectional curvature (degrees); Zevenbergen & Thorne (1987) refer to this as the plan curvature
#' @param out.cmini optional output: minimal curvature (degrees)
#' @param out.cmaxi optional output: maximal curvature (degrees)
#' @param out.ctota optional output: total curvature (degrees)
#' @param out.croto optional output: flow line curvature (degrees)
#' @param method character algorithm (see References):
#' \itemize{
#' \item [0] Maximum Slope - Travis et al. (1975) (\code{"maxslope"})
#' \item [1] Max. Triangle Slope - Tarboton (1997) (\code{"maxtriangleslope"})
#' \item [2] Least Squares Fit Plane - Costa-Cabral & Burgess (1996) (\code{"lsqfitplane"})
#' \item [3] Fit 2nd Degree Polynomial - Evans (1979) (\code{"poly2evans"})
#' \item [4] Fit 2nd Degree Polynomial - Heerdegen and Beran (1982) (\code{"poly2heerdegen"})
#' \item [5] Fit 2nd Degree Polynomial - Bauer et al. (1985) (\code{"poly2bauer"})
#' \item [6] default: Fit 2nd Degree Polynomial - Zevenbergen & Thorne (1987) (\code{"poly2zevenbergen"})
#' \item [7] Fit 3rd Degree Polynomial - Haralick (1983) (\code{"poly3haralick"})}
#' @param unit.slope character or numeric (default \code{"radians"}):
#' \itemize{
#' \item [0] \code{"radians"}
#' \item [1] \code{"degrees"}
#' \item [2] \code{"percent"}}
#' @param unit.aspect character or numeric (default is 0, or \code{"radians"}):
#' \itemize{
#' \item [0] \code{"radians"}
#' \item [1] \code{"degrees"}}
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @details Profile and plan curvature calculation (\code{out.cprof}, \code{out.cplan}) changed in SAGA GIS 2.1.1+ compared to earlier versions. See the following thread on sourceforge.net for an ongoing discussion: \url{http://sourceforge.net/p/saga-gis/discussion/354013/thread/e9d07075/#5727}
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @references General references:
#'
#' Jones KH (1998) A comparison of algorithms used to compute hill slope as a property of the DEM. Computers and Geosciences. 24 (4): 315-323.
#'
#' References on specific methods:
#'
#' Maximum Slope:
#'
#' Travis, M.R., Elsner, G.H., Iverson, W.D., Johnson, C.G. (1975): VIEWIT: computation of seen areas, slope, and aspect for land-use planning. USDA F.S. Gen. Tech. Rep. PSW-11/1975, 70 p. Berkeley, California, U.S.A.
#'
#' Maximum Triangle Slope:
#'
#' Tarboton, D.G. (1997): A new method for the determination of flow directions and upslope areas in grid digital elevation models. Water Ressources Research, 33(2): 309-319.
#'
#' Least Squares or Best Fit Plane:
#'
#' Beasley, D.B., Huggins, L.F. (1982): ANSWERS: User's manual. U.S. EPA-905/9-82-001, Chicago, IL, 54 pp.
#'
#' Costa-Cabral, M., Burges, S.J. (1994): Digital Elevation Model Networks (DEMON): a model of flow over hillslopes for computation of contributing and dispersal areas. Water Resources Research, 30(6): 1681-1692.
#'
#' Fit 2nd Degree Polynomial:
#'
#' Evans, I.S. (1979): An integrated system of terrain analysis and slope mapping. Final Report on grant DA-ERO-591-73-G0040. University of Durham, England.
#'
#' Bauer, J., Rohdenburg, H., Bork, H.-R. (1985): Ein Digitales Reliefmodell als Vorraussetzung fuer ein deterministisches Modell der Wasser- und Stoff-Fluesse. Landschaftsgenese und Landschaftsoekologie, H. 10, Parameteraufbereitung fuer deterministische Gebiets-Wassermodelle, Grundlagenarbeiten zur Analyse von Agrar-Oekosystemen, eds.: Bork, H.-R., Rohdenburg, H., p. 1-15.
#'
#' Heerdegen, R.G., Beran, M.A. (1982): Quantifying source areas through land surface curvature. Journal of Hydrology, 57.
#'
#' Zevenbergen, L.W., Thorne, C.R. (1987): Quantitative analysis of land surface topography. Earth Surface Processes and Landforms, 12: 47-56.
#'
#' Fit 3rd Degree Polynomial:
#'
#' Haralick, R.M. (1983): Ridge and valley detection on digital images. Computer Vision, Graphics and Image Processing, 22(1): 28-38.
#'
#' For a discussion on the calculation of slope by ArcGIS check these links:
#'
#' \url{http://forums.esri.com/Thread.asp?c=93&f=1734&t=239914}
#'
#' \url{http://webhelp.esri.com/arcgisdesktop/9.2/index.cfm?topicname=how_slope_works}
#' @author Alexander Brenning and Donovan Bangs (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.local.morphometry}}, \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # Simple slope, aspect, and general curvature in degrees:
#' rsaga.slope.asp.curv("lican.sgrd", "slope", "aspect", "curvature",
#' method = "maxslope", unit.slope = "degrees", unit.aspect = "degrees")
#' # same for ASCII grids (default extension .asc):
#' rsaga.esri.wrapper(rsaga.slope.asp.curv,
#' in.dem="lican", out.slope="slope",
#' out.aspect = "aspect", out.cgene = "curvature",
#' method="maxslope", unit.slope = "degrees", unit.aspect = "degrees")
#' }
#' @keywords spatial interface
#' @export
rsaga.slope.asp.curv = function(in.dem,
out.slope, out.aspect, out.cgene,
out.cprof, out.cplan, out.ctang,
out.clong, out.ccros, out.cmini,
out.cmaxi, out.ctota, out.croto,
method = "poly2zevenbergen",
unit.slope = "radians", unit.aspect = "radians",
env = rsaga.env(), ...) {
    if (!(env$version %in% c("2.1.1", "2.1.2", "2.1.3", "2.1.4",
                             "2.2.0", "2.2.1", "2.2.2", "2.2.3"))) {
stop("rsaga.slope.asp.curv only for SAGA GIS 2.1.1+;\n",
"use rsaga.local.morphometry for older versions of SAGA GIS")
}
in.dem = default.file.extension(in.dem, ".sgrd")
method.choices = c("maxslope","maxtriangleslope","lsqfitplane", "poly2evans",
"poly2bauer","poly2heerdegen","poly2zevenbergen","poly3haralick")
    if (is.numeric(method))
stop("Numeric 'method' argument not supported with SAGA GIS 2.1.1+;\n",
"Use character name of methods - see help(rsaga.slope.asp.curv) for options")
method = match.arg.ext(method, method.choices, numeric=TRUE, base=0)
unit.slope.choices = c("radians", "degrees", "percent")
unit.slope = match.arg.ext(unit.slope, unit.slope.choices, numeric=TRUE, base=0)
unit.aspect.choices = c("radians", "degrees")
unit.aspect = match.arg.ext(unit.aspect, unit.aspect.choices, numeric=TRUE, base=0)
if (missing(out.aspect)) {
out.aspect = tempfile()
on.exit(unlink(paste(out.aspect,".*",sep="")), add = TRUE)
}
if (missing(out.slope)) {
out.slope = tempfile()
on.exit(unlink(paste(out.slope,".*",sep="")), add = TRUE)
}
param = list(ELEVATION=in.dem, SLOPE=out.slope, ASPECT = out.aspect)
if(!missing(out.cgene))
param = c(param, C_GENE = out.cgene)
if(!missing(out.cprof))
param = c(param, C_PROF = out.cprof)
if(!missing(out.cplan))
param =c(param, C_PLAN = out.cplan)
if(!missing(out.ctang))
param = c(param, C_TANG = out.ctang)
if(!missing(out.clong))
param = c(param, C_LONG = out.clong)
if(!missing(out.ccros))
param = c(param, C_CROS = out.ccros)
if(!missing(out.cmini))
param = c(param, C_MINI = out.cmini)
if(!missing(out.cmaxi))
param = c(param, C_MAXI = out.cmaxi)
if(!missing(out.ctota))
param = c(param, C_TOTA = out.ctota)
if(!missing(out.croto))
param = c(param, C_ROTO = out.croto)
param = c(param, METHOD=method, UNIT_SLOPE=unit.slope, UNIT_ASPECT=unit.aspect)
module = "Slope, Aspect, Curvature"
rsaga.geoprocessor("ta_morphometry", module, param, env = env, ...)
if (!missing(out.cprof) | !missing(out.cplan))
warning("Plan and profile curvature calculations have changed with SAGA 2.1.1+\n",
"See help(rsaga.slope.asp.curv) for more information")
}
#' Local Morphometry
#'
#' Calculates local morphometric terrain attributes (i.e. slope, aspect and curvatures). Intended for use with SAGA versions 2.1.0 and older. Use \code{\link{rsaga.slope.asp.curv}} for SAGA 2.1.1+
#' @name rsaga.local.morphometry
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.slope optional output: slope (in radians)
#' @param out.aspect optional output: aspect (in radians; north=0, clockwise angles)
#' @param out.curv optional output: curvature
#' @param out.hcurv optional output: horizontal curvature (plan curvature)
#' @param out.vcurv optional output: vertical curvature (profile curvature)
#' @param method character (or numeric): algorithm (see References):
#' \itemize{
#' \item [0] Maximum Slope - Travis et al. (1975) (\code{"maxslope"}, or 0)
#' \item [1] Max. Triangle Slope - Tarboton (1997) (\code{"maxtriangleslope"}, or 1)
#' \item [2] Least Squares Fit Plane - Costa-Cabral and Burgess (1996) (\code{"lsqfitplane"}, or 2)
#' \item [3] Fit 2nd Degree Polynomial - Bauer et al. (1985) (\code{"poly2bauer"}, or 3)
#' \item [4] Fit 2nd Degree Polynomial - Heerdegen and Beran (1982) (\code{"poly2heerdegen"}, or 4)
#' \item [5] default: Fit 2nd Degree Polynomial - Zevenbergen and Thorne (1987) (\code{"poly2zevenbergen"}, or 5)
#' \item [6] Fit 3rd Degree Polynomial - Haralick (1983) (\code{"poly3haralick"}, or 6).}
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @references For references and algorithm changes in SAGA GIS 2.1.1+ see \code{\link{rsaga.slope.asp.curv}}.
#' @author Alexander Brenning and Donovan Bangs (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.slope.asp.curv}}, \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # a simple slope algorithm:
#' rsaga.slope("lican.sgrd","slope","maxslope")
#' # same for ASCII grids (default extension .asc):
#' rsaga.esri.wrapper(rsaga.slope,in.dem="lican",out.slope="slope",method="maxslope")
#' }
#' @keywords spatial interface
#' @export
rsaga.local.morphometry = function( in.dem,
out.slope, out.aspect, out.curv, out.hcurv, out.vcurv,
method = "poly2zevenbergen", env = rsaga.env(), ...)
{
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.slope=out.slope, out.aspect=out.aspect,
out.cgene=out.curv, out.cplan=out.hcurv, out.cprof=out.vcurv,
method=method, env=env, ... )
warning("rsaga.local.morphometry specific to SAGA versions < 2.1.1\n",
"Translating provided arguments and using rsaga.slope.asp.curv\n",
"Note: order of numeric methods have changed with SAGA 2.1.1+")
} else {
in.dem = default.file.extension(in.dem,".sgrd")
choices = c("maxslope","maxtriangleslope","lsqfitplane",
"poly2bauer","poly2heerdegen","poly2zevenbergen","poly3haralick")
method = match.arg.ext(method,choices,numeric=TRUE,base=0)
if (missing(out.aspect)) {
out.aspect = tempfile()
on.exit(unlink(paste(out.aspect,".*",sep="")), add = TRUE)
}
if (missing(out.slope)) {
out.slope = tempfile()
on.exit(unlink(paste(out.slope,".*",sep="")), add = TRUE)
}
param = list(ELEVATION=in.dem, SLOPE=out.slope, ASPECT=out.aspect)
if (!missing(out.curv))
param = c(param, CURV=out.curv)
if (!missing(out.hcurv))
param = c(param, HCURV=out.hcurv)
if (!missing(out.vcurv))
param = c(param, VCURV=out.vcurv)
param = c(param, METHOD=method)
module = "Slope, Aspect, Curvature"
if (any(c("2.0.4","2.0.5","2.0.6") == env$version)) module = "Local Morphometry"
rsaga.geoprocessor("ta_morphometry", module, param, env = env, ...)
}
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))){
if (!missing(out.hcurv) | !missing(out.vcurv))
warning("Plan and profile curvature calculations have changed with SAGA 2.1.1+\n",
"See help(rsaga.slope.asp.curv) for more information")
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.slope
#' @export
rsaga.slope = function( in.dem, out.slope, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.slope))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.slope=out.slope, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.slope=out.slope, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.aspect
#' @export
rsaga.aspect = function( in.dem, out.aspect, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.aspect))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.aspect=out.aspect, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.aspect=out.aspect, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.curvature
#' @export
rsaga.curvature = function( in.dem, out.curv, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.curv))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.cgene=out.curv, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.curv=out.curv, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.plan.curvature
#' @export
rsaga.plan.curvature = function( in.dem, out.hcurv, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.hcurv))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.cplan=out.hcurv, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.hcurv=out.hcurv, method=method, env = env, ... )
}
}
#' @rdname rsaga.local.morphometry
#' @name rsaga.profile.curvature
#' @export
rsaga.profile.curvature = function( in.dem, out.vcurv, method = "poly2zevenbergen", env = rsaga.env(), ... ) {
stopifnot(!missing(out.vcurv))
if (!(env$version %in% c("2.0.4","2.0.5","2.0.6","2.0.7","2.0.8","2.0.9","2.1.0"))) {
rsaga.slope.asp.curv( in.dem=in.dem, out.cprof=out.vcurv, method=method, env = env, ... )
}
else {
rsaga.local.morphometry( in.dem=in.dem, out.vcurv=out.vcurv, method=method, env = env, ... )
}
}
######## Module ta_preprocessor ########
#' Fill Sinks
#'
#' Several methods for filling closed depressions in digital elevation models that would affect hydrological modeling.
#' @name rsaga.fill.sinks
#' @param in.dem Input: digital elevation model (DEM) as SAGA grid file (default extension: \code{.sgrd}).
#' @param out.dem Output: filled, depression-free DEM (SAGA grid file). Existing files will be overwritten!
#' @param method The depression filling algorithm to be used (character). One of \code{"planchon.darboux.2001"} (default), \code{"wang.liu.2006"}, or \code{"xxl.wang.liu.2006"}.
#' @param out.flowdir (only for \code{"wang.liu.2006"}): Optional output grid file for computed flow directions (see Notes).
#' @param out.wshed (only for \code{"wang.liu.2006"}): Optional output grid file for watershed basins.
#' @param minslope Minimum slope angle (in degree) preserved between adjacent grid cells (default value of \code{0.01} only for \code{method="planchon.darboux.2001"}, otherwise no default).
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details This function bundles three SAGA modules for filling sinks using three different algorithms (\code{method} argument).
#'
#' \code{"planchon.darboux.2001"}: The algorithm of Planchon and Darboux (2001) consists of increasing the elevation of pixels in closed depressions until the sink disappears and a mininum slope angle of \code{minslope} (default: \code{0.01} degree) is established.
#'
#' \code{"wang.liu.2006"}: This module uses an algorithm proposed by Wang and Liu (2006) to identify and fill surface depressions in DEMs. The method was enhanced to allow the creation of hydrologically sound elevation models, i.e. not only to fill the depressions but also to preserve a downward slope along the flow path. If desired, this is accomplished by preserving a minimum slope gradient (and thus elevation difference) between cells. This is the fully featured version of the module creating a depression-free DEM, a flow path grid and a grid with watershed basins. If you encounter problems processing large data sets (e.g. LIDAR data) with this module try the basic version (\code{xxl.wang.lui.2006}).
#'
#' \code{"xxl.wang.liu.2006"}: This modified algorithm after Wang and Liu (2006) is designed to work on large data sets.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#'
#' The function writes SAGA grid files containing of the depression-free preprocessed DEM, and optionally the flow directions and watershed basins.
#' @references Planchon, O., and F. Darboux (2001): A fast, simple and versatile algorithm to fill the depressions of digital elevation models. Catena 46: 159-176.
#'
#' Wang, L. & H. Liu (2006): An efficient method for identifying and filling surface depressions in digital elevation models for hydrologic analysis and modelling. International Journal of Geographical Information Science, Vol. 20, No. 2: 193-213.
#' @author Alexander Brenning (R interface), Volker Wichmann (SAGA module)
#' @note The flow directions are coded as 0 = north, 1 = northeast, 2 = east, ..., 7 = northwest.
#'
#' If \code{minslope=0}, depressions will only be filled until a horizontal surface is established, which may not be helpful for hydrological modeling.
#' @seealso \code{\link{rsaga.sink.removal}}, \code{\link{rsaga.sink.route}}.
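#' @examples
#' \dontrun{
#' # Hedged sketches; "dem.sgrd" and the output names are placeholders,
#' # not data shipped with RSAGA:
#' # fill depressions while preserving a minimum slope of 0.01 degree:
#' rsaga.fill.sinks("dem.sgrd", "dem-filled.sgrd",
#'                  method = "planchon.darboux.2001", minslope = 0.01)
#' # Wang & Liu variant, additionally writing flow directions and watershed basins:
#' rsaga.fill.sinks("dem.sgrd", "dem-filled.sgrd", method = "wang.liu.2006",
#'                  out.flowdir = "flowdir.sgrd", out.wshed = "wshed.sgrd")
#' }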
#' @keywords spatial interface
#' @export
rsaga.fill.sinks = function(in.dem,out.dem,
method="planchon.darboux.2001", out.flowdir, out.wshed, minslope, ...)
{
stopifnot(is.character(method))
method = match.arg.ext(method, ignore.case=TRUE, numeric=TRUE, base=2,
choices=c("planchon.darboux.2001","wang.liu.2006","xxl.wang.liu.2006"))
in.dem = default.file.extension(in.dem,".sgrd")
stopifnot(!missing(out.dem))
if (missing(minslope)) minslope = NULL
if (method==2) {
param = list( DEM=in.dem, RESULT=out.dem )
        if (is.null(minslope)) minslope = 0.01
minslope = as.numeric(minslope)
method = "Fill Sinks (Planchon/Darboux, 2001)"
} else if (method==3) {
if (missing(out.flowdir)) {
out.flowdir = tempfile()
on.exit(unlink(paste(out.flowdir,".*",sep="")), add = TRUE)
}
if (missing(out.wshed)) {
out.wshed = tempfile()
on.exit(unlink(paste(out.wshed,".*",sep="")), add = TRUE)
}
param = list(ELEV=in.dem, FILLED=out.dem, FDIR=out.flowdir, WSHED=out.wshed)
method = "Fill Sinks (Wang & Liu)"
} else if (method==4) {
param = list(ELEV=in.dem, FILLED=out.dem)
method = "Fill Sinks XXL (Wang & Liu)"
}
if (!is.null(minslope)) param = c( param, MINSLOPE=minslope )
rsaga.geoprocessor("ta_preprocessor", method, param, ...)
}
#' Sink Drainage Route Detection
#'
#' Sink drainage route detection.
#' @name rsaga.sink.route
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.sinkroute output: sink route grid file: non-sinks obtain a value of 0, sinks are assigned an integer between 0 and 8 indicating the direction to which flow from this sink should be routed
#' @param threshold logical: use a threshold value?
#' @param thrsheight numeric: threshold value (default: \code{100})
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note I assume that flow directions are coded as 0 = north, 1 = northeast, 2 = east, ..., 7 = northwest, as in \code{\link{rsaga.fill.sinks}}.
#' @seealso \code{\link{rsaga.sink.removal}}
#' @examples
#' \dontrun{rsaga.sink.route("dem","sinkroute")
#' rsaga.sink.removal("dem","sinkroute","dem-preproc",method="deepen")}
#' @keywords spatial interface
#' @export
rsaga.sink.route = function(in.dem, out.sinkroute,
threshold, thrsheight = 100, ...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( ELEVATION=in.dem, SINKROUTE=out.sinkroute )
if (!missing(threshold)) {
if (threshold) param = c( param, THRESHOLD="" )
}
# I guess thrsheight is redundant if threshold is missing/false:
param = c( param, THRSHEIGHT=as.numeric(thrsheight) )
rsaga.geoprocessor("ta_preprocessor", "Sink Drainage Route Detection", param, ...)
# was: module = 0
}
#' Sink Removal
#'
#' Remove sinks from a digital elevation model by deepening drainage routes or filling sinks.
#' @name rsaga.sink.removal
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param in.sinkroute optional input: sink route grid file
#' @param out.dem output: modified DEM
#' @param method character string or numeric value specifying the algorithm (partial string matching will be applied): \code{"deepen drainage routes"} (or 0) reduces the elevation of pixels in order to achieve drainage out of the former sinks; \code{"fill sinks"} (or 1) fills sinks until none are left
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module 1 from SAGA library \code{ta_preprocessor}.
#' @seealso \code{\link{rsaga.sink.route}}, \code{\link{rsaga.fill.sinks}}
#' @examples
#' \dontrun{rsaga.sink.route("dem","sinkroute")
#' rsaga.sink.removal("dem","sinkroute","dem-preproc",method="deepen")}
#' @keywords spatial interface
#' @export
rsaga.sink.removal = function(in.dem,in.sinkroute,out.dem,method="fill",...)
{
in.dem = default.file.extension(in.dem,".sgrd")
method = match.arg.ext(method,c("deepen drainage routes","fill sinks"),ignore.case=TRUE,numeric=TRUE)
param = list( DEM=in.dem )
if (!missing(in.sinkroute)) {
in.sinkroute = default.file.extension(in.sinkroute,".sgrd")
param = c(param, SINKROUTE=in.sinkroute)
}
param = c( param, DEM_PREPROC=out.dem, METHOD=method )
rsaga.geoprocessor("ta_preprocessor", "Sink Removal", param, ...)
}
######## Module grid_tools ########
#' SAGA Modules Close Gaps and Close One Cell Gaps
#'
#' Close (Interpolate) Gaps
#' @name rsaga.close.gaps
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.dem output: DEM grid file without no-data values (gaps). Existing files will be overwritten!
#' @param threshold tension threshold for adjusting the interpolator (default: 0.1)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @details \code{rsaga.close.one.cell.gaps} only fills gaps whose neighbor grid cells have non-missing data.
#'
#' In \code{rsaga.close.gaps}, larger tension thresholds can be used to reduce overshoots and undershoots in the surfaces used to fill (interpolate) the gaps.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses modules 7 (\code{rsaga.close.gaps}) and 6 (\code{rsaga.close.one.cell.gaps}) from the SAGA library \code{grid_tools}.
#'
#' SAGA GIS 2.0.5+ has a new additional module \code{Close Gaps with Spline}, which
#' can be accessed using \code{\link{rsaga.geoprocessor}} (currently no R wrapper
#' available). See \code{rsaga.get.usage("grid_tools","Close Gaps with Spline")}
#' or in version 2.1.0+ call \code{rsaga.html.help("grid_tools","Close Gaps with Spline")}.
#' @seealso \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # using SAGA grids:
#' rsaga.close.gaps("rawdem.sgrd","dem.sgrd")
#' # using ASCII grids:
#' rsaga.esri.wrapper(rsaga.close.gaps,in.dem="rawdem",out.dem="dem")
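#' # hedged sketch: a larger tension threshold to reduce over- and undershoots
#' # in the interpolated gap surfaces ("rawdem.sgrd" is a placeholder grid):
#' rsaga.close.gaps("rawdem.sgrd", "dem.sgrd", threshold = 1.0)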
#' }
#' @keywords spatial interface
#' @export
rsaga.close.gaps = function(in.dem,out.dem,threshold=0.1,...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( INPUT=in.dem, RESULT=out.dem, THRESHOLD=as.numeric(threshold) )
rsaga.geoprocessor("grid_tools", "Close Gaps", param, ...)
}
#' @rdname rsaga.close.gaps
#' @name rsaga.close.one.cell.gaps
#' @keywords spatial interface
#' @export
rsaga.close.one.cell.gaps = function(in.dem,out.dem,...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( INPUT = in.dem, RESULT = out.dem )
rsaga.geoprocessor("grid_tools", "Close One Cell Gaps",
param, ...)
}
######## Module ta_lighting ########
#' Analytical hillshading
#'
#' Analytical hillshading calculation.
#' @name rsaga.hillshade
#' @param in.dem Input digital elevation model (DEM) as SAGA grid file (default extension: \code{.sgrd}).
#' @param out.grid Output hillshading grid (SAGA grid file). Existing files will be overwritten!
#' @param method Available choices (character or numeric): \code{"standard"} (or \code{0} - default), \code{"max90deg.standard"} (\code{1}), \code{"combined.shading"} (\code{2}), \code{"ray.tracing"} (\code{3}). See Details.
#' @param azimuth Direction of the light source, measured in degree clockwise from the north direction; default 315, i.e. northwest.
#' @param declination Declination of the light source, measured in degree above the horizon (default 45).
#' @param exaggeration Vertical exaggeration of elevation (default: 4). The terrain exaggeration factor allows increasing the shading contrasts in flat areas.
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details The Analytical Hillshading algorithm is based on the angle between the surface and the incoming light beams, measured in radians.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note While the default azimuth of 315 degrees (northwest) is not physically meaningful in the northern hemisphere, a northwesterly light source is required to properly depict relief in hillshading images. Physically correct southerly light sources result in hillshades that most people would perceive as inverted: hills look like depressions, mountain chains like troughs.
#' @seealso \code{\link{rsaga.solar.radiation}}, \code{\link{rsaga.insolation}}
#' @examples
#' \dontrun{rsaga.hillshade("dem.sgrd","hillshade")}
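#' # Hedged sketch illustrating the light-source arguments discussed in the Note;
#' # "dem.sgrd" is a placeholder grid, not shipped example data:
#' \dontrun{rsaga.hillshade("dem.sgrd", "hillshade", azimuth = 315,
#'    declination = 30, exaggeration = 8)}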
#' @keywords spatial interface
#' @export
rsaga.hillshade = function(in.dem, out.grid,
method="standard", azimuth=315, declination=45, exaggeration=4, ...)
{
in.dem = default.file.extension(in.dem,".sgrd")
out.grid = default.file.extension(out.grid,".sgrd")
method = match.arg.ext(method, numeric=TRUE, ignore.case=TRUE, base=0,
choices=c("standard","max90deg.standard","combined.shading","ray.tracing"))
param = list(ELEVATION=in.dem, SHADE=out.grid, METHOD=method,
AZIMUTH=azimuth, DECLINATION=declination, EXAGGERATION=exaggeration)
rsaga.geoprocessor("ta_lighting", "Analytical Hillshading", param, ...)
# was: module = 0
}
#' Potential incoming solar radiation
#'
#' This function calculates the potential incoming solar radiation in an area using different atmospheric models; module available in SAGA GIS 2.0.6+.
#' @name rsaga.pisr
#' @param in.dem name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param in.svf.grid Optional input grid in SAGA format: Sky View Factor; see also \code{local.svf}
#' @param in.vapour.grid Optional input grid in SAGA format: Water vapour pressure (mbar); see also argument \code{hgt.water.vapour.pressure}
#' @param in.latitude.grid Optional input grid in SAGA format: Latitude (degree) of each grid cell
#' @param in.longitude.grid see \code{in.latitude.grid}
#' @param out.direct.grid Output grid: Direct insolation (unit selected by \code{unit} argument)
#' @param out.diffuse.grid Output grid: Diffuse insolation
#' @param out.total.grid Optional output grid: Total insolation, i.e. sum of direct and diffuse incoming solar radiation
#' @param out.ratio.grid Optional output grid: Direct to diffuse ratio
#' @param out.duration Optional output grid: Duration of insolation
#' @param out.sunrise Optional output grid: time of sunrise; only calculated if time span is set to single day
#' @param out.sunset Time of sunset; see \code{out.sunrise}
#' @param local.svf logical (default: \code{TRUE}): if \code{TRUE}, use a sky view factor based on local slope (after Oke, 1988) when no sky view factor grid is provided in \code{in.svf.grid}
#' @param latitude Geographical latitude in degree North (negative values indicate southern hemisphere)
#' @param unit unit of insolation output grids: \code{"kWh/m2"} (default) \code{"kJ/m2"}, or \code{"J/cm2"}
#' @param solconst solar constant, defaults to 1367 W/m2
#' @param enable.bending logical (default: \code{FALSE}): incorporate effects of planetary bending?
#' @param bending.radius Planetary radius, default \code{6366737.96}
#' @param bending.lat.offset if bending is enabled: latitudinal reference is \code{"user"}-defined (default), or relative to \code{"top"}, \code{"center"} or \code{"bottom"} of grid?
#' @param bending.lat.ref.user user-defined lat. reference for bending, see \code{bending.lat.offset}
#' @param bending.lon.offset longitudinal reference, i.e. local time, is \code{"user"}-defined, or relative to \code{"top"}, \code{"center"} (default) or \code{"bottom"} of grid?
#' @param bending.lon.ref.user user-defined reference for local time (Details??)
#' @param method specifies how the atmospheric components should be accounted for: either based on the height of atmosphere and vapour pressure (\code{"height"}, or numeric code 0), or air pressure, water and dust content (\code{"components"}, code 1), or lumped atmospheric transmittance (\code{"lumped"}, code \code{2})
#' @param hgt.atmosphere Height of atmosphere (in m); default 12000 m
#' @param hgt.water.vapour.pressure Water vapour pressure in mbar (default 10 mbar); This value is used if no vapour pressure grid is given in argument \code{in.vapour.grid}
#' @param cmp.pressure atmospheric pressure in mbar, defaults to 1013 mbar
#' @param cmp.water.content water content of a vertical slice of the atmosphere in cm: between 1.5 and 1.7cm, average 1.68cm (default)
#' @param cmp.dust dust factor in ppm; defaults to 100 ppm
#' @param lmp.transmittance transmittance of the atmosphere in percent; usually between 60 (humid areas) and 80 percent (deserts)
#' @param time.range numeric vector of length 2: time span (hours of the day) for numerical integration
#' @param time.step time step in hours for numerical integration
#' @param start.date list of length two, giving the start date in \code{day} and \code{month} components as numbers; these numbers are one-based (SAGA_CMD uses zero-based numbers internally), i.e. Jan. 1st is \code{list(day=1,month=1)}
#' @param end.date see \code{start.date}
#' @param day.step if \code{days} indicates a range of days, this specifies the time step (number of days) for calculating the incoming solar radiation
#' @param env RSAGA geoprocessing environment obtained with \code{\link{rsaga.env}}; this argument is required for version control (see Note)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details According to SAGA GIS 2.0.7 documentation, "Most options should do well, but TAPES-G based diffuse irradiance calculation ("Atmospheric Effects" methods 2 and 3) needs further revision!" I.e. be careful with \code{method = "components"} and \code{method = "lumped"}.
#' @references
#' Boehner, J., Antonic, O. (2009): Land surface parameters specific to topo-climatology. In: Hengl, T. and Reuter, H. I. (eds.): Geomorphometry - Concepts, Software, Applications. Elsevier.
#'
#' Oke, T.R. (1988): Boundary layer climates. London, Taylor and Francis.
#'
#' Wilson, J.P., Gallant, J.C. (eds.), 2000: Terrain analysis - principles and applications. New York, John Wiley and Sons.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This module is computationally very intensive (depending on the size of the grid and the time resolution, of course). The performance seems to have much improved in SAGA GIS 2.1.0, which by default runs this module in multicore mode (at least the release candidate 1 for Windows does).
#'
#' SAGA_CMD uses zero-based days and months, but this R function uses the standard one-based days and months (e.g. day 1 is the first day of the month, month 1 is January) and translates to the SAGA system.
#'
#' This function uses module Potential Incoming Solar Radiation from SAGA library \code{ta_lighting} in SAGA version 2.0.6+.
#' @seealso \code{\link{rsaga.hillshade}}; for similar modules in older SAGA versions (pre-2.0.6) see \code{\link{rsaga.solar.radiation}} and \code{\link{rsaga.insolation}}
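#' @examples
#' \dontrun{
#' # Hedged sketch (SAGA GIS 2.0.6 - 2.2.1); "dem.sgrd" and the output names are
#' # placeholders: direct/diffuse/total radiation for March 21 at 43 degrees North,
#' # using the lumped atmospheric transmittance model:
#' rsaga.pisr("dem.sgrd", out.direct.grid = "direct.sgrd",
#'    out.diffuse.grid = "diffuse.sgrd", out.total.grid = "total.sgrd",
#'    latitude = 43, unit = "kWh/m2", method = "lumped",
#'    lmp.transmittance = 70, time.step = 0.5,
#'    start.date = list(day = 21, month = 3))
#' }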
#' @keywords spatial interface
#' @export
rsaga.pisr = function(in.dem, in.svf.grid = NULL, in.vapour.grid = NULL,
in.latitude.grid = NULL, in.longitude.grid = NULL,
out.direct.grid, out.diffuse.grid, out.total.grid = NULL,
out.ratio.grid = NULL, out.duration, out.sunrise, out.sunset,
local.svf = TRUE, latitude,
unit=c("kWh/m2","kJ/m2","J/cm2"), solconst=1367.0,
enable.bending = FALSE, bending.radius = 6366737.96,
bending.lat.offset = "user", bending.lat.ref.user = 0,
bending.lon.offset = "center", bending.lon.ref.user = 0,
method = c("height","components","lumped"),
hgt.atmosphere = 12000, hgt.water.vapour.pressure = 10,
cmp.pressure = 1013, cmp.water.content = 1.68, cmp.dust = 100,
lmp.transmittance = 70,
time.range = c(0,24), time.step = 0.5,
start.date = list(day=21, month=3), end.date = NULL, day.step = 5,
env = rsaga.env(), ...)
{
if ( (env$version == "2.0.4" | env$version == "2.0.5") ) {
stop("rsaga.pisr only for SAGA GIS 2.0.6 - 2.2.1;\n",
" use rsaga.solar.radiation for older versions of SAGA GIS")
}
if ( (env$version == "2.2.2" | env$version == "2.2.3") ) {
stop("rsaga.pisr only for SAGA GIS 2.0.6 - 2.2.1:\n",
" use rsaga.pisr2 for newer versions of SAGA GIS")
}
in.dem = default.file.extension(in.dem,".sgrd")
if (!is.null(in.svf.grid)) in.svf.grid = default.file.extension(in.svf.grid,".sgrd")
if (!is.null(in.vapour.grid)) in.vapour.grid = default.file.extension(in.vapour.grid,".sgrd")
if (!is.null(in.latitude.grid)) in.latitude.grid = default.file.extension(in.latitude.grid,".sgrd")
if (!is.null(in.longitude.grid)) in.longitude.grid = default.file.extension(in.longitude.grid,".sgrd")
if (missing(out.direct.grid)) {
out.direct.grid = tempfile()
on.exit(unlink(paste(out.direct.grid,".*",sep="")), add = TRUE)
}
if (missing(out.diffuse.grid)) {
out.diffuse.grid = tempfile()
on.exit(unlink(paste(out.diffuse.grid,".*",sep="")), add = TRUE)
}
if (missing(out.total.grid)) {
out.total.grid = tempfile()
on.exit(unlink(paste(out.total.grid,".*",sep="")), add = TRUE)
}
if (missing(out.ratio.grid)) {
out.ratio.grid = tempfile()
on.exit(unlink(paste(out.ratio.grid,".*",sep="")), add = TRUE)
}
if (missing(out.duration)) {
out.duration = tempfile()
on.exit(unlink(paste(out.duration,".*",sep="")), add = TRUE)
}
if (missing(out.sunrise)) {
out.sunrise = tempfile()
on.exit(unlink(paste(out.sunrise,".*",sep="")), add = TRUE)
}
if (missing(out.sunset)) {
out.sunset = tempfile()
on.exit(unlink(paste(out.sunset,".*",sep="")), add = TRUE)
}
unit = match.arg.ext(unit,numeric=TRUE,ignore.case=TRUE,base=0)
method = match.arg.ext(method, numeric = TRUE, ignore.case = TRUE, base = 0)
bending.lat.offset = match.arg.ext(bending.lat.offset, c("bottom","center","top","user"),
numeric = TRUE, ignore.case = TRUE, base = 0)
bending.lon.offset = match.arg.ext(bending.lon.offset, c("left","center","right","user"),
numeric = TRUE, ignore.case = TRUE, base = 0)
if (!is.null(latitude))
stopifnot( (latitude>=-90) & (latitude<=90) )
stopifnot( length(time.range)==2 )
stopifnot( all(time.range>=0) & all(time.range<=24) & (time.range[1]<time.range[2]) )
stopifnot( (time.step>0) & (time.step<=12) )
stopifnot( (day.step>0) & (day.step<=100) )
stopifnot( is.logical(local.svf) )
stopifnot( is.logical(enable.bending) )
param = list( GRD_DEM=in.dem,
GRD_DIRECT = out.direct.grid, GRD_DIFFUS = out.diffuse.grid,
GRD_TOTAL = out.total.grid, GRD_RATIO = out.ratio.grid,
DURATION = out.duration,
SUNRISE = out.sunrise, SUNSET = out.sunset,
UNITS = unit, SOLARCONST = as.numeric(solconst), LOCALSVF = local.svf,
BENDING_BENDING = enable.bending,
METHOD = method,
#LATITUDE = as.numeric(latitude), # removed 27 Dec 2011
DHOUR = time.step )
# Added 27 Dec 2011:
if (!is.null(latitude)) {
stopifnot((latitude >= -90) & (latitude <= 90))
param = c(param, LATITUDE = as.numeric(latitude))
}
if (!is.null(in.svf.grid)) param = c( param, GRD_SVF=in.svf.grid )
if (!is.null(in.vapour.grid)) param = c( param, GRD_VAPOUR=in.vapour.grid )
stopifnot( !is.null(latitude) | !is.null(in.latitude.grid) ) # added 27 Dec 2011
if (!is.null(in.latitude.grid)) param = c( param, GRD_LAT=in.latitude.grid )
if (!is.null(in.longitude.grid)) param = c( param, GRD_LON=in.longitude.grid )
if (enable.bending) {
param = c( param,
BENDING_RADIUS = bending.radius,
BENDING_LAT_OFFSET = bending.lat.offset,
BENDING_LAT_REF_USER = bending.lat.ref.user,
BENDING_LON_OFFSET = bending.lon.offset,
BENDING_LON_REF_USER = bending.lon.ref.user )
}
if (method == 0) {
param = c(param, ATMOSPHERE = as.numeric(hgt.atmosphere),
VAPOUR = as.numeric(hgt.water.vapour.pressure))
} else if (method == 1) {
param = c(param, PRESSURE = as.numeric(cmp.pressure),
WATER = as.numeric(cmp.water.content), DUST = as.numeric(cmp.dust))
} else if (method == 2) {
stopifnot( (lmp.transmittance>=0) & (lmp.transmittance<=100) )
param = c(param, LUMPED = as.numeric(lmp.transmittance))
} else stopifnot( method %in% c(0:2) )
if (is.null(start.date)) { # one year
stopifnot( is.null(end.date) )
param = c( param, PERIOD = 2, DAY_A = 0, MONTH_A = 0,
DAY_B = 30, MONTH_B = 11 )
} else {
if (is.null(end.date)) {
param = c( param, PERIOD = 1 ) # single day ... or moment (later)
} else param = c( param, PERIOD = 2 )
stopifnot(is.list(start.date))
stopifnot(length(start.date) == 2)
        stopifnot(all(names(start.date) %in% c("day","month")))
stopifnot( (start.date$day>=1) & (start.date$day<=31) )
stopifnot( (start.date$month>=1) & (start.date$month<=12) )
param = c( param, DAY_A = start.date$day - 1,
MON_A = start.date$month - 1 )
if (is.null(end.date)) {
# check if moment:
stopifnot(length(time.range) <= 2)
if (length(time.range) == 2) {
if (time.range[2] == time.range[1])
time.range = time.range[1]
}
if (length(time.range) == 1) {
# moment
param$PERIOD = 0
stopifnot(time.range >= 0 & time.range <= 24)
param = c(param, MOMENT = round(time.range,3))
} else {
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
} else {
# range of days:
stopifnot(is.list(end.date))
stopifnot(length(end.date) == 2)
            stopifnot(all(names(end.date) %in% c("day","month")))
stopifnot( (end.date$day>=1) & (end.date$day<=31) )
stopifnot( (end.date$month>=1) & (end.date$month<=12) )
param = c( param, DAY_B = end.date$day - 1,
MON_B = end.date$month - 1,
DDAYS = day.step )
if (is.null(time.range)) time.range = c(0,24)
stopifnot(length(time.range) == 2)
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Potential Incoming Solar Radiation", # = 2
param = param, env = env, ...)
}
#' Potential incoming solar radiation SAGA 2.2.2+
#'
#' This function calculates the potential incoming solar radiation in an area using different atmospheric models; This function reflects changes to the module with SAGA 2.2.2+.
#' For SAGA versions 2.0.6 to 2.2.1 please see \code{\link{rsaga.pisr}}.
#' @name rsaga.pisr2
#' @param in.dem name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param in.svf.grid Optional input grid in SAGA format: Sky View Factor; see also \code{local.svf}
#' @param in.vapour.grid Optional input grid in SAGA format: Water vapour pressure (mbar), for use with \code{method = "height"}; default 10 mbar
#' @param in.linke.grid Optional input grid in SAGA format: Linke turbidity coefficient, for use with \code{method = "hofierka"}; default 3.0
#' @param out.direct.grid Output grid: Direct insolation (unit selected by \code{unit} argument)
#' @param out.diffuse.grid Output grid: Diffuse insolation
#' @param out.total.grid Optional output grid: Total insolation, i.e. sum of direct and diffuse incoming solar radiation
#' @param out.ratio.grid Optional output grid: Direct to diffuse ratio
#' @param out.duration Optional output grid: Duration of insolation
#' @param out.sunrise Optional output grid: time of sunrise; only calculated if time span is set to single day
#' @param out.sunset Time of sunset; see \code{out.sunrise}
#' @param local.svf logical (default: \code{TRUE}): if \code{TRUE}, use a sky view factor based on local slope (after Oke, 1988) when no sky view factor grid is provided in \code{in.svf.grid}
#' @param location specifies whether to use a constant latitude supplied by \code{latitude} below (\code{"latitude"} or code \code{0}; default) or the latitude calculated from the grid system (\code{"grid"} or code \code{1})
#' @param latitude Geographical latitude in degree North (negative values indicate southern hemisphere)
#' @param unit unit of insolation output grids: \code{"kWh/m2"} (default) \code{"kJ/m2"}, or \code{"J/cm2"}
#' @param solconst solar constant, defaults to 1367 W/m2
#' @param method specifies how the atmospheric components should be accounted for: either based on the height of atmosphere and vapour pressure (\code{"height"}, or numeric code 0), or air pressure, water and dust content (\code{"components"}, code 1), or lumped atmospheric transmittance (\code{"lumped"}, code \code{2}), or by the method of Hofierka and Suri (2002) (\code{"hofierka"}, code \code{3}). Default: \code{"lumped"}.
#' @param hgt.atmosphere Height of atmosphere (in m); default 12000 m. For use with \code{method = "height"}
#' @param cmp.pressure atmospheric pressure in mbar, defaults to 1013 mbar. For use with \code{method = "components"}
#' @param cmp.water.content water content of a vertical slice of the atmosphere in cm: between 1.5 and 1.7cm, average 1.68cm (default). For use with \code{method = "components"}
#' @param cmp.dust dust factor in ppm; defaults to 100 ppm. For use with \code{method = "components"}
#' @param lmp.transmittance transmittance of the atmosphere in percent; usually between 60 (humid areas) and 80 percent (deserts)
#' @param time.range numeric vector of length 2: time span (hours of the day) for numerical integration
#' @param time.step time step in hours for numerical integration
#' @param start.date list of length three, giving the start date in \code{day}, \code{month}, and \code{year} components as numbers; month is one-based (SAGA_CMD uses zero-based numbers internally), i.e. Jan. 1st 2015 is \code{list(day=1,month=1,year=2015)}
#' @param end.date see \code{start.date}
#' @param day.step if \code{days} indicates a range of days, this specifies the time step (number of days) for calculating the incoming solar radiation
#' @param env RSAGA geoprocessing environment obtained with \code{\link{rsaga.env}}; this argument is required for version control (see Note)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details According to SAGA GIS 2.0.7 documentation, "Most options should do well, but TAPES-G based diffuse irradiance calculation ("Atmospheric Effects" methods 2 and 3) needs further revision!" I.e. be careful with \code{method = "components"} and \code{method = "lumped"}.
#' @references
#' Boehner, J., Antonic, O. (2009): Land surface parameters specific to topo-climatology. In: Hengl, T. and Reuter, H. I. (eds.): Geomorphometry - Concepts, Software, Applications. Elsevier.
#'
#' Oke, T.R. (1988): Boundary layer climates. London, Taylor and Francis.
#'
#' Wilson, J.P., Gallant, J.C. (eds.), 2000: Terrain analysis - principles and applications. New York, John Wiley and Sons.
#'
#' Hofierka, J., Suri, M. (2002): The solar radiation model for Open source GIS: implementation and applications. International GRASS users conference in Trento, Italy, September 2002
#' @author Alexander Brenning & Donovan Bangs (R interface), Olaf Conrad (SAGA module)
#' @note
#' SAGA_CMD uses zero-based months, but this R function uses the standard one-based months (e.g. day 1 is the first day of the month, month 1 is January) and translates to the SAGA system.
#'
#' This function uses module Potential Incoming Solar Radiation from SAGA library \code{ta_lighting} in SAGA version 2.0.6+.
#' Changes to the module with SAGA 2.2.2+ include adding \code{year} to the \code{*.date} arguments to allow calculation across years.
#' The method of Hofierka and Suri (2002) is added, which uses the Linke turbidity coefficient.
#' Duration of insolation (\code{"out.duration"}) is only calculated when the time period is set to a single day.
#' @seealso \code{\link{rsaga.pisr}}; for similar modules in older SAGA versions (pre-2.0.6) see \code{\link{rsaga.solar.radiation}} and \code{\link{rsaga.insolation}}; \code{\link{rsaga.hillshade}}
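#' @examples
#' \dontrun{
#' # Hedged sketch (SAGA GIS 2.2.2+); "dem.sgrd" and the output names are placeholders,
#' # not data shipped with RSAGA:
#' rsaga.pisr2("dem.sgrd", out.direct.grid = "direct.sgrd",
#'    out.diffuse.grid = "diffuse.sgrd", out.total.grid = "total.sgrd",
#'    latitude = 43, method = "lumped", lmp.transmittance = 70,
#'    time.step = 0.5, start.date = list(day = 21, month = 3, year = 2015))
#' }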
#' @keywords spatial interface
#' @export
rsaga.pisr2 = function(in.dem, in.svf.grid = NULL, in.vapour.grid = NULL,
in.linke.grid = NULL,
out.direct.grid, out.diffuse.grid, out.total.grid = NULL,
out.ratio.grid = NULL, out.duration, out.sunrise, out.sunset,
local.svf = TRUE, location = c("latitude", "grid"), latitude = 53,
unit=c("kWh/m2","kJ/m2","J/cm2"), solconst=1367.0,
method = c("height","components","lumped","hofierka"),
hgt.atmosphere = 12000,
cmp.pressure = 1013, cmp.water.content = 1.68, cmp.dust = 100,
lmp.transmittance = 70,
time.range = c(0,24), time.step = 0.5,
start.date = list(day=31, month=10, year=2015), end.date = NULL, day.step = 5,
env = rsaga.env(), ...)
{
if ( env$version != "2.2.2" & env$version != "2.2.3" ) {
stop("rsaga.pisr2 only for SAGA GIS 2.2.2+;\n",
" use rsaga.pisr or rsaga.solar.radiation for older versions of SAGA GIS")
}
in.dem = default.file.extension(in.dem,".sgrd")
if (!is.null(in.svf.grid)) in.svf.grid = default.file.extension(in.svf.grid,".sgrd")
if (!is.null(in.vapour.grid)) in.vapour.grid = default.file.extension(in.vapour.grid,".sgrd")
if (!is.null(in.linke.grid)) in.linke.grid = default.file.extension(in.linke.grid,".sgrd")
if (missing(out.direct.grid)) {
out.direct.grid = tempfile()
on.exit(unlink(paste(out.direct.grid,".*",sep="")), add = TRUE)
}
if (missing(out.diffuse.grid)) {
out.diffuse.grid = tempfile()
on.exit(unlink(paste(out.diffuse.grid,".*",sep="")), add = TRUE)
}
if (missing(out.total.grid)) {
out.total.grid = tempfile()
on.exit(unlink(paste(out.total.grid,".*",sep="")), add = TRUE)
}
if (missing(out.ratio.grid)) {
out.ratio.grid = tempfile()
on.exit(unlink(paste(out.ratio.grid,".*",sep="")), add = TRUE)
}
if (missing(out.duration)) {
out.duration = tempfile()
on.exit(unlink(paste(out.duration,".*",sep="")), add = TRUE)
}
if (missing(out.sunrise)) {
out.sunrise = tempfile()
on.exit(unlink(paste(out.sunrise,".*",sep="")), add = TRUE)
}
if (missing(out.sunset)) {
out.sunset = tempfile()
on.exit(unlink(paste(out.sunset,".*",sep="")), add = TRUE)
}
unit = match.arg.ext(unit,numeric=TRUE,ignore.case=TRUE,base=0)
method = match.arg.ext(method, numeric = TRUE, ignore.case = TRUE, base = 0)
location = match.arg.ext(location, numeric = TRUE, ignore.case = TRUE, base = 0)
if (!is.null(latitude))
stopifnot( (latitude>=-90) & (latitude<=90) )
stopifnot( length(time.range)==2 )
stopifnot( all(time.range>=0) & all(time.range<=24) & (time.range[1]<time.range[2]) )
stopifnot( (time.step>0) & (time.step<=12) )
stopifnot( (day.step>0) & (day.step<=100) )
stopifnot( is.logical(local.svf) )
param = list( GRD_DEM=in.dem,
GRD_DIRECT = out.direct.grid, GRD_DIFFUS = out.diffuse.grid,
GRD_TOTAL = out.total.grid, GRD_RATIO = out.ratio.grid,
GRD_DURATION = out.duration,
GRD_SUNRISE = out.sunrise, GRD_SUNSET = out.sunset,
UNITS = unit, SOLARCONST = as.numeric(solconst), LOCALSVF = local.svf,
METHOD = method,
HOUR_STEP = time.step )
if (location == 0) {
if (!is.null(latitude)) {
stopifnot((latitude >= -90) & (latitude <= 90))
param = c(param, LATITUDE = as.numeric(latitude))
}
} else {
param = c(param, LOCATION = as.numeric(location))
}
if (!is.null(in.svf.grid)) param = c( param, GRD_SVF=in.svf.grid )
if (!is.null(in.vapour.grid)) param = c( param, GRD_VAPOUR=in.vapour.grid )
if (!is.null(in.linke.grid)) param = c( param, GRD_LINKE=in.linke.grid )
if (method == 0) {
param = c(param, ATMOSPHERE = as.numeric(hgt.atmosphere))
} else if (method == 1) {
param = c(param, PRESSURE = as.numeric(cmp.pressure),
WATER = as.numeric(cmp.water.content), DUST = as.numeric(cmp.dust))
} else if (method == 2) {
stopifnot( (lmp.transmittance>=0) & (lmp.transmittance<=100) )
param = c(param, LUMPED = as.numeric(lmp.transmittance))
} else if (method == 3) {
param = param
} else stopifnot( method %in% c(0:3) )
if (is.null(start.date)) { # one year
stopifnot( is.null(end.date) )
param = c( param, PERIOD = 2, DAY_A = 0, MONTH_A = 0,
DAY_B = 30, MONTH_B = 11 )
} else {
if (is.null(end.date)) {
param = c( param, PERIOD = 1 ) # single day ... or moment (later)
} else param = c( param, PERIOD = 2 )
stopifnot(is.list(start.date))
stopifnot(length(start.date) == 3)
        stopifnot(all(names(start.date) %in% c("day","month","year")))
stopifnot( (start.date$day>=1) & (start.date$day<=31) )
stopifnot( (start.date$month>=1) & (start.date$month<=12) )
param = c( param, DAY_A = start.date$day ,
MON_A = start.date$month - 1,
YEAR_A = start.date$year )
if (is.null(end.date)) {
# check if moment:
stopifnot(length(time.range) <= 2)
if (length(time.range) == 2) {
if (time.range[2] == time.range[1])
time.range = time.range[1]
}
if (length(time.range) == 1) {
# moment
param$PERIOD = 0
stopifnot(time.range >= 0 & time.range <= 24)
param = c(param, MOMENT = round(time.range,3))
} else {
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
} else {
# range of days:
stopifnot(is.list(end.date))
stopifnot(length(end.date) == 3)
            stopifnot(all(names(end.date) %in% c("day","month","year")))
stopifnot( (end.date$day>=1) & (end.date$day<=31) )
stopifnot( (end.date$month>=1) & (end.date$month<=12) )
param = c( param, DAY_B = end.date$day,
MON_B = end.date$month - 1,
YEAR_B = end.date$year,
DAYS_STEP = day.step )
if (is.null(time.range)) time.range = c(0,24)
stopifnot(length(time.range) == 2)
stopifnot(time.range[1] >= 0 & time.range[1] <= 24)
stopifnot(time.range[2] >= 0 & time.range[2] <= 24)
stopifnot(time.range[1] < time.range[2])
param = c(param, HOUR_RANGE_MIN = time.range[1],
HOUR_RANGE_MAX = time.range[2])
}
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Potential Incoming Solar Radiation", # = 2
param = param, env = env, ...)
}
#' Potential incoming solar radiation
#'
#' This function calculates the potential incoming solar radiation in an area either using a lumped atmospheric transmittance model or estimating it based on water and dust content. Use \code{\link{rsaga.pisr}} instead with SAGA GIS 2.0.6+.
#' @name rsaga.solar.radiation
#' @param in.dem name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param out.grid output grid file for potential incoming solar radiation sums
#' @param out.duration Optional output grid file for duration of insolation
#' @param latitude Geographical latitude in degree North (negative values indicate southern hemisphere)
#' @param unit unit of the \code{out.grid} output: \code{"kWh/m2"} (default) or \code{"J/m2"}
#' @param solconst solar constant, defaults to 1367 W/m2
#' @param method specifies how the atmospheric components should be accounted for: either based on a lumped atmospheric transmittance as specified by argument \code{transmittance} (\code{"lumped"}, or numeric code \code{0}; default); or by calculating the components corresponding to water and dust (\code{"components"}, code \code{1})
#' @param transmittance transmittance of the atmosphere in percent; usually between 60 (humid areas) and 80 percent (deserts)
#' @param pressure atmospheric pressure in mbar
#' @param water.content water content of a vertical slice of the atmosphere in cm: between 1.5 and 1.7cm, average 1.68cm (default)
#' @param dust dust factor in ppm; defaults to 100 ppm
#' @param time.range numeric vector of length 2: time span (hours of the day) for numerical integration
#' @param time.step time step in hours for numerical integration
#' @param days either a list with components \code{day} and \code{month} specifying a single day of the year for radiation modeling; OR a numeric vector of length 2 specifying the start and end date (see Note below)
#' @param day.step if \code{days} indicates a range of days, this specifies the time step (number of days) for calculating the incoming solar radiation
#' @param env RSAGA geoprocessing environment obtained with \code{\link{rsaga.env}}; this argument is required for version control (see Note)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @references Wilson, J.P., Gallant, J.C. (eds.), 2000: Terrain analysis - principles and applications. New York, John Wiley & Sons.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This module ceased to exist under SAGA GIS 2.0.6+, which has a similar (but more flexible) module Potential Solar Radiation that is interfaced by \code{\link{rsaga.pisr}}.
#'
#' SAGA_CMD uses zero-based days and months, but this R function uses the standard one-based days and months (e.g. day 1 is the first day of the month, month 1 is January) and translates to the SAGA system.
#'
#' In SAGA 2.0.2, solar radiation sums calculated for a range of days, say \code{days=c(a,b)} actually calculate radiation only for days \code{a,...,b-1} (in steps of \code{day.step} - I used \code{day.step=1} in this example). The setting \code{a=b} however gives the same result as \code{b=a+1}, and indeed \code{b=a+2} gives twice the radiation sums and potential sunshine duration that \code{a=b} and \code{b=a+1} both give.
#'
#' The solar radiation module of SAGA 2.0.1 had a bug that made it impossible to pass a range of \code{days} of the year or a range of hours of the day (\code{time.range}) to SAGA. These options work in SAGA 2.0.2.
#'
#' This function uses module Incoming Solar Radiation from SAGA GIS library \code{ta_lighting}.
#' @seealso \code{\link{rsaga.hillshade}}, \code{\link{rsaga.insolation}}
#' @examples
#' \dontrun{
#' # potential solar radiation on Nov 7 in Southern Ontario...
#' rsaga.solar.radiation("dem","solrad","soldur",latitude=43,
#' days=list(day=7,month=11),time.step=0.5)
#' }
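#' \dontrun{
#' # hedged sketch of the range-of-days behaviour discussed in the Notes
#' # (radiation summed over days 60 to 90 in daily steps; "dem" is a placeholder):
#' rsaga.solar.radiation("dem", "solrad_spring", latitude = 43,
#'    days = c(60, 91), day.step = 1, time.step = 1)
#' }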
#' @keywords spatial interface
#' @export
rsaga.solar.radiation = function(in.dem, out.grid, out.duration, latitude,
unit=c("kWh/m2","J/m2"), solconst=1367.0, method=c("lumped","components"),
transmittance=70, pressure=1013, water.content=1.68, dust=100,
time.range=c(0,24), time.step=1,
days=list(day=21,month=3), day.step=5,
env = rsaga.env(), ...)
{
if ( !(env$version == "2.0.4" | env$version == "2.0.5") ) {
stop("rsaga.solar.radiation only for SAGA GIS 2.0.4 / 2.0.5;\n",
" use rsaga.pisr for SAGA GIS 2.0.6+")
}
in.dem = default.file.extension(in.dem,".sgrd")
if (missing(out.duration)) {
out.duration = tempfile()
on.exit(unlink(paste(out.duration,".*",sep="")), add = TRUE)
}
unit = match.arg.ext(unit,numeric=TRUE,ignore.case=TRUE,base=0)
method = match.arg.ext(method,numeric=TRUE,ignore.case=TRUE,base=0)
stopifnot( (transmittance>=0) & (transmittance<=100) )
stopifnot( (latitude>=-90) & (latitude<=90) )
stopifnot( length(time.range)==2 )
stopifnot( all(time.range>=0) & all(time.range<=24) & (time.range[1]<time.range[2]) )
stopifnot( (time.step>0) & (time.step<=12) )
stopifnot( (day.step>0) & (day.step<=100) )
param = list( ELEVATION=in.dem, INSOLAT=out.grid, DURATION=out.duration,
UNIT=unit, SOLCONST=as.numeric(solconst), METHOD=method,
TRANSMITT=as.numeric(transmittance), PRESSURE=as.numeric(pressure),
WATER=as.numeric(water.content), DUST=as.numeric(dust),
LATITUDE=as.numeric(latitude),
HOUR_RANGE_MIN=time.range[1], HOUR_RANGE_MAX=time.range[2],
HOUR_STEP=time.step )
if (is.null(days)) { # one year
param = c( param, TIMESPAN=2 )
} else if (is.list(days)) { # single day
stopifnot(length(days)==2)
stopifnot( (days$day>=1) & (days$day<=31) )
stopifnot( (days$month>=1) & (days$month<=12) )
param = c( param, TIMESPAN=0,
SINGLE_DAY_DAY=days$day-1, SINGLE_DAY_MONTH=days$month-1 )
} else if (is.numeric(days)) { # range of days
stopifnot(length(days)==2)
stopifnot( days[1] <= days[2] )
stopifnot( (days[1]>=1) & (days[2]<=366) )
param = c( param, TIMESPAN=1,
DAY_RANGE_MIN=days[1], DAY_RANGE_MAX=days[2],
DAY_STEP=day.step )
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Incoming Solar Radiation", # = 2
param = param, env = env, ...)
}
#' Incoming Solar Radiation (Insolation)
#'
#' This function calculates the amount of incoming solar radiation (insolation) depending on slope, aspect, and atmospheric properties. Module not available in SAGA GIS 2.0.6 and 2.0.7.
#' @name rsaga.insolation
#' @param in.dem Name of input digital elevation model (DEM) grid in SAGA grid format (default extension: \code{.sgrd})
#' @param in.vapour Optional input: SAGA grid file giving the water vapour pressure in mbar
#' @param in.latitude Optional input: SAGA grid file giving for each pixel the latitude in degree
#' @param in.longitude Optional input: SAGA grid file giving for each pixel the longitude in degree
#' @param out.direct Optional output grid file for direct insolation
#' @param out.diffuse Optional output grid file for diffuse insolation
#' @param out.total Optional output grid file for total insolation, i.e. the sum of direct and diffuse insolation
#' @param horizontal logical; project radiation onto a horizontal surface? (default: \code{FALSE}, i.e. use the actual inclined surface as a reference area)
#' @param solconst solar constant (in J/cm2/min); default: 8.164 J/cm2/min (= 1360.7 W/m2; the more commonly used solar constant of 1367 W/m2 corresponds to 8.202 J/cm2/min)
#' @param atmosphere height of atmosphere in m; default: 12000m
#' @param water.vapour.pressure if no water vapour grid is given, this argument specifies a constant water vapour pressure that is uniform in space; in mbar, default 10 mbar
#' @param type type of time period: \code{"moment"} (equivalent: \code{0}) for a single instant, \code{"day"} (or \code{1}) for a single day, \code{"range.of.days"} (or \code{2}), or \code{"same.moment.range.of.days"} (or \code{3}) for the same moment in a range of days; default: \code{"moment"}
#' @param time.step time resolution in hours for discretization within a day
#' @param day.step time resolution in days for a range of days
#' @param days numeric vector of length 2, specifying the first and last day of a range of days (for \code{type}s 2 and 3)
#' @param moment if \code{type="moment"} or \code{"same.moment.range.of.days"}, \code{moment} specifies the time of the day (hour between 0 and 24) for which the insolation is to be calculated
#' @param latitude if no \code{in.latitude} grid is given, this will specify a fixed geographical latitude for the entire grid
#' @param bending should planetary bending be modeled? (default: \code{FALSE})
#' @param radius planetary radius
#' @param lat.offset \code{latitude} relates to grids \code{"bottom"}(equivalent code: \code{0}), \code{"center"} (1), \code{"top"} (2), or \code{"user"}-defined reference (default: \code{"user"}); in the latter case, \code{lat.ref.user} defines the reference
#' @param lat.ref.user if \code{in.latitude} is missing and \code{lat.offset="user"}, then this numeric value defines the latitudinal reference (details??)
#' @param lon.offset local time refers to grid's \code{"left"} edge (code 0), \code{"center"} (1), \code{"right"} edge (2), or a \code{"user"}-defined reference.
#' @param lon.ref.user if \code{in.longitude} is missing and \code{lon.offset="user"}, then this numeric value defines the reference of the local time (details??)
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @details Calculation of incoming solar radiation (insolation). Based on the SADO (System for the Analysis of Discrete Surfaces) routines developed by Boehner & Trachinow.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @note This function uses module \code{Insolation} (code: 3) from SAGA library \code{ta_lighting}. It is available in SAGA GIS 2.0.4 and 2.0.5 but not in 2.0.6 and 2.0.7; see \code{\link{rsaga.pisr}}.
#' @seealso \code{\link{rsaga.solar.radiation}}, \code{\link{rsaga.pisr}}, \code{\link{rsaga.hillshade}}
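#' @examples
#' \dontrun{
#' # minimal sketch (SAGA GIS 2.0.4/2.0.5 only); grid file names are placeholders:
#' # total insolation on day 180 of the year at 43 degrees northern latitude
#' rsaga.insolation("dem", out.total = "insol", latitude = 43,
#'     type = "day", days = 180, time.step = 1)
#' }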
#' @keywords spatial interface
#' @export
rsaga.insolation = function(in.dem, in.vapour, in.latitude, in.longitude,
out.direct, out.diffuse, out.total,
horizontal=FALSE, solconst=8.1640, atmosphere=12000, water.vapour.pressure=10.0,
type=c("moment","day","range.of.days","same.moment.range.of.days"),
time.step=1, day.step=5, days, moment, latitude, bending=FALSE,
radius=6366737.96,
lat.offset="user", lat.ref.user=0,
lon.offset="center", lon.ref.user=0,
...)
{
in.dem = default.file.extension(in.dem,".sgrd")
param = list( GRD_DEM=in.dem )
type = match.arg.ext(type,numeric=TRUE,ignore.case=TRUE,base=0)
stopifnot( (!missing(out.direct)) | (!missing(out.diffuse)) | (!missing(out.total)) )
stopifnot( !missing(latitude) )
if (!missing(moment)) {
if (!(type==0 | type==3)) {
warning("'moment' argument only relevant for 'type=\"moment\"'\n",
"or 'type=\"same.moment.range.of.days\"' -\n",
"ignoring the 'moment' argument")
}
}
if (!missing(in.vapour)) {
in.vapour = default.file.extension(in.vapour,".sgrd")
param = c(param, GRD_VAPOUR=in.vapour)
}
if (!missing(in.latitude)) {
in.latitude = default.file.extension(in.latitude,".sgrd")
param = c(param, GRD_LAT=in.latitude)
}
if (!missing(in.longitude)) {
in.longitude = default.file.extension(in.longitude,".sgrd")
param = c(param, GRD_LON=in.longitude)
}
if (!missing(out.direct)) param = c(param, GRD_DIRECT=out.direct)
if (!missing(out.diffuse)) param = c(param, GRD_DIFFUS=out.diffuse)
if (!missing(out.total)) param = c(param, GRD_TOTAL=out.total)
stopifnot( (days[1]>=0) & (days[1]<=366) )
param = c(param, BHORIZON=horizontal, SOLARCONST=solconst,
ATMOSPHERE=atmosphere, VAPOUR=water.vapour.pressure,
PERIOD=type, DHOUR=time.step, DDAYS=day.step,
DAY_A=days[1])
if (type>=2) { # range of days / same moment in a range of days
stopifnot( (days[2]>=days[1]) & (days[2]<=366) )
param = c(param, DAY_B=days[2])
}
if ((type==0) | (type==3)) {
stopifnot( (moment>=0) & (moment<=24) )
param = c(param, MOMENT=moment)
}
param = c(param, LATITUDE=latitude, BENDING=bending, RADIUS=radius)
lat.offset = match.arg.ext(lat.offset, c("bottom","center","top","user"),
numeric=TRUE, ignore.case=TRUE, base=0)
lon.offset = match.arg.ext(lon.offset, c("left","center","right","user"),
numeric=TRUE, ignore.case=TRUE, base=0)
param = c(param, LAT_OFFSET=lat.offset)
if (lat.offset==3) { # user-defined
#stopifnot(!missing(lat.ref.user))
param = c(param, LAT_REF_USER=as.numeric(lat.ref.user))
}
param = c(param, LON_OFFSET=lon.offset)
if (lon.offset==3) { # user-defined
#stopifnot(!missing(lon.ref.user))
param = c(param, LON_REF_USER=as.numeric(lon.ref.user))
}
rsaga.geoprocessor(lib = "ta_lighting",
module = "Insolation", # = 3
param = param, ...)
}
######## Module grid_filter ########
#' Simple Filters
#'
#' Apply a smoothing, sharpening or edge filter to a SAGA grid.
#' @name rsaga.filter.simple
#' @param in.grid input: SAGA grid file (default file extension: \code{.sgrd})
#' @param out.grid output: SAGA grid file
#' @param mode character or numeric: shape of moving window, either \code{"square"} (=0) or \code{"circle"} (=1, default)
#' @param method character or numeric: \code{"smooth"} (=0), \code{"sharpen"} (=1), or \code{"edge"} (=2)
#' @param radius positive integer: radius of moving window
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.filter.gauss}}
#' @examples \dontrun{rsaga.filter.simple("dem","dem-smooth",radius=4)}
#' @keywords spatial interface
#' @export
rsaga.filter.simple = function(in.grid, out.grid, mode="circle",
method=c("smooth","sharpen","edge"), radius,...)
{
in.grid = default.file.extension(in.grid,".sgrd")
mode = match.arg.ext(mode,choices=c("square","circle"),
numeric=TRUE,base=0,ignore.case=TRUE)
method = match.arg.ext(method,numeric=TRUE,base=0,ignore.case=TRUE)
if (missing(radius)) stop("the search 'radius' argument (in # pixels) must be specified")
if (round(radius) != radius) {
warning("'radius' must be an integer >=1 (# pixels); rounding it...")
radius = round(radius)
}
if (radius<1) {
warning("'radius' must be an integer >=1 (# pixels); setting 'radius=1'...")
radius = 1
}
param = list(INPUT=in.grid, RESULT=out.grid, MODE=mode,
METHOD=method, RADIUS=radius)
rsaga.geoprocessor(lib = "grid_filter",
module = "Simple Filter",
param = param, ...)
}
#' Gauss Filter
#'
#' Smooth a grid using a Gauss filter.
#' @name rsaga.filter.gauss
#' @param in.grid input: SAGA GIS grid file (default file extension: \code{.sgrd})
#' @param out.grid output: SAGA GIS grid file
#' @param sigma numeric, >0.0001: standard deviation parameter of Gauss filter
#' @param radius positive integer: radius of moving window
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.filter.simple}}
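#' @examples
#' \dontrun{
#' # illustrative call with placeholder grid names:
#' # smooth a DEM with a Gaussian kernel (standard deviation of 2 cells)
#' rsaga.filter.gauss("dem", "dem-gauss", sigma = 2)
#' }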
#' @keywords spatial interface
#' @export
rsaga.filter.gauss = function(in.grid, out.grid, sigma,
radius=ceiling(2*sigma),...)
{
in.grid = default.file.extension(in.grid,".sgrd")
if (missing(sigma)) stop("the 'sigma' standard deviation argument (in # pixels) must be specified")
stopifnot(sigma>0.0001)
if (round(radius) != radius) stop("'radius' must be an integer (# pixels)")
stopifnot(radius>=1)
param = list(INPUT=in.grid, RESULT=out.grid, SIGMA=sigma, RADIUS=radius)
rsaga.geoprocessor(lib = "grid_filter",
module = "Gaussian Filter", # = 1,
param, ...)
}
######## Module ta_hydrology ########
#' Parallel Processing
#'
#' Calculate the size of the local catchment area (contributing area), the catchment height, catchment slope and aspect, and flow path length, using parallel processing algorithms including the recommended multiple flow direction algorithm. This set of algorithms processes a digital elevation model (DEM) downwards from the highest to the lowest cell.\cr No longer supported with SAGA GIS 2.1.3+. See \code{\link{rsaga.topdown.processing}}.
#' @name rsaga.parallel.processing
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param in.sinkroute optional input: SAGA grid with sink routes
#' @param in.weight optional input: SAGA grid with weights
#' @param out.carea output: catchment area grid
#' @param out.cheight optional output: catchment height grid
#' @param out.cslope optional output: catchment slope grid
#' @param out.caspect optional output: catchment aspect grid
#' @param out.flowpath optional output: flow path length grid
#' @param step integer >=1: step parameter
#' @param method character or numeric: choice of processing algorithm: Deterministic 8 (\code{"d8"} or 0), Rho 8 (\code{"rho8"} or 1), Braunschweiger Reliefmodell (\code{"braunschweig"} or 2), Deterministic Infinity (\code{"dinf"} or 3), Multiple Flow Direction (\code{"mfd"} or 4, the default), Multiple Triangular Flow Direction (\code{"mtfd"}, or 5).
#' @param linear.threshold numeric (number of grid cells): threshold above which linear flow (i.e. the Deterministic 8 algorithm) will be used; linear flow is disabled for \code{linear.threshold=Inf} (the default)
#' @param convergence numeric >=0: a parameter for tuning convergent/ divergent flow; default value of \code{1.1} gives realistic results and should not be changed
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @details Refer to the references for details on the available algorithms.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @references
#' Deterministic 8:
#'
#' O'Callaghan, J.F., Mark, D.M. (1984): The extraction of drainage networks from digital elevation data. Computer Vision, Graphics and Image Processing, 28: 323-344.
#'
#' Rho 8:
#'
#' Fairfield, J., Leymarie, P. (1991): Drainage networks from grid digital elevation models. Water Resources Research, 27: 709-717.
#'
#' Braunschweiger Reliefmodell:
#'
#' Bauer, J., Rohdenburg, H., Bork, H.-R. (1985): Ein Digitales Reliefmodell als Vorraussetzung fuer ein deterministisches Modell der Wasser- und Stoff-Fluesse. Landschaftsgenese und Landschaftsoekologie, H. 10, Parameteraufbereitung fuer deterministische Gebiets-Wassermodelle, Grundlagenarbeiten zu Analyse von Agrar-Oekosystemen, eds.: Bork, H.-R., Rohdenburg, H., p. 1-15.
#'
#' Deterministic Infinity:
#'
#' Tarboton, D.G. (1997): A new method for the determination of flow directions and upslope areas in grid digital elevation models. Water Resources Research, 33(2): 309-319.
#'
#' Multiple Flow Direction:
#'
#' Freeman, G.T. (1991): Calculating catchment area with divergent flow based on a regular grid. Computers and Geosciences, 17: 413-22.
#'
#' Quinn, P.F., Beven, K.J., Chevallier, P., Planchon, O. (1991): The prediction of hillslope flow paths for distributed hydrological modelling using digital terrain models. Hydrological Processes, 5: 59-79.
#'
#' Multiple Triangular Flow Direction:
#'
#' Seibert, J., McGlynn, B. (2007): A new triangular multiple flow direction algorithm for computing upslope areas from gridded digital elevation models. Water Resources Research, 43, W04501.
#'
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module), Thomas Grabs (MTFD algorithm)
#' @note This function uses module \code{Parallel Processing} (version 2.0.7+: \code{Catchment Area (Parallel)}) from SAGA library \code{ta_hydrology}.
#'
#' The SAGA GIS 2.0.6+ version of the module adds more (optional) input and
#' output grids that are currently not supported by this wrapper function.
#' Use \code{\link{rsaga.geoprocessor}} for access to these options,
#' and see \code{rsaga.get.usage("ta_hydrology","Catchment Area (Parallel)")}
#' for information on new arguments.
#' @seealso \code{\link{rsaga.topdown.processing}}, \code{\link{rsaga.wetness.index}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # SAGA GIS 2.0.6+:
#' rsaga.get.usage("ta_hydrology","Catchment Area (Parallel)")
#' # earlier versions of SAGA GIS:
#' #rsaga.get.usage("ta_hydrology","Parallel Processing")
#' # execute model with typical settings:
#' rsaga.parallel.processing(in.dem = "dem", out.carea = "carea", out.cslope = "cslope")
#' # cslope is in radians - convert to degree:
#' fac = round(180/pi, 4)
#' formula = paste(fac, "*a", sep = "")
#' rsaga.grid.calculus("cslope", "cslopedeg", formula)
#' }
#' @keywords spatial interface
#' @export
rsaga.parallel.processing = function(in.dem, in.sinkroute, in.weight,
out.carea, out.cheight, out.cslope, out.caspect, out.flowpath,
step, method="mfd", linear.threshold=Inf, convergence=1.1,
env = rsaga.env(), ...)
{
## Version Stop - tool no longer supported SAGA 2.1.3
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" | env$version == "2.2.1" |
env$version == "2.2.2" | env$version == "2.2.3") {
stop("Parallel processing not supported with SAGA GIS 2.1.3 and higher;\n",
"See help(rsaga.topdown.processing) for similar function with SAGA 2.1.3+")
}
in.dem = default.file.extension(in.dem,".sgrd")
pp.choices = c("d8","rho8","braunschweig","dinf","mfd", "mtfd")
method = match.arg.ext(method, choices=pp.choices,
numeric=TRUE, ignore.case=TRUE, base=0)
param = list( ELEVATION=in.dem )
if (!missing(in.sinkroute)) {
in.sinkroute = default.file.extension(in.sinkroute,".sgrd")
param = c(param, SINKROUTE=in.sinkroute)
}
if (!missing(in.weight)) {
in.weight = default.file.extension(in.weight,".sgrd")
param = c(param, SINKROUTE=in.weight)
}
if (!missing(out.carea))
param = c(param, CAREA=out.carea)
if (!missing(out.cheight))
param = c(param, CHEIGHT=out.cheight)
if (!missing(out.cslope))
param = c(param, CSLOPE=out.cslope)
if (!missing(step))
param = c(param, STEP=step)
if (!missing(out.caspect))
param = c(param, CASPECT=out.caspect)
if (!missing(out.flowpath))
param = c(param, FLWPATH=out.flowpath)
param = c(param, Method=method)
if (is.finite(linear.threshold)) {
param = c(param, DOLINEAR=TRUE, LINEARTHRS=linear.threshold)
} else param = c(param, DOLINEAR=FALSE)
param = c(param, CONVERGENCE=convergence)
module = "Catchment Area (Parallel)"
if (env$version == "2.0.4" | env$version == "2.0.5" | env$version == "2.0.6")
module = "Parallel Processing"
rsaga.geoprocessor(lib = "ta_hydrology", module = module, param, env = env, ...)
}
#' Top-Down Processing
#'
#' Calculate the size of the local catchment area (contributing area), accumulated material, and flow path length, using top-down processing algorithms from the highest to the lowest cell. \cr Top-Down Processing is new with SAGA GIS 2.1.3. See \code{\link{rsaga.parallel.processing}} with older versions.
#' @name rsaga.topdown.processing
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param in.sinkroute optional input: SAGA grid with sink routes
#' @param in.weight optional input: SAGA grid with weights
#' @param in.mean optional input: SAGA grid for mean over catchment calculation
#' @param in.material optional input: SAGA grid with material
#' @param in.target optional input: SAGA grid of accumulation target
#' @param in.lin.val optional input: SAGA grid providing values to be compared with linear flow threshold instead of catchment area
#' @param in.lin.dir optional input: SAGA grid to be used for linear flow routing, if the value is a valid direction (0-7 = N, NE, E, SE, S, SW, W, NW)
#' @param out.carea output: catchment area grid
#' @param out.mean optional output: mean over catchment grid
#' @param out.tot.mat optional output: total accumulated material grid
#' @param out.acc.left optional output: accumulated material from left side grid
#' @param out.acc.right optional output: accumulated material from right side grid
#' @param out.flowpath optional output: flow path length grid
#' @param step integer >=1: step parameter
#' @param method character or numeric: choice of processing algorithm (default \code{"mfd"}, or 4):
#' \itemize{
#' \item [0] Deterministic 8 (\code{"d8"} or 0)
#' \item [1] Rho 8 (\code{"rho8"}, or 1)
#' \item [2] Braunschweiger Reliefmodell (\code{"braunschweig"} or 2)
#' \item [3] Deterministic Infinity (\code{"dinf"} or 3)
#' \item [4] Multiple Flow Direction (\code{"mfd"} or 4)
#' \item [5] Multiple Triangular Flow Direction (\code{"mtfd"}, or 5)
#' \item [6] Multiple Maximum Gradient Based Flow Direction (\code{"mdg"}, or 6)}
#' @param linear.threshold numeric (number of grid cells): threshold above which linear flow (i.e. the Deterministic 8 algorithm) will be used; linear flow is disabled for \code{linear.threshold=Inf} (the default)
#' @param convergence numeric >=0: a parameter for tuning convergent/ divergent flow; default value of \code{1.1} gives realistic results and should not be changed
#' @param env list, setting up a SAGA geoprocessing environment as created by \code{\link{rsaga.env}}
#' @param ... further arguments to \code{\link{rsaga.geoprocessor}}
#' @details Refer to the references for details on the available algorithms.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @references
#' Deterministic 8:
#'
#' O'Callaghan, J.F., Mark, D.M. (1984): The extraction of drainage networks from digital elevation data. Computer Vision, Graphics and Image Processing, 28: 323-344.
#'
#' Rho 8:
#'
#' Fairfield, J., Leymarie, P. (1991): Drainage networks from grid digital elevation models. Water Resources Research, 27: 709-717.
#'
#' Braunschweiger Reliefmodell:
#'
#' Bauer, J., Rohdenburg, H., Bork, H.-R. (1985): Ein Digitales Reliefmodell als Vorraussetzung fuer ein deterministisches Modell der Wasser- und Stoff-Fluesse. Landschaftsgenese und Landschaftsoekologie, H. 10, Parameteraufbereitung fuer deterministische Gebiets-Wassermodelle, Grundlagenarbeiten zu Analyse von Agrar-Oekosystemen, eds.: Bork, H.-R., Rohdenburg, H., p. 1-15.
#'
#' Deterministic Infinity:
#'
#' Tarboton, D.G. (1997): A new method for the determination of flow directions and upslope areas in grid digital elevation models. Water Resources Research, 33(2): 309-319.
#'
#' Multiple Flow Direction:
#'
#' Freeman, G.T. (1991): Calculating catchment area with divergent flow based on a regular grid. Computers and Geosciences, 17: 413-22.
#'
#' Quinn, P.F., Beven, K.J., Chevallier, P., Planchon, O. (1991): The prediction of hillslope flow paths for distributed hydrological modelling using digital terrain models. Hydrological Processes, 5: 59-79.
#'
#' Multiple Triangular Flow Direction:
#'
#' Seibert, J., McGlynn, B. (2007): A new triangular multiple flow direction algorithm for computing upslope areas from gridded digital elevation models. Water Resources Research, 43, W04501.
#'
#' Multiple Flow Direction Based on Maximum Downslope Gradient:
#'
#' Qin, C.Z., Zhu, A-X., Pei, T., Li, B.L., Scholten, T., Zhou, C.H. (2011): An approach to computing topographic wetness index based on maximum downslope gradient. Precision Agriculture, 12(1): 32-43.
#'
#' @author Alexander Brenning and Donovan Bangs (R interface), Olaf Conrad (SAGA module), Thomas Grabs (MTFD algorithm)
#' @examples
#' \dontrun{
#' # Calculation of contributing area with default settings:
#' rsaga.topdown.processing(in.dem = "dem", out.carea = "carea")
#' # Calculation of contributing area by maximum downslope gradient:
#' rsaga.topdown.processing(in.dem = "dem", out.carea = "carea",
#' method = "mdg")
#' }
#' @seealso \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.wetness.index}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @keywords spatial interface
#' @export
rsaga.topdown.processing = function(in.dem, in.sinkroute, in.weight, in.mean, in.material, in.target,
in.lin.val, in.lin.dir,
out.carea, out.mean, out.tot.mat, out.acc.left, out.acc.right,
out.flowpath, step, method = "mfd", linear.threshold = Inf, convergence = 1.1,
env = rsaga.env(), ...) {
## Version Stop - SAGA GIS Version < 2.1.3
if (env$version != "2.1.3" & env$version != "2.1.4" & env$version != "2.2.0" & env$version != "2.2.1" &
env$version != "2.2.2" & env$version != "2.2.3") {
stop("rsaga.topdown.processing requires SAGA GIS 2.1.3 or higher;\n",
"see help(rsaga.parallel.processing) for similar function in earlier versions")
}
in.dem = default.file.extension(in.dem,".sgrd")
pp.choices = c("d8","rho8","braunschweig","dinf","mfd", "mtfd", "mdg")
method = match.arg.ext(method, choices=pp.choices,
numeric=TRUE, ignore.case=TRUE, base=0)
param = list( ELEVATION=in.dem )
if (!missing(in.sinkroute)) {
in.sinkroute = default.file.extension(in.sinkroute,".sgrd")
param = c(param, SINKROUTE=in.sinkroute)
}
if (!missing(in.weight)) {
in.weight = default.file.extension(in.weight,".sgrd")
param = c(param, SINKROUTE=in.weight)
}
if (!missing(in.mean)) {
in.mean = default.file.extension(in.mean, ".sgrd")
param = c(param,VAL_INPUT=in.mean)
}
if (!missing(in.material)) {
in.material = default.file.extension(in.material, ".sgrd")
param = c(param, MATERIAL=in.material)
}
if (!missing(in.target)) {
in.target = default.file.extension(in.target, ".sgrd")
param = c(param, TARGET=in.target)
}
if (!missing(in.lin.val)) {
in.lin.val = default.file.extension(in.lin.val, ".sgrd")
param = c(param, LINEAR_VAL=in.lin.val)
}
if (!missing(in.lin.dir)){
in.lin.dir = default.file.extension(in.lin.dir, ".sgrd")
param = c(param, LINEAR_DIR=in.lin.dir)
}
if (!missing(out.carea))
param = c(param, CAREA=out.carea)
if (!missing(out.mean))
param = c(param, VAL_MEAN=out.mean)
if (!missing(out.tot.mat))
param = c(param, ACCU_TOT=out.tot.mat)
if (!missing(out.acc.left))
param = c(param, ACCU_LEFT=out.acc.left)
if (!missing(out.acc.right))
param = c(param, ACCU_RIGHT=out.acc.right)
if (!missing(out.flowpath))
param = c(param, FLOWLEN=out.flowpath)
param = c(param, METHOD=method)
if (is.finite(linear.threshold)) {
param = c(param, LINEAR_DO=TRUE, LINEAR_MIN=linear.threshold)
} else param = c(param, LINEAR_DO=FALSE)
param = c(param, CONVERGENCE=convergence)
module = "Catchment Area (Top-Down)"
if (env$version == "2.2.0" | env$version == "2.2.1" | env$version == "2.2.2" |
env$version == "2.2.3") {
module = "Flow Accumulation (Top-Down)"
}
rsaga.geoprocessor(lib = "ta_hydrology", module = module, param, env = env, ...)
}
#' SAGA Modules SAGA Wetness Index
#'
#' Calculate the SAGA Wetness Index (SWI), a modified topographic wetness index (TWI)
#' @name rsaga.wetness.index
#' @param in.dem input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.wetness.index output file (optional): wetness index grid file name. Existing files of the same name will be overwritten!
#' @param out.carea output file (optional): catchment area grid file name
#' @param out.cslope output file (optional): catchment slope grid file name
#' @param out.mod.carea output file (optional): file name of modified catchment area grid
#' @param suction SAGA GIS 2.1.0+: positive numeric value (optional): the lower this value, the stronger the suction effect; defaults to 10 (more detailed information is currently not available in the SAGA GIS documentation)
#' @param area.type character or numeric (optional): type of area: \code{"absolute"} (or numeric code 0): absolute catchment area; \code{"square root"} (code 1; the default): square root of catchment area; \code{"specific"} (code 2): specific catchment area
#' @param slope.type character or numeric (optional): type of slope: \code{"local"} (or numeric code 0): local slope; \code{"catchment"} (or code 1; the default): catchment slope.
#' @param slope.min numeric (optional): minimum slope; default: 0
#' @param slope.offset numeric (optional): offset slope; default: 0.1
#' @param slope.weight numeric (optional): weighting factor for slope in index calculation; default: 1
#' @param t.param SAGA GIS up to version 2.0.8: positive numeric value (optional): undocumented
#' @param env A SAGA geoprocessing environment, see \code{\link{rsaga.env}}.
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details The SAGA Wetness Index is similar to the Topographic Wetness Index (TWI), but it is based on a modified catchment area calculation (\code{out.mod.carea}), which does not treat the flow as a thin film as done in the calculation of catchment areas in conventional algorithms. As a result, the SWI tends to assign a more realistic, higher potential soil wetness than the TWI to grid cells situated in valley floors with a small vertical distance to a channel.
#'
#' This module and its arguments changed substantially from SAGA GIS 2.0.8 to version 2.1.0. It appears to me that the new algorithm is similar (but not identical) to the old one when using \code{area.type="absolute"} and \code{slope.type="local"} but I haven't tried out all possible options. This help file will be updated as soon as additional documentation becomes available.
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @references Boehner, J., Koethe, R. Conrad, O., Gross, J., Ringeler, A., Selige, T. (2002): Soil Regionalisation by Means of Terrain Analysis and Process Parameterisation. In: Micheli, E., Nachtergaele, F., Montanarella, L. (ed.): Soil Classification 2001. European Soil Bureau, Research Report No. 7, EUR 20398 EN, Luxembourg. pp.213-222.
#'
#' Boehner, J. and Selige, T. (2006): Spatial prediction of soil attributes using terrain analysis and climate regionalisation. In: Boehner, J., McCloy, K.R., Strobl, J. [Ed.]: SAGA - Analysis and Modelling Applications, Goettinger Geographische Abhandlungen, Goettingen: 13-28.
#' @author Alexander Brenning (R interface), Juergen Boehner and Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.parallel.processing}}, \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # using SAGA grids:
#' rsaga.wetness.index("dem.sgrd","swi.sgrd")
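#' # illustrative parameter choices for SAGA GIS 2.1.0+ (not recommendations):
#' rsaga.wetness.index("dem.sgrd", "swi2.sgrd", suction = 20,
#'     area.type = "square root", slope.type = "catchment")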
#' }
#' @keywords spatial interface
#' @export
rsaga.wetness.index = function( in.dem,
out.wetness.index, out.carea, out.cslope,
out.mod.carea,
# since SAGA GIS 2.1.0:
suction, area.type, slope.type, slope.min, slope.offset, slope.weight,
# up to SAGA GIS 2.0.8:
t.param,
env = rsaga.env(), ...)
{
in.dem = default.file.extension(in.dem,".sgrd")
if (missing(out.carea)) {
out.carea = tempfile()
on.exit(unlink(paste(out.carea,".*",sep="")), add = TRUE)
}
if (missing(out.cslope)) {
out.cslope = tempfile()
on.exit(unlink(paste(out.cslope,".*",sep="")), add=TRUE)
}
if (missing(out.mod.carea)) {
out.mod.carea = tempfile()
on.exit(unlink(paste(out.mod.carea,".*",sep="")), add=TRUE)
}
if (env$version == "2.1.0" | env$version == "2.1.1" | env$version == "2.1.2" |
env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
param = list(DEM=in.dem, AREA=out.carea, SLOPE=out.cslope,
AREA_MOD=out.mod.carea, TWI=out.wetness.index)
if (!missing(suction)) {
suction = as.numeric(suction)
if (suction <= 0) stop("'suction' argument must be >0")
param = c(param, SUCTION=suction)
}
if (!missing(area.type)) {
area.type = match.arg.ext(area.type,choices=c("absolute","square root","specific"),base=0,ignore.case=TRUE,numeric=TRUE)
param = c(param, AREA_TYPE=area.type)
}
if (!missing(slope.type)) {
slope.type = match.arg.ext(slope.type,choices=c("local","catchment"),base=0,ignore.case=TRUE,numeric=TRUE)
param = c(param, SLOPE_TYPE=slope.type)
}
if (!missing(slope.min)) {
slope.min = as.numeric(slope.min)
if (slope.min < 0) stop("'slope.min' argument must be >=0")
param = c(param, SLOPE.MIN=slope.min)
}
if (!missing(slope.offset)) {
slope.offset = as.numeric(slope.offset)
if (slope.offset < 0) stop("'slope.offset' argument must be >=0")
param = c(param, SLOPE.OFF=slope.offset)
}
if (!missing(slope.weight)) {
slope.weight = as.numeric(slope.weight)
if (slope.weight < 0) stop("'slope.weight' argument must be >=0")
param = c(param, SLOPE.WEIGHT=slope.weight)
}
if (!missing(t.param))
warning("argument 't.param' (in saga_cmd: T) supported only up to SAGA GIS 2.0.8")
} else {
param = list(DEM=in.dem, C=out.carea, GN=out.cslope,
CS=out.mod.carea, SB=out.wetness.index)
if (!missing(t.param))
param = c(param, T=as.numeric(t.param))
if (!missing(suction) | !missing(area.type) | !missing(slope.type) | !missing(slope.min) | !missing(slope.offset) | !missing(slope.weight))
warning("arguments 'suction', 'area.type', 'slope.min', 'slope.type', 'slope.offset'\n",
"and 'slope.weight' not supported prior to SAGA GIS 2.1.0")
}
rsaga.geoprocessor(lib = "ta_hydrology",
module = "SAGA Wetness Index",
param, ..., env = env)
}
######## Module grid_calculus ########
#' SAGA Module Grid Calculus
#'
#' Perform Arithmetic Operations on Grids
#' @name rsaga.grid.calculus
#' @param in.grids input character vector: SAGA grid files (default file extension: \code{.sgrd})
#' @param out.grid output: grid file resulting from the cell-by-cell application of 'formula' to the grids. Existing files will be overwritten!
#' @param formula character string of formula specifying the arithmetic operation to be performed on the \code{in.grids} (see Details); if this is a formula, only the right hand side will be used.
#' @param coef numeric: coefficient vector to be used for the linear combination of the \code{in.grids}. If \code{coef} has one more element than \code{in.grids}, the first one will be interpreted as an intercept.
#' @param cf.digits integer: number of digits used when converting the \code{coef}ficients to character strings (trailing zeros will be removed)
#' @param remove.zeros logical: if \code{TRUE}, terms (grids) with coefficient (numerically) equal to zero (after rounding to \code{cf.digits} digits) will be removed from the formula
#' @param remove.ones logical: if \code{TRUE} (the default), factors equal to 1 (after rounding to \code{cf.digits} digits) will be removed from the formula
#' @param env RSAGA geoprocessing environment, generated by a call to \code{\link{rsaga.env}}
#' @param ... optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @details The \code{in.grids} are represented in the \code{formula} by the letters \code{a} (for \code{in.grids[1]}), \code{b} etc. Thus, if \code{in.grids[1]} is Landsat TM channel 3 and \code{in.grids[2]} is channel 4, the NDVI formula (TM3-TM4)/(TM3+TM4) can be represented by the character string \code{"(a-b)/(a+b)"} (any spaces are removed) or the formula \code{~(a-b)/(a+b)} in the \code{formula} argument.
#'
#' In addition to +, -, *, and /, the following operators and functions are available for the \code{formula} definition:
#' \itemize{
#' \item \eqn{\hat{\ }}{^} power
#' \item \code{sin(a)} sine
#' \item \code{cos(a)} cosine
#' \item \code{tan(a)} tangent
#' \item \code{asin(a)} arc sine
#' \item \code{acos(a)} arc cosine
#' \item \code{atan(a)} arc tangent
#' \item \code{atan2(a,b)} arc tangent of b/a
#' \item \code{abs(a)} absolute value
#' \item \code{int(a)} convert to integer
#' \item \code{sqr(a)} square
#' \item \code{sqrt(a)} square root
#' \item \code{ln(a)} natural logarithm
#' \item \code{log(a)} base 10 logarithm
#' \item \code{mod(a,b)} modulo
#' \item \code{gt(a, b)} returns 1 if a greater b
#' \item \code{lt(a, b)} returns 1 if a lower b
#' \item \code{eq(a, b)} returns 1 if a equal b
#' \item \code{ifelse(switch, x, y)} returns x if switch equals 1 else y
#' }
#'
#' Using \code{remove.zeros=FALSE} might have the side effect that no data areas in the grid with coefficient 0 are passed on to the results grid. (To be confirmed.)
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{local.function}}, \code{\link{focal.function}}, and \code{\link{multi.focal.function}} for a more flexible framework for combining grids or applying local and focal functions; \code{\link{rsaga.geoprocessor}}, \code{\link{rsaga.env}}
#' @examples
#' \dontrun{
#' # using SAGA grids:
#' # calculate the NDVI from Landsat TM bands 3 and 4:
#' rsaga.grid.calculus(c("tm3.sgrd","tm4.sgrd"), "ndvi.sgrd", ~(a-b)/(a+b))
#' # apply a linear regression equation to grids:
#' coefs = c(20,-0.6)
#' # maybe from a linear regression of mean annual air temperature (MAAT)
#' # against elevation - something like:
#' # coefs = coef( lm( maat ~ elevation ) )
#' rsaga.linear.combination("elevation.sgrd", "maat.sgrd", coefs)
#' # equivalent:
#' rsaga.grid.calculus("elevation.sgrd", "maat.sgrd", "20 - 0.6*a")
#' }
#' @keywords spatial interface
#' @export
rsaga.grid.calculus = function(in.grids, out.grid, formula,
env = rsaga.env(), ...)
{
in.grids = default.file.extension(in.grids, ".sgrd")
in.grids = paste(in.grids, collapse = ";")
if (any(class(formula) == "formula"))
formula = rev( as.character(formula) )[1]
formula = gsub(" ", "", formula)
if (env$version == "2.0.4") {
param = list( INPUT = in.grids, RESULT = out.grid,
FORMUL = formula )
} else {
param = list( GRIDS = in.grids, RESULT = out.grid,
FORMULA = formula )
}
rsaga.geoprocessor(lib = "grid_calculus",
module = "Grid Calculator", # was = 1
param = param, env = env, ...)
}
#' @rdname rsaga.grid.calculus
#' @name rsaga.linear.combination
#' @export
rsaga.linear.combination = function(in.grids, out.grid, coef,
cf.digits = 16, remove.zeros = FALSE, remove.ones = TRUE,
env = rsaga.env(), ...)
{
fmt = paste("%.", cf.digits, "f", sep = "")
coef = sprintf(fmt, coef)
zero = sprintf(fmt, 0)
omit = rep(FALSE, length(coef))
if (length(coef) == length(in.grids)) { # no intercept provided
coef = c(NA, coef)
omit = c(TRUE, omit)
}
nvars = length(coef)
if (nvars != length(in.grids) + 1)
stop("'coef' must have length 'length(in.grids)' or 'length(in.grids)+1'")
# Simplify the formula by removing terms that are zero
# (after rounding to the specified number of digits):
if (remove.zeros)
omit = omit | (coef == zero)
# Zero intercept is always removed:
omit[1] = omit[1] | (coef[1] == zero)
# Remove zeros at the end of the coefficients:
for (i in 1:nvars) {
if (omit[i]) next
# Are there any digits at all?
if (length(grep(".", coef[i], fixed = TRUE)) == 0) next
nc = nchar(coef[i])
# Remove all trailing zeros:
while (substr(coef[i], nc, nc) == "0") {
coef[i] = substr(coef[i], 1, nc - 1)
nc = nchar(coef[i])
}
# Remove trailing decimal point:
if (substr(coef[i], nc, nc) == ".")
coef[i] = substr(coef[i], 1, nc - 1)
}
# Set up the formula:
ltrs = letters[ 1 : sum(!omit[-1]) ]
if (!omit[1]) ltrs = c("intercept", ltrs)
formula = paste(coef[ !omit ], ltrs,
collapse = "+", sep = "*")
formula = gsub("*intercept", "", formula, fixed = TRUE)
formula = gsub("+-", "-", formula, fixed = TRUE)
if (remove.ones) {
formula = gsub("-1*", "-", formula, fixed = TRUE)
formula = gsub("+1*", "+", formula, fixed = TRUE)
}
rsaga.grid.calculus(in.grids = in.grids[!omit[-1]], out.grid = out.grid,
formula = formula, env = env, ...)
}
######## Module shapes_grid ########
#' Contour Lines from a Grid
#'
#' Creates a contour lines shapefile from a grid file in SAGA grid format.
#' @name rsaga.contour
#' @param in.grid input: digital elevation model (DEM) as SAGA grid file (default file extension: \code{.sgrd})
#' @param out.shapefile output: contour line shapefile. Existing files will be overwritten!
#' @param zstep,zmin,zmax lower limit, upper limit, and equidistance of contour lines
#' @param vertex optional parameter: vertex type for resulting contours. Default \code{"xy"} (or 0). Only available with SAGA GIS 2.1.3+. \itemize{
#' \item [0] \code{"xy"}
#' \item [1] \code{"xyz"}}
#' @param env A SAGA geoprocessing environment, see \code{\link{rsaga.env}}
#' @param ... arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @return The type of object returned depends on the \code{intern} argument passed to the \code{\link{rsaga.geoprocessor}}. For \code{intern=FALSE} it is a numerical error code (0: success), or otherwise (the default) a character vector with the module's console output.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA module)
#' @seealso \code{\link{rsaga.geoprocessor}}
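#' @examples
#' \dontrun{
#' # hypothetical file names: 10 m contour lines between 400 m and 800 m
#' rsaga.contour("dem", "contours", zstep = 10, zmin = 400, zmax = 800)
#' }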
#' @keywords spatial interface
#' @export
rsaga.contour = function(in.grid,out.shapefile,zstep,zmin,zmax,vertex="xy",env=rsaga.env(),...) {
in.grid = default.file.extension(in.grid,".sgrd")
# 'INPUT' changed to 'GRID' with SAGA 2.1.3
if(env$version != "2.1.3" & env$version != "2.1.4" & env$version != "2.2.0" & env$version != "2.2.1" &
env$version != "2.2.2" & env$version != "2.2.3"){
param = list(INPUT=in.grid,CONTOUR=out.shapefile)
} else {
param = list(GRID=in.grid,CONTOUR=out.shapefile)
}
if (!missing(zmin)) param = c(param, ZMIN=as.numeric(zmin))
if (!missing(zmax)) param = c(param, ZMAX=as.numeric(zmax))
if (!missing(zstep)) {
stopifnot(as.numeric(zstep)>0)
param = c(param, ZSTEP=as.numeric(zstep))
}
v.choices = c("xy", "xyz")
vertex = match.arg.ext(vertex, choices=v.choices,
numeric=TRUE, ignore.case=TRUE, base=0)
if (!missing(vertex)) {
if (env$version == "2.1.3" | env$version == "2.1.4") {
param = c(param, VERTEX=vertex)
}
}
rsaga.geoprocessor(lib = "shapes_grid",
module = "Contour Lines from Grid",
param, env = env,...)
}
#' Add Grid Values to Point Shapefile
#'
#' Pick values from SAGA grids and attach them as a new variables to a point shapefile.
#' @name rsaga.add.grid.values.to.points
#' @param in.grids Input: character vector with names of (one or more) SAGA GIS grid files to be converted into a point shapefile.
#' @param in.shapefile Input point shapefile (default extension: \code{.shp}).
#' @param out.shapefile Output point shapefile (default extension: \code{.shp}).
#' @param method interpolation method to be used; choices: nearest neighbour interpolation (default), bilinear interpolation, inverse distance weighting, bicubic spline interpolation, B-splines.
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details Retrieves information from the selected grids at the positions of the points of the selected points layer and adds it to the resulting layer.
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA modules)
#' @note This function uses module \code{Add Grid Values to Points} in SAGA GIS library \code{shapes_grid}.
#' @seealso \code{\link{pick.from.points}}, \code{\link{pick.from.ascii.grid}}, \code{\link{pick.from.saga.grid}}, \code{\link{rsaga.grid.to.points}}
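#' @examples
#' \dontrun{
#' # hypothetical sketch: attach DEM and slope values to a point shapefile
#' # using bilinear interpolation (all file names are placeholders)
#' rsaga.add.grid.values.to.points("fieldsamples", c("dem", "slope"),
#'     "fieldsamples_grids", method = "bilinear")
#' }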
#' @keywords spatial interface
#' @export
rsaga.add.grid.values.to.points = function(in.shapefile,
in.grids, out.shapefile,
method = c("nearest.neighbour", "bilinear",
"idw", "bicubic.spline", "b.spline"), ...)
{
in.grids = default.file.extension(in.grids,".sgrd")
in.grids = paste(in.grids, collapse = ";")
# check if this is SAGA version dependent:
in.shapefile = default.file.extension(in.shapefile,".shp")
out.shapefile = default.file.extension(out.shapefile,".shp")
method = match.arg.ext(method, base = 0, ignore.case = TRUE, numeric = TRUE)
param = list(SHAPES = in.shapefile, GRIDS = in.grids,
RESULT = out.shapefile, INTERPOL = method)
rsaga.geoprocessor(lib = "shapes_grid",
module = "Add Grid Values to Points", # was: = 0
param, ...)
}
#' Convert SAGA grid file to point shapefile
#'
#' Convert SAGA grid file to point (or polygon) shapefile - either completely or only a random sample of grid cells.
#' @name rsaga.grid.to.points
#' @param in.grids Input: names of (possibly several) SAGA GIS grid files to be converted into a point shapefile.
#' @param in.grid Input: SAGA grid file from which to sample.
#' @param out.shapefile Output: point shapefile (default extension: \code{.shp}). Existing files will be overwritten!
#' @param in.clip.polygons optional polygon shapefile to be used for clipping/masking an area
#' @param exclude.nodata logical (default: \code{TRUE}): skip 'nodata' grid cells?
#' @param type character string: \code{"nodes"}: create point shapefile of grid center points; \code{"cells"} (only supported by SAGA GIS 2.0.6+): create polygon shapefile with grid cell boundaries
#' @param freq integer >=1: sampling frequency: on average 1 out of 'freq' grid cells are selected
#' @param env RSAGA geoprocessing environment created by \code{\link{rsaga.env}}; required by \code{rsaga.grid.to.points} to determine version-dependent SAGA module name and arguments
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}
#' @author Alexander Brenning (R interface), Olaf Conrad (SAGA modules)
#' @note These functions use modules \code{Grid Values to Shapes} (pre-2.0.6 name: \code{Grid Values to Points}) and \code{Grid Values to Points (randomly)} in SAGA library \code{shapes_grid}.
#'
#' The SAGA 2.0.6+ module \code{Grid Values to Shapes} is more flexible than the earlier versions as it allows to create grid cell polygons instead of center points (see argument \code{type}).
#' @seealso \code{\link{rsaga.add.grid.values.to.points}}
#' @examples
#' \dontrun{
#' # one point per grid cell, exclude nodata areas:
#' rsaga.grid.to.points("dem", "dempoints")
#' # take only every 20th point, but do not exclude nodata areas:
#' rsaga.grid.to.points.randomly("dem", "dempoints20", freq = 20)
#' }
#' @keywords spatial interface
#' @export
rsaga.grid.to.points = function(in.grids, out.shapefile,
in.clip.polygons, exclude.nodata = TRUE,
type = "nodes", env = rsaga.env(), ...)
{
in.grids = default.file.extension(in.grids,".sgrd")
in.grids = paste(in.grids, collapse = ";")
type = match.arg.ext(type, numeric=TRUE, ignore.case=TRUE, base=0,
choices=c("nodes","cells"))
if (type == 1 & (env$version == "2.0.4" | env$version == "2.0.5")) {
type = 0
warning("type == 'cells' not supported by SAGA 2.0.4 and 2.0.5; using type = 'nodes'")
}
param = list(GRIDS = in.grids)
if (env$version == "2.0.4" | env$version == "2.0.5") {
param = c(param, POINTS = out.shapefile)
} else param = c(param, SHAPES = out.shapefile)
param = c(param, NODATA = exclude.nodata)
if (!missing(in.clip.polygons))
param = c(param, POLYGONS = in.clip.polygons)
if (!(env$version == "2.0.4" | env$version == "2.0.5"))
param = c(param, TYPE = type)
module = "Grid Values to Shapes"
if (!rsaga.module.exists("shapes_grid",module,env=env))
#if (env$version == "2.0.4" | env$version == "2.0.5")
module = "Grid Values to Points"
rsaga.geoprocessor(lib = "shapes_grid",
module = module, # was: = 3
param, env = env, ...)
}
#' @rdname rsaga.grid.to.points
#' @name rsaga.grid.to.points.randomly
#' @export
rsaga.grid.to.points.randomly = function(in.grid,
out.shapefile, freq, ...)
{
in.grid = default.file.extension(in.grid, ".sgrd")
out.shapefile = default.file.extension(out.shapefile, ".shp")
if (freq < 1) stop("'freq' must be an integer >=1")
param = list(GRID = in.grid, FREQ = freq, POINTS = out.shapefile)
rsaga.geoprocessor(lib = "shapes_grid",
module = "Grid Values to Points (randomly)", # was: = 4
param, ...)
}
#' Spatial Interpolation Methods
#'
#' Spatial interpolation of point data using inverse distance to a power (inverse distance weighting, IDW), nearest neighbors, or modified quadratic shephard.
#' @name rsaga.inverse.distance
#' @param in.shapefile Input: point shapefile (default extension: \code{.shp}).
#' @param out.grid Output: filename for interpolated grid (SAGA grid file). Existing files will be overwritten!
#' @param field numeric or character: number or name of attribute in the shapefile's attribute table to be interpolated; the first attribute is represented by a zero.
#' @param power numeric (>0): exponent used in inverse distance weighting (usually 1 or 2)
#' @param maxdist numeric: maximum distance of points to be used for inverse distance interpolation (search radius); no search radius is applied when this argument is missing or equals \code{Inf}
#' @param nmax Maximum number of nearest points to be used for interpolation; \code{nmax=Inf} is a valid value (no upper limit)
#' @param quadratic.neighbors integer >=5; default 13.
#' @param weighting.neighbors integer >=3; default 19.
#' @param target required argument of type list: parameters identifying the target area, e.g. the x/y extent and cellsize, or name of a reference grid; see \code{\link{rsaga.target}}.
#' @param env RSAGA geoprocessing environment created by \code{\link{rsaga.env}}, required because module(s) depend(s) on SAGA version
#' @param ... Optional arguments to be passed to \code{\link{rsaga.geoprocessor}}, including the \code{env} RSAGA geoprocessing environment.
#' @details These functions use modules from the \code{grid_gridding} SAGA GIS library. They do not support SAGA GIS 2.0.4, which differs in some argument names and parameterizations. Target grid parameterization by grid file name currently doesn't work with SAGA GIS 2.1.0 Release Candidate 1 (see also \code{\link{rsaga.target}}); stay tuned for future updates and fixes.
#' @references QSHEP2D: Fortran routines implementing the Quadratic Shepard method for bivariate interpolation of scattered data (see R. J. Renka, ACM TOMS 14 (1988) pp.149-150). Classes: E2b. Interpolation of scattered, non-gridded multivariate data.
#' @author Alexander Brenning (R interface), Andre Ringeler and Olaf Conrad (SAGA modules)
#' @note The 'Inverse Distance Weighted' module of SAGA GIS supports not only inverse-distance weighted interpolation, but also exponential and other weighting schemes (command line argument WEIGHTING); these are however not accessible through this function, but only through the \code{rsaga.geoprocessor}, if needed. See \code{rsaga.get.usage("grid_gridding","Inverse Distance Weighted")} for details.
#'
#' See the example section in the help file for \code{\link[shapefiles]{write.shapefile}} in package \code{shapefiles} to learn how to apply these interpolation functions to a shapefile exported from a data.frame.
#'
#' Modified Quadratic Shephard method: based on module 660 in TOMS (see references).
#' @seealso \code{\link{rsaga.target}}; \code{\link[gstat]{idw}} in package \code{gstat}.
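#' @examples
#' \dontrun{
#' # sketch only: 'tgt' is assumed to have been created beforehand with
#' # rsaga.target(); shapefile and grid names are placeholders
#' rsaga.inverse.distance("points", "idw", field = 1, power = 2,
#'     maxdist = 1000, target = tgt)
#' # nearest-neighbour interpolation of the same attribute:
#' rsaga.nearest.neighbour("points", "nn", field = 1, target = tgt)
#' }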
#' @keywords spatial interface
#' @export
rsaga.inverse.distance = function(in.shapefile, out.grid, field,
power = 1, maxdist, nmax = 100,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.inverse.distance doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (power <= 0) stop("'power' must be >0")
if (field < 0) stop("'field' must be an integer >=0")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
module = "Inverse Distance Weighted"
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field,
WEIGHTING = 0, # IDW
MODE = 0, # search mode: all directions
POWER = power)
is.global = (missing(maxdist))
if (!missing(maxdist)) {
if (maxdist <= 0) stop("'maxdist' must be >0")
if (maxdist == Inf) is.global = TRUE
}
if (is.global) {
param = c(param, list(RANGE = 1))
} else
param = c(param, list(RANGE = 0, RADIUS = maxdist))
#use.all = (missing(nmax))
#if (!missing(nmax)) {
if (nmax <= 0) stop("'nmax' must be an integer >0, or Inf")
use.all = (nmax == Inf)
#}
if (use.all) {
param = c(param, list(POINTS = 1))
} else
param = c(param, list(POINTS = 0, NPOINTS = nmax))
param = c(param, target)
# Translate some argument names for SAGA GIS 2.1.0+:
if (substr(env$version,1,4) != "2.0.") {
nm = names(param)
nm[ nm == "RANGE" ] = "SEARCH_RANGE"
nm[ nm == "RADIUS" ] = "SEARCH_RADIUS"
nm[ nm == "POINTS" ] = "SEARCH_POINTS_ALL"
nm[ nm == "NPOINTS" ] = "SEARCH_POINTS_MAX"
nm[ nm == "MODE" ] = "SEARCH_DIRECTION"
nm[ nm == "POWER" ] = "WEIGHT_POWER"
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
}
names(param) = nm
# Translate some argument names for SAGA 2.2.0
if (substr(env$version,1,4) == "2.2."){
nm = names(param)
nm[ nm == "WEIGHTING" ] = "DW_WEIGHTING"
nm[ nm == "WEIGHT_POWER" ] = "DW_IDW_POWER"
nm[ nm == "WEIGHT_BANDWIDTH" ] = "DW_BANDWIDTH"
}
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = module,
param = param, env = env, ...)
}
#' @rdname rsaga.inverse.distance
#' @name rsaga.nearest.neighbour
#' @export
rsaga.nearest.neighbour = function(in.shapefile, out.grid, field,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.nearest.neighbour doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (field < 0)
stop("'field' must be an integer >=0")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field)
param = c(param, target)
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm = names(param)
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = "Nearest Neighbour", # was: = 2 (=1 in earlier SAGA version)
param, env = env, ...)
}
#' @rdname rsaga.inverse.distance
#' @name rsaga.modified.quadratic.shephard
#' @export
rsaga.modified.quadratic.shephard = function(in.shapefile, out.grid, field,
quadratic.neighbors = 13, weighting.neighbors = 19,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.modified.quadratic.shephard doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (field < 0)
stop("'field' must be an integer >=0")
if (quadratic.neighbors < 5)
stop("'quadratic.neighbors' must be an integer >=5")
    if (weighting.neighbors < 3)
        stop("'weighting.neighbors' must be an integer >=3")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field,
QUADRATIC_NEIGHBORS = quadratic.neighbors,
WEIGHTING_NEIGHBORS = weighting.neighbors)
param = c(param, target)
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm = names(param)
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = "Modifed Quadratic Shepard", # = 4 (earlier SAGA versions: =2)
param, env = env, ...)
}
#' @rdname rsaga.inverse.distance
#' @name rsaga.triangulation
#' @export
rsaga.triangulation = function(in.shapefile, out.grid, field,
target, env = rsaga.env(), ...)
{
if (env$version == "2.0.4")
stop("rsaga.triangulation doesn't support SAGA GIS 2.0.4 any longer\n",
" because some of the arguments have changed")
stopifnot(!missing(target))
if (field < 0)
stop("'field' must be an integer >=0")
in.shapefile = default.file.extension(in.shapefile, ".shp")
out.grid = default.file.extension(out.grid, ".sgrd")
if (target$TARGET == 1) {
if (target$GRID_GRID != out.grid) {
rsaga.copy.sgrd(target$GRID_GRID, out.grid, env = env)
target$GRID_GRID = out.grid
}
}
param = list(
USER_GRID = out.grid,
SHAPES = in.shapefile,
FIELD = field)
param = c(param, target)
# TARGET parameters changed SAGA 2.1.3:
if (env$version == "2.1.3" | env$version == "2.1.4" | env$version == "2.2.0" |
env$version == "2.2.1" | env$version == "2.2.2" | env$version == "2.2.3") {
nm = names(param)
nm[ nm == "USER_GRID" ] = "TARGET_OUT_GRID"
nm[ nm == "TARGET" ] = "TARGET_DEFINITION"
nm[ nm == "GRID_GRID" ] = "TARGET_TEMPLATE"
nm[ nm == "USER_SIZE" ] = "TARGET_USER_SIZE"
nm[ nm == "USER_FIT" ] = "TARGET_USER_FITS"
nm[ nm == "USER_XMIN" ] = "TARGET_USER_XMIN"
nm[ nm == "USER_XMAX" ] = "TARGET_USER_XMAX"
nm[ nm == "USER_YMIN" ] = "TARGET_USER_YMIN"
nm[ nm == "USER_YMAX" ] = "TARGET_USER_YMAX"
names(param) = nm
}
rsaga.geoprocessor(lib = "grid_gridding",
module = "Triangulation",
param, env = env, ...)
}
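## Usage sketch (illustrative only, not run): it assumes a working SAGA GIS
## installation, a point shapefile "points.shp" with a numeric attribute in
## field 1, and a target definition 'tgt' built beforehand (e.g. with
## rsaga.target()); the file names and field index are placeholders.
# rsaga.triangulation(in.shapefile = "points.shp", out.grid = "tin.sgrd",
#                     field = 1, target = tgt)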
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.Cat.R
\name{plot.Cat}
\alias{plot.Cat}
\alias{plot,Cat}
\alias{plot,Cat-method}
\title{Plotting function for Cat object}
\usage{
\S4method{plot}{Cat}(x, item, plotType, xlim = c(-5, 5), ...)
}
\arguments{
\item{x}{Cat object.}
\item{item}{Numeric. Item index.}
\item{plotType}{Character. Either "IRF", "ICC", or "IIF" for item response functions, item characteristic functions, and item information functions, respectively.}
\item{xlim}{Vector. Range of the x-axis.}
\item{...}{Other arguments passed to plot().}
}
\description{
Function to plot item response functions, item characteristic functions, and item information functions
for Cat objects.
}
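% Illustrative usage sketch (not part of the roxygen-generated documentation);
% it assumes the catSurv 'npi' data and the ltmCat() constructor are available.
\examples{
\dontrun{
ltm_cat <- ltmCat(npi)
plot(ltm_cat, item = 1, plotType = "IRF")
plot(ltm_cat, item = 1, plotType = "IIF", xlim = c(-4, 4))
}
}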
|
/man/plot.Cat.Rd
|
no_license
|
erossiter/catSurv
|
R
| false | true | 739 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfm-weighting.R
\docType{methods}
\name{weight}
\alias{smoother}
\alias{weight}
\alias{weight,dfm,character-method}
\alias{weight,dfm,numeric-method}
\title{weight the feature frequencies in a dfm}
\usage{
weight(x, type, ...)
\S4method{weight}{dfm,character}(x, type = c("frequency", "relFreq",
"relMaxFreq", "logFreq", "tfidf"), ...)
\S4method{weight}{dfm,numeric}(x, type, ...)
smoother(x, smoothing = 1)
}
\arguments{
\item{x}{document-feature matrix created by \link{dfm}}
\item{type}{a label of the weight type, or a named numeric vector of values to apply to the dfm. One of:
\describe{
\item{\code{"frequency"}}{integer feature count (default when a dfm is created)}
\item{\code{"relFreq"}}{the proportion of the feature counts of total feature counts (aka relative frequency)}
\item{\code{"relMaxFreq"}}{the proportion of the feature counts of the highest feature count in a document}
\item{\code{"logFreq"}}{natural logarithm of the feature count}
\item{\code{"tfidf"}}{Term-frequency * inverse document frequency. For a
full explanation, see, for example,
\url{http://nlp.stanford.edu/IR-book/html/htmledition/term-frequency-and-weighting-1.html}.
This implementation will not return negative values. For finer-grained
control, call \code{\link{tfidf}} directly.}
\item{a named numeric vector}{a named numeric vector of weights to be applied to the dfm,
where the names of the vector correspond to feature labels of the dfm, and
the weights will be applied as multipliers to the existing feature counts
  for the corresponding named features. Any features not named will be
assigned a weight of 1.0 (meaning they will be unchanged).}
}}
\item{...}{not currently used. For finer grained control, consider calling \code{\link{tf}} or \code{\link{tfidf}} directly.}
\item{smoothing}{constant added to the dfm cells for smoothing, default is 1}
}
\value{
The dfm with weighted values.
}
\description{
Returns a document by feature matrix with the feature frequencies weighted
according to one of several common methods.
}
\details{
This converts a matrix from sparse to dense format, so may exceed memory
requirements depending on the size of your input matrix.
}
\examples{
dtm <- dfm(inaugCorpus)
x <- apply(dtm, 1, function(tf) tf/max(tf))
topfeatures(dtm)
normDtm <- weight(dtm, "relFreq")
topfeatures(normDtm)
maxTfDtm <- weight(dtm, type="relMaxFreq")
topfeatures(maxTfDtm)
logTfDtm <- weight(dtm, type="logFreq")
topfeatures(logTfDtm)
tfidfDtm <- weight(dtm, type="tfidf")
topfeatures(tfidfDtm)
# combine these methods for more complex weightings, e.g. as in Section 6.4
# of Introduction to Information Retrieval
head(logTfDtm <- weight(dtm, type="logFreq"))
head(tfidf(logTfDtm, normalize = FALSE))
\dontshow{
testdfm <- dfm(inaugTexts[1:5], verbose = FALSE)
for (w in c("frequency", "relFreq", "relMaxFreq", "logFreq", "tfidf")) {
testw <- weight(testdfm, w)
cat("\\n\\n=== weight() TEST for:", w, "; class:", class(testw), "\\n")
head(testw)
}}
# apply numeric weights
str <- c("apple is better than banana", "banana banana apple much better")
weights <- c(apple = 5, banana = 3, much = 0.5)
(mydfm <- dfm(str, ignoredFeatures = stopwords("english"), verbose = FALSE))
weight(mydfm, weights)
}
\author{
Paul Nulty and Kenneth Benoit
}
\references{
Manning, Christopher D., Prabhakar Raghavan, and Hinrich Schutze.
\emph{Introduction to Information Retrieval}. Vol. 1. Cambridge: Cambridge
University Press, 2008.
}
\seealso{
\code{\link{tfidf}}
}
|
/man/weight.Rd
|
no_license
|
HaiyanLW/quanteda
|
R
| false | true | 3,602 |
rd
|
####--- All "Math" and "Math2" group methods for all Matrix classes (incl sparseVector) ------
#### ==== ===== but diagonalMatrix -> ./diagMatrix.R and abIndex.R
#### ~~~~~~~~~~~~ ~~~~~~~~~
###--------- Csparse
Math.vecGenerics <- grep("^cum", getGroupMembers("Math"), value=TRUE)
## "cummax" .. "cumsum" : work on full *vector* and return vector also for matrix input
setMethod("Math",
signature(x = "CsparseMatrix"),
function(x) {
if(.Generic %nin% Math.vecGenerics && is0(callGeneric(0.))) {
## sparseness, symm., triang.,... preserved
cl <- class(x)
has.x <- !extends(cl, "nsparseMatrix")
## has.x <==> *not* nonzero-pattern == "nMatrix"
if(has.x) {
type <- storage.mode(x@x)
r <- callGeneric(x@x)
} else { ## nsparseMatrix
type <- ""
r <- rep.int(as.double(callGeneric(TRUE)),
switch(.sp.class(cl),
CsparseMatrix = length(x@i),
TsparseMatrix = length(x@i),
RsparseMatrix = length(x@j)))
}
if(type == storage.mode(r)) {
x@x <- r
x
} else { ## e.g. abs( <lgC> ) --> integer Csparse
## FIXME: when we have 'i*' classes, use them here:
rx <- new(sub("^.", "d", cl))
rx@x <- as.double(r)
## result is "same"
sNams <- slotNames(cl)
for(nm in sNams[sNams != "x"])
slot(rx, nm) <- slot(x, nm)
rx
}
} else { ## no sparseness (or no matrix!):
callGeneric(C2dense(x))
}
}) ## {Math}
###--------- ddenseMatrix
##' Used for dt[rp]Matrix, ds[yp]Matrix (and subclasses, e.g. dpo*(), cor*() !):
##' as dgeMatrix has direct method:
setMethod("Math", "ddenseMatrix", function(x)
{
if(.Generic %in% Math.vecGenerics) # vector result
callGeneric(as(x,"dgeMatrix")@x)
else if(is(x, "symmetricMatrix")) { ## -> result symmetric: keeps class
cl <- .class0(x)
if(cl %in% (scl <- c("dsyMatrix","dspMatrix"))) {
x@x <- callGeneric(x@x)
x
} else { ## *sub*class of dsy/dsp: e.g., dpoMatrix
## -> "[dsy/dsp]Matrix":
x <- as(x, scl[match(scl, names(getClass(cl)@contains), nomatch=0L)])
x@x <- callGeneric(x@x)
x
}
}
else { ## triangularMatrix (no need for testing), includes, e.g. "corMatrix"!
if(is0(f0 <- callGeneric(0.))) { ## -> result remains triangular
cl <- .class0(x)
if(cl %in% (scl <- c("dtrMatrix","dtpMatrix"))) {
x@x <- callGeneric(x@x)
x
} else { ## *sub*class of dtr/dtp: e.g., corMatrix
## -> "[dtr/dtp]Matrix":
x <- as(x, scl[match(scl, names(getClass(cl)@contains), nomatch=0L)])
x@x <- callGeneric(x@x)
x
}
}
else { ## result is general: *could* use f0 for the whole 0-triangle,
## but this is much easier:
callGeneric(as(x,"dgeMatrix"))
}
}
})
###--------- denseMatrix
## FIXME: Once we have integer (idense..), sign(), abs(.) may need different:
setMethod("Math", signature(x = "denseMatrix"),
function(x) callGeneric(as(x, "dMatrix")))
# -> ./ddenseMatrix.R has next method
###--------- dgeMatrix
setMethod("Math", signature(x = "dgeMatrix"),
function(x) {
if(.Generic %in% Math.vecGenerics)
callGeneric(x@x)
else {
x@x <- callGeneric(x@x)
x
}
})
###--------- diagMatrix
## Till 2014-08-04, went via "dtC" (triangular) -- "Math" method in ./Math.R
setMethod("Math", signature(x = "diagonalMatrix"),
function(x) {
if(.Generic %in% Math.vecGenerics) # vector result
callGeneric(.diag2mat(x))
else if(is0(f0 <- callGeneric(0.))) { ## result remains diagonal
cl <- class(x)
if(!extends(cl, "ddiMatrix"))
cl <- class(x <- as(x, "dMatrix"))
##d type <- storage.mode(x@x)
if(x@diag == "U") {
##d if((f1 <- callGeneric(as1(mod=type))) == 1 && type == "double")
if((f1 <- callGeneric(1.)) == 1)
return(x) # [ddi] as f(0) = 0, f(1) = 1
else {
n <- x@Dim[1]
return( Diagonal(n=n, x = rep.int(f1, n)) )
}
}
r <- callGeneric(x@x)
##d if(type == storage.mode(r)) {
x@x <- r
x
##d } else { ## e.g. abs( <lgC> ) --> integer Csparse
##d ## FIXME: when we have 'i*' classes, use them here:
##d rx <- new(sub("^.", "d", cl))
##d rx@x <- as.double(r)
##d ## result is "same"
##d sNams <- slotNames(cl)
##d for(nm in sNams[sNams != "x"])
##d slot(rx, nm) <- slot(x, nm)
##d rx
##d }
} else { ## no sparseness, i.e., no diagonal, but still symmetric:
## FIXME: gain efficiency by reusing f0 for *all* off-diagonal entries!
callGeneric(as(as(as(.diag2sT(x), "dMatrix"), "denseMatrix"), "dspMatrix"))
}
}) ## {Math}
## NB: "Math2" (round, signif) for diagMatrix is perfectly via "dMatrix"
###--------- dMatrix
## Use these as "catch-all" -- more specific methods are for sub-classes (sparse)
setMethod("Math2", signature(x = "dMatrix"),
## Assume that Generic(u, k) |--> u for u in {0,1}
## which is true for round(), signif() ==> all structure maintained
function(x, digits) {
x@x <- callGeneric(x@x, digits = digits)
x
})
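## Quick illustration (a sketch, with the package attached):
##   M <- Matrix(c(0, 1.26, 0, -2.71), 2, 2, sparse = TRUE)
##   round(M, 1)  # only the stored nonzeros are rounded; class and sparsity are kept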
## the same, first coercing to "dMatrix":
setMethod("Math2", signature(x = "Matrix"),
function(x, digits) {
x <- as(x, "dMatrix")
x@x <- callGeneric(x@x, digits = digits)
x
})
###--------- sparseMatrix
setMethod("Math", signature(x = "sparseMatrix"),
function(x) callGeneric(as(x, "CsparseMatrix")))
###--------- sparseVector
setMethod("Math", signature(x = "sparseVector"),
function(x) {
if(.Generic %nin% Math.vecGenerics && is0(callGeneric(0.))) {
## sparseness preserved
cld <- getClassDef(cx <- class(x))
kind <- .M.kindC(cld)# "d", "n", "l", "i", "z", ...
has.x <- kind != "n"
if(has.x) {
rx <- callGeneric(x@x)
if(kind == "d") {
x@x <- rx
x
}
else {
new("dsparseVector", x = rx, i = x@i, length = x@length)
}
} else { ## kind == "n"
new("dsparseVector", x = rep.int(callGeneric(1), length(x@i)),
i = x@i, length = x@length)
}
} else { ## dense
callGeneric(sp2vec(x))
}
})
setMethod("Math2", signature(x = "dsparseVector"),
## Assume that Generic(u, k) |--> u for u in {0,1}
## which is true for round(), signif() ==> all structure maintained
function(x, digits) {
x@x <- callGeneric(x@x, digits = digits)
x
})
## the same, first coercing to "dsparseVector":
setMethod("Math2", signature(x = "sparseVector"),
function(x, digits) {
x <- as(x, "dsparseVector")
x@x <- callGeneric(x@x, digits = digits)
x
})
|
/branches/Matrix-new-SuiteSparse/R/Math.R
|
no_license
|
LTLA/Matrix
|
R
| false | false | 6,955 |
r
|
####--- All "Math" and "Math2" group methods for all Matrix classes (incl sparseVector) ------
#### ==== ===== but diagonalMatrix -> ./diagMatrix.R and abIndex.R
#### ~~~~~~~~~~~~ ~~~~~~~~~
###--------- Csparse
Math.vecGenerics <- grep("^cum", getGroupMembers("Math"), value=TRUE)
## "cummax" .. "cumsum" : work on full *vector* and return vector also for matrix input
setMethod("Math",
signature(x = "CsparseMatrix"),
function(x) {
if(.Generic %nin% Math.vecGenerics && is0(callGeneric(0.))) {
## sparseness, symm., triang.,... preserved
cl <- class(x)
has.x <- !extends(cl, "nsparseMatrix")
## has.x <==> *not* nonzero-pattern == "nMatrix"
if(has.x) {
type <- storage.mode(x@x)
r <- callGeneric(x@x)
} else { ## nsparseMatrix
type <- ""
r <- rep.int(as.double(callGeneric(TRUE)),
switch(.sp.class(cl),
CsparseMatrix = length(x@i),
TsparseMatrix = length(x@i),
RsparseMatrix = length(x@j)))
}
if(type == storage.mode(r)) {
x@x <- r
x
} else { ## e.g. abs( <lgC> ) --> integer Csparse
## FIXME: when we have 'i*' classes, use them here:
rx <- new(sub("^.", "d", cl))
rx@x <- as.double(r)
## result is "same"
sNams <- slotNames(cl)
for(nm in sNams[sNams != "x"])
slot(rx, nm) <- slot(x, nm)
rx
}
} else { ## no sparseness (or no matrix!):
callGeneric(C2dense(x))
}
}) ## {Math}
###--------- ddenseMatrix
##' Used for dt[rp]Matrix, ds[yp]Matrix (and subclasses, e.g. dpo*(), cor*() !):
##' as dgeMatrix has direct method:
setMethod("Math", "ddenseMatrix", function(x)
{
if(.Generic %in% Math.vecGenerics) # vector result
callGeneric(as(x,"dgeMatrix")@x)
else if(is(x, "symmetricMatrix")) { ## -> result symmetric: keeps class
cl <- .class0(x)
if(cl %in% (scl <- c("dsyMatrix","dspMatrix"))) {
x@x <- callGeneric(x@x)
x
} else { ## *sub*class of dsy/dsp: e.g., dpoMatrix
## -> "[dsy/dsp]Matrix":
x <- as(x, scl[match(scl, names(getClass(cl)@contains), nomatch=0L)])
x@x <- callGeneric(x@x)
x
}
}
else { ## triangularMatrix (no need for testing), includes, e.g. "corMatrix"!
if(is0(f0 <- callGeneric(0.))) { ## -> result remains triangular
cl <- .class0(x)
if(cl %in% (scl <- c("dtrMatrix","dtpMatrix"))) {
x@x <- callGeneric(x@x)
x
} else { ## *sub*class of dtr/dtp: e.g., corMatrix
## -> "[dtr/dtp]Matrix":
x <- as(x, scl[match(scl, names(getClass(cl)@contains), nomatch=0L)])
x@x <- callGeneric(x@x)
x
}
}
else { ## result is general: *could* use f0 for the whole 0-triangle,
## but this is much easier:
callGeneric(as(x,"dgeMatrix"))
}
}
})
###--------- denseMatrix
## FIXME: Once we have integer (idense..), sign(), abs(.) may need different:
setMethod("Math", signature(x = "denseMatrix"),
function(x) callGeneric(as(x, "dMatrix")))
# -> ./ddenseMatrix.R has next method
###--------- dgeMatrix
setMethod("Math", signature(x = "dgeMatrix"),
function(x) {
if(.Generic %in% Math.vecGenerics)
callGeneric(x@x)
else {
x@x <- callGeneric(x@x)
x
}
})
###--------- diagMatrix
## Till 2014-08-04, went via "dtC" (triangular) -- "Math" method in ./Math.R
setMethod("Math", signature(x = "diagonalMatrix"),
function(x) {
if(.Generic %in% Math.vecGenerics) # vector result
callGeneric(.diag2mat(x))
else if(is0(f0 <- callGeneric(0.))) { ## result remains diagonal
cl <- class(x)
if(!extends(cl, "ddiMatrix"))
cl <- class(x <- as(x, "dMatrix"))
##d type <- storage.mode(x@x)
if(x@diag == "U") {
##d if((f1 <- callGeneric(as1(mod=type))) == 1 && type == "double")
if((f1 <- callGeneric(1.)) == 1)
return(x) # [ddi] as f(0) = 0, f(1) = 1
else {
n <- x@Dim[1]
return( Diagonal(n=n, x = rep.int(f1, n)) )
}
}
r <- callGeneric(x@x)
##d if(type == storage.mode(r)) {
x@x <- r
x
##d } else { ## e.g. abs( <lgC> ) --> integer Csparse
##d ## FIXME: when we have 'i*' classes, use them here:
##d rx <- new(sub("^.", "d", cl))
##d rx@x <- as.double(r)
##d ## result is "same"
##d sNams <- slotNames(cl)
##d for(nm in sNams[sNams != "x"])
##d slot(rx, nm) <- slot(x, nm)
##d rx
##d }
} else { ## no sparseness, i.e., no diagonal, but still symmetric:
## FIXME: gain efficiency by reusing f0 for *all* off-diagonal entries!
callGeneric(as(as(as(.diag2sT(x), "dMatrix"), "denseMatrix"), "dspMatrix"))
}
}) ## {Math}
## NB: "Math2" (round, signif) for diagMatrix is perfectly via "dMatrix"
###--------- dMatrix
## Use these as "catch-all" -- more specific methods are for sub-classes (sparse)
setMethod("Math2", signature(x = "dMatrix"),
## Assume that Generic(u, k) |--> u for u in {0,1}
## which is true for round(), signif() ==> all structure maintained
function(x, digits) {
x@x <- callGeneric(x@x, digits = digits)
x
})
## the same, first coercing to "dMatrix":
setMethod("Math2", signature(x = "Matrix"),
function(x, digits) {
x <- as(x, "dMatrix")
x@x <- callGeneric(x@x, digits = digits)
x
})
###--------- sparseMatrix
setMethod("Math", signature(x = "sparseMatrix"),
function(x) callGeneric(as(x, "CsparseMatrix")))
###--------- sparseVector
setMethod("Math", signature(x = "sparseVector"),
function(x) {
if(.Generic %nin% Math.vecGenerics && is0(callGeneric(0.))) {
## sparseness preserved
cld <- getClassDef(cx <- class(x))
kind <- .M.kindC(cld)# "d", "n", "l", "i", "z", ...
has.x <- kind != "n"
if(has.x) {
rx <- callGeneric(x@x)
if(kind == "d") {
x@x <- rx
x
}
else {
new("dsparseVector", x = rx, i = x@i, length = x@length)
}
} else { ## kind == "n"
new("dsparseVector", x = rep.int(callGeneric(1), length(x@i)),
i = x@i, length = x@length)
}
} else { ## dense
callGeneric(sp2vec(x))
}
})
setMethod("Math2", signature(x = "dsparseVector"),
## Assume that Generic(u, k) |--> u for u in {0,1}
## which is true for round(), signif() ==> all structure maintained
function(x, digits) {
x@x <- callGeneric(x@x, digits = digits)
x
})
## the same, first coercing to "dsparseVector":
setMethod("Math2", signature(x = "sparseVector"),
function(x, digits) {
x <- as(x, "dsparseVector")
x@x <- callGeneric(x@x, digits = digits)
x
})
|
## rmatio, a R interface to the C library matio, MAT File I/O Library.
## Copyright (C) 2013-2014 Stefan Widgren
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## rmatio is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
##
## Check read and write of files in the matio test datasets
## (http://sourceforge.net/p/matio/matio_test_datasets/ci/master/tree/)
##
## The script to generate the matio test datasets is included below
## (http://sourceforge.net/p/matio/matio_test_datasets/ci/master/tree/matio_test_cases.m)
##
## The following datasets are included in rmatio (inst/extdata)
## - matio_test_cases_compressed_le.mat
## - matio_test_cases_v4_be.mat
## - matio_test_cases_v4_le.mat
## - small_v4_be.mat
## - small_v4_le.mat
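##
## A minimal round-trip sketch of what the checks below exercise (illustrative):
## m <- read.mat(system.file('extdata/matio_test_cases_v4_le.mat', package = 'rmatio'))
## f <- tempfile(fileext = '.mat')
## write.mat(m, filename = f, compression = FALSE, version = 'MAT5')
## stopifnot(identical(read.mat(f)$var1, m$var1))
##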
## % Generate test datasets for matio library
## %
## % Copyright 2010-2013 Christopher C. Hulbert. All rights reserved.
## %
## % Redistribution and use in source and binary forms, with or without
## % modification, are permitted provided that the following conditions are met:
## %
## % 1. Redistributions of source code must retain the above copyright notice,
## % this list of conditions and the following disclaimer.
## %
## % 2. Redistributions in binary form must reproduce the above copyright
## % notice, this list of conditions and the following disclaimer in the
## % documentation and/or other materials provided with the distribution.
## %
## % THIS SOFTWARE IS PROVIDED BY CHRISTOPHER C. HULBERT ``AS IS'' AND ANY EXPRESS
## % OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
## % OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
## % EVENT SHALL CHRISTOPHER C. HULBERT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
## % INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
## % (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
## % LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
## % ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## % (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## % SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## [c,m,e]=computer;
## if e == 'B'
## e_str = '_be';
## else
## e_str = '_le';
## end
## rand('seed',931316785);
## var1 = reshape(1:20,4,5);
## var2 = reshape(single(1:20),4,5);
## var3 = reshape(int64(1:20),4,5);
## var4 = reshape(uint64(1:20),4,5);
## var5 = reshape(int32(1:20),4,5);
## var6 = reshape(uint32(1:20),4,5);
## var7 = reshape(int16(1:20),4,5);
## var8 = reshape(uint16(1:20),4,5);
## var9 = reshape(int8(1:20),4,5);
## var10 = reshape(uint8(1:20),4,5);
## var11 = reshape(complex(1:20,21:40),4,5);
## var12 = reshape(single(complex(1:20,21:40)),4,5);
## var13 = reshape(int64(complex(1:20,21:40)),4,5);
## var14 = reshape(uint64(complex(1:20,21:40)),4,5);
## var15 = reshape(int32(complex(1:20,21:40)),4,5);
## var16 = reshape(uint32(complex(1:20,21:40)),4,5);
## var17 = reshape(int16(complex(1:20,21:40)),4,5);
## var18 = reshape(uint16(complex(1:20,21:40)),4,5);
## var19 = reshape(int8(complex(1:20,21:40)),4,5);
## var20 = reshape(uint8(complex(1:20,21:40)),4,5);
## var21 = sparse(diag(1:5));
## var22 = sparse(diag(complex(1:5,6:10)));
## var23 = [];
## var24 = ['abcdefghijklmnopqrstuvwxyz';
## 'ABCDEFGHIJKLMNOPQRSTUVWXYZ';
## '1234567890!@#$%^&*()-_=+`~';
## '[{]}\|;:''",<.>/? '];
## %% Structure Variables
## var25 = struct();
## var26 = repmat(struct('field1',[],'field2',[]),0,1);
## var27(1).field1 = zeros(0,1);
## var27(1).field2 = repmat(' ',0,1);
## var27(2).field1 = repmat(struct,0,1);
## var27(2).field2 = repmat({zeros(0,0)},0,1);
## var28 = [struct('field1',1,'field2',reshape(2:13,3,4));
## struct('field1',14,'field2',reshape(15:26,3,4))];
## var29 = [struct('field1',single(1),'field2',reshape(single(2:13),3,4));
## struct('field1',single(14),'field2',reshape(single(15:26),3,4))];
## var30 = [struct('field1',int64(1),'field2',reshape(int64(2:13),3,4));
## struct('field1',int64(14),'field2',reshape(int64(15:26),3,4))];
## var31 = [struct('field1',uint64(1),'field2',reshape(uint64(2:13),3,4));
## struct('field1',uint64(14),'field2',reshape(uint64(15:26),3,4))];
## var32 = [struct('field1',int32(1),'field2',reshape(int32(2:13),3,4));
## struct('field1',int32(14),'field2',reshape(int32(15:26),3,4))];
## var33 = [struct('field1',uint32(1),'field2',reshape(uint32(2:13),3,4));
## struct('field1',uint32(14),'field2',reshape(uint32(15:26),3,4))];
## var34 = [struct('field1',int16(1),'field2',reshape(int16(2:13),3,4));
## struct('field1',int16(14),'field2',reshape(int16(15:26),3,4))];
## var35 = [struct('field1',uint16(1),'field2',reshape(uint16(2:13),3,4));
## struct('field1',uint16(14),'field2',reshape(uint16(15:26),3,4))];
## var36 = [struct('field1',int8(1),'field2',reshape(int8(2:13),3,4));
## struct('field1',int8(14),'field2',reshape(int8(15:26),3,4))];
## var37 = [struct('field1',uint8(1),'field2',reshape(uint8(2:13),3,4));
## struct('field1',uint8(14),'field2',reshape(uint8(15:26),3,4))];
## var38 = [struct('field1',1+51*j,'field2',reshape((2:13)+(52:63)*j,3,4));
## struct('field1',14+64*j,'field2',reshape((15:26)+(65:76)*j,3,4))];
## var39 = [struct('field1',single(1+51*j),...
## 'field2',reshape(single((2:13)+(52:63)*j),3,4));
## struct('field1',single(14+64*j),...
## 'field2',reshape(single((15:26)+(65:76)*j),3,4))];
## var40 = [struct('field1',int64(1+51*j),...
## 'field2',reshape(int64((2:13)+(52:63)*j),3,4));
## struct('field1',int64(14+64*j),...
## 'field2',reshape(int64((15:26)+(65:76)*j),3,4))];
## var41 = [struct('field1',uint64(1+51*j),...
## 'field2',reshape(uint64((2:13)+(52:63)*j),3,4));
## struct('field1',uint64(14+64*j),...
## 'field2',reshape(uint64((15:26)+(65:76)*j),3,4))];
## var42 = [struct('field1',int32(1+51*j),...
## 'field2',reshape(int32((2:13)+(52:63)*j),3,4));
## struct('field1',int32(14+64*j),...
## 'field2',reshape(int32((15:26)+(65:76)*j),3,4))];
## var43 = [struct('field1',uint32(1+51*j),...
## 'field2',reshape(uint32((2:13)+(52:63)*j),3,4));
## struct('field1',uint32(14+64*j),...
## 'field2',reshape(uint32((15:26)+(65:76)*j),3,4))];
## var44 = [struct('field1',int16(1+51*j),...
## 'field2',reshape(int16((2:13)+(52:63)*j),3,4));
## struct('field1',int16(14+64*j),...
## 'field2',reshape(int16((15:26)+(65:76)*j),3,4))];
## var45 = [struct('field1',uint16(1+51*j),...
## 'field2',reshape(uint16((2:13)+(52:63)*j),3,4));
## struct('field1',uint16(14+64*j),...
## 'field2',reshape(uint16((15:26)+(65:76)*j),3,4))];
## var46 = [struct('field1',int8(1+51*j),...
## 'field2',reshape(int8((2:13)+(52:63)*j),3,4));
## struct('field1',int8(14+64*j),...
## 'field2',reshape(int8((15:26)+(65:76)*j),3,4))];
## var47 = [struct('field1',uint8(1+51*j),...
## 'field2',reshape(uint8((2:13)+(52:63)*j),3,4));
## struct('field1',uint8(14+64*j),...
## 'field2',reshape(uint8((15:26)+(65:76)*j),3,4))];
## var48 = struct('field1',sparse(triu(reshape(1:20,4,5))),...
## 'field2',sparse(triu(reshape(1:20,4,5))'));
## var49 = struct('field1',sparse(triu(reshape((1:20)+j*(21:40),4,5))),...
## 'field2',sparse(triu(reshape((1:20)+j*(21:40),4,5))'));
## var50 = [struct('field1','abcdefghijklmnopqrstuvwxyz',...;
## 'field2','ABCDEFGHIJKLMNOPQRSTUVWXYZ');
## struct('field1','1234567890!@#$%^&*()-_=+`~',...
## 'field2','[{]}\|;:''",<.>/? ')];
## %% Cell-Array Variables
## var51 = {};
## var52 = {[] single([]) int64([]) uint64([]) int32([]) uint32([]) int16([]) uint16([]) int8([]) uint8([])};
## var53 = {[1 2;3 4] [5 6 7;8 9 10] [11 12 13 14;15 16 17 18];
## [19 20;21 22] [23 24;25 26;27 28] [29 30;31 32;33 34;35 36]};
## var54 = {single([1 2;3 4]) single([5 6 7;8 9 10]) ...
## single([11 12 13 14;15 16 17 18]); single([19 20;21 22]) ...
## single([23 24;25 26;27 28]) single([29 30;31 32;33 34;35 36])};
## var55 = {int64([1 2;3 4]) int64([5 6 7;8 9 10]) ...
## int64([11 12 13 14;15 16 17 18]); int64([19 20;21 22]) ...
## int64([23 24;25 26;27 28]) int64([29 30;31 32;33 34;35 36])};
## var56 = {uint64([1 2;3 4]) uint64([5 6 7;8 9 10]) ...
## uint64([11 12 13 14;15 16 17 18]); uint64([19 20;21 22]) ...
## uint64([23 24;25 26;27 28]) uint64([29 30;31 32;33 34;35 36])};
## var57 = {int32([1 2;3 4]) int32([5 6 7;8 9 10]) ...
## int32([11 12 13 14;15 16 17 18]); int32([19 20;21 22]) ...
## int32([23 24;25 26;27 28]) int32([29 30;31 32;33 34;35 36])};
## var58 = {uint32([1 2;3 4]) uint32([5 6 7;8 9 10]) ...
## uint32([11 12 13 14;15 16 17 18]); uint32([19 20;21 22]) ...
## uint32([23 24;25 26;27 28]) uint32([29 30;31 32;33 34;35 36])};
## var59 = {int16([1 2;3 4]) int16([5 6 7;8 9 10]) ...
## int16([11 12 13 14;15 16 17 18]); int16([19 20;21 22]) ...
## int16([23 24;25 26;27 28]) int16([29 30;31 32;33 34;35 36])};
## var60 = {uint16([1 2;3 4]) uint16([5 6 7;8 9 10]) ...
## uint16([11 12 13 14;15 16 17 18]); uint16([19 20;21 22]) ...
## uint16([23 24;25 26;27 28]) uint16([29 30;31 32;33 34;35 36])};
## var61 = {int8([1 2;3 4]) int8([5 6 7;8 9 10]) ...
## int8([11 12 13 14;15 16 17 18]); int8([19 20;21 22]) ...
## int8([23 24;25 26;27 28]) int8([29 30;31 32;33 34;35 36])};
## var62 = {uint8([1 2;3 4]) uint8([5 6 7;8 9 10]) ...
## uint8([11 12 13 14;15 16 17 18]); uint8([19 20;21 22]) ...
## uint8([23 24;25 26;27 28]) uint8([29 30;31 32;33 34;35 36])};
## var63 = {sparse(triu(reshape(1:20,4,5))) sparse(triu(reshape(1:20,4,5))')};
## var64 = {sparse(triu(reshape((1:20)+j*(21:40),4,5)));
## sparse(triu(reshape((1:20)+j*(21:40),4,5))')};
## var65 = {'abcdefghijklmnopqrstuvwxyz' '1234567890!@#$%^&*()-_=+`~';
## 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '[{]}\|;:''",<.>/? '};
## var66 = {var25 var26 var27};
## var67 = {var28 var29 var30 var31 var32 var33 var34 var35 var36 var37;
## var38 var39 var40 var41 var42 var43 var44 var45 var46 var47};
## var68 = {struct('field1',sparse(triu(reshape(1:20,4,5))),...
## 'field2',sparse(triu(reshape(1:20,4,5))'));
## struct('field1',sparse(triu(reshape((1:20)+j*(21:40),4,5))),...
## 'field2',sparse(triu(reshape((1:20)+j*(21:40),4,5))'))};
## var69 = {struct('field1','abcdefghijklmnopqrstuvwxyz',...;
## 'field2','ABCDEFGHIJKLMNOPQRSTUVWXYZ');
## struct('field1','1234567890!@#$%^&*()-_=+`~',...
## 'field2','[{]}\|;:''",<.>/? ')};
## int16_data = intmin('int16'):intmax('int16');
## uint16_data = intmin('uint16'):intmax('uint16');
## int8_data = int8(-128:127);
## uint8_data = uint8(0:255);
## var70 = reshape(1:32*32*32,32,32,32);
## var71 = reshape(single(1:32*32*32),32,32,32);
## var72 = reshape(int64(1:32*32*32),32,32,32);
## var73 = reshape(uint64(1:32*32*32),32,32,32);
## var74 = reshape(int32(1:32*32*32),32,32,32);
## var75 = reshape(uint32(1:32*32*32),32,32,32);
## var76 = reshape(int16(1:32*32*32),32,32,32);
## var77 = reshape(uint16(1:32*32*32),32,32,32);
## I = round(1+(numel(int8_data)-1)*rand(32,32,32));
## J = round(1+(numel(int8_data)-1)*rand(32,32,32));
## var78 = reshape(int8_data(I),32,32,32);
## I = round(1+(numel(uint8_data)-1)*rand(32,32,32));
## J = round(1+(numel(uint8_data)-1)*rand(32,32,32));
## var79 = reshape(uint8_data(I),32,32,32);
## var80 = reshape((1:2:2*32^3) + j*(2:2:2*32^3),32,32,32);
## var81 = reshape(single((1:2:2*32^3) + j*(2:2:2*32^3)),32,32,32);
## var82 = reshape(int64((1:2:2*32^3) + j*(2:2:2*32^3)),32,32,32);
## var83 = reshape(uint64((1:2:2*32^3) + j*(2:2:2*32^3)),32,32,32);
## var84 = reshape(int32((1:2:2*32^3) + j*(2:2:2*32^3)),32,32,32);
## var85 = reshape(uint32((1:2:2*32^3) + j*(2:2:2*32^3)),32,32,32);
## I = round(1+(numel(int16_data)-1)*rand(32,32,32));
## J = round(1+(numel(int16_data)-1)*rand(32,32,32));
## var86 = reshape(complex(int16_data(I),int16_data(J)),32,32,32);
## I = round(1+(numel(uint16_data)-1)*rand(32,32,32));
## J = round(1+(numel(uint16_data)-1)*rand(32,32,32));
## var87 = reshape(complex(uint16_data(I),uint16_data(J)),32,32,32);
## I = round(1+(numel(int8_data)-1)*rand(32,32,32));
## J = round(1+(numel(int8_data)-1)*rand(32,32,32));
## var88 = reshape(complex(int8_data(I),int8_data(J)),32,32,32);
## I = round(1+(numel(uint8_data)-1)*rand(32,32,32));
## J = round(1+(numel(uint8_data)-1)*rand(32,32,32));
## var89 = reshape(complex(uint8_data(I),uint8_data(J)),32,32,32);
## var90 = tril(true(5));
## var91 = [struct('field1',logical(mod(reshape(0:19,4,5),2)),...
## 'field2',~mod(reshape(0:19,4,5),2));...
## struct('field1',tril(true(5)),'field2',triu(true(5)))];
## var92 = {logical(mod(reshape(0:19,4,5),2));~mod(reshape(0:19,4,5),2);...
## tril(true(5));triu(true(5))};
## save('-v6',['matio_test_cases_uncompressed' e_str '.mat'],'var*');
## save(['matio_test_cases_compressed' e_str '.mat'],'var*');
## save('-v7.3',['matio_test_cases_hdf' e_str '.mat'],'var*');
## save('-v4',['matio_test_cases_v4' e_str '.mat'],'var1','var11','var21',...
## 'var22','var24');
## x = pi;
## save('-v4',['small_v4' e_str '.mat'],'x');
##
## Load rmatio
##
library(rmatio)
test_mat_v4_file <- function(x) {
## var1 read as double
var1 <- array(seq_len(20), c(4,5))
storage.mode(var1) <- 'double'
stopifnot(identical(x$var1, var1))
## var11 read as complex
var11 <- array(c(1+21i, 2+22i, 3+23i, 4+24i, 5+25i, 6+26i, 7+27i,
8+28i, 9+29i, 10+30i, 11+31i, 12+32i, 13+33i,
14+34i, 15+35i, 16+36i, 17+37i, 18+38i, 19+39i,
20+40i), c(4,5))
stopifnot(identical(x$var11, var11))
## var21 read as a sparse matrix
var21 <- as(diag(1:5), 'dgCMatrix')
stopifnot(identical(x$var21, var21))
## var22 read as a complex matrix
var22 <- structure(c(1+6i, 0+0i, 0+0i, 0+0i, 0+0i,
0+0i, 2+7i, 0+0i, 0+0i, 0+0i,
0+0i, 0+0i, 3+8i, 0+0i, 0+0i,
0+0i, 0+0i, 0+0i, 4+9i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 5+10i),
.Dim = c(5L, 5L))
stopifnot(identical(x$var22, var22))
## var24 read as character vector
stopifnot(identical(x$var24, c("abcdefghijklmnopqrstuvwxyz",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"1234567890!@#$%^&*()-_=+`~",
"[{]}\\|;:'\",<.>/? ")))
}
test_mat_v5_file <- function(x) {
## var1, ..., var4 read as double
var1 <- array(seq_len(20), c(4,5))
storage.mode(var1) <- 'double'
stopifnot(identical(x$var1, var1))
stopifnot(identical(x$var2, var1))
stopifnot(identical(x$var3, var1))
stopifnot(identical(x$var4, var1))
## var5 read as integer
var5 <- array(seq_len(20), c(4,5))
storage.mode(var5) <- 'integer'
stopifnot(identical(x$var5, var5))
## var6 read as double
stopifnot(identical(x$var6, var1))
## var7, ..., var10 read as integer
stopifnot(identical(x$var7, var5))
stopifnot(identical(x$var8, var5))
stopifnot(identical(x$var9, var5))
stopifnot(identical(x$var10, var5))
## var11, ..., var20 read as complex
var11 <- array(c(1+21i, 2+22i, 3+23i, 4+24i, 5+25i, 6+26i, 7+27i,
8+28i, 9+29i, 10+30i, 11+31i, 12+32i, 13+33i,
14+34i, 15+35i, 16+36i, 17+37i, 18+38i, 19+39i,
20+40i), c(4,5))
stopifnot(identical(x$var11, var11))
stopifnot(identical(x$var12, var11))
stopifnot(identical(x$var13, var11))
stopifnot(identical(x$var14, var11))
stopifnot(identical(x$var15, var11))
stopifnot(identical(x$var16, var11))
stopifnot(identical(x$var17, var11))
stopifnot(identical(x$var18, var11))
stopifnot(identical(x$var19, var11))
stopifnot(identical(x$var20, var11))
## var21 read as a sparse matrix
var21 <- as(diag(1:5), 'dgCMatrix')
stopifnot(identical(x$var21, var21))
## var22 read as a dense complex matrix
var22 <- array(c(1+6i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 2+7i, 0+0i,
0+0i, 0+0i, 0+0i, 0+0i, 3+8i, 0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 4+9i,
0+0i, 0+0i, 0+0i, 0+0i, 0+0i, 5+10i), c(5L, 5L))
stopifnot(identical(x$var22, var22))
## var23 read as double
stopifnot(identical(x$var23, numeric(0)))
## var24 read as character vector
stopifnot(identical(x$var24, c("abcdefghijklmnopqrstuvwxyz",
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"1234567890!@#$%^&*()-_=+`~",
"[{]}\\|;:'\",<.>/? ")))
## Structure Variables
## var25 read as an empty named list
var25 <- structure(list(), .Names = character(0))
stopifnot(identical(x$var25, var25))
var26 <- list(field1=list(), field2=list())
stopifnot(identical(x$var26, var26))
var27 <- list(field1=numeric(0), field2=character(0))
stopifnot(identical(x$var27, var27))
## var28, ..., var31 read as double
var28 <- list(field1=list(1, 14),
field2=list(array(as.numeric(2:13), c(3,4)),
array(as.numeric(15:26), c(3,4))))
stopifnot(identical(x$var28, var28))
stopifnot(identical(x$var29, var28))
stopifnot(identical(x$var30, var28))
stopifnot(identical(x$var31, var28))
## var32 read as integer
var32 <- list(field1=list(1L, 14L),
field2=list(array(2:13, c(3,4)),
array(15:26, c(3,4))))
stopifnot(identical(x$var32, var32))
## var33 read as double
stopifnot(identical(x$var33, var28))
## var34, ..., var37 read as integer
stopifnot(identical(x$var34, var32))
stopifnot(identical(x$var35, var32))
stopifnot(identical(x$var36, var32))
stopifnot(identical(x$var37, var32))
## var38, ..., var47 read as complex
var38 <- list(field1=list(1+51i, 14+64i),
field2=list(array(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), c(3,4)), array(c(15+65i, 16+66i, 17+67i,
18+68i, 19+69i, 20+70i, 21+71i, 22+72i, 23+73i,
24+74i, 25+75i, 26+76i), c(3,4))))
stopifnot(identical(x$var38, var38))
stopifnot(identical(x$var39, var38))
stopifnot(identical(x$var40, var38))
stopifnot(identical(x$var41, var38))
stopifnot(identical(x$var42, var38))
stopifnot(identical(x$var43, var38))
stopifnot(identical(x$var44, var38))
stopifnot(identical(x$var45, var38))
stopifnot(identical(x$var46, var38))
stopifnot(identical(x$var47, var38))
var48 <- list(field1=list(triu(Matrix(1:20, nrow=4, ncol=5, sparse=TRUE))),
field2=list(tril(Matrix(1:20, nrow=5, ncol=4, sparse=TRUE, byrow=TRUE))))
stopifnot(identical(x$var48, var48))
var49 <- list(field1=list(array(c(1+21i, 0+0i, 0+0i, 0+0i, 5+25i,
6+26i, 0+0i, 0+0i, 9+29i, 10+30i, 11+31i, 0+0i,
13+33i, 14+34i, 15+35i, 16+36i, 17+37i, 18+38i,
19+39i, 20+40i), c(4,5))),
field2=list(array(c(1-21i, 5-25i, 9-29i, 13-33i, 17-37i,
0+0i, 6-26i, 10-30i, 14-34i, 18-38i, 0+0i, 0+0i,
11-31i, 15-35i, 19-39i, 0+0i, 0+0i, 0+0i,
16-36i, 20-40i), c(5,4))))
stopifnot(identical(x$var49, var49))
var50 <- list(field1 = c("abcdefghijklmnopqrstuvwxyz",
"1234567890!@#$%^&*()-_=+`~"),
field2 = c("ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"[{]}\\|;:'\",<.>/? "))
stopifnot(identical(x$var50, var50))
## Cell-Array Variables
var51 <- list()
stopifnot(identical(x$var51, var51))
var52 <- list(numeric(0), numeric(0), integer(0), integer(0), integer(0),
integer(0), integer(0), integer(0), integer(0), integer(0))
stopifnot(identical(x$var52, var52))
## var53, ..., var56 read as double
var53 <- list(list(array(c(1, 3, 2, 4), c(2, 2)),
array(c(5, 8, 6, 9, 7, 10), c(2,3)),
array(c(11, 15, 12, 16, 13, 17, 14, 18), c(2, 4))),
list(array(c(19, 21, 20, 22), c(2, 2)),
array(c(23, 25, 27, 24, 26, 28), c(3L, 2L)),
array(c(29, 31, 33, 35, 30, 32, 34, 36), c(4, 2))))
stopifnot(identical(x$var53, var53))
stopifnot(identical(x$var54, var53))
stopifnot(identical(x$var55, var53))
stopifnot(identical(x$var56, var53))
## var57 read as integer
var57 <- list(list(array(c(1L, 3L, 2L, 4L), c(2, 2)),
array(c(5L, 8L, 6L, 9L, 7L, 10L), c(2,3)),
array(c(11L, 15L, 12L, 16L, 13L, 17L, 14L, 18L), c(2, 4))),
list(array(c(19L, 21L, 20L, 22L), c(2, 2)),
array(c(23L, 25L, 27L, 24L, 26L, 28L), c(3L, 2L)),
array(c(29L, 31L, 33L, 35L, 30L, 32L, 34L, 36L), c(4, 2))))
stopifnot(identical(x$var57, var57))
## var58 read as double
stopifnot(identical(x$var58, var53))
## var59, ..., var62 read as integer
stopifnot(identical(x$var59, var57))
stopifnot(identical(x$var60, var57))
stopifnot(identical(x$var61, var57))
stopifnot(identical(x$var62, var57))
var63 <- list(list(triu(Matrix(1:20, nrow=4, ncol=5, sparse=TRUE)),
tril(Matrix(1:20, nrow=5, ncol=4, sparse=TRUE, byrow=TRUE))))
stopifnot(identical(x$var63, var63))
var64 <- list(array(c(1+21i, 0+0i, 0+0i, 0+0i, 5+25i,
6+26i, 0+0i, 0+0i, 9+29i, 10+30i, 11+31i, 0+0i,
13+33i, 14+34i, 15+35i, 16+36i, 17+37i, 18+38i,
19+39i, 20+40i), c(4,5)),
array(c(1-21i, 5-25i, 9-29i, 13-33i, 17-37i,
0+0i, 6-26i, 10-30i, 14-34i, 18-38i, 0+0i, 0+0i,
11-31i, 15-35i, 19-39i, 0+0i, 0+0i, 0+0i,
16-36i, 20-40i), c(5,4)))
stopifnot(identical(x$var64, var64))
var65 <- list(list("abcdefghijklmnopqrstuvwxyz",
"1234567890!@#$%^&*()-_=+`~"),
list("ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"[{]}\\|;:'\",<.>/? "))
stopifnot(identical(x$var65, var65))
var66 <- list(structure(list(),
.Names = character(0)),
list(field1=list(), field2=list()),
structure(list(field1 = numeric(0),
field2 = character(0)),
.Names = c("field1", "field2")))
stopifnot(identical(x$var66, var66))
var67 <- list(list(structure(list(field1 = list(1, 14), field2 =
list( structure(c(2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13), .Dim = 3:4), structure(c(15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26 ), .Dim = 3:4))),
.Names = c("field1", "field2")), structure(list(
field1 = list(1, 14), field2 = list(structure(c(2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), .Dim = 3:4),
structure(c(15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26), .Dim = 3:4))), .Names = c("field1",
"field2")), structure(list(field1 = list(1, 14),
field2 = list( structure(c(2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13), .Dim = 3:4), structure(c(15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26 ), .Dim =
3:4))), .Names = c("field1", "field2")),
structure(list( field1 = list(1, 14), field2 =
list(structure(c(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13), .Dim = 3:4), structure(c(15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26), .Dim = 3:4))), .Names =
c("field1", "field2")), structure(list(field1 =
list(1L, 14L), field2 = list( structure(2:13, .Dim =
3:4), structure(15:26, .Dim = 3:4))), .Names =
c("field1", "field2")), structure(list(field1 =
list(1, 14), field2 = list( structure(c(2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12, 13), .Dim = 3:4),
structure(c(15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26 ), .Dim = 3:4))), .Names = c("field1",
"field2")), structure(list( field1 = list(1L, 14L),
field2 = list(structure(2:13, .Dim = 3:4),
structure(15:26, .Dim = 3:4))), .Names = c("field1",
"field2")), structure(list(field1 = list(1L, 14L),
field2 = list( structure(2:13, .Dim = 3:4),
structure(15:26, .Dim = 3:4))), .Names = c("field1",
"field2")), structure(list(field1 = list(1L, 14L),
field2 = list( structure(2:13, .Dim = 3:4),
structure(15:26, .Dim = 3:4))), .Names = c("field1",
"field2")), structure(list(field1 = list(1L, 14L),
field2 = list( structure(2:13, .Dim = 3:4),
structure(15:26, .Dim = 3:4))), .Names = c("field1",
"field2"))), list(structure(list(field1 =
list(1+51i, 14+64i), field2 =
list(structure(c(2+52i, 3+53i, 4+54i, 5+55i, 6+56i,
7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2")),
structure(list(field1 = list(1+51i, 14+64i), field2
= list( structure(c(2+52i, 3+53i, 4+54i, 5+55i,
6+56i, 7+57i, 8+58i, 9+59i, 10+60i, 11+61i, 12+62i,
13+63i), .Dim = 3:4), structure(c(15+65i, 16+66i,
17+67i, 18+68i, 19+69i, 20+70i, 21+71i, 22+72i,
23+73i, 24+74i, 25+75i, 26+76i), .Dim = 3:4))),
.Names = c("field1", "field2"))))
stopifnot(identical(x$var67, var67))
var68 <- list(list(field1=list(triu(Matrix(1:20, nrow=4, ncol=5, sparse=TRUE))),
field2=list(tril(Matrix(1:20, nrow=5, ncol=4, sparse=TRUE, byrow=TRUE)))),
list(field1=list(array(c(1+21i, 0+0i, 0+0i, 0+0i, 5+25i, 6+26i, 0+0i, 0+0i, 9+29i,
10+30i, 11+31i, 0+0i, 13+33i, 14+34i, 15+35i, 16+36i,
17+37i, 18+38i, 19+39i, 20+40i), c(4,5))),
field2=list(array(c(1-21i, 5-25i, 9-29i, 13-33i, 17-37i, 0+0i, 6-26i, 10-30i,
14-34i, 18-38i, 0+0i, 0+0i, 11-31i, 15-35i, 19-39i, 0+0i, 0+0i, 0+0i,
16-36i, 20-40i), c(5,4)))))
stopifnot(identical(x$var68, var68))
var69 <- list(list(field1 = "abcdefghijklmnopqrstuvwxyz",
field2 = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"),
list(field1 = "1234567890!@#$%^&*()-_=+`~",
field2 = "[{]}\\|;:'\",<.>/? "))
stopifnot(identical(x$var69, var69))
## var70, ..., var73 read as double
    var70 <- array(seq_len(32^3), c(32,32,32))
storage.mode(var70) <- 'double'
stopifnot(identical(x$var70, var70))
stopifnot(identical(x$var71, var70))
stopifnot(identical(x$var72, var70))
stopifnot(identical(x$var73, var70))
## var74 read as integer
    var74 <- array(seq_len(32^3), c(32,32,32))
storage.mode(var74) <- 'integer'
stopifnot(identical(x$var74, var74))
## var75 read as double
stopifnot(identical(x$var75, var70))
## var76 read as integer
## var76 = reshape(int16(1:32*32*32),32,32,32);
    var76 <- array(c(seq_len(32767), 32767), c(32,32,32))
storage.mode(var76) <- 'integer'
stopifnot(identical(x$var76, var76))
## var77 read as integer
## var77 = reshape(uint16(1:32*32*32),32,32,32);
stopifnot(identical(x$var77, var74))
## var78 read as integer
var78_slab <- array(c(1L, 37L, -55L, -70L, -48L, -84L, -96L, -93L,
-91L, -24L, -123L, -92L, 39L, 109L, -69L, 68L, 76L, -42L, -4L,
36L, 45L, -89L, -60L, -19L, 99L, 85L, 76L, 109L, 96L, -60L, 24L,
112L, 74L, -52L, -57L, 2L, 106L, -34L, -77L, 92L, 30L, -126L,
-55L, 3L, 95L, -75L, -77L, -83L, -112L, 69L, -57L, -97L, 36L, 37L,
90L, -76L, 117L, 12L, -108L, 51L, -35L, -105L, -1L, 99L, -103L,
55L, -77L, 27L, 59L, -38L, -96L, 32L, -52L, -32L, 123L, 63L, 122L,
59L, -19L, 84L, -99L, -27L, 121L, 25L, -22L, 8L, -70L, 9L, 40L,
27L, 90L, 122L, 124L, 7L, -2L, -84L, -45L, -79L, -120L, 39L, 88L,
-55L, -95L, -27L, -5L, -11L, 48L, 125L, 94L, 36L, 115L, -121L,
99L, 47L, -108L, -99L, -9L, -70L, 67L, -89L, 107L, -80L, 113L,
27L, 45L, 94L, 124L, 7L, 54L, 76L, -54L, -48L),
c(11L, 6L, 2L))
stopifnot(identical(x$var78[seq(2,32,3), seq(4,32,5), seq(8,32,16)], var78_slab))
## var79 read as integer
var79_slab <- structure(c(36L, 154L, 198L, 69L, 227L, 194L, 140L,
19L, 11L, 17L, 151L, 74L, 65L, 249L,
151L, 109L, 131L, 194L, 237L, 190L,
218L, 35L, 142L, 205L, 42L, 43L, 2L,
15L, 177L, 240L, 164L, 112L, 138L, 112L,
97L, 154L, 26L, 191L, 53L, 214L, 216L,
229L, 187L, 192L, 148L, 230L, 21L, 210L,
13L, 28L, 216L, 1L, 21L, 20L, 179L,
136L, 111L, 22L, 103L, 188L, 221L, 4L,
139L, 238L, 175L, 207L, 193L, 24L, 236L,
141L, 144L, 226L, 48L, 36L, 207L, 199L,
30L, 249L, 124L, 83L, 90L, 102L, 213L,
227L, 255L, 152L, 212L, 62L, 252L, 18L,
133L, 214L, 101L, 22L, 176L, 101L, 233L,
106L, 39L, 237L, 241L, 212L, 19L, 102L,
212L, 123L, 57L, 225L, 27L, 178L, 98L,
88L, 40L, 115L, 185L, 226L, 23L, 22L,
160L, 111L, 100L, 44L, 145L, 144L, 223L,
123L, 48L, 190L, 70L, 115L, 113L, 7L),
.Dim = c(11L, 6L, 2L))
stopifnot(identical(x$var79[seq(2,32,3), seq(4,32,5), seq(8,32,16)], var79_slab))
## var80, ..., var85 read as complex
    var80 <- array(complex(real=seq(1, 2*32^3, 2), imaginary=seq(2, 2*32^3, 2)), c(32,32,32))
stopifnot(identical(x$var80, var80))
stopifnot(identical(x$var81, var80))
stopifnot(identical(x$var82, var80))
stopifnot(identical(x$var83, var80))
stopifnot(identical(x$var84, var80))
stopifnot(identical(x$var85, var80))
## var86 read as complex
var86_slab <- structure(c(31419+12074i, 11550+31935i,
14036-25970i, 12950-26466i,
11549-13221i, 8802-13913i, -8540+20686i,
82-2536i, 6556+25498i, -22761-19987i,
-14602-32033i, -16493-9400i,
15688+20383i, -10204-15596i,
14390-20232i, -21008+3161i,
22712-11293i, 16110-30851i, 6298-2954i,
-10732-25971i, -15133+15777i,
-23705+32679i, 2771-10261i,
-25754-31533i, 27007-3499i, -7948-4034i,
1047-17085i, 28781-6213i, 22132+27840i,
-2300-1878i, 14919-19351i,
-32577+15858i, 28973-16332i,
-13965+16888i, -1934-19892i,
24973-17196i, 29557-23198i,
-26196-27443i, 17138+17163i,
4173+22551i, 21456-372i, -12554-766i,
-11899+26000i, -13223-7784i,
-25303+5509i, 16352+363i, 9743-9728i,
6783-16717i, 27364-31769i, 845-5483i,
-5927+30238i, 9080+5283i, 25263-12552i,
-6594+9643i, -21471-8832i, 13196+15195i,
-12123-23263i, -10874+25982i,
-11012-7600i, -10870-24078i,
-4181+21428i, 19441-25234i,
27199-11909i, -4204+14748i,
-12638+6522i, -21381+3874i, 10009-1227i,
23342+16514i, 27792-11483i,
-16194-25596i, 9911-20183i,
-28341+22322i, 3508-7880i, 20369-482i,
-4578+21351i, 18426+31578i,
-26016+17152i, 23621+10035i,
-26955-8794i, 3884+18834i, 25669+17940i,
19480-25157i, -24243-10527i,
4464+28510i, -6616-24233i, -14599-1762i,
-19424+6424i, -29786+14220i,
-22333-20060i, -28080-30034i,
-24965+30996i, 11506-19238i,
-5438+17227i, 6325+19908i, 16761+20643i,
18192-20736i, -18629+17622i,
2759-21496i, 15764+18250i, -28782+3888i,
-26055-4279i, 17075-5598i, 15629+19686i,
-27961-5378i, 22850-7463i, -17585+883i,
-1421-6768i, 28956-7833i, 19321-31347i,
22739-27552i, -5804-12847i,
-19613+7820i, -24500+23384i,
6332-32352i, 9084+8866i, 12787+16795i,
27522+650i, 7219-25890i, -16811-5838i,
22569+6221i, 21757-28407i, -2817+10293i,
-666-21153i, 6202+23652i, -27772+7728i,
24485+27017i, 7898+4210i, 8062+28365i,
-28910-21963i, 7298-10445i,
29980+25776i, 5019-2720i), .Dim = c(11L,
6L, 2L))
stopifnot(identical(x$var86[seq(2,32,3), seq(4,32,5), seq(8,32,16)], var86_slab))
## var87 read as complex
var87_slab <- structure(c(63490+42712i, 29714+8391i, 61483+15861i,
15850+33874i, 55082+3631i, 45390+5685i,
10423+60745i, 44728+38383i,
43170+36676i, 46711+20162i,
49188+52357i, 48615+54935i,
11083+51765i, 2973+2285i, 47407+28409i,
33701+18840i, 34873+47323i, 13711+7195i,
6046+45393i, 44211+53418i, 16574+31089i,
41210+28626i, 14063+54325i,
15062+33495i, 51022+34454i,
60470+39080i, 44448+44287i,
64996+30009i, 283+64783i, 18983+191i,
3436+45327i, 44524+18230i, 15729+21345i,
12093+4405i, 8834+7721i, 54420+31822i,
26675+29151i, 42660+29044i,
65323+50276i, 7139+32010i, 47157+29016i,
35840+18163i, 15379+14923i,
30214+63617i, 47337+59129i,
62332+27780i, 3623+2694i, 11530+27741i,
4141+36801i, 64250+50570i, 39722+49989i,
26118+40150i, 28214+21931i,
25798+27469i, 62573+58367i, 25576+6335i,
12201+17545i, 49659+37636i,
28187+58668i, 3135+5269i, 16458+14517i,
31662+28316i, 38430+36198i,
47450+50404i, 201+49160i, 50221+54517i,
47469+42818i, 7124+35280i, 34741+37493i,
44064+39694i, 6995+4154i, 7531+33600i,
54978+37775i, 15416+23755i,
27259+51376i, 64300+6331i, 1212+37261i,
8920+61180i, 29466+54541i, 65042+23250i,
1978+56271i, 14377+47601i, 12353+11382i,
8336+45819i, 45530+55892i, 8908+40510i,
11258+60909i, 56638+57033i,
45303+55200i, 4881+31280i, 38207+48851i,
807+7069i, 50473+21558i, 6544+29624i,
52787+2034i, 5986+58345i, 57054+22602i,
2935+55316i, 42851+16897i, 10001+43967i,
48614+38397i, 14572+48016i,
25265+24936i, 1715+40096i, 37345+13265i,
11025+46066i, 37554+31865i, 4791+53489i,
22789+15282i, 23465+10050i,
58589+45954i, 9073+25270i, 20846+33025i,
22905+59697i, 22981+42347i,
24828+62213i, 46887+36422i,
50013+60501i, 29781+52236i,
37210+42165i, 37785+60771i,
16219+17867i, 59934+13526i,
58464+17576i, 34801+59568i,
20649+30475i, 55999+42307i, 4464+16054i,
23302+47556i, 24764+55871i,
22331+53630i, 63541+34455i), .Dim =
c(11L, 6L, 2L))
stopifnot(identical(x$var87[seq(2,32,3), seq(4,32,5), seq(8,32,16)], var87_slab))
## var88 read as complex
var88_slab <- structure(c(24-109i, -38+98i, -32+69i, 1-58i,
106+37i, 23-118i, 86-117i, 3-10i,
-94+111i, 43-69i, 37+24i, 5+103i,
-71-109i, -9+76i, 52-40i, -48+1i,
-1-71i, 43+106i, 121-39i, 68+45i, 9+7i,
49-122i, 79-98i, -125-9i, -81-45i,
-30+6i, 109-65i, -111-88i, -21-57i,
-59-22i, 64-47i, 38+36i, 32+84i,
-11-41i, -4-5i, 111-12i, -79+23i,
118-68i, 60-49i, -31-47i, 98-49i,
19-84i, 114-19i, 126+112i, 3-95i,
-69-113i, -24-55i, -40-78i, 81-95i,
108-15i, -37+5i, 24-97i, 65+17i,
-79-19i, -54-36i, -99-15i, -92-70i,
115+62i, -29-52i, -121+61i, -62+101i,
-72+52i, -110-111i, 126+124i, 85-44i,
-32+42i, 10+105i, -88-104i, 82+96i,
98-85i, -66-27i, 83-12i, -18-32i,
58-92i, -59-89i, -45+16i, -36-68i,
-120-94i, 69+16i, 5+114i, 80+42i, -7+4i,
74+84i, 83+76i, -74+93i, -124+7i,
-61+54i, 3-83i, -79-87i, -50+8i,
-13-48i, 51-21i, -95+3i, -115-5i,
117+120i, 66+29i, 64+10i, 54-101i,
51+74i, 55-21i, -78+49i, -61+22i,
74-73i, -26+71i, 81+0i, -45-123i,
-8+58i, 63-124i, -83-112i, 97+123i,
19+106i, 18+47i, -39-73i, -47-60i,
-102-120i, -17-102i, 18+66i, -12-59i,
-41+34i, 46+51i, -61-40i, 47-65i,
-103-35i, 127+30i, 28+16i, -72-13i,
-76-67i, -46+33i, 40+109i, -117+25i,
103+12i, 89-9i), .Dim = c(11L, 6L, 2L))
stopifnot(identical(x$var88[seq(2,32,3), seq(4,32,5), seq(8,32,16)], var88_slab))
## var89 read as complex
var89_slab <- structure(c(233+19i, 99+152i, 194+176i, 219+39i,
182+250i, 198+121i, 70+99i, 250+205i,
154+19i, 20+127i, 55+248i, 98+36i,
129+239i, 246+207i, 42+97i, 209+14i,
114+150i, 245+204i, 62+143i, 192+87i,
88+125i, 254+59i, 145+191i, 23+137i,
229+96i, 17+104i, 158+68i, 85+154i,
207+145i, 112+116i, 135+182i, 138+79i,
118+171i, 87+191i, 187+27i, 224+67i,
96+230i, 66+32i, 196+72i, 239+53i,
15+197i, 111+188i, 225+144i, 190+181i,
30+24i, 149+230i, 56+158i, 114+96i,
242+79i, 82+206i, 106+114i, 8+202i,
87+217i, 138+126i, 26+138i, 142+157i,
186+204i, 139+252i, 130+202i, 197+221i,
169+34i, 160+148i, 131+129i, 255+157i,
93+4i, 140+149i, 182+191i, 45+138i,
41+32i, 74+142i, 173+106i, 14+5i,
56+15i, 159+205i, 218+172i, 235+235i,
253+46i, 212+183i, 121+33i, 193+87i,
95+150i, 75+167i, 139+111i, 241+161i,
62+17i, 165+198i, 249+244i, 207+210i,
51+193i, 81+133i, 42+147i, 219+48i,
185+124i, 65+8i, 120+240i, 24+19i,
104+3i, 23+34i, 88+216i, 46+45i,
90+128i, 171+187i, 105+53i, 23+168i,
111+218i, 91+101i, 90+72i, 178+168i,
172+79i, 176+42i, 16+249i, 137+65i,
149+145i, 87+154i, 111+32i, 179+46i,
164+95i, 121+106i, 125+144i, 215+171i,
48+141i, 110+132i, 234+197i, 22+223i,
229+72i, 169+26i, 128+186i, 87+139i,
152+77i, 17+58i, 215+242i, 62+229i ),
.Dim = c(11L, 6L, 2L))
stopifnot(identical(x$var89[seq(2,32,3), seq(4,32,5), seq(8,32,16)], var89_slab))
var90 <- array(c(TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE,
TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE,
FALSE, FALSE, TRUE), c(5L, 5L))
stopifnot(identical(x$var90, var90))
var91 <- structure(list(field1 = list(structure(c(FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE), .Dim = 4:5), structure(c(TRUE,
TRUE, TRUE, TRUE, TRUE, FALSE, TRUE,
TRUE, TRUE, TRUE, FALSE, FALSE, TRUE,
TRUE, TRUE, FALSE, FALSE, FALSE, TRUE,
TRUE, FALSE, FALSE, FALSE, FALSE,
TRUE), .Dim = c(5L, 5L))), field2 =
list( structure(c(TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE), .Dim
= 4:5), structure(c(TRUE, FALSE,
FALSE, FALSE, FALSE, TRUE, TRUE,
FALSE, FALSE, FALSE, TRUE, TRUE, TRUE,
FALSE, FALSE, TRUE, TRUE, TRUE, TRUE,
FALSE, TRUE, TRUE, TRUE, TRUE, TRUE),
.Dim = c(5L, 5L)))), .Names =
c("field1", "field2"))
stopifnot(identical(x$var91, var91))
var92 <- list(structure(c(FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE),
.Dim = 4:5),
structure(c(TRUE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE,
TRUE, FALSE, TRUE, FALSE, TRUE,
FALSE, TRUE, FALSE, TRUE, FALSE),
.Dim = 4:5),
structure(c(TRUE, TRUE, TRUE, TRUE, TRUE,
FALSE, TRUE, TRUE, TRUE, TRUE,
FALSE, FALSE, TRUE, TRUE, TRUE,
FALSE, FALSE, FALSE, TRUE, TRUE,
FALSE, FALSE, FALSE, FALSE, TRUE),
.Dim = c(5L, 5L)),
structure(c(TRUE, FALSE, FALSE, FALSE, FALSE,
TRUE, TRUE, FALSE, FALSE, FALSE,
TRUE, TRUE, TRUE, FALSE, FALSE,
TRUE, TRUE, TRUE, TRUE, FALSE,
TRUE, TRUE, TRUE, TRUE, TRUE),
.Dim = c(5L, 5L)))
stopifnot(identical(x$var92, var92))
}
##
## small_v4_le.mat
##
infile <- system.file('extdata/small_v4_le.mat', package='rmatio')
x.in <- read.mat(infile)
stopifnot(identical(x.in$x, pi))
## Write as MAT5 uncompressed
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=FALSE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
stopifnot(identical(x.out$x, pi))
## Run the same test with compression
if(rmatio:::have.zlib()) {
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=TRUE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
stopifnot(identical(x.out$x, pi))
}
##
## small_v4_be.mat
##
infile <- system.file('extdata/small_v4_be.mat', package='rmatio')
x.in <- read.mat(infile)
stopifnot(identical(x.in$x, pi))
## Write as MAT5 uncompressed
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=FALSE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
stopifnot(identical(x.out$x, pi))
## Run the same test with compression
if(rmatio:::have.zlib()) {
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=TRUE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
stopifnot(identical(x.out$x, pi))
}
##
## matio_test_cases_v4_le.mat
##
infile <- system.file('extdata/matio_test_cases_v4_le.mat', package='rmatio')
x.in <- read.mat(infile)
test_mat_v4_file(x.in)
## Write as MAT5 uncompressed
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=FALSE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
test_mat_v4_file(x.out)
## Run the same test with compression
if(rmatio:::have.zlib()) {
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=TRUE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
test_mat_v4_file(x.out)
}
##
## matio_test_cases_v4_be.mat
##
infile <- system.file('extdata/matio_test_cases_v4_be.mat', package='rmatio')
x.in <- read.mat(infile)
test_mat_v4_file(x.in)
## Write as MAT5 uncompressed
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=FALSE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
test_mat_v4_file(x.out)
## Run the same test with compression
if(rmatio:::have.zlib()) {
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=TRUE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
test_mat_v4_file(x.out)
}
##
## matio_test_cases_compressed_le.mat
##
infile <- system.file('extdata/matio_test_cases_compressed_le.mat', package='rmatio')
x.in <- read.mat(infile)
test_mat_v5_file(x.in)
## Write as MAT5 uncompressed
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=FALSE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
test_mat_v5_file(x.out)
## Run the same test with compression
if(rmatio:::have.zlib()) {
filename <- tempfile(fileext = ".mat")
write.mat(x.in, filename=filename, compression=TRUE, version='MAT5')
x.out <- read.mat(filename)
unlink(filename)
test_mat_v5_file(x.out)
}
|
/rmatio/tests/matio_test_datasets.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 53,937 |
r
|
##load the tables into R
activity_labels <- read.table(file="activity_labels.txt")
features <- read.table(file="features.txt")
testsub <- read.table(file="./test/subject_test.txt")
testx <- read.table(file="./test/X_test.txt")
testy <- read.table(file="./test/y_test.txt")
trainsub <- read.table(file="./train/subject_train.txt")
trainx <- read.table(file="./train/X_train.txt")
trainy <- read.table(file="./train/y_train.txt")
## add column names to the test and train "x" sets
colnames(testx) <- features$V2
colnames(trainx) <- features$V2
##get only the columns related to mean and standard deviation measures
test_names <- names(testx)
train_names <- names(trainx)
test_relevant_col_id <- c(grep("-mean()",test_names, fixed = TRUE),grep("-std()",test_names, fixed = TRUE))
train_relevant_col_id <- c(grep("-mean()",train_names, fixed = TRUE),grep("-std()",train_names, fixed = TRUE))
test_relevant_col_id <- sort(test_relevant_col_id)
train_relevant_col_id <- sort(train_relevant_col_id)
testx_ex <- testx[,test_relevant_col_id]
trainx_ex <- trainx[,train_relevant_col_id]
## get activity names vs. id for test and train "y" sets
testy <- merge(testy, activity_labels, by = "V1")
trainy <- merge(trainy, activity_labels, by = "V1")
## add column names to the test and train "y" sets
colnames(testy) <- c("activity_id","activity")
colnames(trainy) <- c("activity_id","activity")
## add column names to the test and train "subject" sets
colnames(testsub) <- "subject_id"
colnames(trainsub) <- "subject_id"
## merge the "x" and "y" sets for both train and test sets
test <- cbind(testsub, testy[-1], testx_ex)
train <- cbind(trainsub, trainy[-1], trainx_ex)
## merge the train and the test sets
base <- rbind(test, train)
### create the table with the mean for each column by subject and activity
base$subject_id <- as.factor(base$subject_id)
subject <- levels(base$subject_id)
activity <- levels(base$activity)
final <- base[NULL,]
for (i in subject){
sub <- base[base$subject_id == i,]
for (j in activity){
act <-sub[sub$activity == j,]
m<- sapply(act, mean, na.rm=TRUE)
final <- rbind(final,m)
final[nrow(final),1:2] <- c(i,j)
}
}
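## Added note: the nested loop above is one way to get the per-subject /
## per-activity means; an equivalent, shorter sketch (assuming the same 'base'
## data frame; 'final_alt' is a new, purely illustrative object name) uses
## aggregate():
final_alt <- aggregate(base[, -(1:2)],
                       by = list(subject_id = base$subject_id,
                                 activity = base$activity),
                       FUN = mean, na.rm = TRUE)
## 'final_alt' should hold the same means as 'final', up to row order and the
## "mean(...)" column renaming that is applied to 'final' below.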
##rename the columns
names <- names(base)
for (x in 3:length(names)) {
names[x] <- paste("mean(",names[x],")",sep="")
}
colnames(final) <- names
##write the table for submission
write.table(final, file="final.txt", row.names = FALSE)
|
/run_analysis.r
|
no_license
|
bbensid/Getting_and_Cleaning_Data_Project
|
R
| false | false | 2,401 |
r
|
library(robCompositions)
### Name: trondelagC
### Title: regional geochemical survey of soil C in Norway
### Aliases: trondelagC
### Keywords: data
### ** Examples
data(trondelagC)
str(trondelagC)
|
/data/genthat_extracted_code/robCompositions/examples/trondelagC.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 205 |
r
|
setwd("C:/Users/abirp/OneDrive/Desktop/Coursera_Projects/Course_4_exploratory_data_analysis/exdata_data_household_power_consumption")
list.files("./")
household_power_consumption<-read.table("./household_power_consumption.txt",sep = ";",header = TRUE,na.strings="?")
household_power_consumption$Timestamp<-paste(household_power_consumption$Date,household_power_consumption$Time)
household_power_consumption$Timestamp<-as.POSIXct(strptime(household_power_consumption$Timestamp,"%d/%m/%Y %H:%M:%S"))
household_power_consumption$Date<-as.Date(household_power_consumption$Date,'%d/%m/%Y')
subset_data<-household_power_consumption[(household_power_consumption$Date>= as.Date("2007-02-01"))&(household_power_consumption$Date<= as.Date("2007-02-02")) ,]
subset_data$Sub_metering_1<-as.numeric(subset_data$Sub_metering_1)
subset_data$Sub_metering_2<-as.numeric(subset_data$Sub_metering_2)
subset_data$Sub_metering_3<-as.numeric(subset_data$Sub_metering_3)
subset_data$Voltage<-as.numeric(subset_data$Voltage)
subset_data$Global_reactive_power<-as.numeric(subset_data$Global_reactive_power)
png(filename="plot4.png", width=480, height=480, units="px")
par(mfrow=c(2,2))
plot(y=subset_data$Global_active_power,x=subset_data$Timestamp,type='l',xlab="",ylab='Global Active Power')
plot(y=subset_data$Voltage,x=subset_data$Timestamp,type='l',xlab="datetime",ylab='Voltage')
plot(y=subset_data$Sub_metering_1,x=subset_data$Timestamp,type = "l",xlab = "",ylab='Energy sub metering')
lines(y=subset_data$Sub_metering_2,x=subset_data$Timestamp,col="red")
lines(y=subset_data$Sub_metering_3,x=subset_data$Timestamp,col="blue")
legend("topright", col = c("Black", "Red", "Blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1,cex=0.5)
plot(y=subset_data$Global_reactive_power,x=subset_data$Timestamp,type='l',xlab="datetime",ylab='Global_reactive_power')
dev.off()
|
/plot4.R
|
no_license
|
abir-pattnaik/ExData_Plotting1
|
R
| false | false | 1,924 |
r
|
---
title: "market_regime_identification_Using_CorrMatrix"
author: "Ran Cao"
date: "8/21/2020"
output: html_document
---
# import data & data pre-processing
library(readr)
asset_returns <- read.csv("~/Library/asset_returns.csv")
View(asset_returns)
ncol(asset_returns)
factors_returns <- read.csv("~/Library/factors_returns.csv")
ncol(factors_returns)
# delete columns that contain NA
factors_returns<-subset(factors_returns, select=colMeans(is.na(factors_returns)) == 0)
asset_returns<-subset(asset_returns, select=colMeans(is.na(asset_returns)) == 0) # change from 61 to 51
#convert data type
factors_returns$fecha = as.POSIXct(strptime(factors_returns$fecha, format = "%Y-%m-%d"))
# combine two datasets together
whole_dataset = cbind(factors_returns,asset_returns) # first 50 are factors, last 51 are asset
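## Added sanity check (assumption: the two csv files cover the same dates in
## the same row order, which cbind() relies on but does not verify):
stopifnot(nrow(factors_returns) == nrow(asset_returns))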
ncol(whole_dataset)
View(whole_dataset)
# correlation matrix difference and largest eigenvalue difference visualization & clustering with PAM method
library(lubridate)
library(dplyr)   # needed below for %>%, dplyr::filter() and select()
window_list = list()
matrix_list = list()
largest_eigenvalue = list()
diff_list = list()
per_row_result = list()
eigenvalue_diff_list = list()
eigenvalue_diff = list()
# 20 year data
starting_date = whole_dataset$fecha[1]
ending_date = tail(whole_dataset$fecha,1)
# initial setup (the first window); the window length is c months
window_date_function <- function(c) {
window_start_date <- starting_date
window_end_date <- whole_dataset$fecha[1] %m+% months(c)
window_list[[1]] <<- c(window_start_date,window_end_date) # use <<- like the later windows so the first window is stored in the same (global) list
i = 2
while (window_end_date <= ending_date)
{
window_start_date <- window_start_date %m+% months(1)
window_end_date <- window_start_date %m+% months(c)
window_list[[i]] <<- c(window_start_date,window_end_date)
i <- i+1
}
t = length(window_list)
window_list[[t]][2] <<- ending_date
# matrix list
# loop
for (i in 1:length(window_list)){
two_year_window = whole_dataset %>% dplyr::filter(fecha>=window_list[[i]][1]& fecha<=window_list[[i]][2]) %>% select(-1)
matrix_list[[i]] <<- cor(two_year_window, method = "pearson", use = "complete.obs")
largest_eigenvalue[[i]] <- eigen(matrix_list[[i]])$values[1]
}
# get the pairwise differences between correlation matrices
num_variable = ncol(whole_dataset)-1 # since the first column is time, not assets
# loop
for (i in 1:length(matrix_list)){
for(j in 1:length(matrix_list)){
matrix_diff = matrix_list[[i]]-matrix_list[[j]]
per_row_result[[j]] <- abs(sum(matrix_diff))/num_variable/num_variable
}
diff_list[[i]] <<- per_row_result
}
corr_matrix_diff <- data.frame(matrix(unlist(diff_list), nrow = t, byrow = T), stringsAsFactors = F)
corr_matrix_diff_list <<- as.matrix(corr_matrix_diff)
# create diff list for the eigenvalue
for (i in 1:length(largest_eigenvalue)){
for(j in 1:length(largest_eigenvalue)){
matrix_diff = largest_eigenvalue[[i]]-largest_eigenvalue[[j]]
eigenvalue_diff[[j]] <- abs(sum(matrix_diff))
}
eigenvalue_diff_list[[i]] <<- eigenvalue_diff
}
largesteigenvalue_diff <- data.frame(matrix(unlist(eigenvalue_diff_list), nrow = t, byrow = T), stringsAsFactors = F)
largesteigenvalue_diff_list <<- as.matrix(largesteigenvalue_diff)
# change the column/row names
starting = ymd('2000-06-05')
tt = c(starting)
for (i in 2:t){
starting = starting %m+% months(1)
tt[i] = starting
}
tt = as.character(tt)
# visualize corr matrix diff
colnames(corr_matrix_diff_list) <<-tt
rownames(corr_matrix_diff_list) <<-tt
colnames(largesteigenvalue_diff_list) <<-tt
rownames(largesteigenvalue_diff_list) <<-tt
}
# main
window_date_function(c=6) # c represents the months. half year c=6, one year c=12, 1.5 year c=18, 2 year c=24
View(largesteigenvalue_diff_list)
View(corr_matrix_diff_list)
# one year
window_date_function(c=12)
# one and half year
window_date_function(c=18)
# two years
window_date_function(c=24)
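# (Hedged sketch, not in the original analysis.) window_date_function() writes its
# results into the global matrices via <<-, so the successive calls above overwrite
# each other. One way to keep the output for every window length is to snapshot the
# globals after each call:
results_by_length <- list()
for (len in c(6, 12, 18, 24)) {
  window_date_function(c = len)
  results_by_length[[as.character(len)]] <- list(
    corr_diff = corr_matrix_diff_list,
    eig_diff  = largesteigenvalue_diff_list)
}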
# visualization for corr matrix & largest eigenvalue differences
library(gplots)
my_palette <- colorRampPalette(c("white","orange"))
heatmap.2(corr_matrix_diff_list,
main = "Corr Matrices Differences", # heat map title
notecol="black", # change font color of cell labels to black
density.info="none", # turns off density plot inside color legend
trace="none", # turns off trace lines inside the heat map
col=my_palette,
Rowv=FALSE,
Colv=FALSE,
dendrogram="none")
heatmap.2(largesteigenvalue_diff_list,
main = "Largest Eigenvalue Differences", # heat map title
notecol="black", # change font color of cell labels to black
density.info="none", # turns off density plot inside color legend
trace="none", # turns off trace lines inside the heat map
col=my_palette,
Rowv=FALSE,
Colv=FALSE,
dendrogram="none")
###################################### clustering
# correlation matrix diff
library(cluster)
library(ggplot2)
silhouette_width = sapply(2:10,
FUN = function(x) pam(x = corr_matrix_diff_list,k = x,diss=TRUE)$silinfo$avg.width)
ggplot(data=data.frame(cluster = 2:10,silhouette_width),aes(x=cluster,y=silhouette_width))+
geom_line(col='steelblue',size=1.2)+
geom_point()+
scale_x_continuous(breaks=seq(2,10,1))
# potential data points: 4 and 7 (2 years window length)
pam.corrmatrix4 <- pam(corr_matrix_diff_list, 4,diss=TRUE)
pam.corrmatrix7 <- pam(corr_matrix_diff_list, 7,diss=TRUE)
print(pam.corrmatrix4)
pam.corrmatrix4$clustering
pam.corrmatrix7$clustering
corrmatrix4 = list()
for (i in 1:4){
corrmatrix4[[i]] = pam.corrmatrix4$clustering[pam.corrmatrix4$clustering == i]
}
corrmatrix7 = list()
for (i in 1:7){
corrmatrix7[[i]] = pam.corrmatrix7$clustering[pam.corrmatrix7$clustering == i]
}
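# (Hedged sketch, not in the original analysis.) The cluster assignments above are
# named by window start date, so contiguous regime segments (like the manual date
# tables further below) can be recovered by run-length encoding the labels:
cluster_to_segments <- function(clustering) {
  runs   <- rle(as.integer(clustering))
  ends   <- cumsum(runs$lengths)
  starts <- c(1, head(ends, -1) + 1)
  data.frame(regime = runs$values,
             start  = names(clustering)[starts],
             end    = names(clustering)[ends])
}
# e.g. cluster_to_segments(pam.corrmatrix4$clustering)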
# check # optimal points for the 5 largest eigenvalue approach
silhouette_width = sapply(2:10,
FUN = function(x) pam(x = largesteigenvalue_diff_list,k = x,diss=TRUE)$silinfo$avg.width)
ggplot(data=data.frame(cluster = 2:10,silhouette_width),aes(x=cluster,y=silhouette_width))+
geom_line(col='steelblue',size=1.2)+
geom_point()+
scale_x_continuous(breaks=seq(2,10,1))
# potential data points: 2 (half year)
pam.eigenvalue2 <- pam(largesteigenvalue_diff_list, 2,diss=TRUE)
eigenvalue2 = list()
for (i in 1:2){
eigenvalue2[[i]] = pam.eigenvalue2$clustering[pam.eigenvalue2$clustering == i]
}
list1 <- corrmatrix4[1][1]
df1 <- as.data.frame(list1)
vec1 <- row.names(df1)
list2 <- corrmatrix4[2][1]
df2 <- as.data.frame(list2)
vec2 <- row.names(df2)
list3 <- corrmatrix4[3][1]
df3 <- as.data.frame(list3)
vec3 <- row.names(df3)
list4 <- corrmatrix4[4][1]
df4 <- as.data.frame(list4)
vec4 <- row.names(df4)
# save output as csv
write.csv(vec1,"~/Library/regime1",row.names = FALSE)
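# (Hedged sketch.) The same export can be done for all four regimes in one loop
# instead of building vec1..vec4 by hand; the output paths are illustrative only.
for (r in seq_along(corrmatrix4)) {
  write.csv(names(corrmatrix4[[r]]),
            paste0("~/Library/regime", r, ".csv"), row.names = FALSE)
}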
# optimization 1: use the 5 largest eigenvalues instead of only the single largest eigenvalue
# based on two measures (Euclidean Distance & Cosine Similarity)
temp_matrix_list = list()
five_largest_eigenvalue = list()
# loop
for (i in 1:length(window_list)){
two_year_window = whole_dataset %>% dplyr::filter(fecha>=window_list[[i]][1]& fecha<=window_list[[i]][2]) %>% select(-1)
temp_matrix_list[[i]] = cor(two_year_window, method = "pearson", use = "complete.obs")
five_largest_eigenvalue[[i]] <- eigen(temp_matrix_list[[i]])$values[1:5]
}
# calculate distance between two vectors - two ways to calculate: Euclidean Distance and Cosine Similarity
library(geometry)
# get the pairwise distances between the 5-largest-eigenvalue vectors
euclidean_distance = list()
euclidean_distance_per_row = list()
cosine_distance = list()
cosine_distance_per_row = list()
# loop
for (y in 1:length(temp_matrix_list)){
for(u in 1:length(temp_matrix_list)){
euclidean_distance_per_row[[u]] = sqrt(sum((five_largest_eigenvalue[[y]]-five_largest_eigenvalue[[u]])^2))
cosine_distance_per_row[[u]] = dot(five_largest_eigenvalue[[y]],five_largest_eigenvalue[[u]])/sqrt(sum(five_largest_eigenvalue[[y]]^2))/sqrt(sum(five_largest_eigenvalue[[u]]^2))
}
euclidean_distance[[y]] <- euclidean_distance_per_row
cosine_distance[[y]] <- cosine_distance_per_row
}
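# (Hedged sanity check, not in the original analysis; uses dot() from the geometry
# package loaded above.) The cosine similarity formula used in the loop,
# dot(x, y) / (||x|| * ||y||), on toy vectors: parallel vectors give 1, orthogonal 0.
v1 <- c(1, 2, 3); v2 <- c(2, 4, 6); v3 <- c(-2, 1, 0)
dot(v1, v2) / sqrt(sum(v1^2)) / sqrt(sum(v2^2))  # 1
dot(v1, v3) / sqrt(sum(v1^2)) / sqrt(sum(v3^2))  # 0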
ed_temp <- data.frame(matrix(unlist(euclidean_distance), nrow = length(euclidean_distance), byrow = T), stringsAsFactors = F)
euclidean_distance_output <- as.matrix(ed_temp)
cd_temp <- data.frame(matrix(unlist(cosine_distance), nrow = length(cosine_distance), byrow = T), stringsAsFactors = F)
cosine_distance_output <- as.matrix(cd_temp)
starting = ymd('2000-06-05')
tt = c(starting)
for (i in 2:length(euclidean_distance)){
starting = starting %m+% months(1)
tt[i] = starting
}
tt = as.character(tt)
# visualize corr matrix diff
colnames(euclidean_distance_output) <- tt
rownames(euclidean_distance_output) <- tt
colnames(cosine_distance_output) <- tt
rownames(cosine_distance_output) <- tt
# heatmap in axis order
# for Corr Matrices Differences
library(gplots)
my_palette <- colorRampPalette(c("white","orange"))
heatmap.2(euclidean_distance_output,
main = "5 Largest Eigenvalues Change Using Euclidean Distance", # heat map title
notecol="black", # change font color of cell labels to black
density.info="none", # turns off density plot inside color legend
trace="none", # turns off trace lines inside the heat map
col=my_palette,
Rowv=FALSE,
Colv=FALSE,
dendrogram="none")
heatmap.2(cosine_distance_output,
main = "5 Largest Eigenvalues Change Using Cosine Distance", # heat map title
notecol="black", # change font color of cell labels to black
density.info="none", # turns off density plot inside color legend
trace="none", # turns off trace lines inside the heat map
col=my_palette,
Rowv=FALSE,
Colv=FALSE,
dendrogram="none")
# cluster
#euclidean distance
silhouette_width = sapply(2:10,
FUN = function(x) pam(x = euclidean_distance_output,k = x,diss=TRUE)$silinfo$avg.width)
ggplot(data=data.frame(cluster = 2:10,silhouette_width),aes(x=cluster,y=silhouette_width))+
geom_line(col='steelblue',size=1.2)+
geom_point()+
scale_x_continuous(breaks=seq(2,10,1))
# euclidean distance @ half year - optimal: 2
pam.euclideandishalf2 <- pam(euclidean_distance_output,2,diss=TRUE)
# euclidean distance @ one and half year - optimal: 2,4
pam.euclideandisonehalf4 <- pam(euclidean_distance_output,4,diss=TRUE)
# euclidean distance @ 2yrs - optimal: 2&5
pam.euclideandis2 <- pam(euclidean_distance_output,2,diss=TRUE)
pam.euclideandis5 <- pam(euclidean_distance_output,5,diss=TRUE)
euclideandis2 = list()
euclideandishalf2 = list()
for (i in 1:2){
euclideandishalf2[[i]] = pam.euclideandishalf2$clustering[pam.euclideandishalf2$clustering == i]
}
euclideandisonehalf4 = list()
for (i in 1:4){
euclideandisonehalf4[[i]] = pam.euclideandisonehalf4$clustering[pam.euclideandisonehalf4$clustering == i]
}
euclideandis5 = list()
for (i in 1:5){
euclideandis5[[i]] = pam.euclideandis5$clustering[pam.euclideandis5$clustering == i]
}
# cosine distance
silhouette_width = sapply(2:10,
FUN = function(x) pam(x = cosine_distance_output,k = x,diss=TRUE)$silinfo$avg.width)
ggplot(data=data.frame(cluster = 2:10,silhouette_width),aes(x=cluster,y=silhouette_width))+
geom_line(col='steelblue',size=1.2)+
geom_point()+
scale_x_continuous(breaks=seq(2,10,1))
# cosine distance @ 2 years - optimal: 2
pam.cosinedistance2 <- pam(cosine_distance_output,2,diss=TRUE)
# cosine distance @ one year - optimal: 2
pam.cosinedistance3 <- pam(cosine_distance_output,3,diss=TRUE)
pam.cosinedistance4 <- pam(cosine_distance_output,4,diss=TRUE)
cosinedistance2 = list()
for (i in 1:2){
cosinedistance2[[i]] = pam.cosinedistance2$clustering[pam.cosinedistance2$clustering == i]
}
cosinedistance3 = list()
for (i in 1:3){
cosinedistance3[[i]] = pam.cosinedistance3$clustering[pam.cosinedistance3$clustering == i]
}
cosinedistance4 = list()
for (i in 1:4){
cosinedistance4[[i]] = pam.cosinedistance4$clustering[pam.cosinedistance4$clustering == i]
}
# optimization 2: apply another clustering method, treating the data as high-dimensional
# regarding each window as a variable, and each asset as a component of the variable
# loop this process
new_window_list = list()
# 20 year data
starting_date = whole_dataset$fecha[1]
ending_date = tail(whole_dataset$fecha,1)
# initial setup (the first window) for a two year length
window_start_date <- starting_date
window_end_date <- whole_dataset$fecha[1] %m+% months(6)
new_window_list[[1]] = c(window_start_date,window_end_date)
i = 2
while (window_end_date <= ending_date)
{
window_start_date <- window_start_date %m+% months(1)
window_end_date <- window_start_date %m+% months(6)
new_window_list[[i]] <- c(window_start_date,window_end_date)
i <- i+1
}
k = length(new_window_list)
new_window_list[[k]][2] <- ending_date
# construct new data
window_return1 <- whole_dataset %>% dplyr::filter(fecha>=new_window_list[[1]][1]&fecha<=new_window_list[[1]][2]) %>% select(-1) %>% colSums()
for (i in 2:length(new_window_list)){
local_window_return = whole_dataset %>% dplyr::filter(fecha>=new_window_list[[i]][1]&fecha<=new_window_list[[i]][2]) %>% select(-1) %>% colSums()
window_return1 = cbind(window_return1,local_window_return)
}
tt1 = sapply(new_window_list, function(w) as.character(w[1]))  # window start-date labels (tt1 was previously undefined)
colnames(window_return1) = tt1
# clustering
window_return = t(window_return1)
#find the number of optimal clusters
# Total within sum of squares Plot
within_ss = sapply(1:10,FUN = function(x){
set.seed(617)
kmeans(x = window_return,centers = x,iter.max = 1000,nstart = 25)$tot.withinss})
ggplot(data=data.frame(cluster = 1:10,within_ss),aes(x=cluster,y=within_ss))+
geom_line(col='steelblue',size=1.2)+
geom_point()+
scale_x_continuous(breaks=seq(1,10,1))
# ratio plot
ratio_ss = sapply(1:10,FUN = function(x) {
set.seed(617)
km = kmeans(x = window_return,centers = x,iter.max = 1000,nstart = 25)
km$betweenss/km$totss} )
ggplot(data=data.frame(cluster = 1:10,ratio_ss),aes(x=cluster,y=ratio_ss))+
geom_line(col='steelblue',size=1.2)+
geom_point()+
scale_x_continuous(breaks=seq(1,10,1))
# optimal result
potential_choices = list()
# Silhouette Plot
silhouette_width = sapply(2:10,
                          FUN = function(x) pam(x = window_return,k = x,diss=FALSE)$silinfo$avg.width) # window_return is a data matrix, not a dissimilarity
ggplot(data=data.frame(cluster = 2:10,silhouette_width),aes(x=cluster,y=silhouette_width))+
geom_line(col='steelblue',size=1.2)+
geom_point()+
scale_x_continuous(breaks=seq(2,10,1))
# half year
km_half = kmeans(x = window_return,centers = 3,iter.max=10000,nstart=25)
k_segments_half = km_half$cluster
table(k_segments_half)
potential_choices[[4]] = k_segments_half
# 2 year
km = kmeans(x = window_return,centers = 4,iter.max=10000,nstart=25)
k_segments = km$cluster
table(k_segments)
potential_choices[[2]] = k_segments
km3 = kmeans(x = window_return,centers = 3,iter.max=10000,nstart=25)
k_segments3 = km3$cluster
table(k_segments3)
potential_choices[[1]] = k_segments3
km2 = kmeans(x = window_return,centers = 2,iter.max=10000,nstart=25)
k_segments2 = km2$cluster
table(k_segments2)
library(psych)
temp = data.frame(cluster = factor(k_segments),
factor1 = fa(window_return,nfactors = 2,rotate = 'varimax')$scores[,1],
factor2 = fa(window_return,nfactors = 2,rotate = 'varimax')$scores[,2])
ggplot(temp,aes(x=factor1,y=factor2,col=cluster))+
geom_point()
# Hierarchical Cluster
d = dist(x = window_return,method = 'euclidean')
clusters = hclust(d = d,method='ward.D2')
plot(clusters)
library(gridExtra)
library(factoextra)
grid.arrange(fviz_dend(x = clusters,k=2),
fviz_dend(x = clusters,k=3),
fviz_dend(x = clusters,k=4)
)
h_segments2 = cutree(tree = clusters,k=2)
table(h_segments2)
h_segments3 = cutree(tree = clusters,k=3)
table(h_segments3)
h_segments4 = cutree(tree = clusters,k=4)
table(h_segments4)
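# (Hedged sketch.) The cluster labels can be lined up with the window start dates
# (the rownames of window_return, set from tt1 above) for inspection:
head(data.frame(window_start = rownames(window_return), cluster3 = h_segments3, cluster4 = h_segments4))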
```
```{r}
# calculate the volatility of each market
# new method, half year, k-means, 3 clusters
halfyear_3_regime1 = list()
halfyear_3_regime2 = list()
halfyear_3_regime3 = list()
halfyear_3_regime2[[1]] = c(ymd('2000-06-05'), ymd('2001-03-04'))
halfyear_3_regime3[[1]] = c(ymd('2001-03-05'), ymd('2001-05-04'))
halfyear_3_regime2[[2]] = c(ymd('2001-05-05'), ymd('2001-10-04'))
halfyear_3_regime3[[2]] = c(ymd('2001-10-05'), ymd('2002-01-04'))
halfyear_3_regime1[[1]] = c(ymd('2002-01-05'), ymd('2002-03-04'))
halfyear_3_regime3[[3]] = c(ymd('2002-03-05'), ymd('2002-05-05'))
halfyear_3_regime2[[3]] = c(ymd('2002-05-05'), ymd('2002-11-04'))
halfyear_3_regime3[[4]] = c(ymd('2002-11-05'), ymd('2003-03-04'))
halfyear_3_regime1[[2]] = c(ymd('2003-03-05'), ymd('2004-03-04'))
halfyear_3_regime3[[5]] = c(ymd('2004-03-05'), ymd('2004-09-04'))
halfyear_3_regime1[[3]] = c(ymd('2004-09-05'), ymd('2005-03-04'))
halfyear_3_regime3[[6]] = c(ymd('2005-03-05'), ymd('2005-07-04'))
halfyear_3_regime1[[4]] = c(ymd('2005-07-05'), ymd('2006-04-04'))
halfyear_3_regime3[[7]] = c(ymd('2006-04-05'), ymd('2006-10-04'))
halfyear_3_regime1[[5]] = c(ymd('2006-10-05'), ymd('2007-11-04'))
halfyear_3_regime3[[8]] = c(ymd('2007-11-05'), ymd('2008-04-04'))
halfyear_3_regime2[[4]] = c(ymd('2008-04-05'), ymd('2009-02-04'))
halfyear_3_regime1[[6]] = c(ymd('2009-02-05'), ymd('2010-02-04'))
halfyear_3_regime3[[9]] = c(ymd('2010-02-05'), ymd('2010-08-04'))
halfyear_3_regime1[[7]] = c(ymd('2010-08-05'), ymd('2011-01-04'))
halfyear_3_regime3[[10]] = c(ymd('2011-01-05'), ymd('2011-06-04'))
halfyear_3_regime2[[5]] = c(ymd('2011-06-05'), ymd('2011-11-04'))
halfyear_3_regime3[[11]] = c(ymd('2011-11-05'), ymd('2012-01-04'))
halfyear_3_regime1[[8]] = c(ymd('2012-01-05'), ymd('2012-02-04'))
halfyear_3_regime3[[12]] = c(ymd('2012-02-05'), ymd('2012-10-04'))
halfyear_3_regime1[[9]] = c(ymd('2012-10-05'), ymd('2013-01-04'))
halfyear_3_regime3[[13]] = c(ymd('2013-01-05'), ymd('2014-06-04'))
halfyear_3_regime1[[10]] = c(ymd('2014-06-05'), ymd('2014-07-04'))
halfyear_3_regime3[[14]] = c(ymd('2014-07-05'), ymd('2015-06-04'))
halfyear_3_regime2[[6]] = c(ymd('2015-06-05'), ymd('2015-12-04'))
halfyear_3_regime3[[15]] = c(ymd('2015-12-05'), ymd('2016-05-04'))
halfyear_3_regime1[[11]] = c(ymd('2016-05-05'), ymd('2016-09-04'))
halfyear_3_regime3[[16]] = c(ymd('2016-09-05'), ymd('2017-03-04'))
halfyear_3_regime1[[12]] = c(ymd('2017-03-05'), ymd('2017-12-04'))
halfyear_3_regime3[[17]] = c(ymd('2017-12-05'), ymd('2020-01-04'))
halfyear_3_regime2[[7]] = c(ymd('2020-01-05'), ymd('2020-05-28'))
# bear
ly_bear = list()
ly_bear[[1]] = c(ymd('2000-06-05'), ymd('2002-07-24'))
ly_bear[[2]] = c(ymd('2007-10-18'), ymd('2009-03-16'))
ly_bear[[3]] = c(ymd('2020-02-21'), ymd('2020-05-21'))
regime1.data = list()
for (a in 1:length(ly_bear)){
regime1.data.copy = factors_returns %>% dplyr::filter(fecha>=ly_bear[[a]][1] & fecha<=ly_bear[[a]][2]) %>% select(-1)
regime1.data = rbind(regime1.data,regime1.data.copy)
}
View(regime1.data)
# bull
regime2.data = list()
regime2.data.copy = factors_returns %>% dplyr::filter(fecha>='2002-07-24' & fecha<='2007-10-18') %>% select(-1)
regime2.data = rbind(regime2.data,regime2.data.copy)
View(regime2.data)
# recovery
regime3.data = list()
regime3.data.copy = factors_returns %>% dplyr::filter(fecha>='2009-03-16' & fecha<='2020-02-21') %>% select(-1)
regime3.data = rbind(regime3.data,regime3.data.copy)
View(regime3.data)
volatility_list1 = list()
for(i in 1:ncol(regime1.data)){
volatility_list1[[i]] = sqrt(var(regime1.data[,i]))
}
mean(unlist(volatility_list1)) #0.0101847
volatility_list2 = list()
for(i in 1:ncol(regime2.data)){
volatility_list2[[i]] = sqrt(var(regime2.data[,i]))
}
mean(unlist(volatility_list2)) #0.01741643
volatility_list3 = list()
for(i in 1:ncol(regime3.data)){
volatility_list3[[i]] = sqrt(var(regime3.data[,i]))
}
mean(unlist(volatility_list3)) #0.01045842
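# (Hedged sketch, same calculation as the three loops above, assuming regime1.data,
# regime2.data and regime3.data are data frames as constructed earlier.) The mean
# column-wise standard deviation per regime in one helper:
mean_vol <- function(df) mean(apply(df, 2, sd))
sapply(list(bear = regime1.data, bull = regime2.data, recovery = regime3.data), mean_vol)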
|
/Market_Regime_Identification_CorrMatrix.R
|
no_license
|
ran-cao/Quantitative_Modeling_Market_Regime_Detection
|
R
| false | false | 20,109 |
r
|
getwd()
hpc <- read.table("C:/R_test/coursera/data/household_power_consumption.txt", header=T, sep=";")
hpc$Date <- as.Date(hpc$Date, format="%d/%m/%Y")
a <- hpc[(hpc$Date=="2007-02-01") | (hpc$Date=="2007-02-02"),]
a$Global_active_power <- as.numeric(as.character(a$Global_active_power))
a$Global_reactive_power <- as.numeric(as.character(a$Global_reactive_power))
a$Voltage <- as.numeric(as.character(a$Voltage))
a <- transform(a, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S")
a$Sub_metering_1 <- as.numeric(as.character(a$Sub_metering_1))
a$Sub_metering_2 <- as.numeric(as.character(a$Sub_metering_2))
a$Sub_metering_3 <- as.numeric(as.character(a$Sub_metering_3))
plot4 <- function() {
par(mfrow=c(2,2))
##PLOT 1
plot(a$timestamp,a$Global_active_power, type="l", xlab="", ylab="Global Active Power")
##PLOT 2
plot(a$timestamp,a$Voltage, type="l", xlab="datetime", ylab="Voltage")
##PLOT 3
plot(a$timestamp,a$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(a$timestamp,a$Sub_metering_2,col="red")
lines(a$timestamp,a$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"),
c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),
lty=c(1,1), bty="n", cex=.5)
#PLOT 4
plot(a$timestamp,a$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
#OUTPUT
dev.copy(png, file="plot4.png", width=500, height=500)
dev.off()
cat("plot4.png has been saved in", getwd())
}
plot4()
|
/plot4.R
|
no_license
|
Moonswng/ExData_Plotting1
|
R
| false | false | 1,522 |
r
|
#' Predicting playoff series results
#'
#' @param teamH The team with home-court advantage (HCA) for the series.
#' @param teamA The team without home-court advantage.
#' @param t1pH Probability of the team with HCA winning at home.
#' @param t1pA Probability of the team with HCA winning, when they play on the road.
#'
#' @return A numeric vector with probabilities of each possible series outcome.
#'
#' @keywords internal
#'
#' @author astroud
seriesOutcomes = function(teamH, teamA, t1pH, t1pA) {
h4 = t1pH^2*t1pA^2
h5 = (2*t1pH^3*t1pA*(1-t1pA)) + (2* t1pH^2*t1pA^2*(1-t1pH))
h6 = (6*t1pH^2*t1pA^2*(1-t1pH)*(1-t1pA)) + (3*t1pH*t1pA^3*(1-t1pH)^2) + (t1pH^3*t1pA*(1-t1pA)^2)
h7 = (t1pH^4*(1-t1pA)^3) + (9*t1pH^3*t1pA*(1-t1pH)*(1-t1pA)^2) + (9*t1pH^2*t1pA^2*(1-t1pH)^2*(1-t1pA)) + (t1pA^3*t1pH*(1-t1pH)^3)
a7 = ((1-t1pH)^4*t1pA^3) + (9*(1-t1pH)^3*(1-t1pA)*t1pH*t1pA^2) + (9*(1-t1pH)^2*(1-t1pA)^2*t1pH^2*t1pA) + ((1-t1pA)^3*(1-t1pH)*t1pH^3)
a6 = (6*(1-t1pH)^2*(1-t1pA)^2*t1pH*t1pA) + (3*(1-t1pH)*(1-t1pA)^3*t1pH^2) + ((1-t1pH)^3*(1-t1pA)*t1pA^2)
a5 = (2*(1-t1pH)^3*(1-t1pA)*t1pA) + (2*(1-t1pH)^2*(1-t1pA)^2*t1pH)
a4 = (1-t1pH)^2*(1-t1pA)^2
results = c(h4, h5, h6, h7, a7, a6, a5, a4)
names(results) = c(paste(teamH, "in", 4:7), paste(teamA, "in", 7:4))
results
}
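# (Hedged sanity check, not part of the package API.) The eight closed-form outcome
# probabilities are exhaustive, so for win probabilities strictly between 0 and 1 they
# should sum to one; the team names and default probabilities below are made up.
checkSeriesOutcomesSumToOne = function(t1pH = 0.62, t1pA = 0.45) {
  isTRUE(all.equal(sum(seriesOutcomes("HOME", "AWAY", t1pH, t1pA)), 1))
}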
#' Bar plot predicting playoff series outcomes
#'
#' @details This barplot is currently a fairly poor visualization - one of our
#' primary TODOs is to clean up and prettify the visualization it gives. We plan
#' on learning ggplot to implement this.
#'
#' @param home The team with home-court advantage in the series.
#' @param away The team without home-court advantage.
#' @param outcomes A vector with all possible outcomes of a 7-game
#' playoff series (e.g. Home wins in 6), each with its associated probability.
#' @return A bar plot showing the probability of each playoff series outcome.
#'
#' @importFrom graphics barplot
#'
#' @keywords internal
#'
#' @author kitliu5
plotSeriesOutcomes = function(home, away, outcomes) {
graphics::barplot(outcomes,
main=paste(home, "(H)", "vs.", away, "(A)", "Playoff Series Prediction", sep = " "),
horiz=TRUE,
names.arg=names(outcomes))
}
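# (Hedged sketch of the ggplot2 version mentioned in the TODO above; not part of the
# package. The function name and styling are illustrative, and it assumes 'outcomes'
# is the named probability vector produced by seriesOutcomes().)
plotSeriesOutcomesGG = function(home, away, outcomes) {
  df = data.frame(outcome = factor(names(outcomes), levels = rev(names(outcomes))),
                  probability = as.numeric(outcomes))
  ggplot2::ggplot(df, ggplot2::aes(x = probability, y = outcome)) +
    ggplot2::geom_col() +
    ggplot2::labs(title = paste(home, "(H) vs.", away, "(A) Playoff Series Prediction"))
}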
#' Bar plot predicting playoff series
#'
#' @details Note that while both the Elo model and Bradley-Terry model
#' use three-letter abbreviations to refer to teams, these abbreviations
#' differ because the models are built using datasets from different sources
#' (FiveThirtyEight versus stats.nba.com). Thus, be careful when specifying
#' team names. We hope to add a function in the future that will allow for
#' easier conversion between team names and abbreviations.
#'
#' @param prob A probability matrix with each entry representing the
#' probability of the team in that row defeating the team in that
#' column. It can be generated by \code{\link{buildWinProbMatrixBT}} or
#' \code{\link{buildWinProbMatrixElo}}.
#' @param home The team with home-court advantage in the series.
#' @param away The team without home-court advantage.
#' @param plot A logical value indicating whether the user would
#' like to generate a barplot of series outcomes.
#' @return A numeric vector giving the probability of each playoff series outcome.
#'
#' @export
#'
#' @examples
#' playoffSeries(seasonWinProb2017, home = "BOS", away = "SAS")
#' @author kitliu5 and astroud
playoffSeries = function(prob, home, away, plot = FALSE){
if (! home %in% prob$Team){
stop("Home team is unavailable")
}
if (! away %in% prob$Team){
stop("Away team is unavailable")
}
homeWinProb = prob[which(prob$Team == home), which(colnames(prob) == away)]
awayWinProb = 1 - prob[which(prob$Team == away), which(colnames(prob) == home)]
resultsDistribution = seriesOutcomes(home, away, homeWinProb, awayWinProb)
####################
# begin astroud code
if(plot) {
plotSeriesOutcomes(home, away, resultsDistribution)
}
results = c(sum(resultsDistribution[1:4]), sum(resultsDistribution[5:8]), resultsDistribution)
names(results) = c(home, away, names(resultsDistribution))
results
# end astroud code, all other code by kitliu5
#############################################
}
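# (Hedged usage sketch, mirroring the roxygen example above.) The returned vector puts
# the two overall series-win probabilities first, then the eight game-count outcomes:
# p <- playoffSeries(seasonWinProb2017, home = "BOS", away = "SAS")
# p["BOS"]       # probability Boston wins the series
# p["BOS in 6"]  # probability Boston wins in exactly six games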
|
/R/playoffSeries.R
|
no_license
|
kitliu5/nbamodelR
|
R
| false | false | 4,230 |
r
|
ctStanParnamesRaw <- function(fit){
if(!inherits(fit,'ctStanFit') || length(fit$stanfit$stanfit@sim)!=0) stop('Not an optimized ctStanFit model!')
ms <- fit$setup$matsetup
ms <- ms[ms$param > 0 & ms$when %in% 0 & ms$copyrow < 1,]
ms <- ms[order(ms$param),]
rawpopmeans<-ms$parname
iv <- ms$parname[ms$indvarying >0]
rawpopsd <-c()
rawpopucorr <- c()
if(length(iv) > 0){
rawpopsd <- paste0('rawpopsd_',iv)
rawpopucorr <- matrix(paste0('rawpopucorr_',rep(iv,times=length(iv)),'_',rep(iv,each=length(iv))),length(iv),length(iv))
rawpopucorr <- rawpopucorr[lower.tri(rawpopucorr)]
}
tipredeffects <- c()
if(fit$standata$ntipred > 0){
tipredeffects <- rep(NA,max(fit$standata$TIPREDEFFECTsetup))
counter <- 0
for(j in 1:ncol(fit$standata$TIPREDEFFECTsetup)){
for(i in 1:nrow(fit$standata$TIPREDEFFECTsetup)){
if(fit$standata$TIPREDEFFECTsetup[i,j] > 0){
counter <- counter + 1
tipredeffects[counter] <- paste0(rawpopmeans[i],'_', fit$ctstanmodel$TIpredNames[j])
}
}
}
}
return(list(rawpopmeans=rawpopmeans,rawpopsd=rawpopsd,rawpopucorr=rawpopucorr,tipredeffects=tipredeffects))
}
TIPREDEFFECTnames <- function(fit){
if(fit$standata$ntipred > 0){
ms <- fit$setup$matsetup
tie <- fit$standata$TIPREDEFFECTsetup
colnames(tie) <- paste0(fit$ctstanmodel$TIpredNames)
pars <- ms[ms$param > 0 & ms$when %in% 0 & ms$copyrow < 1,]
pars <- pars[order(pars$param),]
rownames(tie) <- pars$parname
return(tie)
} else return(c())
}
popcovnames <- function(fit){
if(fit$standata$nindvarying > 0){
ms <- fit$setup$matsetup
pars <- ms[ms$param > 0 & ms$when %in% 0 & ms$copyrow < 1 & ms$indvarying > 0,]
pars <- pars[order(pars$param),]
popcov <- matrix(NA,fit$standata$nindvarying ,fit$standata$nindvarying,
dimnames = list( pars$parname, pars$parname) )
return(popcov)
} else return(c())
}
checkTIauto <- function(){
Tpoints=30
n.manifest=1
n.TDpred=0
n.TIpred=1
n.latent=1
n.subjects=100
TI1 <- rnorm(n.subjects)
gm<-ctModel(type='omx', Tpoints=Tpoints,n.latent=n.latent,
n.TDpred=n.TDpred,n.manifest=n.manifest,
MANIFESTVAR=diag(0.5,1),
LAMBDA=diag(1,1),
DRIFT=matrix(c(-.3),nrow=1),
DIFFUSION=matrix(c(2),1),
T0VAR=diag(10,1))
for(i in 1:n.subjects){
gm$CINT[1,1] <- TI1[i]*.5+rnorm(1,0,.6)
ndat<-ctGenerate(gm,n.subjects=1,burnin=30,logdtsd=.4)
ndat <- cbind(ndat,TI1[i])
ndat[,1] <- i
if(i>1) tdat <- rbind(tdat,ndat) else tdat <- ndat
}
colnames(tdat)[4] <- 'TI1'
tdat$TI2 <- rnorm(nrow(tdat))
# colnames(tdat)[5] <- 'TI2'
tdat[2,'Y1'] <- NA
tdat[tdat[,'id']==2,'TI1'] <- NA
checkm<-ctModel(type='stanct',Tpoints=Tpoints,
MANIFESTVAR=diag(0.5,1),
# DRIFT=matrix(c(-.3),nrow=1),
# DIFFUSION=matrix(c(2),1),
n.latent=n.latent,n.TDpred=n.TDpred,
n.TIpred=2,
MANIFESTMEANS=matrix(0,nrow=n.manifest),
CINT=matrix(c('cint1'),ncol=1),
n.manifest=n.manifest,LAMBDA=diag(1))
# checkm$pars$indvarying[!checkm$pars$matrix %in% 'T0MEANS'] <- FALSE
checkm$TIpredAuto <- 1L
fit1<-ctStanFit(tdat,checkm,chains=1,optimize=TRUE,cores=1,verbose=0,
# intoverpop=F,
plot=10,
# savesubjectmatrices = F,plot=F,
# init=init,
# fit=F,
optimcontrol=list(is=FALSE,stochastic=T,subsamplesize=1,carefulfit=F),
nopriors=F)
summary(fit1)
}
whichsubjectpars <- function(standata,subjects=NA){
a1=standata$nparams+standata$nindvarying+
(standata$nindvarying^2-standata$nindvarying)/2
whichbase <- 1:a1
if(standata$intoverpop ==0 && standata$nindvarying > 0){ #then there are subject pars
whichsubjects <- a1+cseq(from=subjects,to=standata$nindvarying*standata$nsubjects,
by=standata$nsubjects)
whichbase <- c(whichbase,whichsubjects)
}
if(standata$ntipredeffects > 0) {
tipredstart <- (a1+
ifelse(standata$intoverpop,0,standata$nindvarying*standata$nsubjects)+1)
whichbase <- c(whichbase,tipredstart:(tipredstart+standata$ntipredeffects -1
# ifelse(standata$doonesubject >0,0,-1)
#disabled the doonesubject thing
))
}
return(whichbase)
}
scorecalc <- function(standata,est,stanmodel,subjectsonly=TRUE,
returnsubjectlist=TRUE,cores=2){
standata$dokalmanpriormodifier <- ifelse(subjectsonly, 1/standata$nsubjects,1/standata$ndatapoints)
scores <- list()
# browser()
sf <- suppressMessages(try(stan_reinitsf(stanmodel,standata,fast = TRUE)))
if('try-error' %in% class(sf)) fast=FALSE else fast=TRUE
for(i in 1:standata$nsubjects){
whichpars = whichsubjectpars(standata,i)
scores[[i]]<-matrix(NA,length(whichpars),ifelse(subjectsonly,1,sum(standata$subject==i)))
standata1 <- standatact_specificsubjects(standata,i)
for(j in 1:ncol(scores[[i]])){
standata1$llsinglerow=as.integer(ifelse(subjectsonly,0,j))
sf <- stan_reinitsf(stanmodel,standata1,fast = fast)
if(fast) scores[[i]][,j] <- sf$grad_log_prob(
upars=est[whichpars],
adjust_transform = TRUE)
if(!fast) scores[[i]][,j] <- rstan::grad_log_prob(sf,
upars=est[whichpars],
adjust_transform = TRUE)
}
}
if(subjectsonly) scores <- matrix(unlist(scores),nrow=length(scores[[i]]))
if(!returnsubjectlist){ #return data.table
if('list' %in% class(scores)){
scores=lapply(scores,function(x) data.table(t(x)))
scores=rbindlist(scores)
} else scores <- t(scores)
}
return(scores)
}
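# (Hedged usage sketch, not exported; mirrors how ctTIauto()/ctIndVarAuto() below call it.)
# sc <- scorecalc(standata = fit$standata, est = fit$stanfit$rawest,
#                 stanmodel = fit$stanmodel, subjectsonly = TRUE, returnsubjectlist = FALSE)
# dim(sc)  # subjects x parameters when subjectsonly = TRUE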
ctTIauto <- function(fit,tipreds=NA){
if(is.na(tipreds[1])) tipreds <- fit$standata$tipredsdata
# colnames(tipreds) <- paste0('ti',1:ncol(tipreds))
scores <- scorecalc(standata = fit$standata,
est = fit$stanfit$rawest,stanmodel = fit$stanmodel)
scores <- scores[1:fit$standata$nparams,,drop=FALSE]
rownames(scores) <- paste0('p',1:nrow(scores))
# matchindex <- match(1:fit$standata$nparams,fit$setup$matsetup$param)
# rownames(scores)[1:fit$standata$nparams] <- fit$standata$matsetup$parname[match(1:fit$standata$nparams,fit$setup$matsetup$param)]
sc <- list()
for(i in 1:nrow(scores)){
# for(j in 1:ncol(tipreds)){
# plot(sort(tipreds[,j]),scores[i,][order(tipreds[,j])],ylab=rownames(scores)[i],xlab=colnames(tipreds)[j])
# }
sc[[i]]=summary(lm(scores[i,] ~ tipreds))$coefficients
}
names(sc)[1:fit$standata$nparams]<-paste0('p',1:nrow(scores)) #fit$setup$matsetup$parname[match(1:fit$standata$nparams,fit$setup$matsetup$param)]
s2=lapply(sc,function(x) {
x=x[-1,,drop=FALSE]
rownames(x) <- gsub('^tipreds','',rownames(x))
rownames(x) <- paste0('ti',1:nrow(x))
return(x)
})
TIPREDEFFECTsetup = matrix(NA,length(s2),nrow(s2[[1]]))
for(i in 1:length(s2)){
TIPREDEFFECTsetup[i,] <- s2[[i]][,4]
}
if(fit$standata$nindvarying > 0 && fit$standata$intoverpop > 0){
fit$setup$matsetup <- data.frame(fit$standata$matsetup)
e=stan_constrainsamples(sm = fit$stanmodel,standata = fit$standata,
samples = matrix(fit$stanfit$rawest,nrow=1),savescores=TRUE,quiet=TRUE,pcovn=2)
p=sort(unique(fit$setup$matsetup$row[fit$setup$matsetup$indvarying>0]))# | fit$setup$matsetup$tipred]))
firstsub <- rep(TRUE,fit$standata$ndatapoints) #which rows represent first rows per subject
for(i in 2:fit$standata$ndatapoints){
if(fit$standata$subject[i] == fit$standata$subject[i-1]) firstsub[i] <- FALSE
}
e$etasmooth <- array(e$etaa[,3,,,drop=FALSE],dim=dim(e$etaa)[-2])
states <- ctCollapse(e$etasmooth[,firstsub,p,drop=FALSE],1,mean)
sc=list()
for(i in 1:ncol(states)){
sc[[i]]=summary(lm(states[,i] ~ tipreds))$coefficients[-1,,drop=FALSE]
}
for(i in 1:length(sc)){
TIPREDEFFECTsetup[fit$setup$matsetup$param[fit$setup$matsetup$indvarying %in% i],] <- sc[[i]][,4]
}
}
if(any(is.na(TIPREDEFFECTsetup))) warning('NA found, probably unused parameters?')
TIPREDEFFECTsetup[is.na(TIPREDEFFECTsetup)] <- 1
return(TIPREDEFFECTsetup)
}
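# (Hedged usage sketch.) ctTIauto() returns a parameters x TI-predictors matrix of
# regression p-values; one simple way to turn it into a candidate effect pattern for a
# new model is to threshold it (the .05 cutoff here is arbitrary):
# pvals <- ctTIauto(fit)
# candidate_effects <- (pvals < .05) * 1L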
ctIndVarAuto <- function(fit,aicthreshold = -2){
if(requireNamespace('lme4',quietly=TRUE)){
scores <- scorecalc(standata = fit$standata,est = fit$stanfit$rawest,
stanmodel = fit$stanmodel,subjectsonly = FALSE)
scores <- t(do.call(cbind,scores))
scores <- scores[,1:fit$standata$nparams]
colnames(scores) <- paste0('p',1:ncol(scores))
sdat <- fit$standata
sdat$savescores <- 1L
# sdat$nopriors <- 1L
sdat$popcovn <- 5L
e=rstan::constrain_pars(stan_reinitsf(fit$stanmodel,sdat),fit$stanfit$rawest)
colnames(e$etaprior) <- paste0('etaprior',1:ncol(e$etaprior))
etaprior <- scale(e$etaprior[,1:fit$ctstanmodel$n.latent,drop=FALSE])
etaprior2 <- scale(etaprior^2)
colnames(etaprior2) <- paste0(colnames(etaprior),'_sq')
scores <- scale(scores)
statelist <- list()
indvarying <- c()
out <-list()
try(suppressWarnings(suppressMessages({
for(i in 1:ncol(scores)){
# for(j in 1:ncol(tipreds)){
# plot(sort(tipreds[,j]),scores[i,][order(tipreds[,j])],ylab=rownames(scores)[i],xlab=colnames(tipreds)[j])
# }
states <-c()
dat=data.frame(scores=scores[,i],subject=fit$standata$subject,one=1,etaprior,etaprior2)
f <- paste0('scores ~ (1|subject)')
f1<-paste0('scores ~ (1|one)')
f2<- paste0('+',c(colnames(etaprior),colnames(etaprior2)),collapse='+')
l=lme4::lmer(data = dat,formula(paste0(f,f2)))
s=summary(l)
states<-rownames(s$coefficients[abs(s$coefficients[,3]) > 1.96 &
abs(s$coefficients[,1]) > .1,])[-1]
f2 <- paste0(ifelse(length(states)>0,'+',''),states,collapse='+')
l1 <- lme4::lmer(data = dat,formula(paste0(f,f2)))
l2=lme4::lmer(data = dat,formula(paste0(f1,f2)),control=lme4::lmerControl(check.nlev.gtr.1="ignore"))
a=as.matrix(anova(l,l1,l2))
# a=a[2,]-a[1,]
s1=summary(l1)
if(s1$varcor$subject[1]^2 > .05 && a[2,'AIC']-a[1,'AIC'] < 3) indvarying[i] <- TRUE #if 5% or more variance explained
saic <-c()
for(si in seq_along(states)){
fs <- paste0(ifelse(length(states)>1,'+',''),states[-si],collapse='+')
ls <- lme4::lmer(data = dat,formula(paste0(f,fs)))
as <- as.matrix(anova(l1,ls))
saic[si] <- as[2,'AIC']-as[1,'AIC']
}
sumout <- s1$coefficients[,1,drop=FALSE]^2
sumout <- cbind(sumout,c(0,saic))
sumout <- rbind(sumout, c(s1$varcor$subject[1]^2,a[1,'AIC']-a[2,'AIC']))[-1,,drop=FALSE]
rownames(sumout)[nrow(sumout)] <- 'random'
colnames(sumout)[2] <- 'AICdiff'
out[[i]] <- sumout
statelist[[i]] <- states
}
})),silent=TRUE)
names(out)[1:fit$standata$nparams]<-fit$setup$matsetup$parname[match(1:fit$standata$nparams,fit$setup$matsetup$param)]
o=sapply(out,min)
out <- out[order(o)]
out <- lapply(out,function(x) x[x[,2]< aicthreshold,,drop=FALSE])
out <- out[lapply(out,length)>0]
return(out)
} else stop('lme4 package needed -- install.packages("lme4")')
}
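# --- Illustrative usage sketch (not part of the original package code) ---
# Assumes `fit` is an optimized ctStanFit object fitted to data that include
# time-independent predictors; argument names follow the definitions in this file.
if(FALSE){ # not run
  tie_pvals <- ctTIauto(fit)                      # parameters x TI predictors matrix of p-values
  indvar <- ctIndVarAuto(fit, aicthreshold = -2)  # per-parameter summaries passing the AIC threshold
  print(indvar)
}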
|
/R/ctTIpredAuto.R
|
no_license
|
csetraynor/ctsem
|
R
| false | false | 11,074 |
r
|
ctStanParnamesRaw <- function(fit){
if(!class(fit) %in% 'ctStanFit' || !length(fit$stanfit$stanfit@sim)==0) stop('Not an optimized ctStanFit model!')
ms <- fit$setup$matsetup
ms <- ms[ms$param > 0 & ms$when %in% 0 & ms$copyrow < 1,]
ms <- ms[order(ms$param),]
rawpopmeans<-ms$parname
iv <- ms$parname[ms$indvarying >0]
rawpopsd <-c()
rawpopucorr <- c()
if(length(iv) > 0){
rawpopsd <- paste0('rawpopsd_',iv)
rawpopucorr <- matrix(paste0('rawpopucorr_',rep(iv,times=length(iv)),'_',rep(iv,each=length(iv))),length(iv),length(iv))
rawpopucorr <- rawpopucorr[lower.tri(rawpopucorr)]
}
tipredeffects <- c()
if(fit$standata$ntipred > 0){
tipredeffects <- rep(NA,max(fit$standata$TIPREDEFFECTsetup))
counter <- 0
for(j in 1:ncol(fit$standata$TIPREDEFFECTsetup)){
for(i in 1:nrow(fit$standata$TIPREDEFFECTsetup)){
if(fit$standata$TIPREDEFFECTsetup[i,j] > 0){
counter <- counter + 1
tipredeffects[counter] <- paste0(rawpopmeans[i],'_', fit$ctstanmodel$TIpredNames[j])
}
}
}
}
return(list(rawpopmeans=rawpopmeans,rawpopsd=rawpopsd,rawpopucorr=rawpopucorr,tipredeffects=tipredeffects))
}
TIPREDEFFECTnames <- function(fit){
if(fit$standata$ntipred > 0){
ms <- fit$setup$matsetup
tie <- fit$standata$TIPREDEFFECTsetup
colnames(tie) <- paste0(fit$ctstanmodel$TIpredNames)
pars <- ms[ms$param > 0 & ms$when %in% 0 & ms$copyrow < 1,]
pars <- pars[order(pars$param),]
rownames(tie) <- pars$parname
return(tie)
} else return(c())
}
popcovnames <- function(fit){
if(fit$standata$nindvarying > 0){
ms <- fit$setup$matsetup
pars <- ms[ms$param > 0 & ms$when %in% 0 & ms$copyrow < 1 & ms$indvarying > 0,]
pars <- pars[order(pars$param),]
popcov <- matrix(NA,fit$standata$nindvarying ,fit$standata$nindvarying,
dimnames = list( pars$parname, pars$parname) )
return(popcov)
} else return(c())
}
checkTIauto <- function(){
Tpoints=30
n.manifest=1
n.TDpred=0
n.TIpred=1
n.latent=1
n.subjects=100
TI1 <- rnorm(n.subjects)
gm<-ctModel(type='omx', Tpoints=Tpoints,n.latent=n.latent,
n.TDpred=n.TDpred,n.manifest=n.manifest,
MANIFESTVAR=diag(0.5,1),
LAMBDA=diag(1,1),
DRIFT=matrix(c(-.3),nrow=1),
DIFFUSION=matrix(c(2),1),
T0VAR=diag(10,1))
for(i in 1:n.subjects){
gm$CINT[1,1] <- TI1[i]*.5+rnorm(1,0,.6)
ndat<-ctGenerate(gm,n.subjects=1,burnin=30,logdtsd=.4)
ndat <- cbind(ndat,TI1[i])
ndat[,1] <- i
if(i>1) tdat <- rbind(tdat,ndat) else tdat <- ndat
}
colnames(tdat)[4] <- 'TI1'
tdat$TI2 <- rnorm(nrow(tdat))
# colnames(tdat)[5] <- 'TI2'
tdat[2,'Y1'] <- NA
tdat[tdat[,'id']==2,'TI1'] <- NA
checkm<-ctModel(type='stanct',Tpoints=Tpoints,
MANIFESTVAR=diag(0.5,1),
# DRIFT=matrix(c(-.3),nrow=1),
# DIFFUSION=matrix(c(2),1),
n.latent=n.latent,n.TDpred=n.TDpred,
n.TIpred=2,
MANIFESTMEANS=matrix(0,nrow=n.manifest),
CINT=matrix(c('cint1'),ncol=1),
n.manifest=n.manifest,LAMBDA=diag(1))
# checkm$pars$indvarying[!checkm$pars$matrix %in% 'T0MEANS'] <- FALSE
checkm$TIpredAuto <- 1L
fit1<-ctStanFit(tdat,checkm,chains=1,optimize=TRUE,cores=1,verbose=0,
# intoverpop=F,
plot=10,
# savesubjectmatrices = F,plot=F,
# init=init,
# fit=F,
optimcontrol=list(is=FALSE,stochastic=T,subsamplesize=1,carefulfit=F),
nopriors=F)
summary(fit1)
}
whichsubjectpars <- function(standata,subjects=NA){
a1=standata$nparams+standata$nindvarying+
(standata$nindvarying^2-standata$nindvarying)/2
whichbase <- 1:a1
if(standata$intoverpop ==0 && standata$nindvarying > 0){ #then there are subject pars
whichsubjects <- a1+cseq(from=subjects,to=standata$nindvarying*standata$nsubjects,
by=standata$nsubjects)
whichbase <- c(whichbase,whichsubjects)
}
if(standata$ntipredeffects > 0) {
tipredstart <- (a1+
ifelse(standata$intoverpop,0,standata$nindvarying*standata$nsubjects)+1)
whichbase <- c(whichbase,tipredstart:(tipredstart+standata$ntipredeffects -1
# ifelse(standata$doonesubject >0,0,-1)
#disabled the doonesubject thing
))
}
return(whichbase)
}
# Compute the per-subject (or per-observation, if subjectsonly=FALSE) gradient
# contributions ("scores") of the model log probability with respect to the free parameters.
scorecalc <- function(standata,est,stanmodel,subjectsonly=TRUE,
returnsubjectlist=TRUE,cores=2){
standata$dokalmanpriormodifier <- ifelse(subjectsonly, 1/standata$nsubjects,1/standata$ndatapoints)
scores <- list()
# browser()
sf <- suppressMessages(try(stan_reinitsf(stanmodel,standata,fast = TRUE)))
if('try-error' %in% class(sf)) fast=FALSE else fast=TRUE
for(i in 1:standata$nsubjects){
whichpars = whichsubjectpars(standata,i)
scores[[i]]<-matrix(NA,length(whichpars),ifelse(subjectsonly,1,sum(standata$subject==i)))
standata1 <- standatact_specificsubjects(standata,i)
for(j in 1:ncol(scores[[i]])){
standata1$llsinglerow=as.integer(ifelse(subjectsonly,0,j))
sf <- stan_reinitsf(stanmodel,standata1,fast = fast)
if(fast) scores[[i]][,j] <- sf$grad_log_prob(
upars=est[whichpars],
adjust_transform = TRUE)
if(!fast) scores[[i]][,j] <- rstan::grad_log_prob(sf,
upars=est[whichpars],
adjust_transform = TRUE)
}
}
if(subjectsonly) scores <- matrix(unlist(scores),nrow=length(scores[[i]]))
if(!returnsubjectlist){ #return data.table
if('list' %in% class(scores)){
scores=lapply(scores,function(x) data.table(t(x)))
scores=rbindlist(scores)
} else scores <- t(scores)
}
return(scores)
}
# Screen time-independent predictors: regress the per-parameter score contributions on
# the TI predictors and return a matrix of regression p-values (TIPREDEFFECTsetup).
ctTIauto <- function(fit,tipreds=NA){
if(is.na(tipreds[1])) tipreds <- fit$standata$tipredsdata
# colnames(tipreds) <- paste0('ti',1:ncol(tipreds))
scores <- scorecalc(standata = fit$standata,
est = fit$stanfit$rawest,stanmodel = fit$stanmodel)
scores <- scores[1:fit$standata$nparams,,drop=FALSE]
rownames(scores) <- paste0('p',1:nrow(scores))
# matchindex <- match(1:fit$standata$nparams,fit$setup$matsetup$param)
# rownames(scores)[1:fit$standata$nparams] <- fit$standata$matsetup$parname[match(1:fit$standata$nparams,fit$setup$matsetup$param)]
sc <- list()
for(i in 1:nrow(scores)){
# for(j in 1:ncol(tipreds)){
# plot(sort(tipreds[,j]),scores[i,][order(tipreds[,j])],ylab=rownames(scores)[i],xlab=colnames(tipreds)[j])
# }
sc[[i]]=summary(lm(scores[i,] ~ tipreds))$coefficients
}
names(sc)[1:fit$standata$nparams]<-paste0('p',1:nrow(scores)) #fit$setup$matsetup$parname[match(1:fit$standata$nparams,fit$setup$matsetup$param)]
s2=lapply(sc,function(x) {
x=x[-1,,drop=FALSE]
rownames(x) <- gsub('^tipreds','',rownames(x))
rownames(x) <- paste0('ti',1:nrow(x))
return(x)
})
TIPREDEFFECTsetup = matrix(NA,length(s2),nrow(s2[[1]]))
for(i in 1:length(s2)){
TIPREDEFFECTsetup[i,] <- s2[[i]][,4]
}
if(fit$standata$nindvarying > 0 && fit$standata$intoverpop > 0){
fit$setup$matsetup <- data.frame(fit$standata$matsetup)
e=stan_constrainsamples(sm = fit$stanmodel,standata = fit$standata,
samples = matrix(fit$stanfit$rawest,nrow=1),savescores=TRUE,quiet=TRUE,pcovn=2)
p=sort(unique(fit$setup$matsetup$row[fit$setup$matsetup$indvarying>0]))# | fit$setup$matsetup$tipred]))
firstsub <- rep(TRUE,fit$standata$ndatapoints) #which rows represent first rows per subject
for(i in 2:fit$standata$ndatapoints){
if(fit$standata$subject[i] == fit$standata$subject[i-1]) firstsub[i] <- FALSE
}
e$etasmooth <- array(e$etaa[,3,,,drop=FALSE],dim=dim(e$etaa)[-2])
states <- ctCollapse(e$etasmooth[,firstsub,p,drop=FALSE],1,mean)
sc=list()
for(i in 1:ncol(states)){
sc[[i]]=summary(lm(states[,i] ~ tipreds))$coefficients[-1,,drop=FALSE]
}
for(i in 1:length(sc)){
TIPREDEFFECTsetup[fit$setup$matsetup$param[fit$setup$matsetup$indvarying %in% i],] <- sc[[i]][,4]
}
}
if(any(is.na(TIPREDEFFECTsetup))) warning('NA found, probably unused parameters?')
TIPREDEFFECTsetup[is.na(TIPREDEFFECTsetup)] <- 1
return(TIPREDEFFECTsetup)
}
# Screen parameters for individual variation: fit lme4 random-intercept models to the
# per-observation score contributions and report effects passing the AIC threshold.
ctIndVarAuto <- function(fit,aicthreshold = -2){
if(requireNamespace('lme4',quietly=TRUE)){
scores <- scorecalc(standata = fit$standata,est = fit$stanfit$rawest,
stanmodel = fit$stanmodel,subjectsonly = FALSE)
scores <- t(do.call(cbind,scores))
scores <- scores[,1:fit$standata$nparams]
colnames(scores) <- paste0('p',1:ncol(scores))
sdat <- fit$standata
sdat$savescores <- 1L
# sdat$nopriors <- 1L
sdat$popcovn <- 5L
e=rstan::constrain_pars(stan_reinitsf(fit$stanmodel,sdat),fit$stanfit$rawest)
colnames(e$etaprior) <- paste0('etaprior',1:ncol(e$etaprior))
etaprior <- scale(e$etaprior[,1:fit$ctstanmodel$n.latent,drop=FALSE])
etaprior2 <- scale(etaprior^2)
colnames(etaprior2) <- paste0(colnames(etaprior),'_sq')
scores <- scale(scores)
statelist <- list()
indvarying <- c()
out <-list()
try(suppressWarnings(suppressMessages({
for(i in 1:ncol(scores)){
# for(j in 1:ncol(tipreds)){
# plot(sort(tipreds[,j]),scores[i,][order(tipreds[,j])],ylab=rownames(scores)[i],xlab=colnames(tipreds)[j])
# }
states <-c()
dat=data.frame(scores=scores[,i],subject=fit$standata$subject,one=1,etaprior,etaprior2)
f <- paste0('scores ~ (1|subject)')
f1<-paste0('scores ~ (1|one)')
f2<- paste0('+',c(colnames(etaprior),colnames(etaprior2)),collapse='+')
l=lme4::lmer(data = dat,formula(paste0(f,f2)))
s=summary(l)
states<-rownames(s$coefficients[abs(s$coefficients[,3]) > 1.96 &
abs(s$coefficients[,1]) > .1,])[-1]
f2 <- paste0(ifelse(length(states)>0,'+',''),states,collapse='+')
l1 <- lme4::lmer(data = dat,formula(paste0(f,f2)))
l2=lme4::lmer(data = dat,formula(paste0(f1,f2)),control=lme4::lmerControl(check.nlev.gtr.1="ignore"))
a=as.matrix(anova(l,l1,l2))
# a=a[2,]-a[1,]
s1=summary(l1)
if(s1$varcor$subject[1]^2 > .05 && a[2,'AIC']-a[1,'AIC'] < 3) indvarying[i] <- TRUE #if 5% or more variance explained
saic <-c()
for(si in seq_along(states)){
fs <- paste0(ifelse(length(states)>1,'+',''),states[-si],collapse='+')
ls <- lme4::lmer(data = dat,formula(paste0(f,fs)))
as <- as.matrix(anova(l1,ls))
saic[si] <- as[2,'AIC']-as[1,'AIC']
}
sumout <- s1$coefficients[,1,drop=FALSE]^2
sumout <- cbind(sumout,c(0,saic))
sumout <- rbind(sumout, c(s1$varcor$subject[1]^2,a[1,'AIC']-a[2,'AIC']))[-1,,drop=FALSE]
rownames(sumout)[nrow(sumout)] <- 'random'
colnames(sumout)[2] <- 'AICdiff'
out[[i]] <- sumout
statelist[[i]] <- states
}
})),silent=TRUE)
names(out)[1:fit$standata$nparams]<-fit$setup$matsetup$parname[match(1:fit$standata$nparams,fit$setup$matsetup$param)]
o=sapply(out,min)
out <- out[order(o)]
out <- lapply(out,function(x) x[x[,2]< aicthreshold,,drop=FALSE])
out <- out[lapply(out,length)>0]
return(out)
} else stop('lme4 package needed -- install.packages("lme4")')
}
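# --- Illustrative usage sketch (not part of the original package code) ---
# Assumes `fit` is an optimized ctStanFit object fitted to data that include
# time-independent predictors; argument names follow the definitions in this file.
if(FALSE){ # not run
  tie_pvals <- ctTIauto(fit)                      # parameters x TI predictors matrix of p-values
  indvar <- ctIndVarAuto(fit, aicthreshold = -2)  # per-parameter summaries passing the AIC threshold
  print(indvar)
}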
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scsf-package.r
\docType{data}
\name{inlandwaters}
\alias{inlandwaters}
\title{Inland waters, for parts of Australia.}
\description{
The inland waters are lakes and similar features, which present as holes
within the bounded regions of the Australian provinces.
}
\details{
This is an extract from the old Manifold DVD. It is in `sf` format.
}
|
/man/inlandwaters.Rd
|
no_license
|
mdsumner/scsf
|
R
| false | true | 403 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scsf-package.r
\docType{data}
\name{inlandwaters}
\alias{inlandwaters}
\title{Inland waters, for parts of Australia.}
\description{
The inland waters are lakes and similar features, which present as holes
within the bounded regions of the Australian provinces.
}
\details{
This is an extract from the old Manifold DVD. It is in `sf` format.
}
|
#' scr3_17
#'
#' @description
#' The solution to exercise 3.17 from Rizzo's book.
#'
#' Compare the performance of the Beta generator of Exercise 3.7, Example 3.8 and the R generator
#' rbeta. Fix the parameters a = 2, b = 2 and time each generator on 1000 iterations with sample size
#' 5000. (See Example 3.19.) Are the results different for different choices of a and b?
#'
#' @references Maria L. Rizzo. (2016). Statistical computing with r.
#'
#' @param n sample size
#' @param a first shape parameter of the Beta distribution
#' @param b second shape parameter of the Beta distribution
#' @param N number of timing iterations
#'
#' @return the \code{proc_time} result of timing the last generator (\code{rbeta})
#' @export
#' @importFrom GGally ggpairs
#' @importFrom MASS mvrnorm
#'
#'
#' @examples scr3_17(n=50,a=2,b=2,N=100)
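#' # Settings described in the exercise text (illustrative; slower to run):
#' \donttest{scr3_17(n = 5000, a = 2, b = 2, N = 1000)}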
scr3_17<-function(n=50,a=2,b=2,N=100){
  scr7<-function(n=1000,a=3,b=2){
    # acceptance-rejection sampler for Beta(a,b) with a Uniform(0,1) proposal
    x <- NULL
k <- 0
while (k<n){
y <- runif(1)
u <- runif(1)
if (u<((y^(a-1))*((1-y)^(b-1)))*4) {
k <- k+1
x[k] <- y
}
}
x
}
  set.seed(100)
  system.time(for (i in 1:N)   # time the acceptance-rejection generator
    scr7(n,a,b))
  set.seed(100)
  system.time(for (i in 1:N)   # time rlnorm (as written; the exercise description refers to the Example 3.8 beta generator here)
    rlnorm(n,a,b))
  set.seed(100)
  system.time(for (i in 1:N)   # time R's built-in rbeta
    rbeta(n,a,b))
}
|
/SCR/R/scr3_17.R
|
no_license
|
Creatran/SCR
|
R
| false | false | 1,056 |
r
|
#' scr3_17
#'
#' @description
#' The solution to exercise 3.17 from Rizzo's book.
#'
#' Compare the performance of the Beta generator of Exercise 3.7, Example 3.8 and the R generator
#' rbeta. Fix the parameters a = 2, b = 2 and time each generator on 1000 iterations with sample size
#' 5000. (See Example 3.19.) Are the results different for different choices of a and b?
#'
#' @references Maria L. Rizzo. (2016). Statistical computing with r.
#'
#' @param n sample size
#' @param a first shape parameter of the Beta distribution
#' @param b second shape parameter of the Beta distribution
#' @param N number of timing iterations
#'
#' @return the \code{proc_time} result of timing the last generator (\code{rbeta})
#' @export
#' @importFrom GGally ggpairs
#' @importFrom MASS mvrnorm
#'
#'
#' @examples scr3_17(n=50,a=2,b=2,N=100)
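#' # Settings described in the exercise text (illustrative; slower to run):
#' \donttest{scr3_17(n = 5000, a = 2, b = 2, N = 1000)}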
scr3_17<-function(n=50,a=2,b=2,N=100){
  scr7<-function(n=1000,a=3,b=2){
    # acceptance-rejection sampler for Beta(a,b) with a Uniform(0,1) proposal
    x <- NULL
k <- 0
while (k<n){
y <- runif(1)
u <- runif(1)
if (u<((y^(a-1))*((1-y)^(b-1)))*4) {
k <- k+1
x[k] <- y
}
}
x
}
  set.seed(100)
  system.time(for (i in 1:N)   # time the acceptance-rejection generator
    scr7(n,a,b))
  set.seed(100)
  system.time(for (i in 1:N)   # time rlnorm (as written; the exercise description refers to the Example 3.8 beta generator here)
    rlnorm(n,a,b))
  set.seed(100)
  system.time(for (i in 1:N)   # time R's built-in rbeta
    rbeta(n,a,b))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{exoExample}
\alias{exoExample}
\title{\code{ExoData} results for FoxA1 ChIP-exo experiment}
\format{An \code{ExoData} object, which is a \code{GRanges} object with additional columns.}
\usage{
data(exoExample)
}
\value{
An \code{ExoData} object with the 3rd replicate of the FoxA1
experiment from \code{ChIPExoQualExample}.
}
\description{
An \code{ExoData} object, generated with \code{ChIPexoQual} from the
following file:
}
\details{
\itemize{
\item ChIPexo_carroll_FoxA1_mouse_rep3_chr1.bam
}
}
|
/man/exoExample.Rd
|
no_license
|
welch16/ChIPexoQual
|
R
| false | true | 574 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{exoExample}
\alias{exoExample}
\title{\code{ExoData} results for FoxA1 ChIP-exo experiment}
\format{An \code{ExoData} object, which is a \code{GRanges} object with additional columns.}
\usage{
data(exoExample)
}
\value{
An \code{ExoData} object with the 3rd replicate of the FoxA1
experiment from \code{ChIPExoQualExample}.
}
\description{
An \code{ExoData} object, generated with \code{ChIPexoQual} from the
following file:
}
\details{
\itemize{
\item ChIPexo_carroll_FoxA1_mouse_rep3_chr1.bam
}
}
|
## THIS SUBMODULE HAS ALL THE NECESSARY FUNCTIONS TO
# (1) - COMPUTE THE RECHARGE RATES PER AQUIFER
# (2) - COMPUTE PRECIPITATION RUNOFF, SOIL DRAINAGE (BOTH USING MITERRA RF) AND AQUIFER RECHARGE (USING RECHARGE RATES AND PRECIPITATION)
# (3) - COMPUTE THE NO3 CONCENTRATION IN LEACHED WATER
############ READ ME --------------------- #############################
## drainage calculated by aquifer recharge rates + irrigation is not correct but I won't erase these sections
## drainage is now given as a proxy of water balance from below the root zone
source('./GIS_module/Function/compute_GIS_leaching_fractions.R')
source('./GIS_module/Function/General_GIS_functions.R')
source('./GIS_module/Function/compute_GIS_leaching_pathways.R')
source('./ExploratoryAnalysis_module/Command function/GIS_functions.R')
source('./WaterBalance_module/Functions/GW_Water_Balance.R')
## ----------------------------------------------------------------------------------------------------------------
## RECHARGE RATE COMPUTATION --------------------------------------------------------------------------------------
## ----------------------------------------------------------------------------------------------------------------
get_gw_recharge_df <- function() {
rech_df <- get_modellingDf_file('df_recharge_rates_gw', 'Recharge')
return(rech_df)
}
#this is because some polygons cannot be rasterized
rasterize_subset <- function(){
  #this is because some polygons cannot be rasterized
wrong_sub <- readOGR('./GIS_module/Output/Modelling/Recharge/subset_wrong.shp')
wrong_sub$df_recha_1 <- as.numeric(wrong_sub$df_recha_1)
rast <- general_rasterize(wrong_sub, 'df_recha_1')
return(rast)
}
rasterize_gw_recharge <- function() {
df <- get_gw_recharge_df()
gw <- load_shp('gw')
gw <- spTransform(gw, CRS(' +proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs '))
colnames(gw@data)[5] <- 'aquifer_ID'
gw <- merge(gw, df, 'aquifer_ID')
rast <- general_rasterize(gw, 'recharge')
wrong_subset <- rasterize_subset()
rast <- mosaic(rast, wrong_subset, fun='sum')
write_raster_modelling(rast, 'rast_recharge_gw', 'Recharge')
}
get_rast_gw_recharge <- function() {
gw_rech <- get_modelling_files('Recharge', 'rast_recharge_gw')
}
## ----------------------------------------------------------------------------------------------------------------
## get data --------------------------------------------------------------------------------------
## ----------------------------------------------------------------------------------------------------------------
get_drainage_rasters <- function(pattern_file, irrig_mode) {
#gets rasters within the Drainage folder
  # d <- get_drainage_rasters('rf_prec99', 'Default')
drainage_rasters <- get_nloading_gw(pattern_file, irrig_mode, main_subfolder = 'Drainage')
}
get_Nc_rasters <- function(pattern_file, irrig_mode) {
drainage_rasters <- get_nloading_gw(pattern_file, irrig_mode, main_subfolder = 'Nc')
}
## ---------------------------------------------------------------------------------------
## Write Drainage rasters ------------------------------------------
## ---------------------------------------------------------------------------------------
write_gw_rasters <- function(write, rasterfile, filename, irrig_mode, main_subfolder) {
if (write==TRUE) {
write_pathway_leaching(rasterfile, filename, irrig_mode, main_subfolder)
}
}
## ---------------------------------------------------------------------------------------
## Drainage as calculated with runoff fractions ------------------------------------------
## ---------------------------------------------------------------------------------------
#d <- rf_precipitation_runoff(1999, TRUE, 'Default')
rf_precipitation_runoff <- function(year, write, irrig_mode) {
rf <- get_GIS_file(paste0('Rf', year_prefix(year)), 'MITERRA')
prec <- get_precipitation(year, 'rast_caa')
rf_prec <- rf*prec
write_gw_rasters(write, rf_prec, paste0('rf_prec', year_prefix(year)), irrig_mode, 'Drainage')
return(rf_prec)
}
rf_precipitation_drainage <- function(year, write, irrig_mode) {
prec <- get_precipitation(year, 'rast_caa')
rf_prec <- rf_precipitation_runoff(year, FALSE,irrig_mode)
drainage_gw <- prec-rf_prec
rm(rf_prec)
write_gw_rasters(write, drainage_gw, paste0('rf_drainage', year_prefix(year)), irrig_mode, 'Drainage')
# return(drainage_gw)
}
## ---------------------------------------------------------------------------------------
## Drainage as calculated with GW recharge rates------------------------------------------
## ---------------------------------------------------------------------------------------
gw_drainage <- function(year, write, irrig_mode)
{
rech_rates <- get_rast_gw_recharge()/100
prec <- get_precipitation(year, 'rast_caa')
gw_recharge <- rech_rates*prec
write_gw_rasters(write, gw_recharge, paste0('gw_recharge', year_prefix(year)), irrig_mode, 'Drainage')
if(write==FALSE){return(gw_recharge)}
}
#corrects drainage based on the adjustment factor
#only to be used in compute_drainage_entering_gw as this is already computed in Nc
correct_gw_drainage <- function(year, write, irrig_mode)
{
drainage <- gw_drainage(year, write, irrig_mode)
adj_factor <- get_modelling_files('Adjustment_factor', paste0('rast_adj_factor', year_prefix(year)))
drainage <- drainage*adj_factor*10000
return(drainage)
}
compute_drainage_entering_gw <- function(year, irrig_mode)
{
computation_gw_general_func(correct_gw_drainage, year, irrig_mode, 'drainage')
}
## ---------------------------------------------------------------------------------------
## Computation of Nc using both methods --------------------------------------------------
## ---------------------------------------------------------------------------------------
# general function to compute Nc
method_parameters <- function(method) {
#method can be either 'RF' or 'GW'
#method_df <- method_parameters('RF')
#load conditions;; position 1 - drainage specification || position 2 - Nc filename when writing
method_rf <- c('rf_drainage', 'rf_Nc')
method_gw <- c('gw_recharge', 'gw_Nc')
if (method=='RF'){return(method_rf)} else if (method=='GW'){return(method_gw)}
}
general_Nc_func <- function(year, write, irrig_mode) {
# general function to calculate Nc (mg N/L) for each cellgrid
# get water balance for each cellgrid
water_balance <- raster(select_WB_subfolder_file('Water_surplus', 'MOSAIC', year))*1000 #Litres
n_loads <- correct_nloads_gw(year, FALSE, irrig_mode) #N-loads are already corrected to the adj factor but in kg N
adj_factor <- get_modelling_files('Adjustment_factor', paste0('rast_adj_factor', year_prefix(year)))
water_balance <- water_balance*adj_factor
n_loads <- n_loads *1000*1000 #in mg N
Nc <- n_loads/water_balance
if (write==TRUE) {
write_gw_rasters(write, rasterfile = Nc, filename = paste0('Nc_WB_', year_prefix(year)), irrig_mode = irrig_mode, 'Nc')
}
return(Nc)
rm(list=c('water_balance', 'adj_factor', 'n_loads'))
}
## FUNCTION DISREGARDED DUE TO CHANGES IN DRAINAGE (IE RECHARGE) TO WATER BALANCE
general_Nc_func_OLD <- function(year, write, irrig_mode, method) {
#general function to compute Nc based on method parameters
method_df <- method_parameters(method)
#load data
drainage <- get_drainage_rasters(paste0(method_df[1], year_prefix(year)), irrig_mode)
n_loads <- correct_nloads_gw(year, FALSE, irrig_mode) #N-loads are already corrected to the adj factor but in kg N
adj_factor <- get_modelling_files('Adjustment_factor', paste0('rast_adj_factor', year_prefix(year)))
#conversions
drainage <- drainage*adj_factor*10000 #10000 is the conversion factor mm*ha to m3 then L
n_loads <- n_loads *1000*1000 #in mg N
Nc <- n_loads/drainage
write_gw_rasters(write, Nc, paste0(method_df[2], year_prefix(year)), irrig_mode, 'Nc')
}
#Nc using soil drainage
# wrong
rf_Nc_gw <- function(year, write, irrig_mode)
{
general_Nc_func_OLD(year, write, irrig_mode, 'RF')
}
#Nc using aquifer recharge rates
# wrong
gw_Nc_gw <- function(year, write, irrig_mode)
{
general_Nc_func_OLD(year, write, irrig_mode, 'GW')
}
#Nc using WATER BALANCE
wb_Nc_gw <- function(year, write, irrig_mode) {
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
general_Nc_func(year, write, irrig)
}
## VERY IMPORTANT
df_compute_gw_Nc <- function(year, write, irrig_mode) {
# READ: correct approach to calculate NO3 in leached water
# USES THE WATER BALANCE FROM BELOW THE ROOT ZONE AS A PROXY (see WaterBalance_module)
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
gw_WB <- read.csv(select_WB_subfolder_file('GW', 'df', year)) #in litres
nload_df <-get_modellingDf_file(paste0('nload_df_gw', year_prefix(year)), 'Pathway_leaching', irrig)[, -1] #in kg N
gw_WB$n_load_kgN <- nload_df$n.load
gw_WB$Nc_mgNL <- gw_WB$n_load_kgN*1000*1000/gw_WB$wsurplus_L #in mg N/L
gw_WB$Nc_mgNO3L <- gw_WB$Nc_mgNL*50/11.3 #in mg NO3/L
if (write==TRUE){write_csv_modelling('Nc', subfolder_pattern = irrig, gw_WB, paste0('nc_df_gw', year_prefix(year)))}
return(gw_WB)
}
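# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the Water Balance and Pathway_leaching outputs for the chosen year
# already exist on disk; column names follow df_compute_gw_Nc() above.
if(FALSE){ # not run
  nc99 <- df_compute_gw_Nc(year = 1999, write = FALSE, irrig_mode = 'Irrig')
  head(nc99[, c('n_load_kgN', 'Nc_mgNL', 'Nc_mgNO3L')])
}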
## DISREGARDED
df_compute_gw_nc_OLD <- function(year,write, irrig_mode) {
#compute Nc of each GW in a dataframe format
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
rech_df <- get_modellingDf_file(paste0('drainage_df_gw', year_prefix(year)), 'Drainage', irrig)[, -1]
nload_df <-get_modellingDf_file(paste0('nload_df_gw', year_prefix(year)), 'Pathway_leaching', 'Default')[, -1] #in kg N
Nc <- nload_df$n.load*1000*1000/rech_df$drainage #in mg N/L
df <- cbind(rech_df, nload_df[, 2], Nc)
df$NcN <- df$Nc*50/11.3
colnames(df)[3] <- 'N-loads[mg N]'
colnames(df)[4] <- 'Nc[mg N/L]'
colnames(df)[5] <- 'Nc[mg NO3/L]'
if (write==TRUE){write_csv_modelling('Nc', subfolder_pattern = irrig, df, paste0('nc_df_gw', year_prefix(year)))}
return(df)
}
get_df_Nc <- function(year, irrig_mode) {
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
nc_df <- get_modellingDf_file(paste0('nc_df_gw', year_prefix(year)), 'Nc', irrig)[, -1]
return(nc_df)
}
get_main_df_gw_dataset <- function(year, write) {
gw_df <- get_df_Nc(year, irrig_mode = 'Irrig')
rech <- get_gw_recharge_df()
colnames(rech)[1] <- 'GW_ID'
#get HUs
hydro <- select_maindata_pattern('Hydrogeological')
hydro_df <- read.csv(list.files(hydro, full.names = T))
gw_df <- merge(gw_df, c(hydro_df, rech), 'GW_ID', all.x=F)
if (write==TRUE){write_csv_modelling('Nc', 'Default', gw_df, paste0('dataset_gw_', year_prefix(year)))}
return(gw_df)
}
|
/GIS_module/Function/GW_computation_functions.R
|
permissive
|
shekharsg/MITERRA-PORTUGAL
|
R
| false | false | 10,805 |
r
|
## THIS SUBMODULE HAS ALL THE NECESSARY FUNCTIONS TO
# (1) - COMPUTE THE RECHARGE RATES PER AQUIFER
# (2) - COMPUTE PRECIPITATION RUNOFF, SOIL DRAINAGE (BOTH USING MITERRA RF) AND AQUIFER RECHARGE (USING RECHARGE RATES AND PRECIPITATION)
# (3) - COMPUTE THE NO3 CONCENTRATION IN LEACHED WATER
############ READ ME --------------------- #############################
## drainage calculated by aquifer recharge rates + irrigation is not correct but I won't erase these sections
## drainage is now given as a proxy of water balance from below the root zone
source('./GIS_module/Function/compute_GIS_leaching_fractions.R')
source('./GIS_module/Function/General_GIS_functions.R')
source('./GIS_module/Function/compute_GIS_leaching_pathways.R')
source('./ExploratoryAnalysis_module/Command function/GIS_functions.R')
source('./WaterBalance_module/Functions/GW_Water_Balance.R')
## ----------------------------------------------------------------------------------------------------------------
## RECHARGE RATE COMPUTATION --------------------------------------------------------------------------------------
## ----------------------------------------------------------------------------------------------------------------
get_gw_recharge_df <- function() {
rech_df <- get_modellingDf_file('df_recharge_rates_gw', 'Recharge')
return(rech_df)
}
#this is because some polygons cannot be rasterized
rasterize_subset <- function(){
  #this is because some polygons cannot be rasterized
wrong_sub <- readOGR('./GIS_module/Output/Modelling/Recharge/subset_wrong.shp')
wrong_sub$df_recha_1 <- as.numeric(wrong_sub$df_recha_1)
rast <- general_rasterize(wrong_sub, 'df_recha_1')
return(rast)
}
rasterize_gw_recharge <- function() {
df <- get_gw_recharge_df()
gw <- load_shp('gw')
gw <- spTransform(gw, CRS(' +proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs '))
colnames(gw@data)[5] <- 'aquifer_ID'
gw <- merge(gw, df, 'aquifer_ID')
rast <- general_rasterize(gw, 'recharge')
wrong_subset <- rasterize_subset()
rast <- mosaic(rast, wrong_subset, fun='sum')
write_raster_modelling(rast, 'rast_recharge_gw', 'Recharge')
}
get_rast_gw_recharge <- function() {
gw_rech <- get_modelling_files('Recharge', 'rast_recharge_gw')
}
## ----------------------------------------------------------------------------------------------------------------
## get data --------------------------------------------------------------------------------------
## ----------------------------------------------------------------------------------------------------------------
get_drainage_rasters <- function(pattern_file, irrig_mode) {
#gets rasters within the Drainage folder
  # d <- get_drainage_rasters('rf_prec99', 'Default')
drainage_rasters <- get_nloading_gw(pattern_file, irrig_mode, main_subfolder = 'Drainage')
}
get_Nc_rasters <- function(pattern_file, irrig_mode) {
drainage_rasters <- get_nloading_gw(pattern_file, irrig_mode, main_subfolder = 'Nc')
}
## ---------------------------------------------------------------------------------------
## Write Drainage rasters ------------------------------------------
## ---------------------------------------------------------------------------------------
write_gw_rasters <- function(write, rasterfile, filename, irrig_mode, main_subfolder) {
if (write==TRUE) {
write_pathway_leaching(rasterfile, filename, irrig_mode, main_subfolder)
}
}
## ---------------------------------------------------------------------------------------
## Drainage as calculated with runoff fractions ------------------------------------------
## ---------------------------------------------------------------------------------------
#d <- rf_precipitation_runoff(1999, TRUE, 'Default')
rf_precipitation_runoff <- function(year, write, irrig_mode) {
rf <- get_GIS_file(paste0('Rf', year_prefix(year)), 'MITERRA')
prec <- get_precipitation(year, 'rast_caa')
rf_prec <- rf*prec
write_gw_rasters(write, rf_prec, paste0('rf_prec', year_prefix(year)), irrig_mode, 'Drainage')
return(rf_prec)
}
rf_precipitation_drainage <- function(year, write, irrig_mode) {
prec <- get_precipitation(year, 'rast_caa')
rf_prec <- rf_precipitation_runoff(year, FALSE,irrig_mode)
drainage_gw <- prec-rf_prec
rm(rf_prec)
write_gw_rasters(write, drainage_gw, paste0('rf_drainage', year_prefix(year)), irrig_mode, 'Drainage')
# return(drainage_gw)
}
## ---------------------------------------------------------------------------------------
## Drainage as calculated with GW recharge rates------------------------------------------
## ---------------------------------------------------------------------------------------
gw_drainage <- function(year, write, irrig_mode)
{
rech_rates <- get_rast_gw_recharge()/100
prec <- get_precipitation(year, 'rast_caa')
gw_recharge <- rech_rates*prec
write_gw_rasters(write, gw_recharge, paste0('gw_recharge', year_prefix(year)), irrig_mode, 'Drainage')
if(write==FALSE){return(gw_recharge)}
}
#corrects drainage based on the adjustment factor
#only to be used in compute_drainage_entering_gw as this is already computed in Nc
correct_gw_drainage <- function(year, write, irrig_mode)
{
drainage <- gw_drainage(year, write, irrig_mode)
adj_factor <- get_modelling_files('Adjustment_factor', paste0('rast_adj_factor', year_prefix(year)))
drainage <- drainage*adj_factor*10000
return(drainage)
}
compute_drainage_entering_gw <- function(year, irrig_mode)
{
computation_gw_general_func(correct_gw_drainage, year, irrig_mode, 'drainage')
}
## ---------------------------------------------------------------------------------------
## Computation of Nc using both methods --------------------------------------------------
## ---------------------------------------------------------------------------------------
# general function to compute Nc
method_parameters <- function(method) {
#method can be either 'RF' or 'GW'
#method_df <- method_parameters('RF')
#load conditions;; position 1 - drainage specification || position 2 - Nc filename when writing
method_rf <- c('rf_drainage', 'rf_Nc')
method_gw <- c('gw_recharge', 'gw_Nc')
if (method=='RF'){return(method_rf)} else if (method=='GW'){return(method_gw)}
}
general_Nc_func <- function(year, write, irrig_mode) {
# general function to calculate Nc (mg N/L) for each cellgrid
# get water balance for each cellgrid
water_balance <- raster(select_WB_subfolder_file('Water_surplus', 'MOSAIC', year))*1000 #Litres
n_loads <- correct_nloads_gw(year, FALSE, irrig_mode) #N-loads are already corrected to the adj factor but in kg N
adj_factor <- get_modelling_files('Adjustment_factor', paste0('rast_adj_factor', year_prefix(year)))
water_balance <- water_balance*adj_factor
n_loads <- n_loads *1000*1000 #in mg N
Nc <- n_loads/water_balance
if (write==TRUE) {
write_gw_rasters(write, rasterfile = Nc, filename = paste0('Nc_WB_', year_prefix(year)), irrig_mode = irrig_mode, 'Nc')
}
return(Nc)
rm(list=c('water_balance', 'adj_factor', 'n_loads'))
}
## FUNCTION DISREGARDED DUE TO CHANGES IN DRAINAGE (IE RECHARGE) TO WATER BALANCE
general_Nc_func_OLD <- function(year, write, irrig_mode, method) {
#general function to compute Nc based on method parameters
method_df <- method_parameters(method)
#load data
drainage <- get_drainage_rasters(paste0(method_df[1], year_prefix(year)), irrig_mode)
n_loads <- correct_nloads_gw(year, FALSE, irrig_mode) #N-loads are already corrected to the adj factor but in kg N
adj_factor <- get_modelling_files('Adjustment_factor', paste0('rast_adj_factor', year_prefix(year)))
#conversions
drainage <- drainage*adj_factor*10000 #10000 is the conversion factor mm*ha to m3 then L
n_loads <- n_loads *1000*1000 #in mg N
Nc <- n_loads/drainage
write_gw_rasters(write, Nc, paste0(method_df[2], year_prefix(year)), irrig_mode, 'Nc')
}
#Nc using soil drainage
# wrong
rf_Nc_gw <- function(year, write, irrig_mode)
{
general_Nc_func_OLD(year, write, irrig_mode, 'RF')
}
#Nc using aquifer recharge rates
# wrong
gw_Nc_gw <- function(year, write, irrig_mode)
{
general_Nc_func_OLD(year, write, irrig_mode, 'GW')
}
#Nc using WATER BALANCE
wb_Nc_gw <- function(year, write, irrig_mode) {
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
general_Nc_func(year, write, irrig)
}
## VERY IMPORTANT
df_compute_gw_Nc <- function(year, write, irrig_mode) {
# READ: correct approach to calculate NO3 in leached water
# USES THE WATER BALANCE FROM BELOW THE ROOT ZONE AS A PROXY (see WaterBalance_module)
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
gw_WB <- read.csv(select_WB_subfolder_file('GW', 'df', year)) #in litres
nload_df <-get_modellingDf_file(paste0('nload_df_gw', year_prefix(year)), 'Pathway_leaching', irrig)[, -1] #in kg N
gw_WB$n_load_kgN <- nload_df$n.load
gw_WB$Nc_mgNL <- gw_WB$n_load_kgN*1000*1000/gw_WB$wsurplus_L #in mg N/L
gw_WB$Nc_mgNO3L <- gw_WB$Nc_mgNL*50/11.3 #in mg NO3/L
if (write==TRUE){write_csv_modelling('Nc', subfolder_pattern = irrig, gw_WB, paste0('nc_df_gw', year_prefix(year)))}
return(gw_WB)
}
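# --- Illustrative usage sketch (not part of the original module) ---
# Assumes the Water Balance and Pathway_leaching outputs for the chosen year
# already exist on disk; column names follow df_compute_gw_Nc() above.
if(FALSE){ # not run
  nc99 <- df_compute_gw_Nc(year = 1999, write = FALSE, irrig_mode = 'Irrig')
  head(nc99[, c('n_load_kgN', 'Nc_mgNL', 'Nc_mgNO3L')])
}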
## DISREGARDED
df_compute_gw_nc_OLD <- function(year,write, irrig_mode) {
#compute Nc of each GW in a dataframe format
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
rech_df <- get_modellingDf_file(paste0('drainage_df_gw', year_prefix(year)), 'Drainage', irrig)[, -1]
nload_df <-get_modellingDf_file(paste0('nload_df_gw', year_prefix(year)), 'Pathway_leaching', 'Default')[, -1] #in kg N
Nc <- nload_df$n.load*1000*1000/rech_df$drainage #in mg N/L
df <- cbind(rech_df, nload_df[, 2], Nc)
df$NcN <- df$Nc*50/11.3
colnames(df)[3] <- 'N-loads[mg N]'
colnames(df)[4] <- 'Nc[mg N/L]'
colnames(df)[5] <- 'Nc[mg NO3/L]'
if (write==TRUE){write_csv_modelling('Nc', subfolder_pattern = irrig, df, paste0('nc_df_gw', year_prefix(year)))}
return(df)
}
get_df_Nc <- function(year, irrig_mode) {
ifelse(missing(irrig_mode)==TRUE, irrig <- 'Default', irrig <- 'Irrig')
nc_df <- get_modellingDf_file(paste0('nc_df_gw', year_prefix(year)), 'Nc', irrig)[, -1]
return(nc_df)
}
get_main_df_gw_dataset <- function(year, write) {
gw_df <- get_df_Nc(year, irrig_mode = 'Irrig')
rech <- get_gw_recharge_df()
colnames(rech)[1] <- 'GW_ID'
#get HUs
hydro <- select_maindata_pattern('Hydrogeological')
hydro_df <- read.csv(list.files(hydro, full.names = T))
gw_df <- merge(gw_df, c(hydro_df, rech), 'GW_ID', all.x=F)
if (write==TRUE){write_csv_modelling('Nc', 'Default', gw_df, paste0('dataset_gw_', year_prefix(year)))}
return(gw_df)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/handlers-general.R
\name{on_initialize}
\alias{on_initialize}
\title{initialize handler}
\usage{
on_initialize(self, id, params)
}
\arguments{
\item{self}{a \link{LanguageServer} object}
\item{id}{a numeric, the id of the process that started the server}
\item{params}{a named list, the \code{initialize} Request options}
}
\description{
Handler to the \href{https://microsoft.github.io/language-server-protocol/specification#initialize}{initialize} \link{Request}.
}
\keyword{internal}
|
/man/on_initialize.Rd
|
no_license
|
kongdd/languageserver
|
R
| false | true | 568 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/handlers-general.R
\name{on_initialize}
\alias{on_initialize}
\title{initialize handler}
\usage{
on_initialize(self, id, params)
}
\arguments{
\item{self}{a \link{LanguageServer} object}
\item{id}{a numeric, the id of the process that started the server}
\item{params}{a named list, the \code{initialize} Request options}
}
\description{
Handler to the \href{https://microsoft.github.io/language-server-protocol/specification#initialize}{initialize} \link{Request}.
}
\keyword{internal}
|
/MacOSX10.7.sdk/System/Library/Frameworks/ApplicationServices.framework/Versions/A/Frameworks/HIServices.framework/Versions/A/Headers/Processes.r
|
no_license
|
mandalin/DissertationCODE
|
R
| false | false | 4,619 |
r
| ||
ReadData <- read.csv(file.choose()) # missing.csv
colSums(is.na(ReadData))
nrow(ReadData)
df<-ReadData
# Remove outliers column by column: keep rows within Tukey's fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR)
z <- df[df$AGE > quantile(df$AGE, .25) - 1.5*IQR(df$AGE) &
df$AGE < quantile(df$AGE, .75) + 1.5*IQR(df$AGE), ] #rows
nrow(z)
z <- z[z$GENDER > quantile(z$GENDER, .25) - 1.5*IQR(z$GENDER) &
z$GENDER < quantile(z$GENDER, .75) + 1.5*IQR(z$GENDER), ] #rows
nrow(z)
z <- z[z$HEIGHT > quantile(z$HEIGHT, .25) - 1.5*IQR(z$HEIGHT) &
z$HEIGHT < quantile(z$HEIGHT, .75) + 1.5*IQR(z$HEIGHT), ] #rows
nrow(z)
z <- z[z$WEIGHT > quantile(z$WEIGHT, .25) - 1.5*IQR(z$WEIGHT) &
z$WEIGHT < quantile(z$WEIGHT, .75) + 1.5*IQR(z$WEIGHT), ] #rows
nrow(z)
z <- z[z$AP_HIGH > quantile(z$AP_HIGH, .25) - 1.5*IQR(z$AP_HIGH) &
z$AP_HIGH < quantile(z$AP_HIGH, .75) + 1.5*IQR(z$AP_HIGH), ] #rows
nrow(z)
z <- z[z$AP_LOW > quantile(z$AP_LOW, .25) - 1.5*IQR(z$AP_LOW) &
z$AP_LOW < quantile(z$AP_LOW, .75) + 1.5*IQR(z$AP_LOW), ] #rows
nrow(z)
#z <- z[z$CHOLESTEROL > quantile(z$CHOLESTEROL, .25) - 1.5*IQR(z$CHOLESTEROL) &
# z$CHOLESTEROL < quantile(z$CHOLESTEROL, .75) + 1.5*IQR(z$CHOLESTEROL), ] #rows
#nrow(z)
#z <- z[z$GLUCOSE > quantile(z$GLUCOSE, .25) - 1.5*IQR(z$GLUCOSE) &
# z$GLUCOSE < quantile(z$GLUCOSE, .75) + 1.5*IQR(z$GLUCOSE), ] #rows
#nrow(z)
#z <- z[z$SMOKE > quantile(z$SMOKE, .25) - 1.5*IQR(z$SMOKE) &
# z$SMOKE < quantile(z$SMOKE, .75) + 1.5*IQR(z$SMOKE), ] #rows
#nrow(z)
#z <- z[z$ALCOHOL > quantile(z$ALCOHOL, .25) - 1.5*IQR(z$ALCOHOL) &
# z$ALCOHOL < quantile(z$ALCOHOL, .75) + 1.5*IQR(z$ALCOHOL), ] #rows
#nrow(z)
#z <- z[z$PHYSICAL_ACTIVITY > quantile(z$PHYSICAL_ACTIVITY, .25) - 1.5*IQR(z$PHYSICAL_ACTIVITY) &
# z$PHYSICAL_ACTIVITY < quantile(z$PHYSICAL_ACTIVITY, .75) + 1.5*IQR(z$PHYSICAL_ACTIVITY), ] #rows
#nrow(z)
#z <- z[z$CARDIO_DISEASE > quantile(z$CARDIO_DISEASE, .25) - 1.5*IQR(z$CARDIO_DISEASE) &
# z$CARDIO_DISEASE < quantile(z$CARDIO_DISEASE, .75) + 1.5*IQR(z$CARDIO_DISEASE), ] #rows
#nrow(z)
write.csv(z,"C:\\Users\\Himanshu Patel\\Downloads\\cardiovascular-disease-dataset\\Cleansed_MyData.csv", row.names = TRUE)
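# --- Illustrative alternative (sketch, not part of the original script) ---
# The filters above repeat the same Tukey 1.5*IQR rule for each column; a loop
# over the filtered columns gives the same result more compactly:
if(FALSE){ # not run
  cols <- c("AGE", "GENDER", "HEIGHT", "WEIGHT", "AP_HIGH", "AP_LOW")
  z2 <- ReadData
  for (col in cols) {
    q <- quantile(z2[[col]], c(.25, .75))
    iqr <- IQR(z2[[col]])
    z2 <- z2[z2[[col]] > q[1] - 1.5*iqr & z2[[col]] < q[2] + 1.5*iqr, ]
  }
  nrow(z2)
}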
|
/Remove-Outliers.R
|
no_license
|
himanpatel/Heart-Disease-Prediction
|
R
| false | false | 2,176 |
r
|
ReadData <- read.csv(file.choose()) # missing.csv
colSums(is.na(ReadData))
nrow(ReadData)
df<-ReadData
# Remove outliers column by column: keep rows within Tukey's fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR)
z <- df[df$AGE > quantile(df$AGE, .25) - 1.5*IQR(df$AGE) &
df$AGE < quantile(df$AGE, .75) + 1.5*IQR(df$AGE), ] #rows
nrow(z)
z <- z[z$GENDER > quantile(z$GENDER, .25) - 1.5*IQR(z$GENDER) &
z$GENDER < quantile(z$GENDER, .75) + 1.5*IQR(z$GENDER), ] #rows
nrow(z)
z <- z[z$HEIGHT > quantile(z$HEIGHT, .25) - 1.5*IQR(z$HEIGHT) &
z$HEIGHT < quantile(z$HEIGHT, .75) + 1.5*IQR(z$HEIGHT), ] #rows
nrow(z)
z <- z[z$WEIGHT > quantile(z$WEIGHT, .25) - 1.5*IQR(z$WEIGHT) &
z$WEIGHT < quantile(z$WEIGHT, .75) + 1.5*IQR(z$WEIGHT), ] #rows
nrow(z)
z <- z[z$AP_HIGH > quantile(z$AP_HIGH, .25) - 1.5*IQR(z$AP_HIGH) &
z$AP_HIGH < quantile(z$AP_HIGH, .75) + 1.5*IQR(z$AP_HIGH), ] #rows
nrow(z)
z <- z[z$AP_LOW > quantile(z$AP_LOW, .25) - 1.5*IQR(z$AP_LOW) &
z$AP_LOW < quantile(z$AP_LOW, .75) + 1.5*IQR(z$AP_LOW), ] #rows
nrow(z)
#z <- z[z$CHOLESTEROL > quantile(z$CHOLESTEROL, .25) - 1.5*IQR(z$CHOLESTEROL) &
# z$CHOLESTEROL < quantile(z$CHOLESTEROL, .75) + 1.5*IQR(z$CHOLESTEROL), ] #rows
#nrow(z)
#z <- z[z$GLUCOSE > quantile(z$GLUCOSE, .25) - 1.5*IQR(z$GLUCOSE) &
# z$GLUCOSE < quantile(z$GLUCOSE, .75) + 1.5*IQR(z$GLUCOSE), ] #rows
#nrow(z)
#z <- z[z$SMOKE > quantile(z$SMOKE, .25) - 1.5*IQR(z$SMOKE) &
# z$SMOKE < quantile(z$SMOKE, .75) + 1.5*IQR(z$SMOKE), ] #rows
#nrow(z)
#z <- z[z$ALCOHOL > quantile(z$ALCOHOL, .25) - 1.5*IQR(z$ALCOHOL) &
# z$ALCOHOL < quantile(z$ALCOHOL, .75) + 1.5*IQR(z$ALCOHOL), ] #rows
#nrow(z)
#z <- z[z$PHYSICAL_ACTIVITY > quantile(z$PHYSICAL_ACTIVITY, .25) - 1.5*IQR(z$PHYSICAL_ACTIVITY) &
# z$PHYSICAL_ACTIVITY < quantile(z$PHYSICAL_ACTIVITY, .75) + 1.5*IQR(z$PHYSICAL_ACTIVITY), ] #rows
#nrow(z)
#z <- z[z$CARDIO_DISEASE > quantile(z$CARDIO_DISEASE, .25) - 1.5*IQR(z$CARDIO_DISEASE) &
# z$CARDIO_DISEASE < quantile(z$CARDIO_DISEASE, .75) + 1.5*IQR(z$CARDIO_DISEASE), ] #rows
#nrow(z)
write.csv(z,"C:\\Users\\Himanshu Patel\\Downloads\\cardiovascular-disease-dataset\\Cleansed_MyData.csv", row.names = TRUE)
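# --- Illustrative alternative (sketch, not part of the original script) ---
# The filters above repeat the same Tukey 1.5*IQR rule for each column; a loop
# over the filtered columns gives the same result more compactly:
if(FALSE){ # not run
  cols <- c("AGE", "GENDER", "HEIGHT", "WEIGHT", "AP_HIGH", "AP_LOW")
  z2 <- ReadData
  for (col in cols) {
    q <- quantile(z2[[col]], c(.25, .75))
    iqr <- IQR(z2[[col]])
    z2 <- z2[z2[[col]] > q[1] - 1.5*iqr & z2[[col]] < q[2] + 1.5*iqr, ]
  }
  nrow(z2)
}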
|
#' Set options/hooks for RefManageR
#'
#' This function is used to access and set package options for RefManageR, similar to \code{\link{options}}.
#' The options are listed in the details
#' @param ... a character vector or strings specifying option names to access; or to set options values,
#' a named list or vector of option values or options specified in name=value pairs.
#' @param restore.defaults logical; if TRUE, \code{...}'s are ignored and all package options are restored to their
#' defaults.
#' @export
#' @details The following are valid package options.
#'
#' \bold{Options for searching/indexing a BibEntry object. See \code{\link{[.BibEntry}} and
#' \code{\link{[<-.BibEntry}}}
#' \enumerate{
#' \item \code{match.author} - string; controls how name list fields (author, editor, translator, etc.) are matched
#' when searching for names.
#' \dQuote{family.with.initials} requires family names and given name initials to match, \dQuote{exact} requires names to match
#' exactly, and any other value results in only family names being compared (the default).
#' \item \code{match.date} - string; controls how date fields are matched when searching. If \dQuote{year.only} (the default),
#' only years are checked for equality when comparing dates, otherwise months and days will also be compared,
#' if they are available.
#' \item \code{use.regex} - logical; if \code{TRUE}, regular expressions are used when searching non-date fields; otherwise, exact
#' matching is used.
#' \item \code{ignore.case} - logical; if \code{TRUE}, case is ignored when searching.
#' \item \code{return.ind} - logical; if \code{TRUE} the return value of \code{\link{SearchBib}} and the operators
#'  \code{\link{[.BibEntry}} will be the indices of any matches; otherwise, a \code{BibEntry}
#' object is returned.
#' }
#'
#' \bold{Options for Printing with \code{\link{print.BibEntry}} and \code{\link{PrintBibliography}}}
#' \enumerate{
#' \item \code{bib.style} - string; Biblatex bibliography style to use when printing and formatting a BibEntry object. Possible
#' values are \dQuote{numeric} (default), \dQuote{authoryear}, \dQuote{authortitle}, \dQuote{alphabetic}, \dQuote{draft}.
#' \item \code{first.inits} - logical; if \code{TRUE}, only given name initials are displayed when printing; otherwise, full names
#' are used.
#' \item \code{dashed} - logical; if \code{TRUE} and \code{bib.style = "authoryear"} or \code{bib.style = "authortitle"},
#' recurring author and editor names are replaced with \dQuote{---} when printing.
#' \item \code{sorting} - string; controls how BibEntry objects are sorted. Possible values are \dQuote{nty}, \dQuote{nyt},
#' \dQuote{nyvt}, \dQuote{anyt}, \dQuote{anyvt}, \dQuote{ynt}, \dQuote{ydnt}, \dQuote{none}, \dQuote{debug}; see
#' \code{\link{sort.BibEntry}}
#' \item \code{max.names} - numeric; maximum number of names to display before using \dQuote{et al.} when formatting and printing name
#' list fields. This is also the minimum number of names that will be displayed if \dQuote{et al.} is used
#' (minnames package option in Biblatex)
#' \item \code{no.print.fields} character vector; fields that should not be printed,
#' e.g., doi, url, isbn, etc.
#' \item \code{style} - character string naming the printing style. Possible values are
#' plain text (style \dQuote{text}), BibTeX (\dQuote{Bibtex}), BibLaTeX (\dQuote{Biblatex}),
#' a mixture of plain text and BibTeX as
#' traditionally used for citations (\dQuote{citation}), HTML (\dQuote{html}),
#' LaTeX (\dQuote{latex}), \dQuote{markdown},
#' R code (\dQuote{R}), and a simple copy of the textVersion elements
#' (style \dQuote{textVersion}, see \code{\link{BibEntry}})
#' }
#'
#' \bold{Options for the \code{\link{Cite}} functions}
#' \enumerate{
#' \item \code{cite.style} - character string; bibliography style to use to generate citations.
#' \item \code{style} - as above, but used to format the citations.
#' \item \code{hyperlink} - character string or logical; for use with \code{style = "markdown"}
#' and \code{style = "html"} (ignored otherwise). If \code{FALSE}, no hyperlink
#' will be generated for the citation or in the bibliography when printing. If set equal to \code{"to.bib"}, then hyperlinks will be
#'  generated connecting the citation and the bibliography.  The default value, \code{"to.doc"},
#'  will try to create the hyperlink using the \code{url}, \code{doi}, or \code{eprint} fields of
#'  the entry.  If these fields are not available, the hyperlink will point to the bibliography.  See
#' also \code{\link{open.BibEntry}}.
#' \item \code{super} - logical; should superscripts be used for numeric citations? Ignored if
#' \code{cite.style != "numeric"}.
#' \item \code{max.names} - numeric; same as above, except for citations.
#' \item \code{longnamesfirst} logical; should the name list not be truncated at
#' \code{max.names} the first time a citation appears in the text?
#' \item \code{bibpunct} - character vector; punctuation to use in a citation. The entries in \code{bibpunct} are as follows
#' \enumerate{
#' \item The left delimiter for non-alphabetic and non-numeric citation styles
#' \item The right delimiter for non-alphabetic and non-numeric citation styles
#' \item The left delimiter for alphabetic and numeric citation styles
#' \item The right delimiter for alphabetic and numeric citation styles
#' \item The separator between references in a citation.
#' \item Punctuation to go between the author and year.
#' }
#' }
#'
#' \bold{Other}
#' \enumerate{
#' \item \code{check.entries} - string or \code{FALSE}; if \code{FALSE} entries are not checked to ensure that they have all the
#' required fields for the type of entry; if \dQuote{warn} then entries are checked, but only a warning is issued and the
#' entry is processed anyway; otherwise an error is produced if an entry does not have the required fields (default). Note that
#' the majority of fields listed as required for a particular entry type in the Biblatex manual are not actually required for
#' Biblatex to produce an entry.
#' \item \code{merge.fields.to.check} - character vector; for \code{\link{merge.BibEntry}} and the operator \code{\link{+.BibEntry}},
#' the fields that should be checked when comparing entries for equality when merging BibEntry objects. Specifying
#' \dQuote{all} results in all fields being checked with \code{\link{duplicated}}. The default is \dQuote{key} to only check for
#' duplicated keys.
#' }
#' @note If \code{...} is missing and \code{restore.defaults = FALSE}, all options and their current values will be returned
#' as a list.
#' @return if a vector of option names is supplied, the current value of the requested options, or if \code{...} is missing,
#' all current option values; otherwise, when setting options the old values of the changed options are (invisibly)
#' returned as a list.
#' @seealso \code{\link{print.BibEntry}}, \code{\link{BibEntry}}, \code{\link{options}}
#' @examples
#' BibOptions()
#' BibOptions("first.inits", "bib.style")
#'
#' oldopts <- BibOptions(first.inits = FALSE, bib.style = "authoryear")
#' oldopts
#' BibOptions(oldopts)
#'
#' BibOptions(restore.defaults = TRUE)
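#'
#' ## A further, hypothetical illustration (values chosen purely for demonstration):
#' ## change the citation punctuation and suppress the url field when printing
#' oldopts <- BibOptions(bibpunct = c("(", ")", "[", "]", ",", ","),
#'                       no.print.fields = "url")
#' BibOptions(oldopts)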
BibOptions <- function(..., restore.defaults = FALSE){
if (restore.defaults)
return(invisible(mapply(assign, .BibOptNames, .Defaults, MoreArgs = list(envir=.BibOptions))))
if (missing(...))
return(mget(.BibOptNames, envir = .BibOptions))
opts <- list(...)
nom <- names(opts)
if (is.null(nom) && !is.list(opts[[1L]])){
opts <- unlist(opts)
return(mget(opts[opts %in% .BibOptNames], envir = .BibOptions))
}else{
if (is.list(opts[[1L]])){
opts <- opts[[1L]]
nom <- names(opts)
}
if (any(!nom %in% .BibOptNames))
stop('Invalid name specified, see ?BibOptions')
ind <- nom %in% .LogicalBibOptNames
if (any(ind)){
opts[ind] <- as.logical(opts[ind])
if (any(is.na(opts[ind])))
stop("One of the specified option values should be logical and is not, see ?BibOptions")
names(opts[ind]) <- nom[ind]
}
oldopts <- mget(nom, envir=.BibOptions)
mapply(assign, nom, opts, MoreArgs = list(envir=.BibOptions))
invisible(oldopts)
}
}
.Defaults <- list(match.author='family.name', match.date='year.only', return.ind=FALSE,
merge.fields.to.check = 'key', bib.style = 'numeric', first.inits = TRUE,
dashed = TRUE, sorting = NULL, check.entries = 'error', use.regex = TRUE,
ignore.case = TRUE, max.names = 3, cite.style = "authoryear",
longnamesfirst = TRUE, hyperlink = "to.doc", style = "text",
super = FALSE, bibpunct = c("(", ")", "[", "]", ";", ","),
no.print.fields = character(0))
.BibOptions <- list2env(.Defaults)
.BibOptNames <- names(.Defaults)
.LogicalBibOptNames <- c("return.ind", "first.inits", "dashed", "use.regex", "ignore.case",
"longnamesfirst", "super")
.cites <- new.env()
assign("indices", logical(0), .cites)
assign("labs", character(0), .cites)
assign("sty", "authoryear", .cites)
globalVariables("return.labs")
|
/R/02BibOptions.R
|
no_license
|
huangrh/RefManageR
|
R
| false | false | 9,160 |
r
|
#' Set options/hooks for RefManageR
#'
#' This function is used to access and set package options for RefManageR, similar to \code{\link{options}}.
#' The options are listed in the details
#' @param ... a character vector or strings specifying option names to access; or to set options values,
#' a named list or vector of option values or options specified in name=value pairs.
#' @param restore.defaults logical; if TRUE, \code{...}'s are ignored and all package options are restored to their
#' defaults.
#' @export
#' @details The following are valid package options.
#'
#' \bold{Options for searching/indexing a BibEntry object. See \code{\link{[.BibEntry}} and
#' \code{\link{[<-.BibEntry}}}
#' \enumerate{
#' \item \code{match.author} - string; controls how name list fields (author, editor, translator, etc.) are matched
#' when searching for names.
#' \dQuote{family.with.initials} requires family names and given name initials to match, \dQuote{exact} requires names to match
#' exactly, and any other value results in only family names being compared (the default).
#' \item \code{match.date} - string; controls how date fields are matched when searching. If \dQuote{year.only} (the default),
#' only years are checked for equality when comparing dates, otherwise months and days will also be compared,
#' if they are available.
#' \item \code{use.regex} - logical; if \code{TRUE}, regular expressions are used when searching non-date fields; otherwise, exact
#' matching is used.
#' \item \code{ignore.case} - logical; if \code{TRUE}, case is ignored when searching.
#' \item \code{return.ind} - logical; if \code{TRUE} the return value of \code{\link{SearchBib}} and the operators
#'  \code{\link{[.BibEntry}} will be the indices of any matches; otherwise, a \code{BibEntry}
#' object is returned.
#' }
#'
#' \bold{Options for Printing with \code{\link{print.BibEntry}} and \code{\link{PrintBibliography}}}
#' \enumerate{
#' \item \code{bib.style} - string; Biblatex bibliography style to use when printing and formatting a BibEntry object. Possible
#' values are \dQuote{numeric} (default), \dQuote{authoryear}, \dQuote{authortitle}, \dQuote{alphabetic}, \dQuote{draft}.
#' \item \code{first.inits} - logical; if \code{TRUE}, only given name initials are displayed when printing; otherwise, full names
#' are used.
#' \item \code{dashed} - logical; if \code{TRUE} and \code{bib.style = "authoryear"} or \code{bib.style = "authortitle"},
#' recurring author and editor names are replaced with \dQuote{---} when printing.
#' \item \code{sorting} - string; controls how BibEntry objects are sorted. Possible values are \dQuote{nty}, \dQuote{nyt},
#' \dQuote{nyvt}, \dQuote{anyt}, \dQuote{anyvt}, \dQuote{ynt}, \dQuote{ydnt}, \dQuote{none}, \dQuote{debug}; see
#' \code{\link{sort.BibEntry}}
#' \item \code{max.names} - numeric; maximum number of names to display before using \dQuote{et al.} when formatting and printing name
#' list fields. This is also the minimum number of names that will be displayed if \dQuote{et al.} is used
#' (minnames package option in Biblatex)
#' \item \code{no.print.fields} character vector; fields that should not be printed,
#' e.g., doi, url, isbn, etc.
#' \item \code{style} - character string naming the printing style. Possible values are
#' plain text (style \dQuote{text}), BibTeX (\dQuote{Bibtex}), BibLaTeX (\dQuote{Biblatex}),
#' a mixture of plain text and BibTeX as
#' traditionally used for citations (\dQuote{citation}), HTML (\dQuote{html}),
#' LaTeX (\dQuote{latex}), \dQuote{markdown},
#' R code (\dQuote{R}), and a simple copy of the textVersion elements
#' (style \dQuote{textVersion}, see \code{\link{BibEntry}})
#' }
#'
#' \bold{Options for the \code{\link{Cite}} functions}
#' \enumerate{
#' \item \code{cite.style} - character string; bibliography style to use to generate citations.
#' \item \code{style} - as above, but used to format the citations.
#' \item \code{hyperlink} - character string or logical; for use with \code{style = "markdown"}
#' and \code{style = "html"} (ignored otherwise). If \code{FALSE}, no hyperlink
#' will be generated for the citation or in the bibliography when printing. If set equal to \code{"to.bib"}, then hyperlinks will be
#'  generated connecting the citation and the bibliography.  The default value, \code{"to.doc"},
#'  will try to create the hyperlink using the \code{url}, \code{doi}, or \code{eprint} fields of
#'  the entry.  If these fields are not available, the hyperlink will point to the bibliography.  See
#' also \code{\link{open.BibEntry}}.
#' \item \code{super} - logical; should superscripts be used for numeric citations? Ignored if
#' \code{cite.style != "numeric"}.
#' \item \code{max.names} - numeric; same as above, except for citations.
#' \item \code{longnamesfirst} logical; should the name list not be truncated at
#' \code{max.names} the first time a citation appears in the text?
#' \item \code{bibpunct} - character vector; punctuation to use in a citation. The entries in \code{bibpunct} are as follows
#' \enumerate{
#' \item The left delimiter for non-alphabetic and non-numeric citation styles
#' \item The right delimiter for non-alphabetic and non-numeric citation styles
#' \item The left delimiter for alphabetic and numeric citation styles
#' \item The right delimiter for alphabetic and numeric citation styles
#' \item The separator between references in a citation.
#' \item Punctuation to go between the author and year.
#' }
#' }
#'
#' \bold{Other}
#' \enumerate{
#' \item \code{check.entries} - string or \code{FALSE}; if \code{FALSE} entries are not checked to ensure that they have all the
#' required fields for the type of entry; if \dQuote{warn} then entries are checked, but only a warning is issued and the
#' entry is processed anyway; otherwise an error is produced if an entry does not have the required fields (default). Note that
#' the majority of fields listed as required for a particular entry type in the Biblatex manual are not actually required for
#' Biblatex to produce an entry.
#' \item \code{merge.fields.to.check} - character vector; for \code{\link{merge.BibEntry}} and the operator \code{\link{+.BibEntry}},
#' the fields that should be checked when comparing entries for equality when merging BibEntry objects. Specifying
#' \dQuote{all} results in all fields be checked with \code{\link{duplicated}}. The default is \dQuote{key} to only check for
#' duplicated keys.
#' }
#' @note If \code{...} is missing and \code{restore.defaults = FALSE}, all options and their current values will be returned
#' as a list.
#' @return if a vector of option names is supplied, the current value of the requested options, or if \code{...} is missing,
#' all current option values; otherwise, when setting options the old values of the changed options are (invisibly)
#' returned as a list.
#' @seealso \code{\link{print.BibEntry}}, \code{\link{BibEntry}}, \code{\link{options}}
#' @examples
#' BibOptions()
#' BibOptions("first.inits", "bib.style")
#'
#' oldopts <- BibOptions(first.inits = FALSE, bib.style = "authoryear")
#' oldopts
#' BibOptions(oldopts)
#'
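#' ## a further illustration of the no.print.fields option described above:
#' ## suppress DOIs and URLs when printing
#' BibOptions(no.print.fields = c("doi", "url"))
#'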
#' BibOptions(restore.defaults = TRUE)
BibOptions <- function(..., restore.defaults = FALSE){
if (restore.defaults)
return(invisible(mapply(assign, .BibOptNames, .Defaults, MoreArgs = list(envir=.BibOptions))))
if (missing(...))
return(mget(.BibOptNames, envir = .BibOptions))
opts <- list(...)
nom <- names(opts)
if (is.null(nom) && !is.list(opts[[1L]])){
opts <- unlist(opts)
return(mget(opts[opts %in% .BibOptNames], envir = .BibOptions))
}else{
if (is.list(opts[[1L]])){
opts <- opts[[1L]]
nom <- names(opts)
}
if (any(!nom %in% .BibOptNames))
stop('Invalid name specified, see ?BibOptions')
ind <- nom %in% .LogicalBibOptNames
if (any(ind)){
opts[ind] <- as.logical(opts[ind])
if (any(is.na(opts[ind])))
stop("One of the specified option values should be logical and is not, see ?BibOptions")
names(opts[ind]) <- nom[ind]
}
oldopts <- mget(nom, envir=.BibOptions)
mapply(assign, nom, opts, MoreArgs = list(envir=.BibOptions))
invisible(oldopts)
}
}
.Defaults <- list(match.author='family.name', match.date='year.only', return.ind=FALSE,
merge.fields.to.check = 'key', bib.style = 'numeric', first.inits = TRUE,
dashed = TRUE, sorting = NULL, check.entries = 'error', use.regex = TRUE,
ignore.case = TRUE, max.names = 3, cite.style = "authoryear",
longnamesfirst = TRUE, hyperlink = "to.doc", style = "text",
super = FALSE, bibpunct = c("(", ")", "[", "]", ";", ","),
no.print.fields = character(0))
.BibOptions <- list2env(.Defaults)
.BibOptNames <- names(.Defaults)
.LogicalBibOptNames <- c("return.ind", "first.inits", "dashed", "use.regex", "ignore.case",
"longnamesfirst", "super")
.cites <- new.env()
assign("indices", logical(0), .cites)
assign("labs", character(0), .cites)
assign("sty", "authoryear", .cites)
globalVariables("return.labs")
|
#source("c:/Work07/StatDesign07/Programs/R/Examples/Diet.R",print.eval=TRUE)#
#Does the anova for the diet / blood-pressure data
data<-read.table("c:/WorkHome07/StatDesign/DataSets/Diet.txt",sep = "",header=T)
Diet<-as.character(data[,1])
Subject<-as.character(data[,2])
Time<-as.character(data[,3])
BP<-data[,4]
aovdata <- data.frame(Diet,Subject,Time,BP)
#----------This gives the SplitPlot anova table with no tests----------------
summary(aov(BP~Diet*Subject*Time,data=aovdata))
#----------This gives the SplitPlot anova table with incorrect tests----------------
summary(aov(BP~Diet*Subject+Time*Diet,data=aovdata))
#--------------Oneway on averages-------------------------------------------
Diet<-as.character(c(1,1,1,2,2,2,3,3,3,4,4,4))
BPavg<-2*c(129,128,125.5,128,130.5,132,118.5,120.5,123.5,145,144,146)
aovdata <- data.frame(Diet,BPavg)
summary(aov(BPavg~Diet,data=aovdata))
#--------------Below the Line----------------------------------------------
Diet<-as.character(c(1,1,1,2,2,2,3,3,3,4,4,4))
BPdiff<-c(-12,-16,-7,-22,-11,-20,-9,-23,-17,-10,-6,-16)/2
aovdata <- data.frame(Diet,BPdiff,Subject)
summary(aov(BPdiff~Diet,data=aovdata))
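#--------------Hedged sketch: split-plot ANOVA with explicit error strata-------------------
# A minimal sketch, not part of the original script: rebuild the full data frame (aovdata was
# overwritten above) and, assuming each Subject label identifies a distinct subject, let aov()
# test Diet against the between-subject stratum and Time and Diet:Time within subjects.
# If subject labels are reused across diets, use Error(Diet:Subject) instead.
aovdata.full <- data.frame(Diet=as.character(data[,1]), Subject=as.character(data[,2]),
                           Time=as.character(data[,3]), BP=data[,4])
summary(aov(BP~Diet*Time+Error(Subject), data=aovdata.full))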
|
/Diet.R
|
no_license
|
raikon123/Experimental-Design-Power-Points
|
R
| false | false | 1,154 |
r
|
#source("c:/Work07/StatDesign07/Programs/R/Examples/Diet.R",print.eval=TRUE)#
#Does anova for Sulphur data
data<-read.table("c:/WorkHome07/StatDesign/DataSets/Diet.txt",sep = "",header=T)
Diet<-as.character(data[,1])
Subject<-as.character(data[,2])
Time<-as.character(data[,3])
BP<-data[,4]
aovdata <- data.frame(Diet,Subject,Time,BP)
#----------This gives the SplitPlot anova table with no tests----------------
summary(aov(BP~Diet*Subject*Time,data=aovdata))
#----------This gives the SplitPlot anova table with incorrect tests----------------
summary(aov(BP~Diet*Subject+Time*Diet,data=aovdata))
#--------------Oneway on averages-------------------------------------------
Diet<-as.character(c(1,1,1,2,2,2,3,3,3,4,4,4))
BPavg<-2*c(129,128,125.5,128,130.5,132,118.5,120.5,123.5,145,144,146)
aovdata <- data.frame(Diet,BPavg)
summary(aov(BPavg~Diet,data=aovdata))
#--------------Below the Line----------------------------------------------
Diet<-as.character(c(1,1,1,2,2,2,3,3,3,4,4,4))
BPdiff<-c(-12,-16,-7,-22,-11,-20,-9,-23,-17,-10,-6,-16)/2
aovdata <- data.frame(Diet,BPdiff,Subject)
summary(aov(BPdiff~Diet,data=aovdata))
|
#***
#PATH
#biblioteca/funcionais/aplicacao/filtro_mes_servico.R
library(lubridate)
#- This function takes one to three tables, each with: 1 to 3 names of columns holding
#the measurements, 1 to 3 names of date columns, and 1 to 3 categorical columns.
#It also takes valoresPadrao (default values), casos (cases), meses (months) and anos (years).
#- For the given years and months it filters the measurement columns by the categories of
#the categorical columns and applies an arbitrary function (the indicador) to the filtered data.
#(the call must be filled in correctly, according to the function that will be applied (indicador))
#- It returns a table with the results of the applied function for each chosen category and date.
filtro_mes_variavel <- function(tabelaA, coluna_quantidade_somatorioA, tabelaB = NULL, coluna_quantidade_somatorioB = NULL,
tabelaC = NULL, coluna_quantidade_somatorioC = NULL,
colunafiltroA, colunafiltroB, colunafiltroC,
colunaDataA, colunaDataB,colunaDataC = NULL, meses, anos, indicador, valores_padrao_aplicados = NULL,
casos = NULL, valoresPadrao = NULL
                                ) { # table rows = n, months = k, years = t, indicador = "complexidade" (the indicator's own complexity)
aux <- data.frame(servicio = NA, data = NA, indicador = NA) #complexidade 1
retorno <- NULL #complexidade 1
print("1")
tabelaA[colunaDataA] <- as.Date(tabelaA[[colunaDataA]]) #complexidade n
print("2")
if(!is.null(tabelaB)) {
tabelaB[colunaDataB] <- as.Date(tabelaB[[colunaDataB]]) #complexidade n
}
if(!is.null(colunaDataC)) {
tabelaC[colunaDataC] <- as.Date(tabelaC[[colunaDataC]]) #complexidade n
}
print("2")
valores <- as.vector(levels(as.factor(tabelaA[[colunafiltroA]]))) #complexidade n, gera-se valores = m
print(valores)
for(i in 1:length(valores)) { #m vezes
print("3")
tabelaFiltradaA <- subset(tabelaA, tabelaA[[colunafiltroA]] %in% valores[i]) #complexidade n
if(!is.null(tabelaB)) {
tabelaFiltradaB <- subset(tabelaB, tabelaB[[colunafiltroB]] %in% valores[i])
}
if(!is.null(tabelaC)) {
tabelaFiltradaC <- subset(tabelaC, tabelaC[[colunafiltroC]] %in% valores[i])
}
for(ano in anos) { #k vezes
for(mes in meses) { #t vezes
tabela_filtradaA <- subset(tabelaFiltradaA, (month(as.POSIXlt(tabelaFiltradaA[[colunaDataA]]))== mes) &
year(as.POSIXlt(tabelaFiltradaA[[colunaDataA]]))== ano) #complexidade n
if(!is.null(tabelaB)) {
tabela_filtradaB <- subset(tabelaFiltradaB, (month(as.POSIXlt(tabelaFiltradaB[[colunaDataB]]))== mes) &
year(as.POSIXlt(tabelaFiltradaB[[colunaDataB]]))== ano)
}
if(!is.null(tabelaC)) {
tabela_filtradaC <- subset(tabelaFiltradaC, (month(as.POSIXlt(tabelaFiltradaC[[colunaDataC]]))== mes) &
year(as.POSIXlt(tabelaFiltradaC[[colunaDataC]]))== ano)
}
if(!is.null(coluna_quantidade_somatorioB) & is.null(coluna_quantidade_somatorioC)) {
if(is.null(casos) & is.null(valoresPadrao)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]])
} else if(!is.null(casos) & !is.null(valoresPadrao)){
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]], casos = casos, valoresPadrao = valoresPadrao)
} else if(!is.null(casos)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]], casos = casos)
} else if(!is.null(valoresPadrao)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]], valoresPadrao = valoresPadrao)
}
} else if(!is.null(coluna_quantidade_somatorioC)){
if(is.null(casos) & is.null(valoresPadrao)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]],
                                   tabela_filtradaC[[coluna_quantidade_somatorioC]])
} else if(!is.null(casos) & !is.null(valoresPadrao)){
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]],
tabela_filtradaC[[coluna_quantidade_somatorioC]], casos = casos, valoresPadrao = valoresPadrao)
} else if(!is.null(casos)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]],
tabela_filtradaC[[coluna_quantidade_somatorioC]], casos = casos)
} else if(!is.null(valoresPadrao)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]],
tabela_filtradaB[[coluna_quantidade_somatorioB]],
tabela_filtradaC[[coluna_quantidade_somatorioC]], valoresPadrao = valoresPadrao)
}
} else {
if(is.null(casos) & is.null(valoresPadrao)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]])
} else if(!is.null(casos) & !is.null(valoresPadrao)){
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]], casos = casos, valoresPadrao = valoresPadrao)
} else if(!is.null(casos)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]], casos = casos)
} else if(!is.null(valoresPadrao)) {
resultado <- indicador(tabela_filtradaA[[coluna_quantidade_somatorioA]], valoresPadrao = valoresPadrao)
}
}
aux[1,1] <- valores[i]
if(mes < 10) {
aux[1,2] <- paste(ano,"/", "0" ,mes, "/","01", sep ="")
} else {
aux[1,2] <- paste(ano,"/",mes, "/","01", sep ="")
}
aux[1,3] <- resultado
retorno <- rbind(retorno, aux)
}
}
}
return(retorno)
} # complexity: m*k*t x max(n, indicator complexity), with k < 12 (can be quadratic in some cases, but in most cases it is approximately quadratic)
#tabela <- filtro_mes_variavel(tabelaA = material,
# coluna_quantidade_somatorioA = "KA_CANTID",
# tabelaB = hospitalizacion,
# coluna_quantidade_somatorioB = "HO_DIASH",
# colunafiltroB = "HO_CSERV",
# colunafiltroA= "KA_CSER",
# colunaDataA = "KA_FECDES",
# colunaDataB = "HO_FHOSP",
# meses = c(1:6),
# anos = c(2018),
# indicador = indicadorConsumo)
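# Hedged single-table sketch (column names reused from the example above; the indicator here is
# a plain sum rather than one of the project's indicador functions):
#tabela_simples <- filtro_mes_variavel(tabelaA = material,
#                                      coluna_quantidade_somatorioA = "KA_CANTID",
#                                      colunafiltroA = "KA_CSER",
#                                      colunaDataA = "KA_FECDES",
#                                      meses = c(1:6),
#                                      anos = c(2018),
#                                      indicador = function(x) sum(x, na.rm = TRUE))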
|
/biblioteca/funcionais/aplicacao/filtro_mes_servico.R
|
no_license
|
acgabriel3/bi_arbo
|
R
| false | false | 7,773 |
r
|
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Modeling Pb in soil surrounding Notre-Dame areas
# copyright Yuling Yao and Lex van Geen
# May 2020
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
setwd("./ND") # set your dir and put the stan file that dir
soil.data=read.csv("soil2.csv")
x1=soil.data$Longitude
x2=soil.data$Latitude
y=soil.data$Soil.Pb..mg.kg.
plume=as.integer( soil.data$Relative.to.plume)==2
center=c(2.3496642, 48.8530245)# (Longitude , Latitude) of ND
type=as.integer( soil.data$Type)-1
type_word=levels(soil.data$Type)[-1]
library(RColorBrewer)
library(grDevices)
library(rstan)
options(mc.cores=8)
map2color<-function(x,pal,limits=NULL, breaks=NULL ){
if(is.null(limits)) limits=range(x)
if(is.null(breaks )) breaks=seq(limits[1],limits[2],length.out=length(pal)+1)
pal[findInterval(x, breaks , all.inside=TRUE)]
}
my.palette <- colorRampPalette(brewer.pal(n = 9, name = "YlOrRd"))(20) # generate color shades
col_brew=map2color( log10(y),my.palette, breaks = seq(1,4,length.out = 20))
library("geosphere")
n=length(y)
d=a=a_radian2=c() ## distance and angle of all collected samples
for( i in 1:n){
d[i]= distGeo(c(center[1], center[2]), c(x1[i], x2[i]))
#a[i]=angleFromCoordinate(center[2], center[1], x2[i], x1[i] )
a[i]= (bearing(c(center[1], center[2]), c(x1[i], x2[i]))+ 360) %%360
}
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Fit a Gaussian process %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
d_unit=d/1000 ## convert m to KM
y_log=log10(y) ## use log (base 10) in the actual model
a_radian=a/180*pi ## convert degree from (0,360) to (0, 2pi)
circle= (as.integer( soil.data$Original.circles)==3)
n_0= 29 ## number of grids in each dimension.
grid_d=seq(0.1, 1.5,length.out = n_0)
grid_theta=seq(0, 2*pi,length.out = n_0+1)[-c(n_0+1)]
delta_d=grid_d[2]-grid_d[1] ## spacing
delta_theta=grid_theta[2]-grid_theta[1]
temp=grid_1Dto2D=matrix(NA, 2, n_0^2)
for (i in c(1:n_0))
for (j in c(1:n_0)){
temp[,(i-1)*n_0+j]= c(grid_d[i], grid_theta[j])
grid_1Dto2D[,(i-1)*n_0+j]= c(i,j)
}
d_test=temp[1,]
theta_test=temp[2,]
m2= stan_model("type.stan")
y_forth=y^(1/4)
stan_fit=sampling (m2, data=list(N=length(y_forth), y=y_forth,
d=d_unit, theta=a_radian,
N_test=length(d_test),
d_test=d_test, theta_test=theta_test,
type=type),
iter=3000,chains=4)
print(stan_fit)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Posterior inference %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
fit_draw=rstan::extract(stan_fit)
f_predict_mean=colMeans(fit_draw$f_predict)
f_predict_sd=apply(fit_draw$f_predict, 2, sd)
range(f_predict_sd)
range(f_predict_mean)^4
range(log10(f_predict_mean^4))
col_brew_test=map2color(f_predict_mean^4 ,my.palette, breaks = seq(30, 650 ,length.out = 20))
col_brew_test_sd=map2color(f_predict_sd ,my.palette, breaks = seq(0.2, 0.8,length.out = 20))
col_brew_test_log=map2color( log10(f_predict_mean^4), my.palette, breaks = seq(log10(30), log10(650), length.out = 20))
f_draw=(fit_draw$f_predict)^4
y_draw=(fit_draw$y_predict)^4
range(a[plume])
## use the data label to determine the boundary of the plume : 260 < angle < 310
id_in_test= which(theta_test*180/pi >259.5 & theta_test*180/pi <310.5 )
id_in_test_grid= which(grid_theta*180/pi >259.5 & grid_theta*180/pi <310.5 )
which_id_is_grid_d=function(i){
which(grid_1Dto2D[1,]== i)
}
which_id_smaller_grid_d=function(i){
which(grid_1Dto2D[1,]<= i)
}
which_id_is_grid_theta=function(i){
which(grid_1Dto2D[2,]== i)
}
S= dim(f_draw)[1]
average_across_distance=average_across_theta=average_across_theta_d_500=average_across_theta_d_1000=matrix(NA, S, n_0)
for (i in 1:n_0)
average_across_distance[,i]= rowMeans(f_draw [,which_id_is_grid_d(i)])
for (i in 1:n_0){
average_across_theta[,i]= rowMeans(f_draw [,which_id_is_grid_theta(i)])
average_across_theta_d_500[,i]= rowMeans(f_draw [, which(grid_1Dto2D[2,]== i ) ][,5:9] )
# print( grid_d[5:9] )
average_across_theta_d_1000[,i]= rowMeans(f_draw [, which(grid_1Dto2D[2,]== i) ][,17:21] )
# print( grid_d[17:21] )
}
colMeans(average_across_theta_d_500[,id_in_test_grid])
mean( rowMeans(average_across_theta_d_500[,-id_in_test_grid]))
quantile( rowMeans(average_across_theta_d_500[,id_in_test_grid]), c(0.025,0.975))
apply(average_across_theta_d_500[,id_in_test_grid], 2, mean )
apply(average_across_theta_d_500[,id_in_test_grid], 2, quantile, c(0.025,0.975))
average_across_plume=average_across_nplume=average_across_plume_diff=y_average_across_plume_diff=y_average_across_plume_diff_exp=average_across_plume_diff_exp=average_across_plume_exp=average_across_nplume_exp=y_average_across_plume_diff_anti_exp=average_across_plume_diff_anti_exp=average_across_plume_diff_exp_inside=y_average_across_plume_diff_exp_inside=average_across_plume_exp_inside=average_across_nplume_exp_inside=average_across_plume_y=average_across_nplume_y=matrix(NA, S, n_0)
for (i in 1:n_0){
average_across_plume[,i]= rowMeans((f_draw [,which_id_is_grid_d (i)[id_in_test_grid ] ]))
average_across_nplume[,i]= rowMeans((f_draw [,which_id_is_grid_d (i)[-id_in_test_grid ] ]))
average_across_plume_y[,i]= rowMeans((y_draw [,which_id_is_grid_d (i)[id_in_test_grid ] ]))
average_across_nplume_y[,i]= rowMeans((y_draw [,which_id_is_grid_d (i)[-id_in_test_grid ] ]))
average_across_plume_diff[,i]= rowMeans(f_draw [,which_id_is_grid_d (i)[id_in_test_grid ] ]) -rowMeans(f_draw [,which_id_is_grid_d (i)[-id_in_test_grid] ])
average_across_plume_diff_exp_inside[,i]= rowMeans(( f_draw [,intersect(id_in_test, which_id_smaller_grid_d (i))]) ) -rowMeans(( f_draw [,intersect( c(1:(n_0^2)) [-id_in_test], which_id_smaller_grid_d (i))]) )
y_average_across_plume_diff[,i]= rowMeans(y_draw [,which_id_is_grid_d (i)[id_in_test_grid ] ]) -rowMeans(y_draw [,which_id_is_grid_d (i)[-id_in_test_grid] ])
}
excess_area=excess_area_y=excess_in_circle= excess_in_circle_y=weighted_plume=weighted_nplume=weighted_plume_y=weighted_nplume_y=matrix(NA, dim (average_across_plume_diff)[1], n_0)
for( j in 1:n_0)
for( s in 1:dim (average_across_plume_diff)[1] ){
grid_d_s=grid_d[1:j]
excess_area[s, j]= sum( average_across_plume_diff[s, 1:j] * grid_d_s /sum(grid_d_s))
excess_area_y[s, j]= sum( y_average_across_plume_diff[s,1:j] * grid_d_s /sum(grid_d_s))
excess_in_circle[s,j] =pi * (grid_d[j] * 1000)^2 * 6/60 * 0.01 *2* 1000 * excess_area[s, j] /1e6
excess_in_circle_y[s,j] =pi * (grid_d[j] * 1000)^2 * 6/60 * 0.01 *2* 1000 * excess_area_y [s, j] /1e6
weighted_plume[s,j]=sum( average_across_plume[s, 1:j] * grid_d_s /sum(grid_d_s))
weighted_nplume[s,j]=sum( average_across_nplume[s, 1:j] * grid_d_s /sum(grid_d_s))
weighted_plume_y[s,j]=sum( average_across_plume_y[s, 1:j] * grid_d_s /sum(grid_d_s))
weighted_nplume_y[s,j]=sum( average_across_nplume_y[s, 1:j] * grid_d_s /sum(grid_d_s))
}
type_draw=extract(stan_fit, pars="type_eff")$type_eff
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Prediction in the original Cartesian (x, y) coordinates  %%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
n_0=31
x1_grid=seq(-1.5, 1.5, length.out =n_0)
x2_grid=seq(-1.5, 1.5, length.out =n_0)
x1_data= d* sin(a/180*pi)
x2_data= d*cos(a/180*pi)
delta_x=x1_grid[2]-x1_grid[1]
temp=matrix(NA, 2, n_0^2)
for (i in c(1:n_0))
for (j in c(1:n_0))
temp[,(i-1)*n_0+j]= c(x1_grid[i], x2_grid[j])
x_grid=temp
x1_test=x_grid[1,]
x2_test=x_grid[2,]
d_test_new=sqrt(x1_test^2 +x2_test^2 )
theta_test_new=pi/2- atan2( x2_test, x1_test)
stan_fit=sampling (m2, data=list(N=length(y_forth), y=y_forth,
d=d_unit, theta=a_radian,
N_test=length(d_test_new),
d_test=d_test_new, theta_test=theta_test_new,
type=type),
iter=3000,chains=4)
print(stan_fit)
fit_draw=extract(stan_fit)
f_predict_mean=apply( (fit_draw$f_predict)^4 , 2, mean)
f_predict_mean_matrix=matrix(NA, n_0, n_0)
for (i in c(1:n_0))
for (j in c(1:n_0))
f_predict_mean_matrix[i,j]= f_predict_mean[(i-1)*n_0+j]
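#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Hedged visualization sketch (not in the original script):
# map the posterior-mean surface on the Cartesian grid built above
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
image(x1_grid, x2_grid, f_predict_mean_matrix, col = my.palette,
      xlab = "km east of Notre-Dame", ylab = "km north of Notre-Dame")
points(x1_data/1000, x2_data/1000, pch = 16, cex = 0.6) # sample locations (m converted to km)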
|
/soilPb.R
|
permissive
|
yao-yl/parisPb
|
R
| false | false | 8,238 |
r
|
install.packages('tseries')
install.packages('dplyr')
install.packages('vars')
install.packages('fGarch')
install.packages('forecast')
install.packages('rugarch')
install.packages("quantmod")
install.packages("ggthemes")
install.packages('ggplot2')
install.packages("zoo")
install.packages("ggfortify")
install.packages("gridExtra")
install.packages("tidyverse")
install.packages("gdtools")
install.packages("luzlogr")
install.packages("KFAS")
install.packages("dlm")
install.packages("R2PPT")
install.packages("RDCOMClient",repos="http://www.omegahat.net/R")
install.packages("rmarkdown")
install.packages("paws")
install.packages("aws")
install.packages("ggrepel")
install.packages("RJDBC")
install.packages("rJava")
install.packages("magrittr")
install.packages("systemfonts")
install.packages("set")
install.packages("remotes")
install.packages("backports")
remotes::install_github("Gedevan-Aleksizde/fontregisterer", repos = NULL, type = "source")
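# Hedged alternative sketch (not part of the original setup): install only the packages that
# are not already present, instead of reinstalling everything on every run.
pkgs <- c("tseries", "dplyr", "vars", "forecast", "rugarch", "rmarkdown")
missing_pkgs <- pkgs[!pkgs %in% rownames(installed.packages())]
if (length(missing_pkgs) > 0) install.packages(missing_pkgs)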
|
/WindowsSetting/R/install_packages.R
|
permissive
|
ralphpfuller/dotfiles
|
R
| false | false | 984 |
r
|
# Curtis Kephart
# just a file to help understand how to load redwood3 data.
# load libraries
library(readr)
library(dplyr)
library(ggplot2)
wldata <- read_csv("~/Dropbox/SSEL/Wl/data/example_data/Bubbles Weakest Link-2017-06-23 05-22-57.181343.csv") %>%
dplyr::mutate(
Key = as.factor(Key)
)
# types of messages saved
table(wldata$Key)
#state is saved on each tick, and logs anything we want it to log, action, payoff, etc
# redwoodParser() is assumed to be defined elsewhere in this project; here it expands the
# "state" messages into columns such as state.subjectid, state.action and state.payoff
wldata = redwoodParser(wldata, 'state')
ggplot(
data = wldata %>% dplyr::filter(Key == "state"),
aes(
x = ClientTime
)
) +
geom_histogram(bins = 250)
ggplot(
data = wldata %>%
dplyr::filter(Key == 'state') %>%
group_by(Period, Group) %>%
dplyr::mutate(
PeriodTime = Time - min(Time)
),
aes(
x = PeriodTime,
y = state.action,
colour = as.factor(state.subjectid)
)
) +
geom_line() +
facet_grid(Period~.) +
labs(
title = "Actions"
)
ggplot(
data = wldata %>%
dplyr::filter(Key == 'state') %>%
group_by(Period, Group) %>%
dplyr::mutate(
PeriodTime = Time - min(Time)
),
aes(
x = PeriodTime,
y = state.payoff,
colour = as.factor(state.subjectid)
)
) +
geom_line() +
facet_grid(Period~.) +
labs(
title = "Payoffs"
)
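# Hedged follow-up sketch (not in the original): average payoff per period, computed from the
# same parsed "state" messages used for the plots above.
wldata %>%
  dplyr::filter(Key == "state") %>%
  dplyr::group_by(Period) %>%
  dplyr::summarise(mean_payoff = mean(state.payoff, na.rm = TRUE))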
|
/code/load_redwood_data.r
|
no_license
|
EconomiCurtis/wl
|
R
| false | false | 1,270 |
r
|
library(keras)
library(caret)
kdd_prediction <- read.csv("kdd_prediction.csv")
#one hot encoding
table(as.numeric(kdd_prediction$result), kdd_prediction$result)
trans <- c("dos","normal","probe","r2l","u2r")
names(trans) <- c(1,2,3,4,5)
which(sapply(kdd_prediction, is.factor))
kdd_prediction <- transform(kdd_prediction, flag=as.integer(flag))
kdd_prediction <- transform(kdd_prediction, protocol_type=as.integer(protocol_type))
kdd_prediction <- transform(kdd_prediction, service=as.integer(service))
kdd_prediction_numeric <- kdd_prediction
kdd_prediction_numeric <- transform(kdd_prediction, result=as.integer(result))
kdd_prediction_numeric1 <- kdd_prediction_numeric
#scaling value
scl <- function(x){ (x - min(x))/(max(x) - min(x)) }
kdd_prediction_numeric <- data.frame(lapply(kdd_prediction_numeric, scl))
#delete NAN's values
kdd_prediction_numeric <- kdd_prediction_numeric[colSums(!is.na(kdd_prediction_numeric)) > 0]
kdd_prediction_numeric$result <- kdd_prediction_numeric1$result
# simple sequential 50/50 train/test split (not cross-validation)
percentage = round(nrow(kdd_prediction_numeric) *50/100)
cat('Using the first', percentage, 'rows (50%) of the KDD dataset for training and the rest for testing.\n')
train <- kdd_prediction_numeric[ (1:percentage), ]
test <- kdd_prediction_numeric[ (percentage:nrow(kdd_prediction_numeric)), ]
#print dim train and test
dim(train)
dim(test)
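# Hedged alternative sketch (uses caret, loaded above): a stratified random split on the class
# label instead of the sequential 50/50 cut used here.
# set.seed(42)
# idx <- createDataPartition(kdd_prediction_numeric$result, p = 0.5, list = FALSE)
# train_strat <- kdd_prediction_numeric[idx, ]
# test_strat  <- kdd_prediction_numeric[-idx, ]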
# encode data: predictor matrices and one-hot label matrices
matrix_train <- as.matrix(train[, -ncol(train)])
matrix_test <- as.matrix(test[, -ncol(test)])
train_label <- train$result
test_label <- test$result
one_hot_train_labels <- to_categorical(train_label)
one_hot_test_labels <- to_categorical(test_label)
head(test_label)
head(one_hot_test_labels)
#fit first model
model <- keras_model_sequential() %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001), input_shape = c(ncol(train)-1)) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 512, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 512, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 6, activation = "softmax")
model %>% compile(
optimizer = "rmsprop",
loss = "categorical_crossentropy",
metrics = c("accuracy")
)
val_indices <- 1:200
x_val <- matrix_train[val_indices,]
partial_x_train <- matrix_train[-val_indices,]
y_val <- one_hot_train_labels[val_indices,]
partial_y_train = one_hot_train_labels[-val_indices,]
history <- model %>% fit(
partial_x_train,
partial_y_train,
epochs = 200,
batch_size = 512,
validation_data = list(x_val, y_val)
)
plot(history)
#turn on profilng
Rprof()
#fit
model <- keras_model_sequential() %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001), input_shape = c(ncol(train)-1)) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 1024, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dropout(rate = 0.5) %>%
layer_dense(units = 512, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 512, activation = "relu", kernel_regularizer = regularizer_l2(0.001)) %>%
layer_dense(units = 6, activation = "softmax")
model %>% compile(
optimizer = "rmsprop",
loss = "categorical_crossentropy",
metrics = c("accuracy")
)
history <- model %>% fit(
partial_x_train,
partial_y_train,
epochs = 15,
batch_size = 512,
validation_data = list(x_val, y_val)
)
results <- model %>% evaluate(matrix_test, one_hot_test_labels)
#predict
predictions <- model %>% predict(matrix_test)
#turn off profiling
Rprof(NULL)
#get prediction information
head(predictions)
dim(predictions)
sum(predictions[1,])
which.max(predictions[1,])
result_predict <- apply(predictions,1,function(x) which(x==max(x)))
result_predict <- result_predict -1
#reversing one hot encoding
attack_predict <- trans[ as.character(result_predict) ]
#get profiling informations
prof<-summaryRprof()
#print profiling informations
prof$by.total
#confusion matrix
one_hot_test_ <- apply(one_hot_test_labels,1,function(x) which(x==max(x)))
one_hot_test_ <- one_hot_test_ -1
attack_test<- trans[ as.character(one_hot_test_) ]
confusionMatrix(factor(attack_predict), factor(attack_test), mode = "everything")
#table (attack_predict, attack_test)
library(pROC)
predictions <- as.numeric(factor(attack_predict)) # numeric codes of the predicted classes
roc.multi <- multiclass.roc(test$result, predictions)
print(roc.multi)
rs <- roc.multi[['rocs']]
plot.roc(rs[[1]])
sapply(2:length(rs),function(i) lines.roc(rs[[i]],col=i))
auc(roc.multi)
print(roc.multi[['rocs']])
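# Hedged extra sketch: the AUC of each pairwise ROC curve from the multiclass object above.
sapply(rs, function(r) as.numeric(auc(r)))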
|
/models evaluation/keras/keras-kdd.R
|
permissive
|
marksniper/Network-Intrusion-Detection-System
|
R
| false | false | 5,614 |
r
|
library(dplyr)
library(sqldf)
library(ggplot2)   # needed for the mpg data set and qplot() used below
exam<-read.csv('csv_exam.csv')
# c1 and c2 give the same result!
c1 <- exam[exam$class==1,]
c1 <- exam[exam$class %in% c(1,3),] # meaning: fetch the rows whose class is in c(1, 3)
c2<-exam %>% filter(class==1)
c3 <- exam[exam$math>=50 & exam$english >= 70,]
c4 <- sqldf('SELECT * FROM exam
WHERE math>=50
AND english>=70
')
mpg
#page 133: compare the average highway fuel economy (hwy) of cars with displ of 4 or less versus 5 or more
qq <- data.frame(mpg)
is.data.frame(qq)
avg4 <- colMeans(sqldf('select hwy from mpg where displ< 5'))
avg5 <- colMeans(sqldf('select hwy from mpg where displ>=5.0'))
avg <- aggregate(hwy~displ, mpg, mean);
ex1_1 <- mean(avg[avg$displ<5,'hwy'])
avg <- aggregate(hwy~displ <= 4, mpg, mean);
# let's see whether audi or toyota has the higher average city fuel economy (cty) by manufacturer
avgAudi <- mean(qq[qq$manufacturer=='audi', c(8)])
avgToyota <- mean(qq[qq$manufacturer=='toyota', c(8)])
tmp <- aggregate(qq$cty~manufacturer, mpg, mean)
ex2 <- tmp[tmp$manufacturer=='audi' | tmp$manufacturer=='toyota',]
#extract the highway fuel economy (hwy) of chevrolet, ford and honda cars and compute the overall mean
tmp2 <- mpg[mpg$manufacturer=='chevrolet'
| mpg$manufacturer=='ford'
| mpg$manufacturer=='honda', c(1,8)]
tmp3 <- mpg[mpg$manufacturer %in% c('chevrolet', 'ford', 'honda'),c(1,8)]
ex3 <- colMeans(tmp3[,2])
ex4 <- colMeans(tmp2[,2])
temp <- aggregate(hwy~displ <=4, mpg, mean)
qplot(x = c('over 4', 'under 5'), y = temp$hwy)
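# Hedged dplyr sketch (an alternative to the aggregate()/sqldf steps above):
# mean hwy for displ <= 4 versus displ >= 5 in a single pipeline.
mpg %>%
  filter(displ <= 4 | displ >= 5) %>%
  group_by(big_engine = displ >= 5) %>%
  summarise(mean_hwy = mean(hwy))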
|
/rProject/day03/r5.R
|
no_license
|
4rchive7/TIL
|
R
| false | false | 1,558 |
r
|
fileList <- list.files(path="data/",full.names=T)
run <- 125
runData <- list()
for (file in fileList) {
runData[[file]] <- (read.csv(file))[run,]
}
save(runData,file="condensedData")
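# Hedged follow-up sketch (assumes every CSV shares the same columns): stack the selected rows
# into one data frame, keeping the source file as an identifier.
runTable <- do.call(rbind, runData)
runTable$sourceFile <- names(runData)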
|
/in_progress/models/costBenefitAnalysis/bigDataRun.R
|
no_license
|
mmcdermott/disease-modeling
|
R
| false | false | 189 |
r
|
library(tidyverse)
library(magrittr)
# generate some high dimensional data that has 3 main clusters
num_dimension <- 10
num_clusters <- 3
num_samples <- 15
num_obs <- num_clusters*num_samples
# takes two row indexes of our data and returns their distance from eachother
pw_dist <- function(i, j, data){
# calc norm
data_frame(i = i, j = j, dist = sqrt(sum((data[i,] - data[j,])^2)))
}
# matrix where each row is a centroid and the columns are its location in k-d space.
cluster_centroids <- runif(num_dimension*num_clusters, min = -10, max = 10) %>%
matrix(ncol = num_dimension, nrow = num_clusters)
gen_data_for_center <- function(center_id){
centroid <- cluster_centroids[center_id, ]
MASS::mvrnorm(num_samples, mu = centroid, Sigma = diag(num_dimension)) %>%
as_data_frame()
}
# generate matrix of values from clusters
all_data <- 1:num_clusters %>%
map_df(gen_data_for_center) %>%
as.matrix()
# all combinations of datapoints
pairwise_distances <- 1:num_obs %>%
map_df(~data_frame(p1 = (.:num_obs), p2 = .)) %$%
map2_df(p1, p2, ~pw_dist(.x, .y, data = all_data))
# observation info
obs_info <- data_frame(
id = 1:num_obs,
cluster_id = 1:num_clusters %>% rep(each = num_samples)
)
pairwise_distances %>% write_csv('distances.csv')
obs_info %>% write_csv('obs_info.csv')
links <- pairwise_distances %>%
rename(source = i, target = j, value = dist) %>%
filter(value > 0)
nodes <- obs_info %>%
mutate(group = map_chr(cluster_id, ~(letters[.]))) %>%
select(-cluster_id)
list(links = links, nodes = nodes) %>%
jsonlite::toJSON() %>%
write_lines('data_info.json')
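# Hedged sanity-check sketch (not in the original): the same pairwise distances via base R's
# dist(), which should agree with the pw_dist() results above up to ordering.
dist_matrix <- as.matrix(dist(all_data))
dist_matrix[1:3, 1:3]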
|
/generate_data.R
|
no_license
|
nstrayer/tsne_presentation
|
R
| false | false | 1,626 |
r
|
#' Demonstration means and variances using permutations
#'
#' @param population A vector of values to be sampled from
#' @param sample.size A number (2-10) for the number of observations to be drawn
#' @param pop.mu A value for population true mean
#' @param pop.sigma2 A value for population true variance
#' @param show.summary Controls display of summary statistics (TRUE/FALSE)
#' @param show.plots Controls whether plots are shown
#' @param show.details Controls whether permutations are shown
#'
#' @return
#' ratio of estimated variance/true variance
#'
#' @examples
#' \dontrun{
#' demo_permutations(population=c(1:3),sample.size=2)
#' }
#' @export
demo_permutations <- function(population=c(1:3),
sample.size=2,
pop.mu=NULL,pop.sigma2=NULL,
show.summary=TRUE,
show.plots=TRUE,
show.details=TRUE){
cat("\n
*******************************************************************************
This function allows you to recreate the demonstrations shown in class of the
sampling distributions of the mean and variance. You will define the values
that comprise your population and your sample size, ie, the number of
observations you want to draw from that population. You will be shown the
sampling distribution of the mean and variance.
*******************************************************************************\n\n")
#packages required:
#install.packages("gtools") #new for this problem set
#library(gtools)
#library(mosaic)
#library(psych)
pop.size <- length(population)
pop.mu <- mean(population)
if(sample.size < 2){
stop("Sorry, the variance of a sample is undefined if you
only have 1 observation. Try a sample size between 2-10.")
}
if(sample.size > 10){
stop("Sorry, your sample size would require more memory
than R has allocated to your workspace. Try a sample size between 2-10.")
}
if(is.null(pop.mu)){
pop.mu <- mean(population)
}
if(is.null(pop.sigma2)){
    pop.sigma2 <- sum((population-pop.mu)^2)/(pop.size) #force the population parameter to use N, not N-1
}
samples <- permutations(n=pop.size,r=sample.size,v=population,repeats.allowed=TRUE)
ntrials <- dim(samples)[[1]]
trials <- c(1:ntrials)
sample.means <- rowMeans(samples)
squared.deviations <- (samples - sample.means)^2
sum.sq <- rowSums(squared.deviations)
var.biased <- sum.sq/sample.size
var.unbiased <- sum.sq/(sample.size - 1)
long.run.mean <- mean(sample.means)
long.run.var.biased <- mean(var.biased)
long.run.var.unbiased <- mean(var.unbiased)
fraction.of.sigma2 <- long.run.var.biased/pop.sigma2
my.work <- data.frame(samples,sample.means,
squared.deviations,sum.sq,var.biased,var.unbiased)
summary.work <- data.frame(pop.mu,pop.sigma2,long.run.mean,long.run.var.biased,long.run.var.unbiased,
fraction.of.sigma2)
my.work <- round(my.work,2)
summary.work <- round(summary.work,2)
obs.names <- paste("Obs",1:sample.size,sep="")
dev.names <- paste("Dev",1:sample.size,sep="")
vnames <- c(obs.names,"Means",dev.names,"SUM.SQ","var(N)","var(N-1)")
dimnames(my.work) <- list(trials,vnames)
if(show.plots==TRUE){
par(mfrow=c(1,2))
if(sample.size==2 & round(pop.sigma2,2)==.67){
cbreaks1 <- c(.75,1.25,1.75,2.25,2.75,3.25)
cbreaks2 <- c(0,.2,.4,.8,1,1.2)
}
else{
cbreaks1 <- NA; cbreaks2 <- NA
}
if(is.na(cbreaks1[1])){
hist(sample.means,col="tan",main="",xlab="Sample Mean")
}
else {
hist(sample.means,col="tan",main="",xlab="Sample Mean",breaks=cbreaks1)
}
mtext(side=3,text="Distribution of Sample Means",cex=1,line=1)
abline(v=c(pop.mu,long.run.mean),col=c("red","blue"),lwd=3,lty=c(1,2))
if(is.na(cbreaks2[1])){
hist(var.biased,col="tan",main="",xlab="Sample Variance (using N)")
}
else {
hist(var.biased,col="tan",main="",xlab="Sample Variance (using N)",breaks=cbreaks2)
}
mtext(side=3,text="Distribution of Sample Variances",cex=1,line=1)
abline(v=c(pop.sigma2,long.run.var.biased),col=c("red","blue"),lwd=3,lty=c(1,2))
}
if(show.details==TRUE){
cat("\n###################################################")
cat("\n\nPermutations:\n\n")
print(my.work)
}
if(show.summary==TRUE){
cat("\n###################################################")
cat("\n\nSummary of population, parameters and estimators:\n")
cat("\nPopulation: ",population)
cat("\nSample size (N): ",sample.size)
cat("\nNumber of permutations:",ntrials,"\n")
summary.stats <- round(colMeans(my.work),2)
long.run.stats <- matrix(c(pop.mu,pop.sigma2,
long.run.mean,
long.run.var.biased,long.run.var.unbiased,
fraction.of.sigma2))
long.run.stats <- round(long.run.stats,3)
rnames <- c("pop.mu","pop.sigma2","long.run.mean","long.run.var.biased",
"long.run.var.unbiased","fraction.of.sigma2")
cnames <- ""
dimnames(long.run.stats) <- list(rnames,cnames)
print(long.run.stats)
cat("\n###################################################\n\n")
}
fraction.of.sigma2
}
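## Illustrative usage (a sketch, not part of the original package code): with the
## default population 1:3 and samples of size 2, the long-run biased variance
## (divisor N) recovers only (N-1)/N = 1/2 of pop.sigma2, which is exactly the
## ratio the function returns.
# demo_permutations(population = 1:3, sample.size = 2,
#                   show.plots = FALSE, show.details = FALSE) # returns 0.5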
|
/R/demo_permutations.r
|
no_license
|
CLPS0900/CLPS0900R
|
R
| false | false | 5,132 |
r
|
# weighted t-divergence "distance" between vectors x and y (w = feature weights)
wt_t_dist=function(x,y,w){
return(sum(w*(x-y)*atan(x-y)))
}
# per-coordinate t-divergence contributions, used when updating the feature weights
# (the unused weight argument of the original definition has been dropped)
vec_t_wt=function(x,y){
return((x-y)*atan(x-y))
}
Update_mu_1=function(x,tmax=20){
mu=median(x)
for(t in 1:tmax){
f1=sum(atan(mu-x)+(mu-x)/(1+(mu-x)^2))
f2=sum(2/(1+(mu-x)^2))
mu=mu-f1/f2
}
return(mu)
}
Update_mu=function(X,tmax=20){
p=dim(X)[2]
mu=numeric(p)
for(l in 1:p){
mu[l]=Update_mu_1(X[,l],tmax)
}
return(mu)
}
t_wkmeans=function(X,M,beta=4,tmax=30){
if(is.vector(M)==TRUE){
M=as.matrix(M)
M=t(M)
}
n=dim(X)[1]
d=dim(X)[2]
c=dim(M)[1]
weight=rep(1/d,d)
label=numeric(n)
dist=numeric(c)
t=0
D=numeric(d)
#update membership
repeat{
t=t+1
for(i in 1 : n){
for(j in 1 : c){
dist[j]=wt_t_dist(X[i,],M[j,],weight^beta)
}
label[i]=which.min(dist)
}
#update centres
for(i in 1:c){
I=which(label==i)
M[i,]=Update_mu(X[I,,drop=FALSE])#drop=FALSE keeps a matrix when a cluster has a single point; was colMeans(X[I,])
}
#update weights
for(j in 1:d){
D[j]=0
}
for(i in 1:c){
I=which(label==i)
for(k in I){
D=D+vec_t_wt(X[k,],M[i,])
}
}
for(i in 1:d){
if(D[i]!=0){
D[i]=1/D[i]
D[i]=D[i]^(1/(beta-1))
}
}
sum=sum(D)
weight=D/sum
if(t>tmax){
break
}
}
return(list(label,M,weight))
}
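## Illustrative usage (a sketch, not from the original repository): two
## well-separated Gaussian clusters in 2-d, initial centres taken from the data.
## All object names below are hypothetical.
# set.seed(1)
# X <- rbind(matrix(rnorm(100, mean = 0), ncol = 2),
#            matrix(rnorm(100, mean = 5), ncol = 2))
# M <- X[sample(nrow(X), 2), ]
# fit <- t_wkmeans(X, M, beta = 4, tmax = 30)
# labels <- fit[[1]]; centres <- fit[[2]]; feature_weights <- fit[[3]]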
|
/functions.R
|
permissive
|
SaptarshiC98/tdivergence
|
R
| false | false | 1,431 |
r
|
### Helper Functions for various issues encountered with the data.
#*************************************************************
## Capwords ------------------------------------------------------
# Character case issues:
# Converting first character in each word from lower case to upper case
capwords <- function(s, strict = FALSE) {
cap <- function(s) paste(toupper(substring(s, 1, 1)),
{s <- substring(s, 2); if(strict) tolower(s) else s},
sep = "", collapse = " " )
sapply(strsplit(s, split = " "), cap, USE.NAMES = !is.null(names(s)))
}
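# Illustrative examples (not part of the original script):
# capwords("new york city") returns "New York City"
# capwords("NEW YORK", strict = TRUE) returns "New York"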
#*************************************************************
# SA Slope calculator ---- slopeSAer
# Create a function that checks to see if there's enough data to calculate a linear regression,
# and then regresses mean plug accretion (plugMeanAccret) across time (as decimal year)
slopeSAer <- function(e) {
if(nrow(e) < 2) {return(data.frame(slope = NA)) # if number of rows (data points) is less than 2 return NA, matching the single slope column of the regression branch
} else { # if there's enough data take data = e (which will be subsetted in later functions) then...
p <- coef(lm(plugMeanAccret ~ DecYear, data = e)) # regress the plug depth against time (decimal years) and return the coefficients of the regression- slope and intercept
p <- data.frame(slope = round(p[2], digits= 4)) # subset out just the slope coefficient from the object p
}
}
#*************************************************************
# SET Slope Calculator ---- slopeer based on Philippe Hensel's original code
# Create a function that checks to see if there's enough data to calculate a linear regression,
# and then regresses pin height across time (as decimal year)
slopeer <- function(d) {
if(nrow(d) < 2) {
return(data.frame(slope = NA)) # match the single slope column of the regression branch
}else {
p <- coef(lm(Raw ~ DecYear, data = d))
p <- data.frame(slope = round(p[2], digits= 4))
}
}
#*************************************************************
# Function to calculate the standard error
stder <- function(x){ sqrt(var(x,na.rm=TRUE)/length(na.omit(x)))}
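#*************************************************************
# Illustrative usage (a sketch, not part of the original script): applying slopeer()
# per SET pin with dplyr. The data frame `pin_readings` and its grouping columns are
# hypothetical; it is assumed to contain the Raw and DecYear columns used above.
# library(dplyr)
# pin_slopes <- pin_readings %>%
#   group_by(Plot_Name, SET_direction, pin_number) %>%
#   do(slopeer(.)) # one slope per pin; NA when fewer than 2 readings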
|
/02_helpers.R
|
no_license
|
Brewstarke/SET-MH
|
R
| false | false | 2,100 |
r
|
# LSTM training helpers ---------------------------------------------------
run_k_fold <- function(d, k = 10, dnn_config) {
# extract info needed to specify features of test dataset
all_seg_ids <- d %>% distinct(seg_id, speech_register, speaker_id)
n_test <- as.integer(nrow(all_seg_ids) * (1 - dnn_config$prop_train))
n_per_register_test <- n_test / 2
n_speakers <- d %>% distinct(speaker_id) %>% nrow()
n_per_speaker_test <- as.integer(n_per_register_test / n_speakers)
segs_per_speaker <- all_seg_ids %>% count(speaker_id, speech_register)
speaker_blacklist <- segs_per_speaker %>% filter(n < n_per_speaker_test) %>% pull(speaker_id)
1:k %>% map(create_fold,
d = d,
seg_info = all_seg_ids,
blacklist = speaker_blacklist,
size_per_speaker = n_per_speaker_test)
}
create_fold <- function(fold, d, seg_info, blacklist, size_per_speaker) {
test_seg_ids <- seg_info %>%
filter( !(speaker_id %in% blacklist) ) %>%
group_by(speech_register, speaker_id) %>%
sample_n(size = size_per_speaker, replace = FALSE) %>%
ungroup() %>%
select(speaker_id, seg_id) %>%
mutate(sample_id = 1:n(),
fold_id = fold)
d %>%
filter(seg_id %in% test_seg_ids$seg_id) %>%
select(speaker_id, seg_id, speech_register, time_bin_id, duration_ms) %>%
left_join(., test_seg_ids, by = c("seg_id", "speaker_id"))
}
create_lstm <- function(d, lstm_config) {
# extract some model params for tensor shaping
n_shapes <- d$n_qshapes
input_shape <- n_shapes + 1
input_length <- d$train_data$prev_cluster_seq[[1]] %>% length()
model <- keras_model_sequential()
model %>%
layer_embedding(
input_dim = input_shape,
output_dim = lstm_config$lstm_output_dim,
input_length = input_length) %>%
layer_lstm(units = lstm_config$lstm_units,
input_shape = c(input_length, input_shape),
dropout = lstm_config$dropout,
return_sequences = F) %>%
layer_dense(n_shapes, activation = "softmax")
## multi-layer lstm
# model %>%
# layer_embedding(
# input_dim = input_shape,
# output_dim = lstm_config$lstm_output_dim,
# input_length = input_length) %>%
# layer_lstm(units = lstm_config$lstm_units,
# input_shape = c(input_length, input_shape),
# dropout = lstm_config$dropout,
# return_sequences = TRUE) %>%
# layer_lstm(units = lstm_config$lstm_units,
# input_shape = c(input_length, input_shape),
# dropout = lstm_config$dropout,
# return_sequences = FALSE) %>%
# layer_dense(n_shapes, activation = "softmax")
optimizer <- optimizer_rmsprop(lr = lstm_config$lr)
model %>% compile(
loss = "categorical_crossentropy",
optimizer = optimizer,
metrics = list('accuracy')
)
model
}
# Train LSTM ---------------------------------------------------
train_lstm <- function(model, model_name, input_data, lstm_config) {
summary(model)
# train model
if(lstm_config$include_early_stop) {
m_fit <- model %>%
fit(input_data$d_vectorized$train_in,
input_data$d_vectorized$train_out,
batch_size = lstm_config$batch_size,
epochs = lstm_config$n_epochs,
validation_split = lstm_config$validation_split,
shuffle = lstm_config$shuffle,
callbacks = lstm_config$early_stop
)
} else {
m_fit <- model %>%
fit(input_data$d_vectorized$train_in,
input_data$d_vectorized$train_out,
batch_size = lstm_config$batch_size,
epochs = lstm_config$n_epochs,
validation_split = lstm_config$validation_split,
shuffle = lstm_config$shuffle
)
}
# generate predictions
preds <- model %>% predict(input_data$d_vectorized$test_in)
# tidy up predictions
d_tidy_preds <- tidy_preds(preds, input_data, input_data$exp_run_id)
# save model
  if (lstm_config$save_model) {model %>% save_model_hdf5(here(glue::glue("models/{model_name}_mod.h5")))}
list(fit = m_fit, d_preds = d_tidy_preds)
}
# Safe train lstm ---------------------------------------------------------
safe_train_lstm <- safely(train_lstm)
# Tidy model predictions --------------------------------------------------
tidy_preds <- function(preds, d, run_id) {
d_preds <- preds %>%
as_tibble(.name_repair = "universal") %>%
clean_names()
colnames(d_preds) <- colnames(d_preds) %>% str_replace("x", "shape_")
d_preds <- d_preds %>%
mutate(seg_id = d$test_data$next_cluster_seg_id %>% as.character(),
speaker_id = d$test_data$next_cluster_speaker_id %>% as.character(),
exp_run_id = run_id,
speech_register = d$test_data$next_cluster_speech_register %>% as.character(),
time_bin_id = d$test_data$next_cluster_time_bin_id %>% as.character(),
target_cluster = d$test_data$next_cluster %>% as.character(),
dataset = d$test_data$next_cluster_dataset %>% as.character(),
duration_ms = d$test_data$next_cluster_duration_ms %>% as.character()) %>%
select(seg_id, speaker_id, exp_run_id, dataset, speech_register,
time_bin_id, target_cluster, duration_ms, everything())
# tidy the predictions
first_shape_col <- "shape_1"
last_shape_col <- colnames(d_preds)[colnames(d_preds) %>% length()]
d_preds_tidy <- d_preds %>%
gather(key = "cluster_shape",
value = "prob_mass",
first_shape_col:last_shape_col)
d_preds_tidy %>%
group_by(seg_id, time_bin_id) %>%
mutate(is_target_cluster = ifelse(str_extract(cluster_shape, "\\d+") == target_cluster, TRUE, FALSE),
predicted_cluster = which.max(prob_mass),
dataset = str_to_lower(dataset),
correct_pred = ifelse(target_cluster == predicted_cluster, 1, 0))
}
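# Illustrative configuration (a sketch, not from the original repository). The field
# names are the ones read by create_lstm()/train_lstm() above; the values are
# hypothetical. `early_stop` is only needed when include_early_stop is TRUE.
# example_lstm_config <- list(
#   lstm_output_dim = 32,   # embedding dimension fed into the LSTM
#   lstm_units = 64,        # hidden units in the LSTM layer
#   dropout = 0.2,
#   lr = 0.001,             # learning rate for optimizer_rmsprop()
#   batch_size = 128,
#   n_epochs = 20,
#   validation_split = 0.1,
#   shuffle = TRUE,
#   include_early_stop = FALSE,
#   save_model = FALSE
# )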
|
/code/00_helper_functions/lstm-train-h.R
|
permissive
|
kemacdonald/lena-pred
|
R
| false | false | 5,959 |
r
|
#' Function to check the format of the file of parameters used to simulate data
#' under or estimate parameters of extensions of the Isolation-with-migration
#' model.
#' The function \code{\link{check_param}} checks that the information on the
#' values and descriptions of the prior distributions for the fixed and
#' variable (or estimated) parameters, respectively, specified in the file
#' \code{paramfile} (see \code{\link{param_est}} and \code{\link{param_sim}})
#' are in the correct format to simulate data under (or to estimate parameters
#' of) extensions of the Isolation-with-migration model with the function
#' \code{\link{simulate_data}} (or \code{\link{estimate_IMc}}). \cr This
#' function is called within the functions \code{\link{simulate_data}} and
#' \code{\link{estimate_IMc}}.
#'
#'
#' @param param The matrix \eqn{9} (or \eqn{11}) \eqn{\times} \eqn{3} of either
#' the values or descriptions of the prior distributions for the fixed and
#' variable (or estimated) parameters, respectively, required to simulate
#' data under (or to estimate parameters of) extensions of the
#' Isolation-with-migration model with the functions
#' \code{\link{simulate_data}} and \code{\link{estimate_IMc}}.
#' \item\strong{Specifying variable (or estimated) parameters: } Any of the
#' demographic parameters listed below can be either fixed (i.e., only one
#' value specified after the keyword) or variable (or estimated). \cr If a
#' parameter \eqn{i} \eqn{\in} \eqn{[1:7]} is to be variable (or estimated),
#' the information on the uniform prior distribution is specified with three
#' values, two floating numbers and an integer as follows: \cr
#' \code{param[i,]=c(}\eqn{\Theta_l}\code{, }\eqn{\Theta_u}\code{,
#' }\eqn{\Theta_n}\code{)}. \tabular{ll}{ \eqn{\Theta_l}\tab : The lower
#' limit on the prior distribution range. \eqn{\Theta_l}\eqn{\ge}\eqn{0}. \cr
#' \eqn{\Theta_u} \tab : The upper limit on the prior distribution range.
#' \eqn{\Theta_l}\eqn{\le}\eqn{\Theta_u}. \cr \eqn{\Theta_n} \tab : The
#' number of values to consider along the prior distribution range.
#' \eqn{\Theta_n>0} \cr } ATTENTION: \code{\link{check_param}} requires that
#' \eqn{0\le}\eqn{\Theta_l}\eqn{\le}\eqn{\Theta_u} and \eqn{\Theta_n>0}. \cr
#' If only one value is specified (i.e., \eqn{\Theta_l>0} and \eqn{\Theta_u}
#' not specified or \eqn{\Theta_u=\Theta_l}), the parameter is considered
#' fixed to \eqn{\Theta_l}.
#'
#' The nine (or eleven) rows correspond to the following parameters and keywords:
#' \item\strong{REQUIRED PARAMETERS: }
#' \item\code{param[1,]~}\emph{\code{theta_1}} ATTENTION:
#' \code{\link{check_param}} requires that either the parameter value of the
#' population mutation rate per bp per generation for population 1 (i.e.
#' \code{param[1,1]=}\eqn{\theta_1>0}) or the prior distribution for
#' \eqn{\theta_1} (see above for the restrictions) be specified:\cr
#' \code{param[1,1]=}\eqn{\theta_1} or
#' \code{param[1,]=c(}\eqn{\theta_1l}\code{, }\eqn{\theta_1u}\code{,
#' }\eqn{\theta_1n}\code{)}. \cr \tabular{ll}{ ---> Where: \tab \cr
#' \eqn{\theta_1=4N_1*\mu} \tab : The population mutation rate per bp per
#' generation for population 1 (required). \cr \eqn{N_1} \tab : The effective
#' population size in population 1 (the reference population so by default
#' and unless specified, \eqn{N_1=N_2=N_A}). \cr \eqn{\mu} \tab : The genomic
#' generational mutation rate per bp. } The information on the parameter
#' \eqn{\theta_1} can be specified with the keyword \emph{\code{theta_1}} in
#' the file \code{paramfile} (see \code{\link{param_sim}} and
#' \code{\link{param_est}}). % end theta
#' \item\code{param[2,]~}\emph{\code{M_present}} \code{param[2,1]=}\eqn{M_p}
#' or \code{param[2,]=c(}\eqn{M_pl}\code{, }\eqn{M_pu}\code{,
#' }\eqn{M_pn}\code{)}\cr specifies \eqn{M_p=4N_1*m_p}, the number of
#' migrants exchanged each generation by the TWO populations at present. \cr
#' \eqn{M_p} has different definitions depending of the model considered: \cr
#' * \eqn{M_p} is the symmetrical rate of gene flow between TWO populations
#' in an island model (i.e., \eqn{0<t<\inf}) as specified with: \cr -- At
#' least one of the independent genomic regions has \eqn{n_1>0} and
#' \eqn{n_2>0} as specified in the file \code{regfile} (see
#' \code{\link{info_region}}). \cr -- Either \code{param[5,1]=}\eqn{T_s=0} or
#' \code{param[5,1]=NA}. \cr ATTENTION: \code{\link{check_param}} requires
#' that \eqn{M_p>0} when fixed or \eqn{0\le}\eqn{M_pl}\eqn{\le}\eqn{M_pu} and
#' \eqn{M_pn>0} when variable (or estimated) be specified in \code{param[2,]}
#' in case of an island model. \cr * \eqn{M_p} is the constant symmetrical
#' rate of gene flow since the split until present (i.e., \eqn{0<t<T_s}) if
#' there is a population split (as specified with \eqn{T_s>0} when fixed or
#' \eqn{0\le}\eqn{T_sl}\eqn{\le}\eqn{T_su} and \eqn{T_sn>0} when variable (or
#' estimated) in \code{param[5,]}). \cr * \eqn{M_p} is the constant
#' symmetrical rate of gene flow since the time of gene flow rate change
#' until present (i.e., \eqn{0<t<T_c}) if a time at which the gene flow rate
#' changed is specified with: \cr -- \eqn{0<T_c<T_s} (as specified with
#' \eqn{0<\epsilon<1} when fixed or
#' \eqn{0\le}\eqn{\epsilon_l}\eqn{\le}\eqn{\epsilon_u}\eqn{\le1} and
#' \eqn{\epsilon_n>0} when variable (or estimated) in \code{param[6,]}). \cr
#' -- \eqn{M_c \not=M_p} (as specified with \eqn{0\le}\eqn{M_c \not=M_p} when
#' fixed in \code{param[2,1]} and \code{param[7,1]}).\cr \tabular{l}{ --->
#' Where, \eqn{m_p} : The generational fraction of migrant individuals at
#' present. } The information on the parameter \eqn{M_p} can be specified
#' with the keyword \emph{\code{M_present}} in the file \code{paramfile} (see
#' \code{\link{param_sim}} and \code{\link{param_est}}). % end M_p % end req
#' arg \item\strong{OPTIONAL DEMOGRAPHIC PARAMETERS: } All the parameters
#' listed below are optional and can be either fixed or variable (or
#' estimated). \item\code{param[3,]~}\emph{\code{theta_2}}
#' \code{param[3,1]=}\eqn{\theta_2} or
#' \code{param[3,]=c(}\eqn{\theta_2l}\code{, }\eqn{\theta_2u}\code{,
#' }\eqn{\theta_2n}\code{)}\cr specifies \eqn{\theta_2=4N_2*\mu}, the
#' population mutation rate per bp per generation for population 2. \cr
#' \tabular{l}{ ---> Where, \eqn{N_2} : The effective population size in
#' population 2. } ATTENTION: \code{\link{check_param}} requires that
#' \eqn{\theta_2>0} when specified and fixed in \code{param[3,1]}. \cr The
#' information on the parameter \eqn{\theta_2} can be specified with the
#' keyword \emph{\code{theta_2}} in the file \code{paramfile} (see
#' \code{\link{param_sim}} and \code{\link{param_est}}). % end theta2
#' \item\code{param[4,]~}\emph{\code{theta_A}}
#' \code{param[4,1]=}\eqn{\theta_A} or
#' \code{param[4,]=c(}\eqn{\theta_Al}\code{, }\eqn{\theta_Au}\code{,
#' }\eqn{\theta_An}\code{)}\cr specifies \eqn{\theta_A=4N_A*\mu}, the
#' ancestral population mutation rate per bp per generation. \tabular{l}{
#' ---> Where, \eqn{N_A} : The ancestral effective population size. }
#' ATTENTION: \code{\link{check_param}} requires that \eqn{\theta_A>0} when
#' specified and fixed in \code{param[4,1]}. \cr The information on the
#' parameter \eqn{\theta_A} can be specified with the keyword
#' \emph{\code{theta_A}} in the file \code{paramfile} (see
#' \code{\link{param_sim}} and \code{\link{param_est}}). % end thetaA
#' \item\code{param[5,]~}\emph{\code{T_split}} \code{param[5,1]=}\eqn{T_s} or
#' \code{param[5,]=c(}\eqn{T_sl}\code{, }\eqn{T_su}\code{,
#' }\eqn{T_sn}\code{)}\cr specifies \eqn{T_s}, the split time in unit of
#' \eqn{4N_1} generations between the TWO populations. \cr The information on
#' the parameter \eqn{T_s} can be specified with the keyword
#' \emph{\code{T_split}} in the file \code{paramfile} (see
#' \code{\link{param_sim}} and \code{\link{param_est}}). % end T_split
#' \item\code{param[6,]~}\emph{\code{T_change}}
#' \code{param[6,1]=}\eqn{\epsilon} or
#' \code{param[6,]=c(}\eqn{\epsilon_l}\code{, }\eqn{\epsilon_u}\code{,
#' }\eqn{\epsilon_n}\code{)}\cr specifies the ratio \eqn{\epsilon=T_c/T_s}.
#' \tabular{l}{ ---> Where, \eqn{T_c} : The time at which the rate of gene
#' flow changed between the two populations in unit of \eqn{4N_1}
#' generations. } The information on the parameter \eqn{\epsilon} can be
#' specified with the keyword \emph{\code{T_change}} in the file
#' \code{paramfile} (see \code{\link{param_sim}} and
#' \code{\link{param_est}}).\cr ATTENTION: \code{\link{check_param}} requires
#' that \eqn{0}\eqn{\le}\eqn{T_c}\eqn{\le}\eqn{T_s} when specified and fixed
#' in \code{param[5,1]} and \code{param[6,1]}). % end T_change
#' \item\code{param[7,]~}\emph{\code{M_change}} \code{param[7,1]=}\eqn{M_c}
#' or \code{param[7,]=c(}\eqn{M_cl}\code{, }\eqn{M_cu}\code{,
#' }\eqn{M_cn}\code{)}\cr specifies \eqn{M_c=4N_1*m_c}, the number of
#' migrants exchanged each generation by the TWO populations since the split
#' until the time of gene flow rate change (i.e., \eqn{T_c<t<T_s}).
#' \tabular{l}{ ---> Where, \eqn{m_c} : The generational fraction of migrant
#' individuals between \eqn{T_c<t<T_s}. } The information on the parameter
#' \eqn{M_c} can be specified with the keyword \emph{\code{M_change}} in the
#' file \code{paramfile} (see \code{\link{param_sim}} and
#' \code{\link{param_est}}). % end M_c % end Optional demo
#' \item\strong{OPTIONAL NUISANCE PARAMETER: }
#' \item\code{param[8,]~}\emph{\code{rho}} This specifies the parameter on
#' the intra-region recombination rate.\cr The region-specific recombination
#' rate per generation is calculated with \eqn{\rho_r=\beta*4N_1c*(Z-1)} for
#' the genomic region \eqn{r} \eqn{\in} \eqn{[1,R]} (as specified in
#' \code{param[9,1]}). \cr \code{param[8,]} can have the five following
#' forms: \tabular{lllll}{ \code{param[8,1]=}\tab \eqn{\rho} \tab \tab \tab :
#' This specifies that the genomic average population intra-region
#' recombination rate per bp per generation is fixed to the value
#' \eqn{\rho=4N_1*c}. \cr \tab \tab \tab \tab In this case,
#' \eqn{\rho_r=\rho*w*(Z-1)} for the genomic region \eqn{r} (here
#' \eqn{w=\beta}). \eqn{\rho_r} is fixed across estimation steps in the
#' function \code{\link{estimate_IMc}}. \cr \code{param[8,1]=} \tab \code{1}
#' \tab \tab \tab : This specifies that an estimate of the region-specific
#' population recombination rate per bp, \eqn{\rho_o=4N_1*c_o}, is KNOWN from
#' linkage disequilibrium analysis and specified with the parameter
#' \eqn{w=\beta*\rho_o} for each recombining region (in the file
#' \code{regfile}, see \code{\link{info_region}}).\cr \tab \tab \tab \tab In
#' this case, \eqn{\rho_r=w*(Z-1)} for the genomic region \eqn{r}.
#' \eqn{\rho_r} is fixed across estimation steps in the function
#' \code{\link{estimate_IMc}}. \cr \code{param[8,]=c(}\tab
#' \code{2}\code{,}\tab \eqn{\mu}\code{,}\tab \code{NA)}\tab : This specifies
#' that an estimate of the region-specific recombination rate per bp,
#' \eqn{c_o}, is KNOWN from pedigree analysis and specified with the
#' parameter \eqn{w=\beta*c_o} for each recombining region (in the file
#' \code{regfile}, see \code{\link{info_region}}). \cr \tab \tab \tab \tab
#' This also specifies \eqn{\mu}, an independent estimate of the genomic
#' generational mutation rate per bp.\cr \tab \tab \tab \tab In this case,
#' \eqn{\rho_r=w*(Z-1)\theta_1/\mu} for the genomic region \eqn{r}.
#' \eqn{\rho_r} varies across estimation steps if \eqn{\theta_1} is estimated
#' in the function \code{\link{estimate_IMc}}. \cr \code{param[8,]=c(}\tab
#' \code{-}\code{1}\code{,}\tab \eqn{1/}\eqn{\lambda}\code{,}\tab \code{NA)}
#' \tab : This specifies that the intra-region recombination rate is UNKNOWN
#' and the ratio of recombination over mutation rate for the genomic region
#' \eqn{r}, \eqn{\alpha=c_r/\mu}, is drawn from an exponential distribution
#' with mean \eqn{1/}\eqn{\lambda}. \cr \tab \tab \tab \tab In this case,
#' \eqn{\rho_r=w*\alpha*(Z-1)*\theta_1} for the genomic region \eqn{r}.
#' \eqn{\rho_r} varies across estimation steps if \eqn{\theta_1} is estimated
#' in the function \code{\link{estimate_IMc}}. \cr \code{param[8,]=c(}\tab
#' \code{-}\code{2}\code{,}\tab \eqn{\nu}\code{,}\tab \eqn{\sigma}\code{)}
#' \tab : This specifies that the intra-region recombination rate is UNKNOWN
#' and the ratio of recombination over mutation rate for the genomic region
#' \eqn{r}, \eqn{\alpha=c_r/\mu}, is drawn from a normal distribution with
#' mean \eqn{\nu} and standard deviation \eqn{\sigma}. \cr \tab \tab \tab
#' \tab In this case, \eqn{\rho_r=w*\alpha*(Z-1)*\theta_1} for the genomic
#' region \eqn{r}. \eqn{\rho_r} varies across estimation steps if
#' \eqn{\theta_1} is estimated in the function \code{\link{estimate_IMc}}. }
#' Where: \tabular{ll}{ \eqn{\rho=4N_1*c}\tab : The genomic average
#' population intra-region recombination rate per bp per generation. \cr
#' \eqn{\rho_r=\beta*(Z}\eqn{-}\eqn{1)*4N_1c}\tab : The region-specific
#' recombination rate per generation for the genomic region considered. \cr
#' \eqn{\rho_o=4N_1*c_o} \tab : The estimate of the region-specific
#' population recombination rate per bp per generation for the genomic region
#' considered from linkage disequilibrium analysis. \cr \eqn{c} \tab : The
#' genomic generational cross-over rate per bp. \cr \eqn{c_o} \tab : The
#' estimate of the region-specific cross-over rate per bp per generation for
#' the genomic region considered from pedigree analysis. \cr
#' \eqn{\alpha=c_r/\mu} \tab : Drawn from a prior distribution. \cr \eqn{c_r}
#' \tab : The generational region-specific cross-over rate per bp for the
#' genomic region \eqn{r}. \cr \eqn{w} \tab : The recombination scalar for
#' the genomic region considered specified in the file \code{regfile} (see
#' \code{\link{info_region}}).\cr \eqn{\beta} \tab : The ratio of the
#' region-specific population recombination rate per bp over
#' \eqn{\rho=4N_1*c} for the genomic region considered.\cr \tab
#' \eqn{\beta=}\code{"1"} (\code{"0.5"} in \emph{Drosophila}) for autosomal
#' region, \code{"0.5"} for X- and \code{"0"} for Y- and mtDNA-linked region.
#' \cr \eqn{Z} \tab : The size in bp of the genomic region considered. \cr
#' \tab \eqn{z_s} and \eqn{z_e} (such as \eqn{Z=z_e-z_s}) are specified in
#' the file \code{regfile} (see \code{\link{info_region}}). } The
#' information on the parameter \eqn{\rho} can be specified with the keyword
#' \emph{\code{rho}} in the file \code{paramfile} (see
#' \code{\link{param_sim}} and \code{\link{param_est}}). % end rho % end
#' nuisance \item\strong{OPTIONAL OTHER PARAMETERS: }
#' \item\code{param[9,1]~}\emph{\code{nregions}} \code{param[9,1]=}\eqn{R} is
#' the number of independent genomic regions considered.\cr \eqn{R} can be
#' specified with the keyword \emph{\code{nregions}} in the file
#' \code{paramfile} (see \code{\link{param_sim}} and
#' \code{\link{param_est}}). \cr ATTENTION: \code{\link{check_param}}
#' requires that \code{param[9,1]=}\eqn{R>0} be specified. \cr
#'
#' \item\strong{OPTIONAL PARAMETERS SPECIFIC TO \code{\link{estimate_IMc}}: }
#' \item\code{param[10,1]~}\emph{\code{howmany}} \code{param[10,1]=}\eqn{H}
#' specifies \eqn{H}, the number of data sets to simulate per set of
#' parameters (i.e., per grid point) to estimate the likelihood of the data
#' given the set of parameters of the extension of the
#' isolation-with-migration model.\cr By default,
#' \code{param[10,1]=}\eqn{1000}.\cr ATTENTION: \code{\link{check_param}}
#' requires that \code{param[10,1]=}\eqn{H>0} when specified. \cr The
#' information on \eqn{H} can be specified with the keyword
#' \emph{\code{howmany}} in the file \code{paramfile} (see
#' \code{\link{param_sim}} and \code{\link{param_est}}). \cr % end howmany
#' \item\code{param[11,1]~}\emph{\code{parallel}} \code{param[11,1]=}\eqn{J}
#' specifies \eqn{J}, the number of jobs to run in parallel to perform the
#' estimation of the posterior distribution of the parameters of the
#' extension of the isolation-with-migration model.\cr By default,
#' \code{param[11,1]=}\eqn{1}, i.e., no parallelization.\cr ATTENTION:
#' \code{\link{check_param}} requires that \code{param[11,1]=}\eqn{J>0} when
#' specified.\cr The information on \eqn{J} can be specified with the keyword
#' \emph{\code{parallel}} in the file \code{paramfile} (see
#' \code{\link{param_sim}} and \code{\link{param_est}}). \cr % end parallel %
#' end extra
#' @param paramfile The name of the file with the values and descriptions of
#' the prior distributions for the fixed and variable (or estimated)
#' parameters, respectively, required to estimate the parameters of
#' extensions of the Isolation-with-migration model with the function
#' \code{\link{estimate_IMc}}.\cr By default the file name is
#' \code{"estimation.par"}.
#' @param listparam The list of possible keywords/parameters in the required
#' order: \cr For function \code{\link{simulate_data}}:\cr
#' \code{c("}\emph{\code{theta_1}}\code{", "}\emph{\code{M_present}}\code{",
#' "}\emph{\code{theta_2}}\code{", "}\emph{\code{theta_A}}\code{",
#' "}\emph{\code{T_split}}\code{", "}\emph{\code{T_change}}\code{",
#' "}\emph{\code{M_change}}\code{", "}\emph{\code{rho}}\code{",
#' "}\emph{\code{nregions}}\code{")}.\cr For function
#' \code{\link{estimate_IMc}}:\cr \code{c("}\emph{\code{theta_1}}\code{",
#' "}\emph{\code{M_present}}\code{", "}\emph{\code{theta_2}}\code{",
#' "}\emph{\code{theta_A}}\code{", "}\emph{\code{T_split}}\code{",
#' "}\emph{\code{T_change}}\code{", "}\emph{\code{M_change}}\code{",
#' "}\emph{\code{rho}}\code{", "}\emph{\code{nregions}}\code{",
#' "}\emph{\code{howmany}}\code{", "}\emph{\code{parallel}}\code{")}.
#' @return The function \code{\link{check_param}} outputs error and warning
#' messages regarding the format/information in the matrix of values
#' \code{param} as well as the following data frame: \item{list("$ok")}{
#' \code{$ok} takes the value \code{"1"} if the format of the input file
#' \code{paramfile} for the function \code{\link{simulate_data}} (or
#' \code{\link{estimate_IMc}}) has the format/information required. \cr
#' Otherwise, \code{$ok} takes the value \code{"0"} to inform the function
#' \code{\link{simulate_data}} (or \code{\link{estimate_IMc}}) that it should
#' stop. \cr In this later, case,\code{\link{check_param}} outputs an error
#' message explaining what part of the format/information is incorrect. }
#' \item{list("$param")}{ The matrix \eqn{9} (or \eqn{11}) \eqn{\times}
#' \eqn{3} of the information for the parameters of the model that will be
#' simulated provided as input to the function \code{\link{check_param}}, but
#' updated to take into account parameters that will be ignored by the
#' function \code{\link{simulate_data}} (or \code{\link{estimate_IMc}}).\cr
#' When the matrix of values is updated, \code{\link{check_param}} outputs a
#' "WARNING" message.\cr } e.g. for the function \code{\link{estimate_IMc}}:
#' \code{[1] "PROBLEM: Error message on the parameter values and prior
#' distributions for the }\R\code{ function estimate_IMc."}\cr \code{$ok} \cr
#' \code{[1] 0}\cr \code{$param}\cr \tabular{llll}{ \tab \code{[,1]} \tab
#' \code{[,2]} \tab \code{[,3]} \cr \code{[1,]} \tab \code{0.0001} \tab
#' \code{1e-03} \tab \code{10}\cr \code{[2,]} \tab \code{0.0000} \tab
#' \code{1e+01} \tab \code{20}\cr \code{[3,]} \tab \code{ 1.5000} \tab
#' \code{1e-03} \tab \code{10}\cr \code{[4,]} \tab \code{0.0001} \tab
#' \code{1e-03} \tab \code{10}\cr \code{[5,]} \tab \code{0.0000} \tab
#' \code{2e+00} \tab \code{10}\cr \code{[6,]} \tab \code{ 0.0000} \tab
#' \code{1e+00} \tab \code{10}\cr \code{[7,]} \tab \code{0.0000} \tab
#' \code{2e+01} \tab \code{20}\cr \code{[8,]} \tab \code{0.0005} \tab
#' \code{NA} \tab \code{NA}\cr \code{[9,]} \tab \code{4.0000} \tab \code{NA}
#' \tab \code{NA}\cr \code{[10,]} \tab \code{NA} \tab \code{NA} \tab \code{
#' NA}\cr \code{[11,]} \tab \code{NA} \tab \code{NA} \tab \code{ NA} }
#' @note \itemATTENTION: It is the user's responsibility to mind the following
#' restrictions: -> \code{\link{check_param}} requires that
#' \code{param[9,1]=}\eqn{R>0} be specified. \cr -> \code{\link{check_param}}
#' requires that either the parameter value (i.e.
#' \code{param[1,1]=}\eqn{\theta_1>0}) or the prior distribution for
#' \eqn{\theta_1} (see above for the restrictions) be specified in
#' \code{param[1,]}. \cr -> \code{\link{check_param}} requires that
#' \eqn{M_p>0} when fixed or \eqn{0\le}\eqn{M_pl}\eqn{\le}\eqn{M_pu} and
#' \eqn{M_pn>0} when variable (or estimated) be specified in \code{param[2,]}
#' in case of an island model. \cr -> \code{\link{check_param}} requires that
#' \code{param[j,1]=}\eqn{\theta_i}\eqn{\ge}\eqn{0} for any \eqn{i} \eqn{\in}
#' \eqn{{2,A}} and \eqn{j} \eqn{\in} \eqn{{3,4}}. \cr ->
#' \code{\link{check_param}} requires that
#' \eqn{0}\eqn{\le}\eqn{T_c}\eqn{\le}\eqn{T_s} when specified and fixed in
#' \code{param[5,1]} and \code{param[6,1]}.\cr -> \code{\link{check_param}}
#' requires that \code{param[10,1]=}\eqn{H>0} when specified. \cr ->
#' \code{\link{check_param}} requires that \code{param[11,1]=}\eqn{J>0} when
#' specified.\cr
#' @author Celine Becquet - \email{celine.becquet@@gmail.com}.
#' @seealso The functions \code{\link{simulate_data}} and
#' \code{\link{estimate_IMc}} calls the function \code{\link{check_param}},
#' which in turn calls the function \code{\link{error_message}}.\cr The
#' function \code{\link{check_param}} checks the format of the files like
#' \code{\link{param_sim}} and \code{\link{param_est}}.\cr Other functions to
#' check the format of input files: \cr \code{\link{get_rho}} and
#' \code{\link{check_reg}}. \cr Lists of definitions of the symbols and
#' parameters mentioned in this file are found in
#' \code{\link{Rmspack-package}}.
#' @keywords error print
#' @examples
#'
#' ### Write the file of information on the parameters in the local directory.
#' data(simulation_files) # download the data
#'
#' write.table(file="est", x=param_est, row.name=FALSE, col.names=FALSE, quote=FALSE)
#' # Creates the file "est" containing (with comments):
#' read.csv("est",header =FALSE,sep="", comment="#")
#'
#' ### Create the inputs for the function.
#' listparam=c("theta_1", "M_present", "theta_2", "theta_A", "T_split", "T_change", "M_change", "rho", "nregions", "howmany", "parallel")
#' param=scan("est", comment.char="#", what=c("c", "n", "n", "n"), allowEscapes=FALSE, fill=TRUE, sep="\n", strip.white=TRUE, quiet=TRUE)
#' vparam=order_param(param, listparam)
#' vparam
#'
#' ## Case with no errors.
#' check_param(param=vparam, paramfile="est", listparam=listparam)
#'
#' ## Case with warning
#' vparam[3, 1]=.001 # theta_2l==theta_2u
#' check_param(param=vparam, paramfile="est", listparam=listparam)
#'
#'
#' ## Case with error in defining the prior of theta_2.
#' vparam[3, 1]=.002 # theta_2l>theta_2u
#' check_param(param=vparam, paramfile="est", listparam=listparam)
#'
#' # Clean up the directory.
#' unlink("est")
#'
NULL
#' Function to check the format of the files with the information on the
#' genomic regions and multiple loci.
#' The function \code{\link{check_reg}} checks that the information on the
#' independent genomic regions described in the file \code{regfile} and the
#' information on the loci for the multi-locus genomic regions in the file
#' \code{locifile} are in the correct format to simulate data with the function
#' \code{\link{simulate_data}} or estimate models of Isolation-with-migration
#' and possible extensions with the function \code{\link{estimate_IMc}}. \cr
#' This function is called by the functions \code{\link{simulate_data}} and
#' \code{\link{estimate_IMc}}.
#'
#'
#' @param nregions The number of independent genomic regions considered,
#' \eqn{R}. \cr \eqn{R} is specified with the keyword \emph{\code{nregions}}
#' in the file \code{paramfile} (see \code{\link{param_sim}} and
#' \code{\link{param_est}}). \cr
#' @param info_region The matrix \eqn{R} \eqn{\times} \eqn{10} of information
#' on the \eqn{R} independent genomic regions. \cr Each genomic region is
#' described by ten values: \tabular{ll}{ \eqn{r} \tab : The genomic region
#' number, \eqn{r} \eqn{\in} \eqn{[1,R]}. \cr \tab \eqn{R} is the number of
#' independent genomic regions specified with the argument
#' \emph{\code{nregions}}. \cr \tab ATTENTION: The independent genomic region
#' numbers, \eqn{r}, need to be in order, i.e., region 1 starts on line 2,
#' region 2 on line 3 \dots{} region \eqn{R} on line \eqn{R+1}. \cr
#' \emph{\code{Region name}} \tab : The name of the genomic region \eqn{r},
#' which should contain at least ONE non-numerical character. \cr \eqn{x}
#' \tab : The inheritance scalar for the genomic region \eqn{r} (i.e.,
#' \code{"1"} for autosomal region, \code{"0.75"} for X- and \code{"0.5"} for
#' Y- and mtDNA-linked region). \cr \eqn{v} \tab : The mutation rate scalar
#' for the genomic region \eqn{r} (which can be estimated e.g., from
#' divergence data). \cr \eqn{w} \tab : The recombination scalar for the
#' genomic region \eqn{r}.\cr \tab -- Usually \eqn{w=\beta}, the ratio of the
#' locus-specific population recombination rate per bp over
#' \eqn{\rho=4N_1*c}. \cr \tab -- If an estimate of the region-specific
#' population recombination rate per bp is available for each region from
#' linkage disequilibrium analysis, \eqn{\rho_o=4N_1*c_o}, set
#' \eqn{w=\beta*\rho_o} to incorporate this knowledge in the simulation or
#' estimation (with \code{"}\emph{\code{rho}} \code{1"} in the file
#' \code{paramfile}, see \code{\link{param_sim}} and
#' \code{\link{param_est}}).\cr \tab In this case, \eqn{w} is the scaled
#' sex-averaged region-specific population recombination rate per bp, i.e.,
#' for an X-linked locus \eqn{c_o} is the female recombination rate and
#' \eqn{\beta=0.5} so that \eqn{\beta*\rho_o=2N_1*c_o}. \cr \tab -- If an
#' estimate of the region-specific recombination rate per bp is available for
#' each region from pedigree analysis, \eqn{c_o}, set \eqn{w=\beta*c_o} to
#' incorporate this knowledge in the simulation or estimation (with
#' \code{"}\emph{\code{rho}} \code{2"} in the file \code{paramfile}, see
#' \code{\link{param_sim}} and \code{\link{param_est}}). \cr \tab In this
#' case, \eqn{w} is the scaled sex-averaged region-specific recombination
#' rate per bp, i.e., for an X-linked locus, \eqn{c_o} is the estimated
#' female recombination rate so the scaled sex-averaged recombination rate is
#' \eqn{\beta*c_o=0.5c}. \cr \eqn{n_1} \tab : The sample size from population
#' 1 for the genomic region \eqn{r}. \cr \eqn{n_2} \tab : The sample size
#' from population 2 for the genomic region \eqn{r}. \cr \eqn{z_s} \tab : The
#' start position of the genomic region \eqn{r} in bp. \cr \eqn{z_e} \tab :
#' The end position of the genomic region \eqn{r} in bp. \cr \eqn{Y} \tab :
#' The number of loci spanning the genomic region \eqn{r}. \cr }Where:
#' \tabular{ll}{ \eqn{\rho=4N_1*c}\tab : The genomic average population
#' intra-region recombination rate per bp per generation. \cr
#' \eqn{\rho_o=4N_1*c_o} \tab : The estimate of the region-specific
#' population recombination rate per bp per generation for the genomic region
#' considered from linkage disequilibrium analysis. \cr \eqn{N_1} \tab : The
#' effective population size in population 1 (the reference population). \cr
#' \eqn{c} \tab : The genomic generational cross-over rate per bp. \cr
#' \eqn{c_o} \tab : The estimate of the region-specific cross-over rate per
#' bp per generation for the genomic region considered from pedigree
#' analysis.\cr \eqn{\beta} \tab : The ratio of the region-specific
#' population recombination rate per bp over \eqn{\rho=4N_1*c} for the
#' genomic region considered.\cr \tab \eqn{\beta=}\code{"1"} (\code{"0.5"} in
#' \emph{Drosophila}) for autosomal region, \code{"0.5"} for X- and
#' \code{"0"} for Y- and mtDNA-linked region. } See
#' \code{\link{info_region}} for further details.
#' @param info_loci The matrix \eqn{\Sigma{Y_r}} \eqn{\times} \eqn{6} of
#' information on the loci for the multi-locus genomic regions (as specified
#' with \eqn{Y_r>1} for the multi-locus region \eqn{r} in
#' \code{info_region[}\eqn{r}\code{]$V10}). \cr This matrix is empty unless
#' \eqn{\Sigma{Y_r}>0}. \cr Each locus is described by six values:
#' \tabular{ll}{ \eqn{r} \tab : The multi-locus genomic region number,
#' \eqn{r} \eqn{\in} \eqn{[1,R]}, that this locus is part of. \cr \tab
#' ATTENTION: The multi-locus genomic region numbers, \eqn{r}, need to be in
#' order, i.e., the information for the loci for the first multi-locus
#' genomic region (\eqn{a}) are from line 2 to \eqn{Y_a+1}, for the second
#' multi-locus genomic region (\eqn{b}), the loci information starts on line
#' \eqn{Y_a+2} to \eqn{Y_a+Y_b+1} \dots{} \eqn{a} and \eqn{b} \eqn{\in}
#' \eqn{[1,R]}. \cr \eqn{y} \tab : The locus number, \eqn{y} \eqn{\in}
#' \eqn{[1,Y]}. \cr \tab \eqn{Y} is the total number of loci spanning the
#' multi-locus genomic region \eqn{r} as specified in the matrix
#' \code{info_region}. \cr \tab ATTENTION: The loci numbers, \eqn{y}, need to
#' be in order, i.e., information for locus 1 of the first multi-locus
#' genomic region \eqn{r} is on line 2, locus 2 on line 3 \dots{} locus
#' \eqn{Y} on line \eqn{Y+1}. \cr \eqn{n_1y} \tab : The sample size from
#' population 1 for the locus \eqn{y}.\cr \tab \eqn{n_1y}\eqn{\le}\eqn{n_1},
#' where \eqn{n_1} for the multi-locus genomic region \eqn{r} is specified in
#' the matrix \code{info_region}. \cr \eqn{n_2y} \tab : The sample size from
#' population 2 for the locus \eqn{y}.\cr \tab \eqn{n_2y}\eqn{\le}\eqn{n_2},
#' where \eqn{n_2} for the multi-locus genomic region \eqn{r} is specified in
#' the matrix \code{info_region}. \cr \eqn{z_sy} \tab : The start position of
#' the locus \eqn{y} in bp.\cr \tab \eqn{z_s1}\eqn{\ge}\eqn{z_s}, where
#' \eqn{z_s} is the start position of the multi-locus genomic region \eqn{r}
#' specified in the matrix \code{info_region}. \cr \eqn{z_ey} \tab : The end
#' position of the locus \eqn{y} in bp.\cr \tab \eqn{z_eY}\eqn{\le}\eqn{z_e}
#' where \eqn{z_e} is the end position of the multi-locus genomic region
#' \eqn{r} specified in the matrix \code{info_region}. } See
#' \code{\link{info_loci}} for further details.
#' @param locifile The name of the file with the information on the loci for
#' the multi-locus genomic regions (as specified with \eqn{Y>1} in the file
#' \code{regfile}). \cr By default the file name is \code{"info.loc"}.
#' @return The function \code{\link{check_reg}} outputs \code{"1"} if the
#' format of the matrices \code{info_region} (and \code{info_loci}) (from the
#' input file \code{regfile} (and \code{locifile})) for the calling function
#' (either \code{\link{simulate_data}} or \code{\link{estimate_IMc}}) have
#' the format/information required. \cr Otherwise, \code{\link{check_reg}}
#' informs the calling function that it should stop and outputs an error
#' message explaining what part of the format/information is incorrect: \cr
#' \tabular{ll}{ e.g.:\tab \code{[1] "Error message on independent
#' (potentially multi-locus) genomic regions."}\cr \tab \code{[1] 0} }
#' @note \itemATTENTION: It is the user's responsibility to mind the following
#' restrictions: \itemIn the matrix \code{info_region}: -> The independent
#' genomic region numbers, \eqn{r}, need to be in order, i.e., region 1
#' starts on line 2, region 2 on line 3 \dots{} region \eqn{R} on line
#' \eqn{R+1}. \cr -> Reasonable values need to be specified for each genomic
#' region.
#'
#' \itemIn the matrix \code{info_loci}: -> The loci numbers, \eqn{y}, need to
#' be in order, i.e., information for locus 1 of the first multi-locus
#' genomic region is on line 2, locus 2 on line 3 \dots{} locus \eqn{Y} on
#' line \eqn{Y+1}. \cr -> The multi-locus genomic region numbers, \eqn{r},
#' need to be in order, i.e., the information for the loci for the first
#' multi-locus genomic region (\eqn{a}) are from line 2 to \eqn{Y_a+1}, for
#' the second multi-locus genomic region (\eqn{b}), the loci information
#' starts on line \eqn{Y_a+2} to \eqn{Y_a+Y_b+1} \dots{} \eqn{a} and \eqn{b}
#' are \eqn{\in} \eqn{[1,R]}. \cr -> Reasonable values need to be specified
#' for each locus.
#' @author Celine Becquet - \email{celine.becquet@@gmail.com}.
#' @seealso The functions \code{\link{simulate_data}} and
#' \code{\link{estimate_IMc}} call the function \code{\link{check_reg}},
#' which in turn calls the function \code{\link{error_message}}. \cr Other
#' functions to check the format of input files: \cr \code{\link{get_rho}}
#' and \code{\link{check_param}}. \cr Lists of definitions of the symbols and
#' parameters mentioned in this file are found in
#' \code{\link{Rmspack-package}}.
#' @keywords error print
#' @examples
#'
#' ### Write the files of information in the local directory.
#' data(simulation_files) # download the data
#'
#' write.table(file="regions", x=info_region, row.name=FALSE, col.names=FALSE, quote=FALSE)
#' # Creates the file "regions" containing:
#' read.csv("regions",header =FALSE,sep="", comment="#")
#'
#' write.table(file="loci", x=info_loci, row.name=FALSE, col.names=FALSE, quote=FALSE)
#' # Creates the file "loci" containing:
#' read.csv("loci",header =FALSE,sep="", comment="#")
#'
#' ### Create the inputs for the function.
#' reg=read.table("regions", skip=1, fill=TRUE)
#' loci=read.table("loci", skip=1, fill=TRUE)
#'
#' ## Case with no errors.
#' check_reg(nregions=4, info_region=reg, info_loci=loci, locifile="loci")
#'
#' ## Case with error in the # of regions
#' reg[1, 1]=2
#' check_reg(nregions=4, info_region=reg, info_loci=loci, locifile="loci")
#'
#' # Clean up the directory.
#' unlink(c("regions", "loci"))
#'
NULL
|
/2010_R_C_EstL/Rpackage_EstL/Rmspack/R/EstLprep-package.R
|
no_license
|
celinesf/personal
|
R
| false | false | 34,567 |
r
|
|
require(quantmod)
start <- Sys.Date()-30
end <- Sys.Date()-2
code <- "601318"
tckr_601318= paste(code,"SS",sep=".")
data_601318 <- getSymbols(tckr_601318, from = start, to = end, auto.assign = FALSE)
tckr <- c("GRPN")
data <- getSymbols(tckr, from = "2015-01-01", to = "2015-05-01", auto.assign = FALSE)
macd <- MACD( data[,"Close"], 12, 26, 9, maType="EMA" )
macd2 <- MACD( data[,"Close"], 12, 26, 9, maType=list(list(SMA), list(EMA, wilder=TRUE), list(SMA)) )
View(macd)
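## (Added sketch, not part of the original file) A common follow-up: a simple
## crossover state, +1 while the MACD line is above its signal line and -1
## while below. "macd" and "signal" are the column names returned by TTR::MACD().
cross <- sign(macd[, "macd"] - macd[, "signal"])
## Shift by one bar so the state at time t only uses information through t-1.
cross <- lag(cross, 1)
tail(merge(macd, cross))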
|
/signals/macd_learning.R
|
no_license
|
tedddy/Learn_R
|
R
| false | false | 487 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_similarity.R
\name{permute_similarity}
\alias{permute_similarity}
\title{Compute a p-value for similarity using permutation}
\usage{
permute_similarity(
expr_mat,
ref_mat,
cluster_ids,
n_perm,
per_cell = FALSE,
compute_method,
rm0 = FALSE,
...
)
}
\arguments{
\item{expr_mat}{single-cell expression matrix}
\item{ref_mat}{reference expression matrix}
\item{cluster_ids}{clustering info of the single-cell data; assumes that
genes have ALREADY BEEN filtered}
\item{n_perm}{number of permutations}
\item{per_cell}{run per cell?}
\item{compute_method}{method(s) for computing similarity scores}
\item{rm0}{consider 0 as missing data, recommended for per_cell}
\item{...}{additional parameters}
}
\value{
matrix of numeric values
}
\description{
Permute cluster labels to calculate empirical p-value
}
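% (Added sketch, not from the package authors.) A minimal, hypothetical call
% showing the expected shapes: genes in the rows of both matrices and one
% cluster label per column of expr_mat; the toy data and method name are assumptions.
\examples{
\dontrun{
expr <- matrix(rpois(200, 5), nrow = 20,
               dimnames = list(paste0("g", 1:20), paste0("cell", 1:10)))
ref <- matrix(rpois(60, 5), nrow = 20,
              dimnames = list(paste0("g", 1:20), paste0("type", 1:3)))
permute_similarity(expr, ref,
                   cluster_ids = rep(c("A", "B"), each = 5),
                   n_perm = 10, compute_method = "spearman")
}
}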
|
/man/permute_similarity.Rd
|
permissive
|
standardgalactic/clustifyr
|
R
| false | true | 901 |
rd
|
|
library("QFFX")
testFXForwardRateTriRun <- function()
{
startDate <- "2007-01-03"
endDate <- "2007-01-12"
quoteside = "mid"
cross = "usd/jpy"
tsdb <- TimeSeriesDB()
fxhist <- FXForwardRateTriRun(FXCurr=FXCurr$setByCross(cross),tenorList = c("spot"),
startDate=startDate,endDate=endDate,tsdb=tsdb,writeToTSDB=FALSE, overUnder = "over",rebalPeriod = "1d",BackPopulate=TRUE)
checkDate <- as.POSIXct("2007-01-12")
checkEquals(round(as.numeric(fxhist[checkDate,1]),5),10.17641)
}
|
/R/src/QFFX/tests/testFXForwardRateTriRun.R
|
no_license
|
rsheftel/ratel
|
R
| false | false | 519 |
r
|
library("QFFX")
testFXForwardRateTriRun <- function()
{
startDate <- "2007-01-03"
endDate <- "2007-01-12"
quoteside = "mid"
cross = "usd/jpy"
tsdb <- TimeSeriesDB()
fxhist <- FXForwardRateTriRun(FXCurr=FXCurr$setByCross(cross),tenorList = c("spot"),
startDate=startDate,endDate=endDate,tsdb=tsdb,writeToTSDB=FALSE, overUnder = "over",rebalPeriod = "1d",BackPopulate=TRUE)
checkDate <- as.POSIXct("2007-01-12")
checkEquals(round(as.numeric(fxhist[checkDate,1]),5),10.17641)
}
|
################################
## Single proportion
## Testing the probability = 0.5 with a two-sided alternative
## We have observed 518 out of 1154
## Without continuity corrections
prop.test(x=518, n=1154, p = 0.5, correct = FALSE)
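## (Added sketch, not part of the original slides) The same test "by hand":
## under H0 the statistic z = (phat - 0.5)/sqrt(0.5*0.5/n) is ~N(0,1), and
## z^2 reproduces the X-squared reported by prop.test() when correct = FALSE.
phat <- 518/1154
z <- (phat - 0.5)/sqrt(0.5*(1 - 0.5)/1154)
c(z = z, z.squared = z^2, p.value = 2*pnorm(-abs(z)))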
################################
## Pill study: two proportions
## Reading the table into R
pill.study <- matrix(c(23, 34, 35, 132), ncol = 2)
rownames(pill.study) <- c("Blood Clot", "No Clot")
colnames(pill.study) <- c("Pill", "No pill")
## Testing that the probabilities for the two groups are equal
prop.test(t(pill.study), correct = FALSE)
## Or simply directly by
prop.test(x=c(23,35), n=c(57,167), correct = FALSE)
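## (Added sketch, not part of the original slides) The two-sample test "by
## hand" with the pooled proportion; again z^2 equals the X-squared from
## prop.test(..., correct = FALSE).
p1 <- 23/57; p2 <- 35/167; pp <- (23 + 35)/(57 + 167)
z <- (p1 - p2)/sqrt(pp*(1 - pp)*(1/57 + 1/167))
c(z = z, z.squared = z^2, p.value = 2*pnorm(-abs(z)))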
################################
## Pill study: two proportions, chi-square test
## Chi2 test for testing that the probabilities for the two groups are equal
chisq.test(pill.study, correct = FALSE)
## If we want the expected numbers save the test in an object
chi <- chisq.test(pill.study, correct = FALSE)
## The expected values
chi$expected
################################
## Poll study: contingency table, chi-square test
## Reading the table into R
poll <-matrix(c(79, 91, 93, 84, 66, 60, 37, 43, 47), ncol = 3, byrow = TRUE)
colnames(poll) <- c("4 weeks", "2 weeks", "1 week")
rownames(poll) <- c("Cand1", "Cand2", "Undecided")
## Column percentages
colpercent <- prop.table(poll, 2)
colpercent
# Plotting percentages
par(mar=c(5,4,4.1,2)+0.1)
barplot(t(colpercent), beside = TRUE, col = 2:4, las = 1,
ylab = "Percent each week", xlab = "Candidate",
main = "Distribution of Votes")
legend("topright", legend = colnames(poll), fill = 2:4, cex = 0.5)
par(mar=c(5,4,4,2)+0.1)
################################
## Testing same distribution in the three populations
chi <- chisq.test(poll, correct = FALSE)
chi
## Expected values
chi$expected
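## (Added sketch, not part of the original slides) The expected counts can
## also be computed directly: row total times column total divided by the
## grand total, which is what chisq.test() uses internally.
outer(rowSums(poll), colSums(poll))/sum(poll)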
|
/introstat/slides02323/week10.R
|
no_license
|
Collinnn/Aflevering1
|
R
| false | false | 1,836 |
r
|
|
\name{focus<-}
\alias{focus<-}
\alias{focus<-.default}
\title{Set focus onto object.}
\usage{
focus(x) <- value
\method{focus}{default}(x) <- value
}
\arguments{
\item{x}{object}
\item{value}{logical. Set focus state.}
}
\description{
For some widgets, this sets user focus (e.g., a \code{gedit} widget gets
focus for typing).
This is the basic S3 replacement method for \code{focus<-}.
}
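% (Added sketch, not from the package authors.) A hypothetical snippet showing
% the calling pattern; gwindow()/gedit() are the usual gWidgets constructors
% and are assumed here only for illustration.
\examples{
\dontrun{
w <- gwindow("focus demo")
e <- gedit("", container = w)
focus(e) <- TRUE  # give the edit box keyboard focus
}
}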
|
/man/focus.Rd
|
no_license
|
kecoli/gWidgetsWWW2
|
R
| false | false | 356 |
rd
|
|
d <- read.table("/Users/charlesbecker/Downloads/household_power_consumption.txt", sep = ";", header = T, na.strings = "?")
d$dt <- paste(d$Date,d$Time)
d$t <- strptime(d$dt, format = "%d/%m/%Y %H:%M:%S")
dat <- subset(d, d$Date == "1/2/2007" | d$Date == "2/2/2007")
par(mfrow = c(1,1))
plot(dat$t, dat$Sub_metering_1, type = 'l',
xlab = "",
ylab = "Energy Sub Metering")
lines(dat$t, dat$Sub_metering_2, col = "red")
lines(dat$t, dat$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col = c("black","red","blue"), lty = 1)
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
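## (Added sketch, not part of the original script) The same figure can be
## written straight to a file device instead of copying the screen plot;
## "plot3_direct.png" is a made-up name so the original output is untouched.
png(file = "plot3_direct.png", height = 480, width = 480)
plot(dat$t, dat$Sub_metering_1, type = 'l', xlab = "",
     ylab = "Energy Sub Metering")
lines(dat$t, dat$Sub_metering_2, col = "red")
lines(dat$t, dat$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col = c("black","red","blue"), lty = 1)
dev.off()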
|
/Plot3.R
|
no_license
|
charlie-becker/ExData_Plotting1
|
R
| false | false | 678 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tmp.R
\name{kmsvm}
\alias{kmsvm}
\title{K means clustering based SVM}
\usage{
kmsvm(x, y, centers)
}
\description{
K means clustering based SVM
}
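% (Added sketch, not from the package author.) The page does not document the
% arguments, so the call below is only a guess at the intended interface:
% x = feature matrix, y = class labels, centers = number of k-means clusters.
\examples{
\dontrun{
kmsvm(x = iris[, 1:4], y = iris$Species, centers = 3)
}
}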
|
/man/kmsvm.Rd
|
no_license
|
kevin05jan/iop
|
R
| false | true | 235 |
rd
|
|
/SWPS_Intro_To_R/01_basic_r/basic_code.R
|
no_license
|
MikolajSedek/RCode
|
R
| false | false | 3,869 |
r
| ||
#!/usr/bin/Rscript --slave --no-restore --no-init-file
#
# Renders the plot of the historical series of Type I error probabilities for
# the goodness-of-fit tests of the frequency distributions of the numbers drawn
# in the Mega-Sena contests, creating and, when necessary, updating the table of
# test statistics and corresponding probabilities for each contest.
#
library(RSQLite)
con <- dbConnect(SQLite(), "megasena.sqlite")
# check whether the db already contains the "fit" table
if (dbExistsTable(con, "fit")) {
  # fetch the number of records in the table of test results
nr=dbGetQuery(con, "SELECT COUNT(*) FROM fit")[1,1]
} else {
cat('\n> Criação e preenchimento da tabela "fit" em andamento.\n')
query <- "-- tabela dos testes de aderência dos números nos concursos
CREATE TABLE IF NOT EXISTS fit (
concurso INTEGER UNIQUE,
estatistica DOUBLE,
pvalue DOUBLE CHECK (pvalue >= 0 AND pvalue <= 1),
FOREIGN KEY (concurso) REFERENCES concursos(concurso)
)"
rs <- dbSendStatement(con, query)
dbClearResult(rs)
nr=0
}
# fetch the number of records in the contests table
nrecs=dbGetQuery(con, "SELECT COUNT(*) AS NRECS FROM concursos")[1,1]
# update the goodness-of-fit table if its number of records
# is smaller than the number of records in the contests table
if (nr < nrecs) {
  # report the operation in progress
cat("\n> Inclusão de", nrecs-nr, 'registro(s) à tabela "fit" iniciada.')
  # enable the constraint that blocks insertion of records that
  # do not match any record in the referenced table
rs <- dbSendStatement(con, "PRAGMA FOREIGN_KEYS = ON")
dbClearResult(rs)
  # fetch all numbers drawn in the historical series of contests
mega <- dbGetQuery(con, "SELECT concurso, dezena FROM dezenas_sorteadas")
  # update according to the number of records to insert
if (nrecs-nr == 1) {
teste <- chisq.test(tabulate(mega$dezena, nbins=60), correct=F)
    # store the results of the single test
query=sprintf("INSERT INTO fit SELECT %d, %f, %f", nrecs, as.double(teste$statistic), teste$p.value)
rs <- dbSendStatement(con, query)
} else {
# "prepared statement" para inserção de registro na tabela fit
query="INSERT INTO fit (concurso, estatistica, pvalue) VALUES ($concurso, $statistic, $pvalue)"
rs <- dbSendStatement(con, query)
if (nr == 0) {
frequencias <- vector("integer", length=60)
} else {
frequencias <- tabulate(mega$dezena[mega$concurso <= nr], nbins=60)
}
    # loop to insert records into the "fit" table
for (concurso in (nr+1):nrecs) {
numeros <- mega$dezena[mega$concurso == concurso]
frequencias[numeros] <- frequencias[numeros] + 1
      # run the test with the data tabulated up to "concurso"
teste <- chisq.test(frequencias, correct=(concurso < 1000))
      # store the results of the test
parameters <- list("concurso"=concurso, "statistic"=as.double(teste$statistic), "pvalue"=teste$p.value)
dbBind(rs, parameters)
}
}
dbClearResult(rs)
cat(".finalizada.\n\n")
}
# fetch contest numbers, their goodness-of-fit test probabilities
# and the draw dates
query='SELECT concurso, pvalue, data_sorteio FROM fit NATURAL JOIN concursos WHERE concurso >= 1'
mega <- dbGetQuery(con, query)
dbDisconnect(con)
nrecs=length(mega$concurso)
# set up a file as the plotting device, with the same
# size as HD1080 video frames
png(filename="img/fit.png", width=1920, height=1080, pointsize=28, family="Quicksand")
par(
mar=c(3, 4, 4, 1), font=2, bg="white",
cex.main=1.2, font.main=2, col.main="steelblue",
cex.lab=.9, font.lab=2, col.lab="steelblue",
cex.axis=.8, font.axis=2, col.axis="gray40"
)
# render the series of goodness-of-fit test probabilities
plot(
mega$concurso,
mega$pvalue,
xlim=c(mega$concurso[1], mega$concurso[nrecs]),
ylim=c(0, 1),
ylab="", # evita renderização de "dummy" label
xlab="",
type="p", # "nebula" de pontos
pch=1, # símbolo dos pontos == circulo
col="gold", # cor de renderização dos pontos
axes=FALSE # inibe renderização dos eixos e do frame
)
title(main="Série do Erro Tipo I sob H: X~U\u276A1, 60\u276B", line=2)
title(xlab="concursos", line=1.375)
title(ylab="probabilidade", line=2.5)
# axis of the contest numbers
z <- seq((mega$concurso[1] %/% 200 + 1)*200, mega$concurso[nrecs], 200)
axis(1, at=c(mega$concurso[1], z), tck=-0.015, mgp=c(0, .2, 0))
rug(z[z-100>mega$concurso[1]]-100, side=1, col="gray40", ticksize=-0.01, lwd=2)
# axis of the probabilities
z <- seq(from=.1, to=1, by=.2)
axis(2, at=c(0, z+0.1), las=1, tck=-0.015, mgp=c(0, .75, 0))
rug(z, side=2, col="gray40", ticksize=-0.01, lwd=2)
# reference lines for probability values
abline(h=c(0, z, z+0.1), lty="dotted", lwd=.8, col="gray50")
# text and line for the confidence level of the tests
abline(h=0.05, lty="dashed", lwd=1.125, col="red")
text(par("usr")[2], .05, "α = 5%", adj=c(1, -0.5), cex=.67, col="red")
# conecta os pontos da "nebula" para caracterização a priori
lines(mega$concurso, mega$pvalue, lty="solid", lwd=1, col="orangered")
# selection of the first contest of each year in the series
primeiros <- mega[!duplicated(substr(mega$data_sorteio, 0, 4)),]
# axis of the years of the first contests -- only the labels are visible
axis(
3, at=primeiros$concurso,
labels=substr(primeiros$data_sorteio, 0, 4),
  mgp=c(0, 0, 0),    # positions it below the default
  col='transparent', # "invisible" scale
font.axis=4, col.axis="mediumpurple"
)
# vertical lines for the years of the first contests
abline(v=primeiros$concurso, lty="dotted", lwd=1, col="gray50")
# append the most recent contest to the end of "primeiros", ensuring uniqueness
if (tail(primeiros$concurso, 1) != mega$concurso[nrecs]) primeiros <- rbind(primeiros, mega[nrecs,])
# keep only the variables relevant for the fits
primeiros <- subset(primeiros, select=c("concurso", "pvalue"))
# highlight the first contests of each year and the most recent one
points(primeiros, col="purple", pch=20)
# connect the points of the first contests of each year and of the most recent one
lines(primeiros, col="purple")
MODEL_NAME <- c("linear", "poly 2", "poly 3", "poly 4")
CORES <- c("darkgreen", "darkcyan", "navy", "darkred")
par(lwd=1.2)
# fit the least-squares line to the observations
fit <- lm(pvalue ~ concurso, data=primeiros)
lines(primeiros$concurso, predict(fit, primeiros), col=CORES[1])
# fit a degree-2 polynomial
fit2 <- lm(pvalue ~ poly(concurso, 2, raw=T), data=primeiros)
lines(primeiros$concurso, predict(fit2, primeiros), col=CORES[2])
# fit a degree-3 polynomial
fit3 <- lm(pvalue ~ poly(concurso, 3, raw=T), data=primeiros)
lines(primeiros$concurso, predict(fit3, primeiros), col=CORES[3])
# fit a degree-4 polynomial
fit4 <- lm(pvalue ~ poly(concurso, 4, raw=T), data=primeiros)
lines(primeiros$concurso, predict(fit4, primeiros), col=CORES[4])
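## (Added sketch, not part of the original script) Before reading too much into
## any one curve, the nested fits can be compared directly: a lower AIC or a
## significant anova() step is what would justify the extra polynomial degree.
print(sapply(list(linear=fit, poly2=fit2, poly3=fit3, poly4=fit4), AIC))
print(anova(fit, fit2, fit3, fit4))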
# pre-render the legend to obtain its coordinates and dimensions
leg <- legend(
"topright", inset=c(0, 0.05), legend=MODEL_NAME, lwd=c(par("lwd")),
cex=.75, seg.len=c(1), x.intersp=.5, lty=c(par("lty")), plot=FALSE
)
# the actual rendering, using the coordinates and dimensions obtained
legend(
x=c(leg$rect$left-15, leg$rect$left+leg$rect$w),
y=c(leg$rect$top, leg$rect$top-leg$rect$h),
legend=MODEL_NAME, col=CORES, box.col="gray", box.lwd=1,
seg.len=c(1), x.intersp=.5, lty=c(par("lty")), lwd=c(par("lwd")),
cex=.75, text.col="gray50"
)
mtext(
"Gerado via GNU R-cran.", side=1, line=2, adj=1.02, cex=.7, font=4, col="gray"
)
dev.off() # close the graphics device
|
/R/fit.R
|
no_license
|
dekassegui/db-megasena
|
R
| false | false | 7,728 |
r
|
#!/usr/bin/Rscript --slave --no-restore --no-init-file
#
# Renderiza o gráfico da série histórica das probabilidades do Erro Tipo I nos
# testes de aderência das distribuições de frequências dos números sorteadas nos
# concursos da Mega-Sena, criando e se necessário, atualizando a tabela de
# valores da estatística e respectivas probabilidades a cada concurso.
#
library(RSQLite)
con <- dbConnect(SQLite(), "megasena.sqlite")
# verifica se o db contém a tabela "fit"
if (dbExistsTable(con, "fit")) {
# requisita o número de registros na tabela de resultados dos testes
nr=dbGetQuery(con, "SELECT COUNT(*) FROM fit")[1,1]
} else {
cat('\n> Criação e preenchimento da tabela "fit" em andamento.\n')
query <- "-- tabela dos testes de aderência dos números nos concursos
CREATE TABLE IF NOT EXISTS fit (
concurso INTEGER UNIQUE,
estatistica DOUBLE,
pvalue DOUBLE CHECK (pvalue >= 0 AND pvalue <= 1),
FOREIGN KEY (concurso) REFERENCES concursos(concurso)
)"
rs <- dbSendStatement(con, query)
dbClearResult(rs)
nr=0
}
# requisita o número de registros da tabela de concursos
nrecs=dbGetQuery(con, "SELECT COUNT(*) AS NRECS FROM concursos")[1,1]
# atualiza a tabela de testes de aderência se o seu número de registros
# é menor que o número de registros da tabela de concursos
if (nr < nrecs) {
# notifica a operação em andamento
cat("\n> Inclusão de", nrecs-nr, 'registro(s) à tabela "fit" iniciada.')
# ativa a restrição que impede inserções de registros que
# não correspondem a nenhum registro na tabela referenciada
rs <- dbSendStatement(con, "PRAGMA FOREIGN_KEYS = ON")
dbClearResult(rs)
# requisita todos os números sorteados na série histórica dos concursos
mega <- dbGetQuery(con, "SELECT concurso, dezena FROM dezenas_sorteadas")
# atualização conforme número de registros a inserir
if (nrecs-nr == 1) {
teste <- chisq.test(tabulate(mega$dezena, nbins=60), correct=F)
# registra os resultados do único teste
query=sprintf("INSERT INTO fit SELECT %d, %f, %f", nrecs, as.double(teste$statistic), teste$p.value)
rs <- dbSendStatement(con, query)
} else {
# "prepared statement" para inserção de registro na tabela fit
query="INSERT INTO fit (concurso, estatistica, pvalue) VALUES ($concurso, $statistic, $pvalue)"
rs <- dbSendStatement(con, query)
if (nr == 0) {
frequencias <- vector("integer", length=60)
} else {
frequencias <- tabulate(mega$dezena[mega$concurso <= nr], nbins=60)
}
# loop para inclusão de registros na tabela "fit"
for (concurso in (nr+1):nrecs) {
numeros <- mega$dezena[mega$concurso == concurso]
frequencias[numeros] <- frequencias[numeros] + 1
# executa o teste com dados tabulados até "concurso"
teste <- chisq.test(frequencias, correct=(concurso < 1000))
# registra os resultados do teste
parameters <- list("concurso"=concurso, "statistic"=as.double(teste$statistic), "pvalue"=teste$p.value)
dbBind(rs, parameters)
}
}
dbClearResult(rs)
cat(".finalizada.\n\n")
}
# requisita números de concursoss, respectivas probabilidades de testes de
# aderência e datas de sorteio
query='SELECT concurso, pvalue, data_sorteio FROM fit NATURAL JOIN concursos WHERE concurso >= 1'
mega <- dbGetQuery(con, query)
dbDisconnect(con)
nrecs=length(mega$concurso)
# prepara arquivo como dispositivo de impressão do gráfico
# com tamanho igual a dos frames de vídeo HD1080
png(filename="img/fit.png", width=1920, height=1080, pointsize=28, family="Quicksand")
par(
mar=c(3, 4, 4, 1), font=2, bg="white",
cex.main=1.2, font.main=2, col.main="steelblue",
cex.lab=.9, font.lab=2, col.lab="steelblue",
cex.axis=.8, font.axis=2, col.axis="gray40"
)
# renderiza a série das probabilidades nos testes de aderência
plot(
mega$concurso,
mega$pvalue,
xlim=c(mega$concurso[1], mega$concurso[nrecs]),
ylim=c(0, 1),
ylab="", # evita renderização de "dummy" label
xlab="",
type="p", # "nebula" de pontos
pch=1, # símbolo dos pontos == circulo
col="gold", # cor de renderização dos pontos
axes=FALSE # inibe renderização dos eixos e do frame
)
title(main="Série do Erro Tipo I sob H: X~U\u276A1, 60\u276B", line=2)
title(xlab="concursos", line=1.375)
title(ylab="probabilidade", line=2.5)
# eixo dos números dos concursos
z <- seq((mega$concurso[1] %/% 200 + 1)*200, mega$concurso[nrecs], 200)
axis(1, at=c(mega$concurso[1], z), tck=-0.015, mgp=c(0, .2, 0))
rug(z[z-100>mega$concurso[1]]-100, side=1, col="gray40", ticksize=-0.01, lwd=2)
# eixo das probabilidades
z <- seq(from=.1, to=1, by=.2)
axis(2, at=c(0, z+0.1), las=1, tck=-0.015, mgp=c(0, .75, 0))
rug(z, side=2, col="gray40", ticksize=-0.01, lwd=2)
# reference lines at probability values
abline(h=c(0, z, z+0.1), lty="dotted", lwd=.8, col="gray50")
# text and line marking the tests' significance level
abline(h=0.05, lty="dashed", lwd=1.125, col="red")
text(par("usr")[2], .05, "α = 5%", adj=c(1, -0.5), cex=.67, col="red")
# connect the points of the "cloud" for an a priori characterization
lines(mega$concurso, mega$pvalue, lty="solid", lwd=1, col="orangered")
# select the first contest of each year in the series
primeiros <- mega[!duplicated(substr(mega$data_sorteio, 0, 4)),]
# axis of the years of first contests -- only labels visible
axis(
3, at=primeiros$concurso,
labels=substr(primeiros$data_sorteio, 0, 4),
mgp=c(0, 0, 0), # position below the default
col='transparent', # "invisible" scale
font.axis=4, col.axis="mediumpurple"
)
# vertical lines at the first contests of each year
abline(v=primeiros$concurso, lty="dotted", lwd=1, col="gray50")
# append the most recent contest to the end of primeiros, ensuring uniqueness
if (tail(primeiros$concurso, 1) != mega$concurso[nrecs]) primeiros <- rbind(primeiros, mega[nrecs,])
# select the variables relevant for the model fits
primeiros <- subset(primeiros, select=c("concurso", "pvalue"))
# highlight the first contests of each year and the most recent one
points(primeiros, col="purple", pch=20)
# connect the points of the first contests of each year and the most recent one
lines(primeiros, col="purple")
MODEL_NAME <- c("linear", "poly 2", "poly 3", "poly 4")
CORES <- c("darkgreen", "darkcyan", "navy", "darkred")
par(lwd=1.2)
# fit a least-squares line to the observations
fit <- lm(pvalue ~ concurso, data=primeiros)
lines(primeiros$concurso, predict(fit, primeiros), col=CORES[1])
# fit a degree-2 polynomial
fit2 <- lm(pvalue ~ poly(concurso, 2, raw=T), data=primeiros)
lines(primeiros$concurso, predict(fit2, primeiros), col=CORES[2])
# fit a degree-3 polynomial
fit3 <- lm(pvalue ~ poly(concurso, 3, raw=T), data=primeiros)
lines(primeiros$concurso, predict(fit3, primeiros), col=CORES[3])
# fit a degree-4 polynomial
fit4 <- lm(pvalue ~ poly(concurso, 4, raw=T), data=primeiros)
lines(primeiros$concurso, predict(fit4, primeiros), col=CORES[4])
# pre-render the legend to obtain its coordinates and dimensions
leg <- legend(
"topright", inset=c(0, 0.05), legend=MODEL_NAME, lwd=c(par("lwd")),
cex=.75, seg.len=c(1), x.intersp=.5, lty=c(par("lty")), plot=FALSE
)
# actual rendering using the coordinates and dimensions obtained
legend(
x=c(leg$rect$left-15, leg$rect$left+leg$rect$w),
y=c(leg$rect$top, leg$rect$top-leg$rect$h),
legend=MODEL_NAME, col=CORES, box.col="gray", box.lwd=1,
seg.len=c(1), x.intersp=.5, lty=c(par("lty")), lwd=c(par("lwd")),
cex=.75, text.col="gray50"
)
mtext(
"Gerado via GNU R-cran.", side=1, line=2, adj=1.02, cex=.7, font=4, col="gray"
)
dev.off() # close the graphics device
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_osw_query.R
\name{getPrecursorsQueryID}
\alias{getPrecursorsQueryID}
\title{Get precursor Info}
\usage{
getPrecursorsQueryID(analytes, runType = "DIA_Proteomics")
}
\arguments{
\item{analytes}{(integer) A vector of integer that is searched in PRECURSOR.ID.}
\item{runType}{(char) This must be one of the strings "DIA_Proteomics", "DIA_IPF", "DIA_Metabolomics".}
}
\value{
SQL query to be searched.
}
\description{
For each precursor in the table, the respective transition ids are fetched.
The order of transitions is kept the same as the order of their intensities in \code{\link{getTransitionsQuery}}.
}
\seealso{
\code{\link{fetchPrecursorsInfo}}
}
\author{
Shubham Gupta, \email{shubh.gupta@mail.utoronto.ca}
ORCID: 0000-0003-3500-8152
License: (c) Author (2020) + GPL-3
Date: 2020-04-04
}
\keyword{internal}
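% A minimal usage sketch; the analyte IDs below are hypothetical (the function is internal).
\examples{
\dontrun{
# Build the SQL query string for two precursor IDs of a DIA proteomics run
query <- getPrecursorsQueryID(c(32L, 2474L), runType = "DIA_Proteomics")
}
}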
|
/man/getPrecursorsQueryID.Rd
|
no_license
|
singjc/DIAlignR
|
R
| false | true | 884 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_osw_query.R
\name{getPrecursorsQueryID}
\alias{getPrecursorsQueryID}
\title{Get precursor Info}
\usage{
getPrecursorsQueryID(analytes, runType = "DIA_Proteomics")
}
\arguments{
\item{analytes}{(integer) A vector of integer that is searched in PRECURSOR.ID.}
\item{runType}{(char) This must be one of the strings "DIA_Proteomics", "DIA_IPF", "DIA_Metabolomics".}
}
\value{
SQL query to be searched.
}
\description{
For each precursor in the table, the respective transition ids are fetched.
The order of transitions is kept the same as the order of their intensities in \code{\link{getTransitionsQuery}}.
}
\seealso{
\code{\link{fetchPrecursorsInfo}}
}
\author{
Shubham Gupta, \email{shubh.gupta@mail.utoronto.ca}
ORCID: 0000-0003-3500-8152
License: (c) Author (2020) + GPL-3
Date: 2020-04-04
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_movies.R
\name{filter_movies}
\alias{filter_movies}
\title{Filter Movie Results}
\usage{
filter_movies(mov_info = get_mov_info_de(), details = NULL,
quoted = FALSE)
}
\arguments{
\item{mov_info}{dataframe of movie information}
\item{details}{which details to retrieve}
\item{quoted}{whether the details parameter should be quoted}
}
\value{
filtered movies
}
\description{
Filter Movie Results
}
\examples{
filter_movies(mov_info = get_mov_info_de()) # returns all details
filter_movies(mov_info = get_mov_info_en(), details = c("title", "runtime", "plot")) # returns selected columns
filter_movies(details = expr(starts_with("poster")), quoted = TRUE) ##TODO: check expr/quo/enquo
filter_movies(details = c("Who am I?", "Who are you?")) # error
filter_movies(mov_info = get_mov_info_en(), details = c("title", "runtime", ":P", ":D")) # warning; returns matchess
}
|
/man/filter_movies.Rd
|
permissive
|
sowla/kino
|
R
| false | true | 959 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_movies.R
\name{filter_movies}
\alias{filter_movies}
\title{Filter Movie Results}
\usage{
filter_movies(mov_info = get_mov_info_de(), details = NULL,
quoted = FALSE)
}
\arguments{
\item{mov_info}{dataframe of movie information}
\item{details}{which details to retrieve}
\item{quoted}{whether the details parameter should be quoted}
}
\value{
filtered movies
}
\description{
Filter Movie Results
}
\examples{
filter_movies(mov_info = get_mov_info_de()) # returns all details
filter_movies(mov_info = get_mov_info_en(), details = c("title", "runtime", "plot")) # returns selected columns
filter_movies(details = expr(starts_with("poster")), quoted = TRUE) ##TODO: check expr/quo/enquo
filter_movies(details = c("Who am I?", "Who are you?")) # error
filter_movies(mov_info = get_mov_info_en(), details = c("title", "runtime", ":P", ":D")) # warning; returns matchess
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfb_play_types.R
\name{cfb_play_types}
\alias{cfb_play_types}
\title{College Football Mapping for Play Types}
\source{
\url{https://api.collegefootballdata.com/play/types}
}
\usage{
cfb_play_types()
}
\value{
A data frame with 48 rows and 3 variables:
\describe{
\item{play_type_id}{Referencing play type id}
\item{text}{play type description}
\item{abbreviation}{play type abbreviation used for function call}
...
}
}
\description{
This data frame helps identify all play types found in the play-by-play data.
This can be used to filter out play types when calling functions beforehand.
}
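% A minimal sketch of how the mapping might be used; column names follow the \value section above.
\examples{
\dontrun{
types <- cfb_play_types()
# illustrative filter: ids of play types whose description mentions "Pass"
pass_ids <- types$play_type_id[grepl("Pass", types$text)]
}
}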
|
/man/cfb_play_types.Rd
|
permissive
|
saiemgilani/cfbscrapR
|
R
| false | true | 676 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfb_play_types.R
\name{cfb_play_types}
\alias{cfb_play_types}
\title{College Football Mapping for Play Types}
\source{
\url{https://api.collegefootballdata.com/play/types}
}
\usage{
cfb_play_types()
}
\value{
A data frame with 48 rows and 3 variables:
\describe{
\item{play_type_id}{Referencing play type id}
\item{text}{play type description}
\item{abbreviation}{play type abbreviation used for function call}
...
}
}
\description{
This data frame helps identify all play types found in the play-by-play data.
This can be used to filter out play types when calling functions beforehand.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geodata.R
\name{geoGrid}
\alias{geoGrid}
\title{Check, Cross, and Plot Coordinates with Polygons}
\usage{
geoGrid(coords, map, fix_coords = FALSE, plot = FALSE, all = FALSE,
alpha = 0.3)
}
\arguments{
\item{coords}{Dataframe. Dataframe containing at least longitude
and latitude data}
\item{map}{SpatialPolygonsDataFrame or .shp directory}
\item{fix_coords}{Boolean. Transform and fix coordinates system?}
\item{plot}{Boolean. Return plot with coordinates inside the grid?}
\item{all}{Boolean. Include all coordinates in the plot, i.e. not only the
ones that are inside the grids?}
\item{alpha}{Numeric. Points transparency for the plot}
}
\description{
This function checks a series of coordinates and returns a join
with the information of each coordinate and its respective grid.
Note that the coords and shapes coordinates MUST have the same
lon/lat reference system for it to work successfully.
}
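% A minimal sketch; the lon/lat values and the shapefile path are illustrative only.
\examples{
\dontrun{
coords <- data.frame(lon = c(-70.65, -70.70), lat = c(-33.45, -33.50))
crossed <- geoGrid(coords, map = "shapes/districts.shp", plot = TRUE, alpha = 0.5)
}
}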
|
/man/geoGrid.Rd
|
no_license
|
nfultz/lares
|
R
| false | true | 977 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geodata.R
\name{geoGrid}
\alias{geoGrid}
\title{Check, Cross, and Plot Coordinates with Polygons}
\usage{
geoGrid(coords, map, fix_coords = FALSE, plot = FALSE, all = FALSE,
alpha = 0.3)
}
\arguments{
\item{coords}{Dataframe. Dataframe containing at least longitude
and latitude data}
\item{map}{SpatialPolygonsDataFrame or .shp directory}
\item{fix_coords}{Boolean. Transform and fix coordinates system?}
\item{plot}{Boolean. Return plot with coordinates inside the grid?}
\item{all}{Boolean. Include all coordinates in the plot, i.e. not only the
ones that are inside the grids?}
\item{alpha}{Numeric. Points transparency for the plot}
}
\description{
This function checks a series of coordinates and returns a join
with the information of each coordinate and its respective grid.
Note that the coords and shapes coordinates MUST have the same
lon/lat reference system for it to work successfully.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hiAnnotator.R
\name{getLowestDists}
\alias{getLowestDists}
\title{Get the lowest biological distance from the 5' or 3' boundaries of query
and subject.}
\usage{
getLowestDists(
query = NULL,
subject = NULL,
res.nrst = NULL,
side = "either",
relativeTo = "subject"
)
}
\arguments{
\item{query}{GRanges object to be used as the query which holds data for
'queryHits' attribute of res.nrst.}
\item{subject}{GRanges object to be used as the subject which holds data for
'subjectHits' attribute of res.nrst.}
\item{res.nrst}{a dataframe of nearest indices as returned by
\code{\link[IRanges]{nearest}}.}
\item{side}{boundary of subject/annotation to use to calculate the
nearest distance. Options are '5p','3p', or the default 'either'.}
\item{relativeTo}{calculate distance relative to query or subject.
Default is 'subject'. See documentation of \code{\link{getNearestFeature}}
for more information.}
}
\value{
res.nrst with lowest distances appended at the end.
}
\description{
Given a query and subject with indices from \code{\link[IRanges]{nearest}},
calculate the shortest biological distance to either boundaries of the query
and subject. This is a helper function utilized in
\code{\link{getNearestFeature}}, \code{\link{get2NearestFeature}}
}
\note{
for cases where a query has multiple nearest neighbors or overlaps
with >1 subjects, the function will choose the subject with the lowest
absolute distance.
}
\examples{
query <- GRanges("A", IRanges(c(1, 5, 12, 20), width = 1),
strand = c("-", "+", "-", "+"))
subject <- GRanges("A", IRanges(c(1, 5, 10, 15, 21), width = 8:4),
strand = c("+", "+", "-", "-", "-"))
res <- as.data.frame(nearest(query, subject, select = "all",
ignore.strand = TRUE))
res <- getLowestDists(query, subject, res, "either", "query")
}
\seealso{
\code{\link{getNearestFeature}}, \code{\link{get2NearestFeature}}.
}
|
/man/getLowestDists.Rd
|
no_license
|
malnirav/hiAnnotator
|
R
| false | true | 1,942 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hiAnnotator.R
\name{getLowestDists}
\alias{getLowestDists}
\title{Get the lowest biological distance from the 5' or 3' boundaries of query
and subject.}
\usage{
getLowestDists(
query = NULL,
subject = NULL,
res.nrst = NULL,
side = "either",
relativeTo = "subject"
)
}
\arguments{
\item{query}{GRanges object to be used as the query which holds data for
'queryHits' attribute of res.nrst.}
\item{subject}{GRanges object to be used as the subject which holds data for
'subjectHits' attribute of res.nrst.}
\item{res.nrst}{a dataframe of nearest indices as returned by
\code{\link[IRanges]{nearest}}.}
\item{side}{boundary of subject/annotation to use to calculate the
nearest distance. Options are '5p','3p', or the default 'either'.}
\item{relativeTo}{calculate distance relative to query or subject.
Default is 'subject'. See documentation of \code{\link{getNearestFeature}}
for more information.}
}
\value{
res.nrst with lowest distances appended at the end.
}
\description{
Given a query and subject with indices from \code{\link[IRanges]{nearest}},
calculate the shortest biological distance to either boundaries of the query
and subject. This is a helper function utilized in
\code{\link{getNearestFeature}}, \code{\link{get2NearestFeature}}
}
\note{
for cases where a query has multiple nearest neighbors or overlaps
with >1 subjects, the function will choose the subject with the lowest
absolute distance.
}
\examples{
query <- GRanges("A", IRanges(c(1, 5, 12, 20), width = 1),
strand = c("-", "+", "-", "+"))
subject <- GRanges("A", IRanges(c(1, 5, 10, 15, 21), width = 8:4),
strand = c("+", "+", "-", "-", "-"))
res <- as.data.frame(nearest(query, subject, select = "all",
ignore.strand = TRUE))
res <- getLowestDists(query, subject, res, "either", "query")
}
\seealso{
\code{\link{getNearestFeature}}, \code{\link{get2NearestFeature}}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chilling.R
\name{chilling}
\alias{chilling}
\title{Calculation of chilling and heat from hourly temperature records}
\usage{
chilling(
hourtemps = NULL,
Start_JDay = 1,
End_JDay = 366,
THourly = NULL,
misstolerance = 50
)
}
\arguments{
\item{hourtemps}{a list of two elements, with element 'hourtemps' being a
dataframe of hourly temperatures (e.g. produced by stack_hourly_temps). This
data frame must have a column for Year, a column for JDay (Julian date, or
day of the year), a column for Hour and a column for Temp (hourly
temperature). The second (optional) element is QC, which is a data.frame
indicating completeness of the dataset. This is automatically produced by
stack_hourly_temps.}
\item{Start_JDay}{the start date (in Julian date, or day of the year) of the
period, for which chill and heat should be quantified.}
\item{End_JDay}{the end date (in Julian date, or day of the year) of the
period, for which chill and heat should be quantified.}
\item{THourly}{the same as hourtemps. This argument is only retained for
downward compatibility and can be ignored in most cases.}
\item{misstolerance}{maximum percentage of values for a given season that
can be missing without the record being removed from the output. Defaults to
50.}
}
\value{
data frame showing chilling and heat totals for the respective
periods for all seasons included in the temperature records. Columns are
Season, End_year (the year when the period ended), Days (the duration of the
period), Chilling_Hours, Utah_Model, Chill_portions and GDH. If the weather
input consisted of a list with elements hourtemps and QC, the output also
contains columns from QC that indicate the completeness of the weather
record that the calculations are based on.
}
\description{
Function to calculate three common horticultural chill metrics and one heat
metric from stacked hourly temperatures (produced by stack_hourly_temps).
Metrics that are calculated are Chilling Hours, Chill Units according to the
Utah Model, Chill Portions according to the Dynamic Model and Growing Degree
Hours.
}
\details{
Chill metrics are calculated as given in the references below. Chilling
Hours are all hours with temperatures between 0 and 7.2 degrees C. Units of
the Utah Model are calculated as suggested by Richardson et al. (1974)
(different weights for different temperature ranges, and negation of
chilling by warm temperatures). Chill Portions are calculated according to
Fishman et al. (1987a,b). More honestly, they are calculated according to an
Excel sheet produced by Amnon Erez and colleagues, which converts the
complex equations in the Fishman papers into relatively simple Excel
functions. These were translated into R. References to papers that include
the full functions are given below. Growing Degree Hours are calculated
according to Anderson et al. (1986), using the default values they suggest.
}
\note{
After doing extensive model comparisons, and reviewing a lot of
relevant literature, I do not recommend using the Chilling Hours or Utah
Models, especially in warm climates! The Dynamic Model (Chill Portions),
though far from perfect, seems much more reliable.
}
\examples{
# weather <- fix_weather(KA_weather[which(KA_weather$Year > 2006), ])
# hourtemps <- stack_hourly_temps(weather, latitude = 50.4)
# chilling(hourtemps, 305, 60)
chilling(stack_hourly_temps(fix_weather(KA_weather[which(KA_weather$Year > 2006), ]),
latitude = 50.4))
}
\references{
Model references:
Chilling Hours:
Weinberger JH (1950) Chilling requirements of peach varieties. Proc Am Soc
Hortic Sci 56, 122-128
Bennett JP (1949) Temperature and bud rest period. Calif Agric 3 (11), 9+12
Utah Model:
Richardson EA, Seeley SD, Walker DR (1974) A model for estimating the
completion of rest for Redhaven and Elberta peach trees. HortScience 9(4),
331-332
Dynamic Model:
Erez A, Fishman S, Linsley-Noakes GC, Allan P (1990) The dynamic model for
rest completion in peach buds. Acta Hortic 276, 165-174
Fishman S, Erez A, Couvillon GA (1987a) The temperature dependence of
dormancy breaking in plants - computer simulation of processes studied under
controlled temperatures. J Theor Biol 126(3), 309-321
Fishman S, Erez A, Couvillon GA (1987b) The temperature dependence of
dormancy breaking in plants - mathematical analysis of a two-step model
involving a cooperative transition. J Theor Biol 124(4), 473-483
Growing Degree Hours:
Anderson JL, Richardson EA, Kesner CD (1986) Validation of chill unit and
flower bud phenology models for 'Montmorency' sour cherry. Acta Hortic 184,
71-78
Model comparisons and model equations:
Luedeling E, Zhang M, Luedeling V and Girvetz EH, 2009. Sensitivity of
winter chill models for fruit and nut trees to climatic changes expected in
California's Central Valley. Agriculture, Ecosystems and Environment 133,
23-31
Luedeling E, Zhang M, McGranahan G and Leslie C, 2009. Validation of winter
chill models using historic records of walnut phenology. Agricultural and
Forest Meteorology 149, 1854-1864
Luedeling E and Brown PH, 2011. A global analysis of the comparability of
winter chill models for fruit and nut trees. International Journal of
Biometeorology 55, 411-421
Luedeling E, Kunz A and Blanke M, 2011. Mehr Chilling fuer Obstbaeume in
waermeren Wintern? (More winter chill for fruit trees in warmer winters?).
Erwerbs-Obstbau 53, 145-155
Review on chilling models in a climate change context:
Luedeling E, 2012. Climate change impacts on winter chill for temperate
fruit and nut production: a review. Scientia Horticulturae 144, 218-229
The PLS method is described here:
Luedeling E and Gassner A, 2012. Partial Least Squares Regression for
analyzing walnut phenology in California. Agricultural and Forest
Meteorology 158, 43-52.
Wold S (1995) PLS for multivariate linear modeling. In: van der Waterbeemd H
(ed) Chemometric methods in molecular design: methods and principles in
medicinal chemistry, vol 2. Chemie, Weinheim, pp 195-218.
Wold S, Sjostrom M, Eriksson L (2001) PLS-regression: a basic tool of
chemometrics. Chemometr Intell Lab 58(2), 109-130.
Mevik B-H, Wehrens R, Liland KH (2011) PLS: Partial Least Squares and
Principal Component Regression. R package version 2.3-0.
http://CRAN.R-project.org/package=pls.
Some applications of the PLS procedure:
Luedeling E, Kunz A and Blanke M, 2013. Identification of chilling and heat
requirements of cherry trees - a statistical approach. International Journal
of Biometeorology 57,679-689.
Yu H, Luedeling E and Xu J, 2010. Stronger winter than spring warming delays
spring phenology on the Tibetan Plateau. Proceedings of the National Academy
of Sciences (PNAS) 107 (51), 22151-22156.
Yu H, Xu J, Okuto E and Luedeling E, 2012. Seasonal Response of Grasslands
to Climate Change on the Tibetan Plateau. PLoS ONE 7(11), e49230.
The exact procedure was used here:
Luedeling E, Guo L, Dai J, Leslie C, Blanke M, 2013. Differential responses
of trees to temperature variation during the chilling and forcing phases.
Agricultural and Forest Meteorology 181, 33-42.
The chillR package:
Luedeling E, Kunz A and Blanke M, 2013. Identification of chilling and heat
requirements of cherry trees - a statistical approach. International Journal
of Biometeorology 57,679-689.
}
\author{
Eike Luedeling
}
\keyword{and}
\keyword{calculation}
\keyword{chill}
\keyword{heat}
|
/man/chilling.Rd
|
no_license
|
cran/chillR
|
R
| false | true | 7,640 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chilling.R
\name{chilling}
\alias{chilling}
\title{Calculation of chilling and heat from hourly temperature records}
\usage{
chilling(
hourtemps = NULL,
Start_JDay = 1,
End_JDay = 366,
THourly = NULL,
misstolerance = 50
)
}
\arguments{
\item{hourtemps}{a list of two elements, with element 'hourtemps' being a
dataframe of hourly temperatures (e.g. produced by stack_hourly_temps). This
data frame must have a column for Year, a column for JDay (Julian date, or
day of the year), a column for Hour and a column for Temp (hourly
temperature). The second (optional) element is QC, which is a data.frame
indicating completeness of the dataset. This is automatically produced by
stack_hourly_temps.}
\item{Start_JDay}{the start date (in Julian date, or day of the year) of the
period, for which chill and heat should be quantified.}
\item{End_JDay}{the end date (in Julian date, or day of the year) of the
period, for which chill and heat should be quantified.}
\item{THourly}{the same as hourtemps. This argument is only retained for
downward compatibility and can be ignored in most cases.}
\item{misstolerance}{maximum percentage of values for a given season that
can be missing without the record being removed from the output. Defaults to
50.}
}
\value{
data frame showing chilling and heat totals for the respective
periods for all seasons included in the temperature records. Columns are
Season, End_year (the year when the period ended), Days (the duration of the
period), Chilling_Hours, Utah_Model, Chill_portions and GDH. If the weather
input consisted of a list with elements hourtemps and QC, the output also
contains columns from QC that indicate the completeness of the weather
record that the calculations are based on.
}
\description{
Function to calculate three common horticultural chill metrics and one heat
metric from stacked hourly temperatures (produced by stack_hourly_temps).
Metrics that are calculated are Chilling Hours, Chill Units according to the
Utah Model, Chill Portions according to the Dynamic Model and Growing Degree
Hours.
}
\details{
Chill metrics are calculated as given in the references below. Chilling
Hours are all hours with temperatures between 0 and 7.2 degrees C. Units of
the Utah Model are calculated as suggested by Richardson et al. (1974)
(different weights for different temperature ranges, and negation of
chilling by warm temperatures). Chill Portions are calculated according to
Fishman et al. (1987a,b). More honestly, they are calculated according to an
Excel sheet produced by Amnon Erez and colleagues, which converts the
complex equations in the Fishman papers into relatively simple Excel
functions. These were translated into R. References to papers that include
the full functions are given below. Growing Degree Hours are calculated
according to Anderson et al. (1986), using the default values they suggest.
}
\note{
After doing extensive model comparisons, and reviewing a lot of
relevant literature, I do not recommend using the Chilling Hours or Utah
Models, especially in warm climates! The Dynamic Model (Chill Portions),
though far from perfect, seems much more reliable.
}
\examples{
# weather <- fix_weather(KA_weather[which(KA_weather$Year > 2006), ])
# hourtemps <- stack_hourly_temps(weather, latitude = 50.4)
# chilling(hourtemps, 305, 60)
chilling(stack_hourly_temps(fix_weather(KA_weather[which(KA_weather$Year > 2006), ]),
latitude = 50.4))
}
\references{
Model references:
Chilling Hours:
Weinberger JH (1950) Chilling requirements of peach varieties. Proc Am Soc
Hortic Sci 56, 122-128
Bennett JP (1949) Temperature and bud rest period. Calif Agric 3 (11), 9+12
Utah Model:
Richardson EA, Seeley SD, Walker DR (1974) A model for estimating the
completion of rest for Redhaven and Elberta peach trees. HortScience 9(4),
331-332
Dynamic Model:
Erez A, Fishman S, Linsley-Noakes GC, Allan P (1990) The dynamic model for
rest completion in peach buds. Acta Hortic 276, 165-174
Fishman S, Erez A, Couvillon GA (1987a) The temperature dependence of
dormancy breaking in plants - computer simulation of processes studied under
controlled temperatures. J Theor Biol 126(3), 309-321
Fishman S, Erez A, Couvillon GA (1987b) The temperature dependence of
dormancy breaking in plants - mathematical analysis of a two-step model
involving a cooperative transition. J Theor Biol 124(4), 473-483
Growing Degree Hours:
Anderson JL, Richardson EA, Kesner CD (1986) Validation of chill unit and
flower bud phenology models for 'Montmorency' sour cherry. Acta Hortic 184,
71-78
Model comparisons and model equations:
Luedeling E, Zhang M, Luedeling V and Girvetz EH, 2009. Sensitivity of
winter chill models for fruit and nut trees to climatic changes expected in
California's Central Valley. Agriculture, Ecosystems and Environment 133,
23-31
Luedeling E, Zhang M, McGranahan G and Leslie C, 2009. Validation of winter
chill models using historic records of walnut phenology. Agricultural and
Forest Meteorology 149, 1854-1864
Luedeling E and Brown PH, 2011. A global analysis of the comparability of
winter chill models for fruit and nut trees. International Journal of
Biometeorology 55, 411-421
Luedeling E, Kunz A and Blanke M, 2011. Mehr Chilling fuer Obstbaeume in
waermeren Wintern? (More winter chill for fruit trees in warmer winters?).
Erwerbs-Obstbau 53, 145-155
Review on chilling models in a climate change context:
Luedeling E, 2012. Climate change impacts on winter chill for temperate
fruit and nut production: a review. Scientia Horticulturae 144, 218-229
The PLS method is described here:
Luedeling E and Gassner A, 2012. Partial Least Squares Regression for
analyzing walnut phenology in California. Agricultural and Forest
Meteorology 158, 43-52.
Wold S (1995) PLS for multivariate linear modeling. In: van der Waterbeemd H
(ed) Chemometric methods in molecular design: methods and principles in
medicinal chemistry, vol 2. Chemie, Weinheim, pp 195-218.
Wold S, Sjostrom M, Eriksson L (2001) PLS-regression: a basic tool of
chemometrics. Chemometr Intell Lab 58(2), 109-130.
Mevik B-H, Wehrens R, Liland KH (2011) PLS: Partial Least Squares and
Principal Component Regression. R package version 2.3-0.
http://CRAN.R-project.org/package=pls.
Some applications of the PLS procedure:
Luedeling E, Kunz A and Blanke M, 2013. Identification of chilling and heat
requirements of cherry trees - a statistical approach. International Journal
of Biometeorology 57,679-689.
Yu H, Luedeling E and Xu J, 2010. Stronger winter than spring warming delays
spring phenology on the Tibetan Plateau. Proceedings of the National Academy
of Sciences (PNAS) 107 (51), 22151-22156.
Yu H, Xu J, Okuto E and Luedeling E, 2012. Seasonal Response of Grasslands
to Climate Change on the Tibetan Plateau. PLoS ONE 7(11), e49230.
The exact procedure was used here:
Luedeling E, Guo L, Dai J, Leslie C, Blanke M, 2013. Differential responses
of trees to temperature variation during the chilling and forcing phases.
Agricultural and Forest Meteorology 181, 33-42.
The chillR package:
Luedeling E, Kunz A and Blanke M, 2013. Identification of chilling and heat
requirements of cherry trees - a statistical approach. International Journal
of Biometeorology 57,679-689.
}
\author{
Eike Luedeling
}
\keyword{and}
\keyword{calculation}
\keyword{chill}
\keyword{heat}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lfe-tidiers.R
\name{augment.felm}
\alias{augment.felm}
\title{Augment data with information from a(n) felm object}
\usage{
\method{augment}{felm}(x, data = NULL, ...)
}
\arguments{
\item{x}{A \code{felm} object returned from \code{\link[lfe:felm]{lfe::felm()}}.}
\item{data}{A \code{\link[=data.frame]{data.frame()}} or \code{\link[tibble:tibble]{tibble::tibble()}} containing the original
data that was used to produce the object \code{x}. Defaults to
\code{stats::model.frame(x)} so that \code{augment(my_fit)} returns the augmented
original data. \strong{Do not} pass new data to the \code{data} argument.
Augment will report information such as influence and cooks distance for
data passed to the \code{data} argument. These measures are only defined for
the original training data.}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Additionally, if you pass
\code{newdata = my_tibble} to an \code{\link[=augment]{augment()}} method that does not
accept a \code{newdata} argument, it will use the default value for
the \code{data} argument.}
}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} containing the data passed to \code{augment},
and \strong{additional} columns:
\item{.fitted}{The predicted response for that observation.}
\item{.resid}{The residual for a particular point. Present only when
data has been passed to \code{augment} via the \code{data} argument.}
}
\description{
Augment accepts a model object and a dataset and adds
information about each observation in the dataset. Most commonly, this
includes predicted values in the \code{.fitted} column, residuals in the
\code{.resid} column, and standard errors for the fitted values in a \code{.se.fit}
column. New columns always begin with a \code{.} prefix to avoid overwriting
columns in the original dataset.
Users may pass data to augment via either the \code{data} argument or the
\code{newdata} argument. If the user passes data to the \code{data} argument,
it \strong{must} be exactly the data that was used to fit the model
object. Pass datasets to \code{newdata} to augment data that was not used
during model fitting. This still requires that all columns used to fit
the model are present.
Augment will often behave differently depending on whether \code{data} or
\code{newdata} is specified. This is because there is often information
associated with training observations (such as influence or related measures)
that is not meaningfully defined for new observations.
For convenience, many augment methods provide default \code{data} arguments,
so that \code{augment(fit)} will return the augmented training data. In these
cases augment tries to reconstruct the original data based on the model
object, with some varying degrees of success.
The augmented dataset is always returned as a \link[tibble:tibble]{tibble::tibble} with the
\strong{same number of rows} as the passed dataset. This means that the
passed data must be coercible to a tibble. At this time, tibbles do not
support matrix-columns. This means you should not specify a matrix
of covariates in a model formula during the original model fitting
process, and that \code{\link[splines:ns]{splines::ns()}}, \code{\link[stats:poly]{stats::poly()}} and
\code{\link[survival:Surv]{survival::Surv()}} objects are not supported in input data. If you
encounter errors, try explicitly passing a tibble, or fitting the original
model on data in a tibble.
We are in the process of defining behaviors for models fit with various
\link{na.action} arguments, but make no guarantees about behavior when data is
missing at this time.
}
\seealso{
\code{\link[=augment]{augment()}}, \code{\link[lfe:felm]{lfe::felm()}}
Other felm tidiers: \code{\link{tidy.felm}}
}
\concept{felm tidiers}
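% A minimal sketch, assuming the lfe package is available; formula and data are illustrative only.
\examples{
\dontrun{
library(lfe)
fit <- felm(Sepal.Length ~ Sepal.Width | Species, data = iris)
augment(fit)               # augmented original model frame (.fitted, .resid columns)
augment(fit, data = iris)  # data must be exactly what was used to fit the model
}
}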
|
/man/augment.felm.Rd
|
no_license
|
sjewo/broom
|
R
| false | true | 4,144 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lfe-tidiers.R
\name{augment.felm}
\alias{augment.felm}
\title{Augment data with information from a(n) felm object}
\usage{
\method{augment}{felm}(x, data = NULL, ...)
}
\arguments{
\item{x}{A \code{felm} object returned from \code{\link[lfe:felm]{lfe::felm()}}.}
\item{data}{A \code{\link[=data.frame]{data.frame()}} or \code{\link[tibble:tibble]{tibble::tibble()}} containing the original
data that was used to produce the object \code{x}. Defaults to
\code{stats::model.frame(x)} so that \code{augment(my_fit)} returns the augmented
original data. \strong{Do not} pass new data to the \code{data} argument.
Augment will report information such as influence and cooks distance for
data passed to the \code{data} argument. These measures are only defined for
the original training data.}
\item{...}{Additional arguments. Not used. Needed to match generic
signature only. \strong{Cautionary note:} Misspelled arguments will be
absorbed in \code{...}, where they will be ignored. If the misspelled
argument has a default value, the default value will be used.
For example, if you pass \code{conf.lvel = 0.9}, all computation will
proceed using \code{conf.level = 0.95}. Additionally, if you pass
\code{newdata = my_tibble} to an \code{\link[=augment]{augment()}} method that does not
accept a \code{newdata} argument, it will use the default value for
the \code{data} argument.}
}
\value{
A \code{\link[tibble:tibble]{tibble::tibble()}} containing the data passed to \code{augment},
and \strong{additional} columns:
\item{.fitted}{The predicted response for that observation.}
\item{.resid}{The residual for a particular point. Present only when
data has been passed to \code{augment} via the \code{data} argument.}
}
\description{
Augment accepts a model object and a dataset and adds
information about each observation in the dataset. Most commonly, this
includes predicted values in the \code{.fitted} column, residuals in the
\code{.resid} column, and standard errors for the fitted values in a \code{.se.fit}
column. New columns always begin with a \code{.} prefix to avoid overwriting
columns in the original dataset.
Users may pass data to augment via either the \code{data} argument or the
\code{newdata} argument. If the user passes data to the \code{data} argument,
it \strong{must} be exactly the data that was used to fit the model
object. Pass datasets to \code{newdata} to augment data that was not used
during model fitting. This still requires that all columns used to fit
the model are present.
Augment will often behave differently depending on whether \code{data} or
\code{newdata} is specified. This is because there is often information
associated with training observations (such as influence or related measures)
that is not meaningfully defined for new observations.
For convenience, many augment methods provide default \code{data} arguments,
so that \code{augment(fit)} will return the augmented training data. In these
cases augment tries to reconstruct the original data based on the model
object, with some varying degrees of success.
The augmented dataset is always returned as a \link[tibble:tibble]{tibble::tibble} with the
\strong{same number of rows} as the passed dataset. This means that the
passed data must be coercible to a tibble. At this time, tibbles do not
support matrix-columns. This means you should not specify a matrix
of covariates in a model formula during the original model fitting
process, and that \code{\link[splines:ns]{splines::ns()}}, \code{\link[stats:poly]{stats::poly()}} and
\code{\link[survival:Surv]{survival::Surv()}} objects are not supported in input data. If you
encounter errors, try explicitly passing a tibble, or fitting the original
model on data in a tibble.
We are in the process of defining behaviors for models fit with various
\link{na.action} arguments, but make no guarantees about behavior when data is
missing at this time.
}
\seealso{
\code{\link[=augment]{augment()}}, \code{\link[lfe:felm]{lfe::felm()}}
Other felm tidiers: \code{\link{tidy.felm}}
}
\concept{felm tidiers}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cplot.R, R/cplot_glm.R, R/cplot_loess.R,
% R/cplot_polr.R
\name{cplot}
\alias{cplot}
\alias{cplot.lm}
\alias{cplot.glm}
\alias{cplot.loess}
\alias{cplot.polr}
\alias{cplot.multinom}
\title{Conditional predicted value and average marginal effect plots for models}
\usage{
cplot(object, ...)
\method{cplot}{lm}(object, x = attributes(terms(object))[["term.labels"]][1L],
dx = x, what = c("prediction", "effect"),
data = prediction::find_data(object), type = c("response", "link"),
vcov = stats::vcov(object), at, n = 25L,
xvals = prediction::seq_range(data[[x]], n = n), level = 0.95,
draw = TRUE, xlab = x, ylab = if (match.arg(what) == "prediction")
paste0("Predicted value") else paste0("Marginal effect of ", dx),
xlim = NULL, ylim = NULL, lwd = 1L, col = "black", lty = 1L,
se.type = c("shade", "lines", "none"), se.col = "black",
se.fill = grDevices::gray(0.5, 0.5), se.lwd = lwd, se.lty = if
(match.arg(se.type) == "lines") 1L else 0L, factor.lty = 0L,
factor.pch = 19L, factor.col = se.col, factor.fill = factor.col,
factor.cex = 1L, xaxs = "i", yaxs = xaxs, las = 1L, scatter = FALSE,
scatter.pch = 19L, scatter.col = se.col, scatter.bg = scatter.col,
scatter.cex = 0.5, rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{glm}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "effect"), data = prediction::find_data(object),
type = c("response", "link"), vcov = stats::vcov(object), at, n = 25L,
xvals = prediction::seq_range(data[[x]], n = n), level = 0.95,
draw = TRUE, xlab = x, ylab = if (match.arg(what) == "prediction")
paste0("Predicted value") else paste0("Marginal effect of ", dx),
xlim = NULL, ylim = NULL, lwd = 1L, col = "black", lty = 1L,
se.type = c("shade", "lines", "none"), se.col = "black",
se.fill = grDevices::gray(0.5, 0.5), se.lwd = lwd, se.lty = if
(match.arg(se.type) == "lines") 1L else 0L, factor.lty = 0L,
factor.pch = 19L, factor.col = se.col, factor.fill = factor.col,
factor.cex = 1L, xaxs = "i", yaxs = xaxs, las = 1L, scatter = FALSE,
scatter.pch = 19L, scatter.col = se.col, scatter.bg = scatter.col,
scatter.cex = 0.5, rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{loess}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "effect"), data = prediction::find_data(object),
type = c("response", "link"), vcov = stats::vcov(object), at, n = 25L,
xvals = prediction::seq_range(data[[x]], n = n), level = 0.95,
draw = TRUE, xlab = x, ylab = if (match.arg(what) == "prediction")
paste0("Predicted value") else paste0("Marginal effect of ", dx),
xlim = NULL, ylim = NULL, lwd = 1L, col = "black", lty = 1L,
se.type = c("shade", "lines", "none"), se.col = "black",
se.fill = grDevices::gray(0.5, 0.5), se.lwd = lwd, se.lty = if
(match.arg(se.type) == "lines") 1L else 0L, factor.lty = 0L,
factor.pch = 19L, factor.col = se.col, factor.fill = factor.col,
factor.cex = 1L, xaxs = "i", yaxs = xaxs, las = 1L, scatter = FALSE,
scatter.pch = 19L, scatter.col = se.col, scatter.bg = scatter.col,
scatter.cex = 0.5, rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{polr}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "classprediction", "stackedprediction", "effect"),
data = prediction::find_data(object), type = c("response", "link"),
vcov = stats::vcov(object), at, n = 25L, xvals = seq_range(data[[x]], n
= n), level = 0.95, draw = TRUE, xlab = x, ylab = if (match.arg(what)
== "effect") paste0("Marginal effect of ", dx) else paste0("Predicted value"),
xlim = NULL, ylim = if (match.arg(what) \%in\% c("prediction",
"stackedprediction")) c(0, 1.04) else NULL, lwd = 1L, col = "black",
lty = 1L, factor.lty = 1L, factor.pch = 19L, factor.col = col,
factor.fill = factor.col, factor.cex = 1L, xaxs = "i", yaxs = xaxs,
las = 1L, scatter = FALSE, scatter.pch = 19L,
scatter.col = factor.col, scatter.bg = scatter.col, scatter.cex = 0.5,
rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{multinom}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "classprediction", "stackedprediction", "effect"),
data = prediction::find_data(object), type = c("response", "link"),
vcov = stats::vcov(object), at, n = 25L, xvals = seq_range(data[[x]], n
= n), level = 0.95, draw = TRUE, xlab = x, ylab = if (match.arg(what)
== "effect") paste0("Marginal effect of ", dx) else paste0("Predicted value"),
xlim = NULL, ylim = if (match.arg(what) \%in\% c("prediction",
"stackedprediction")) c(0, 1.04) else NULL, lwd = 1L, col = "black",
lty = 1L, factor.lty = 1L, factor.pch = 19L, factor.col = col,
factor.fill = factor.col, factor.cex = 1L, xaxs = "i", yaxs = xaxs,
las = 1L, scatter = FALSE, scatter.pch = 19L,
scatter.col = factor.col, scatter.bg = scatter.col, scatter.cex = 0.5,
rug = TRUE, rug.col = col, rug.size = -0.02, ...)
}
\arguments{
\item{object}{A model object.}
\item{\dots}{Additional arguments passed to \code{\link[graphics]{plot}}.}
\item{x}{A character string specifying the name of variable to use as the x-axis dimension in the plot.}
\item{dx}{If \code{what = "effect"}, the variable whose conditional marginal effect should be displayed. By default it is \code{x} (so the plot displays the marginal effect of \code{x} across values of \code{x}); ignored otherwise. If \code{dx} is a factor with more than 2 levels, an error will be issued.}
\item{what}{A character string specifying whether to draw a \dQuote{prediction} (fitted values from the model, calculated using \code{\link[stats]{predict}}) or an \dQuote{effect} (average marginal effect of \code{dx} conditional on \code{x}, using \code{\link{margins}}). Methods for classes other than \dQuote{lm} or \dQuote{glm} may provide additional options (e.g., \code{cplot.polr()} provides \dQuote{stackedprediction} and \dQuote{class} alternatives).}
\item{data}{A data frame to override the default value offered in \code{object[["model"]]}.}
\item{type}{A character string specifying whether to calculate predictions on the response scale (default) or link (only relevant for non-linear models).}
\item{vcov}{A matrix containing the variance-covariance matrix for estimated model coefficients, or a function to perform the estimation with \code{model} as its only argument.}
\item{at}{Currently ignored.}
\item{n}{An integer specifying the number of points across \code{x} at which to calculate the predicted value or marginal effect, when \code{x} is numeric. Ignored otherwise.}
\item{xvals}{A numeric vector of values at which to calculate predictions or marginal effects, if \code{x} is numeric. By default, it is calculated from the data using \code{\link{seq_range}}. If \code{x} is a factor, this is ignored, as is \code{n}.}
\item{level}{The confidence level required (used to draw uncertainty bounds).}
\item{draw}{A logical (default \code{TRUE}), specifying whether to draw the plot. If \code{FALSE}, the data used in drawing are returned as a list of data.frames. This might be useful if you want to plot using an alternative plotting package (e.g., ggplot2). Also, if set to value \dQuote{add}, then the resulting data is added to the existing plot.}
\item{xlab}{A character string specifying the value of \code{xlab} in \code{\link[graphics]{plot}}.}
\item{ylab}{A character string specifying the value of \code{ylab} in \code{\link[graphics]{plot}}.}
\item{xlim}{A two-element numeric vector specifying the x-axis limits. Set automatically if missing.}
\item{ylim}{A two-element numeric vector specifying the y-axis limits. Set automatically if missing.}
\item{lwd}{An integer specifying the width of the prediction or marginal effect line. See \code{\link[graphics]{lines}}. If \code{x} is a factor variable in the model, this is used to set the line width of the error bars.}
\item{col}{A character string specifying the color of the prediction or marginal effect line. If \code{x} is a factor variable in the model, this is used to set the color of the error bars.}
\item{lty}{An integer specifying the \dQuote{line type} of the prediction or marginal effect line. See \code{\link[graphics]{par}}. If \code{x} is a factor variable in the model, this is used to set the line type of the error bars.}
\item{se.type}{A character string specifying whether to draw the confidence interval as \dQuote{lines} (the default, using \code{\link[graphics]{lines}}) or a \dQuote{shade} (using \code{\link[graphics]{polygon}}).}
\item{se.col}{If \code{se.type = "lines"}, a character string specifying the color of the confidence interval lines. If \code{se.type = "shade"}, the color of the shaded region border.}
\item{se.fill}{If \code{se.type = "shade"}, the color of the shaded region. Ignored otherwise.}
\item{se.lwd}{If \code{se.type = "lines"}, the width of the confidence interval lines. See \code{\link[graphics]{lines}}.}
\item{se.lty}{If \code{se.type = "lines"}, an integer specifying the \dQuote{line type} of the confidence interval lines; if \code{se.type = "shade"}, the line type of the shaded polygon border. See \code{\link[graphics]{par}}.}
\item{factor.lty}{If \code{x} is a factor variable in the model, this is used to set the line type of an optional line connecting predictions across factor levels. If \code{factor.lty = 0L} (the default), no line is drawn.. See \code{\link[graphics]{par}}.}
\item{factor.pch}{If \code{x} is a factor variable in the model, the shape to use when drawing points. See \code{\link[graphics]{points}}.}
\item{factor.col}{If \code{x} is a factor variable in the model, the color to use for the border of the points. See \code{\link[graphics]{points}}.}
\item{factor.fill}{If \code{x} is a factor variable in the model, the color to use for the fill of the points. See \code{\link[graphics]{points}}.}
\item{factor.cex}{If \code{x} is a factor variable in the model, the \dQuote{expansion factor} to use for the point size. See \code{\link[graphics]{points}}.}
\item{xaxs}{A character string specifying \code{xaxs}. See \code{\link[graphics]{par}}.}
\item{yaxs}{A character string specifying \code{xaxs}. See \code{\link[graphics]{par}}.}
\item{las}{An integer string specifying \code{las}. See \code{\link[graphics]{par}}.}
\item{scatter}{A logical indicating whether to plot the observed data in \code{data} as a scatterplot.}
\item{scatter.pch}{If \code{scatter = TRUE}, an integer specifying a shape to use for plotting the data. See \code{\link[graphics]{points}}.}
\item{scatter.col}{If \code{scatter = TRUE}, a character string specifying a color to use for plotting the data. See \code{\link[graphics]{points}}.}
\item{scatter.bg}{If \code{scatter = TRUE}, a character string specifying a color to use for plotting the data. See \code{\link[graphics]{points}}.}
\item{scatter.cex}{If \code{scatter = TRUE}, an integer specifying the size of the points. See \code{\link[graphics]{points}}.}
\item{rug}{A logical specifying whether to include an x-axis \dQuote{rug} (see \code{\link[graphics]{rug}}).}
\item{rug.col}{A character string specifying \code{col} to \code{\link[graphics]{rug}}.}
\item{rug.size}{A numeric value specifying \code{ticksize} to \code{\link[graphics]{rug}}.}
}
\value{
A tidy data frame containing the data used to draw the plot. Use \code{draw = FALSE} to simply generate the data structure for use elsewhere.
}
\description{
Draw one or more conditional effects plots reflecting predictions or marginal effects from a model, conditional on a covariate. Currently methods exist for \dQuote{lm}, \dQuote{glm}, and \dQuote{loess} class models.
}
\details{
Note that when \code{what = "prediction"}, the plots show predictions holding values of the data at their mean or mode, whereas when \code{what = "effect"} average marginal effects (i.e., at observed values) are shown.
The overall aesthetic is somewhat similar to the output produced by the \code{marginalModelPlot()} function in the \bold{\href{https://cran.r-project.org/package=car}{car}} package.
}
\examples{
\dontrun{
require('datasets')
# prediction from several angles
m <- lm(Sepal.Length ~ Sepal.Width, data = iris)
cplot(m)
# more complex model
m <- lm(Sepal.Length ~ Sepal.Width * Petal.Width * I(Petal.Width ^ 2),
data = head(iris, 50))
## marginal effect of 'Petal.Width' across 'Petal.Width'
cplot(m, x = "Petal.Width", what = "effect", n = 10)
# factor independent variables
mtcars[["am"]] <- factor(mtcars[["am"]])
m <- lm(mpg ~ am * wt, data = mtcars)
## predicted values for each factor level
cplot(m, x = "am")
## marginal effect of each factor level across numeric variable
cplot(m, x = "wt", dx = "am", what = "effect")
# marginal effect of 'Petal.Width' across 'Sepal.Width'
## without drawing the plot
## this might be useful for using, e.g., ggplot2 for plotting
tmp <- cplot(m, x = "Sepal.Width", dx = "Petal.Width",
what = "effect", n = 10, draw = FALSE)
if (require("ggplot2")) {
# use ggplot2 instead of base graphics
ggplot(tmp, aes(x = Petal.Width, y = "effect")) +
geom_line(lwd = 2) +
geom_line(aes(y = effect + 1.96*se.effect)) +
geom_line(aes(y = effect - 1.96*se.effect))
}
# a non-linear model
m <- glm(am ~ wt*drat, data = mtcars, family = binomial)
cplot(m, x = "wt") # prediction
# effects on linear predictor and outcome
cplot(m, x = "drat", dx = "wt", what = "effect", type = "link")
cplot(m, x = "drat", dx = "wt", what = "effect", type = "response")
# plot conditional predictions across a third factor
local({
iris$long <- rbinom(nrow(iris), 1, 0.6)
x <- glm(long ~ Sepal.Width*Species, data = iris)
cplot(x, x = "Sepal.Width", data = iris[iris$Species == "setosa", ],
ylim = c(0,1), col = "red", se.fill = rgb(1,0,0,.5), xlim = c(2,4.5))
cplot(x, x = "Sepal.Width", data = iris[iris$Species == "versicolor", ],
draw = "add", col = "blue", se.fill = rgb(0,1,0,.5))
cplot(x, x = "Sepal.Width", data = iris[iris$Species == "virginica", ],
draw = "add", col = "green", se.fill = rgb(0,0,1,.5))
})
# ordinal outcome
if (require("MASS")) {
# x is a factor variable
house.plr <- polr(Sat ~ Infl + Type + Cont, weights = Freq,
data = housing)
## predicted probabilities
cplot(house.plr)
## cumulative predicted probabilities
cplot(house.plr, what = "stacked")
## ggplot2 example
if (require("ggplot2")) {
ggplot(cplot(house.plr), aes(x = xvals, y = yvals, group = level)) +
geom_line(aes(color = level))
}
# x is continuous
cyl.plr <- polr(factor(cyl) ~ wt, data = mtcars)
cplot(cyl.plr, col = c("red", "purple", "blue"), what = "stacked")
cplot(cyl.plr, what = "class")
}
}
}
\seealso{
\code{\link{plot.margins}}, \code{\link{persp.lm}}
}
\keyword{graphics}
|
/man/cplot.Rd
|
no_license
|
rungec/margins
|
R
| false | true | 14,984 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cplot.R, R/cplot_glm.R, R/cplot_loess.R,
% R/cplot_polr.R
\name{cplot}
\alias{cplot}
\alias{cplot.lm}
\alias{cplot.glm}
\alias{cplot.loess}
\alias{cplot.polr}
\alias{cplot.multinom}
\title{Conditional predicted value and average marginal effect plots for models}
\usage{
cplot(object, ...)
\method{cplot}{lm}(object, x = attributes(terms(object))[["term.labels"]][1L],
dx = x, what = c("prediction", "effect"),
data = prediction::find_data(object), type = c("response", "link"),
vcov = stats::vcov(object), at, n = 25L,
xvals = prediction::seq_range(data[[x]], n = n), level = 0.95,
draw = TRUE, xlab = x, ylab = if (match.arg(what) == "prediction")
paste0("Predicted value") else paste0("Marginal effect of ", dx),
xlim = NULL, ylim = NULL, lwd = 1L, col = "black", lty = 1L,
se.type = c("shade", "lines", "none"), se.col = "black",
se.fill = grDevices::gray(0.5, 0.5), se.lwd = lwd, se.lty = if
(match.arg(se.type) == "lines") 1L else 0L, factor.lty = 0L,
factor.pch = 19L, factor.col = se.col, factor.fill = factor.col,
factor.cex = 1L, xaxs = "i", yaxs = xaxs, las = 1L, scatter = FALSE,
scatter.pch = 19L, scatter.col = se.col, scatter.bg = scatter.col,
scatter.cex = 0.5, rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{glm}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "effect"), data = prediction::find_data(object),
type = c("response", "link"), vcov = stats::vcov(object), at, n = 25L,
xvals = prediction::seq_range(data[[x]], n = n), level = 0.95,
draw = TRUE, xlab = x, ylab = if (match.arg(what) == "prediction")
paste0("Predicted value") else paste0("Marginal effect of ", dx),
xlim = NULL, ylim = NULL, lwd = 1L, col = "black", lty = 1L,
se.type = c("shade", "lines", "none"), se.col = "black",
se.fill = grDevices::gray(0.5, 0.5), se.lwd = lwd, se.lty = if
(match.arg(se.type) == "lines") 1L else 0L, factor.lty = 0L,
factor.pch = 19L, factor.col = se.col, factor.fill = factor.col,
factor.cex = 1L, xaxs = "i", yaxs = xaxs, las = 1L, scatter = FALSE,
scatter.pch = 19L, scatter.col = se.col, scatter.bg = scatter.col,
scatter.cex = 0.5, rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{loess}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "effect"), data = prediction::find_data(object),
type = c("response", "link"), vcov = stats::vcov(object), at, n = 25L,
xvals = prediction::seq_range(data[[x]], n = n), level = 0.95,
draw = TRUE, xlab = x, ylab = if (match.arg(what) == "prediction")
paste0("Predicted value") else paste0("Marginal effect of ", dx),
xlim = NULL, ylim = NULL, lwd = 1L, col = "black", lty = 1L,
se.type = c("shade", "lines", "none"), se.col = "black",
se.fill = grDevices::gray(0.5, 0.5), se.lwd = lwd, se.lty = if
(match.arg(se.type) == "lines") 1L else 0L, factor.lty = 0L,
factor.pch = 19L, factor.col = se.col, factor.fill = factor.col,
factor.cex = 1L, xaxs = "i", yaxs = xaxs, las = 1L, scatter = FALSE,
scatter.pch = 19L, scatter.col = se.col, scatter.bg = scatter.col,
scatter.cex = 0.5, rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{polr}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "classprediction", "stackedprediction", "effect"),
data = prediction::find_data(object), type = c("response", "link"),
vcov = stats::vcov(object), at, n = 25L, xvals = seq_range(data[[x]], n
= n), level = 0.95, draw = TRUE, xlab = x, ylab = if (match.arg(what)
== "effect") paste0("Marginal effect of ", dx) else paste0("Predicted value"),
xlim = NULL, ylim = if (match.arg(what) \%in\% c("prediction",
"stackedprediction")) c(0, 1.04) else NULL, lwd = 1L, col = "black",
lty = 1L, factor.lty = 1L, factor.pch = 19L, factor.col = col,
factor.fill = factor.col, factor.cex = 1L, xaxs = "i", yaxs = xaxs,
las = 1L, scatter = FALSE, scatter.pch = 19L,
scatter.col = factor.col, scatter.bg = scatter.col, scatter.cex = 0.5,
rug = TRUE, rug.col = col, rug.size = -0.02, ...)
\method{cplot}{multinom}(object,
x = attributes(terms(object))[["term.labels"]][1L], dx = x,
what = c("prediction", "classprediction", "stackedprediction", "effect"),
data = prediction::find_data(object), type = c("response", "link"),
vcov = stats::vcov(object), at, n = 25L, xvals = seq_range(data[[x]], n
= n), level = 0.95, draw = TRUE, xlab = x, ylab = if (match.arg(what)
== "effect") paste0("Marginal effect of ", dx) else paste0("Predicted value"),
xlim = NULL, ylim = if (match.arg(what) \%in\% c("prediction",
"stackedprediction")) c(0, 1.04) else NULL, lwd = 1L, col = "black",
lty = 1L, factor.lty = 1L, factor.pch = 19L, factor.col = col,
factor.fill = factor.col, factor.cex = 1L, xaxs = "i", yaxs = xaxs,
las = 1L, scatter = FALSE, scatter.pch = 19L,
scatter.col = factor.col, scatter.bg = scatter.col, scatter.cex = 0.5,
rug = TRUE, rug.col = col, rug.size = -0.02, ...)
}
\arguments{
\item{object}{A model object.}
\item{\dots}{Additional arguments passed to \code{\link[graphics]{plot}}.}
\item{x}{A character string specifying the name of variable to use as the x-axis dimension in the plot.}
\item{dx}{If \code{what = "effect"}, the variable whose conditional marginal effect should be displayed. By default it is \code{x} (so the plot displays the marginal effect of \code{x} across values of \code{x}); ignored otherwise. If \code{dx} is a factor with more than 2 levels, an error will be issued.}
\item{what}{A character string specifying whether to draw a \dQuote{prediction} (fitted values from the model, calculated using \code{\link[stats]{predict}}) or an \dQuote{effect} (average marginal effect of \code{dx} conditional on \code{x}, using \code{\link{margins}}). Methods for classes other than \dQuote{lm} or \dQuote{glm} may provide additional options (e.g., \code{cplot.polr()} provides \dQuote{stackedprediction} and \dQuote{classprediction} alternatives).}
\item{data}{A data frame to override the default value offered in \code{object[["model"]]}.}
\item{type}{A character string specifying whether to calculate predictions on the response scale (default) or link (only relevant for non-linear models).}
\item{vcov}{A matrix containing the variance-covariance matrix for estimated model coefficients, or a function to perform the estimation with \code{model} as its only argument.}
\item{at}{Currently ignored.}
\item{n}{An integer specifying the number of points across \code{x} at which to calculate the predicted value or marginal effect, when \code{x} is numeric. Ignored otherwise.}
\item{xvals}{A numeric vector of values at which to calculate predictions or marginal effects, if \code{x} is numeric. By default, it is calculated from the data using \code{\link{seq_range}}. If \code{x} is a factor, this is ignored, as is \code{n}.}
\item{level}{The confidence level required (used to draw uncertainty bounds).}
\item{draw}{A logical (default \code{TRUE}), specifying whether to draw the plot. If \code{FALSE}, the data used in drawing are returned as a list of data.frames. This might be useful if you want to plot using an alternative plotting package (e.g., ggplot2). Also, if set to value \dQuote{add}, then the resulting data is added to the existing plot.}
\item{xlab}{A character string specifying the value of \code{xlab} in \code{\link[graphics]{plot}}.}
\item{ylab}{A character string specifying the value of \code{ylab} in \code{\link[graphics]{plot}}.}
\item{xlim}{A two-element numeric vector specifying the x-axis limits. Set automatically if missing.}
\item{ylim}{A two-element numeric vector specifying the y-axis limits. Set automatically if missing.}
\item{lwd}{An integer specifying the width of the prediction or marginal effect line. See \code{\link[graphics]{lines}}. If \code{x} is a factor variable in the model, this is used to set the line width of the error bars.}
\item{col}{A character string specifying the color of the prediction or marginal effect line. If \code{x} is a factor variable in the model, this is used to set the color of the error bars.}
\item{lty}{An integer specifying the \dQuote{line type} of the prediction or marginal effect line. See \code{\link[graphics]{par}}. If \code{x} is a factor variable in the model, this is used to set the line type of the error bars.}
\item{se.type}{A character string specifying whether to draw the confidence interval as \dQuote{lines} (the default, using \code{\link[graphics]{lines}}) or a \dQuote{shade} (using \code{\link[graphics]{polygon}}).}
\item{se.col}{If \code{se.type = "lines"}, a character string specifying the color of the confidence interval lines. If \code{se.type = "shade"}, the color of the shaded region border.}
\item{se.fill}{If \code{se.type = "shade"}, the color of the shaded region. Ignored otherwise.}
\item{se.lwd}{If \code{se.type = "lines"}, the width of the confidence interval lines. See \code{\link[graphics]{lines}}.}
\item{se.lty}{If \code{se.type = "lines"}, an integer specifying the \dQuote{line type} of the confidence interval lines; if \code{se.type = "shade"}, the line type of the shaded polygon border. See \code{\link[graphics]{par}}.}
\item{factor.lty}{If \code{x} is a factor variable in the model, this is used to set the line type of an optional line connecting predictions across factor levels. If \code{factor.lty = 0L} (the default), no line is drawn. See \code{\link[graphics]{par}}.}
\item{factor.pch}{If \code{x} is a factor variable in the model, the shape to use when drawing points. See \code{\link[graphics]{points}}.}
\item{factor.col}{If \code{x} is a factor variable in the model, the color to use for the border of the points. See \code{\link[graphics]{points}}.}
\item{factor.fill}{If \code{x} is a factor variable in the model, the color to use for the fill of the points. See \code{\link[graphics]{points}}.}
\item{factor.cex}{If \code{x} is a factor variable in the model, the \dQuote{expansion factor} to use for the point size. See \code{\link[graphics]{points}}.}
\item{xaxs}{A character string specifying \code{xaxs}. See \code{\link[graphics]{par}}.}
\item{yaxs}{A character string specifying \code{yaxs}. See \code{\link[graphics]{par}}.}
\item{las}{An integer specifying \code{las}. See \code{\link[graphics]{par}}.}
\item{scatter}{A logical indicating whether to plot the observed data in \code{data} as a scatterplot.}
\item{scatter.pch}{If \code{scatter = TRUE}, an integer specifying a shape to use for plotting the data. See \code{\link[graphics]{points}}.}
\item{scatter.col}{If \code{scatter = TRUE}, a character string specifying a color to use for plotting the data. See \code{\link[graphics]{points}}.}
\item{scatter.bg}{If \code{scatter = TRUE}, a character string specifying a color to use for plotting the data. See \code{\link[graphics]{points}}.}
\item{scatter.cex}{If \code{scatter = TRUE}, an integer specifying the size of the points. See \code{\link[graphics]{points}}.}
\item{rug}{A logical specifying whether to include an x-axis \dQuote{rug} (see \code{\link[graphics]{rug}}).}
\item{rug.col}{A character string specifying \code{col} to \code{\link[graphics]{rug}}.}
\item{rug.size}{A numeric value specifying \code{ticksize} to \code{\link[graphics]{rug}}.}
}
\value{
A tidy data frame containing the data used to draw the plot. Use \code{draw = FALSE} to simply generate the data structure for use elsewhere.
}
\description{
Draw one or more conditional effects plots reflecting predictions or marginal effects from a model, conditional on a covariate. Currently methods exist for \dQuote{lm}, \dQuote{glm}, \dQuote{loess}, \dQuote{polr}, and \dQuote{multinom} class models.
}
\details{
Note that when \code{what = "prediction"}, the plots show predictions holding values of the data at their mean or mode, whereas when \code{what = "effect"} average marginal effects (i.e., at observed values) are shown.
The overall aesthetic is somewhat similar to the output produced by the \code{marginalModelPlot()} function in the \bold{\href{https://cran.r-project.org/package=car}{car}} package.
}
\examples{
\dontrun{
require('datasets')
# prediction from several angles
m <- lm(Sepal.Length ~ Sepal.Width, data = iris)
cplot(m)
# more complex model
m <- lm(Sepal.Length ~ Sepal.Width * Petal.Width * I(Petal.Width ^ 2),
data = head(iris, 50))
## marginal effect of 'Petal.Width' across 'Petal.Width'
cplot(m, x = "Petal.Width", what = "effect", n = 10)
# factor independent variables
mtcars[["am"]] <- factor(mtcars[["am"]])
m <- lm(mpg ~ am * wt, data = mtcars)
## predicted values for each factor level
cplot(m, x = "am")
## marginal effect of each factor level across numeric variable
cplot(m, x = "wt", dx = "am", what = "effect")
# marginal effect of 'Petal.Width' across 'Sepal.Width'
## without drawing the plot
## this might be useful if you want to use, e.g., ggplot2 for plotting
m <- lm(Sepal.Length ~ Sepal.Width * Petal.Width, data = iris)
tmp <- cplot(m, x = "Sepal.Width", dx = "Petal.Width",
             what = "effect", n = 10, draw = FALSE)
if (require("ggplot2")) {
# use ggplot2 instead of base graphics
ggplot(tmp, aes(x = Petal.Width, y = effect)) +
geom_line(lwd = 2) +
geom_line(aes(y = effect + 1.96*se.effect)) +
geom_line(aes(y = effect - 1.96*se.effect))
}
# a non-linear model
m <- glm(am ~ wt*drat, data = mtcars, family = binomial)
cplot(m, x = "wt") # prediction
# effects on linear predictor and outcome
cplot(m, x = "drat", dx = "wt", what = "effect", type = "link")
cplot(m, x = "drat", dx = "wt", what = "effect", type = "response")
# plot conditional predictions across a third factor
local({
iris$long <- rbinom(nrow(iris), 1, 0.6)
x <- glm(long ~ Sepal.Width*Species, data = iris)
cplot(x, x = "Sepal.Width", data = iris[iris$Species == "setosa", ],
ylim = c(0,1), col = "red", se.fill = rgb(1,0,0,.5), xlim = c(2,4.5))
cplot(x, x = "Sepal.Width", data = iris[iris$Species == "versicolor", ],
draw = "add", col = "blue", se.fill = rgb(0,1,0,.5))
cplot(x, x = "Sepal.Width", data = iris[iris$Species == "virginica", ],
draw = "add", col = "green", se.fill = rgb(0,0,1,.5))
})
# ordinal outcome
if (require("MASS")) {
# x is a factor variable
house.plr <- polr(Sat ~ Infl + Type + Cont, weights = Freq,
data = housing)
## predicted probabilities
cplot(house.plr)
## cumulative predicted probabilities
cplot(house.plr, what = "stacked")
## ggplot2 example
if (require("ggplot2")) {
ggplot(cplot(house.plr), aes(x = xvals, y = yvals, group = level)) +
geom_line(aes(color = level))
}
# x is continuous
cyl.plr <- polr(factor(cyl) ~ wt, data = mtcars)
cplot(cyl.plr, col = c("red", "purple", "blue"), what = "stacked")
cplot(cyl.plr, what = "class")
}
}
}
\seealso{
\code{\link{plot.margins}}, \code{\link{persp.lm}}
}
\keyword{graphics}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gnlmm.R
\name{prediction}
\alias{prediction}
\title{Prediction after a gnlmm fit}
\usage{
prediction(fit, pred, data = NULL, mc.cores = 1)
}
\arguments{
\item{fit}{a gnlmm fit object}
\item{pred}{prediction function}
\item{data}{new data}
\item{mc.cores}{number of cores (for Linux only)}
}
\value{
observed and predicted
}
\description{
Generate predictions after a generalized non-linear mixed effect model fit
}
\examples{
\dontrun{
ode <- "
d/dt(depot) =-KA*depot;
d/dt(centr) = KA*depot - KE*centr;
"
sys1 = RxODE(ode)
pars <- function()
{
CL = exp(THETA[1] + ETA[1])#; if (CL>100) CL=100
KA = exp(THETA[2] + ETA[2])#; if (KA>20) KA=20
KE = exp(THETA[3])
V = CL/KE
sig2 = exp(THETA[4])
}
llik <- function() {
pred = centr/V
dnorm(DV, pred, sd=sqrt(sig2), log=TRUE)
}
inits = list(THTA=c(-3.22, 0.47, -2.45, 0))
inits$OMGA=list(ETA[1]~.027, ETA[2]~.37)
theo <- read.table("theo_md.txt", head=TRUE)
fit = gnlmm(llik, theo, inits, pars, sys1,
control=list(trace=TRUE, nAQD=5))
pred = function() {
pred = centr/V
}
s = prediction(fit, pred)
plot(s$p, s$dv); abline(0,1,col="red")
}
}
|
/man/prediction.Rd
|
no_license
|
mattfidler/nlmixr
|
R
| false | true | 1,185 |
rd
|
#' @importFrom ggplot2 ggplot_build
#' @export
# ggplot_build.ggpsychro {{{
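# Build method for "ggpsychro" plots: before the default ggplot build runs, fill in the
# data and aesthetic mappings of the mask/saturation-line and grid layers from the
# coordinate limits and the (possibly newly added) drybulb/humratio/grid scales.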
ggplot_build.ggpsychro <- function (plot) {
# get rid of R CMD check NOTE
x <- y <- relhum <- label <- wetbulb <- vappres <- specvol <- enthalpy <- NULL
# retrieve meta data
meta <- plot$psychro
# get all layers
layers <- plot$layers
lapply(layers, function (l) l$aes_params)
# move the maskarea and saturation line layers to the end
layers <- cover_mask(layers)
# get all layer grid types
type <- get_geom_types(layers)
# get current scale list
scales <- plot$scales
# get coord
coord <- plot$coordinates
# add dry-bulb and hum-ratio scales if not exist
scales <- add_default_scales(coord, scales, meta$units)
# do nothing if there is no grid layer
if (all(is.na(type))) return(NextMethod())
for (i in seq_along(type)) {
aes <- type[[i]]
# skip if not a valid layer
if (is.na(aes)) next
if (aes %in% c("maskarea", "linesat")) {
# create init data for this layer
layers[[i]]$data <- compute_mask_data(layers[[i]], coord, scales$get_scales("x"))
layers[[i]]$mapping <- aes(x = x, y = y, relhum = relhum)
} else {
aes <- gsub("grid_", "", aes, fixed = TRUE)
# if there is already a scale, directly use its breaks
if (scales$has_scale(aes)) {
sc_grid <- scales$get_scales(aes)
new_grid <- FALSE
# create a new scale for this variable
} else {
sc_grid <- get_scale_by_aes(aes)(units = plot$psychro$units)
new_grid <- TRUE
scales$add(sc_grid)
}
# need to calculate limits to get breaks
if (!length(sc_grid$get_breaks())) {
ranges <- coord$limits
x_range <- scales$get_scales("x")$transform(ranges$x)
y_range <- scales$get_scales("y")$transform(ranges$y)
if (aes == "wetbulb") {
# calculate grid scale limits
v_range <- range(with_units(meta$units,
GetTWetBulbFromHumRatio(x_range, y_range, meta$pressure)))
# get the break step of drybulb
xstep <- diff(scales$get_scales("x")$get_breaks_minor())[[1L]]
# calculate min wetbulb with whole x steps away
v_range[[1L]] <- x_range[[1L]] - ceiling((x_range[[1L]] - v_range[[1L]]) / xstep) * xstep
v_range <- c(v_range[[1L]], x_range[[2L]])
} else if (aes == "vappres"){
# calculate vapor pressure range at the y axis range
v_range <- with_units(meta$units, GetVapPresFromHumRatio(y_range, meta$pressure))
v_range <- round(v_range)
} else if (aes == "specvol") {
# calculate spec vol range at the x and y axis range
v_range <- with_units(meta$units, GetMoistAirVolume(x_range, y_range, meta$pressure))
} else if (aes == "enthalpy") {
v_range <- with_units(meta$units, GetMoistAirEnthalpy(x_range, y_range))
v_range <- round(v_range)
}
# keep the original in case it is user defined
if (new_grid) sc_grid <- sc_grid$clone()
# train grid scale in order to get breaks
sc_grid$train(sc_grid$transform(v_range))
}
# recreate grid layer input table
data <- compute_grid_data(layers[[i]], coord, scales$get_scales("x"), sc_grid, keep_bounds = !new_grid)
# for wetbulb, drybulb should be greater than wetbulb
if (aes == "wetbulb") data <- data[data$x >= data$wetbulb, ]
# add mappings
layers[[i]]$data <- data
layers[[i]]$mapping <- switch(aes,
relhum = aes(x = x, y = y, relhum = relhum, label = label),
wetbulb = aes(x = x, y = y, wetbulb = wetbulb, label = label),
vappres = aes(x = x, y = y, vappres = vappres, label = label),
specvol = aes(x = x, y = y, specvol = specvol, label = label),
enthalpy = aes(x = x, y = y, enthalpy = enthalpy, label = label)
)
}
}
# assign back
plot$scales <- scales
plot$layers <- layers
NextMethod()
}
# }}}
# add_default_scales {{{
add_default_scales <- function (coord, scales, units) {
if (!scales$has_scale("x")) {
scales$add(scale_drybulb_continuous(units = units))
scales$get_scales("x")$train(coord$limits$x)
}
if (!scales$has_scale("y")) {
scales$add(scale_humratio_continuous(units = units))
scales$get_scales("y")$train(coord$limits$y)
}
scales
}
# }}}
# get_trans_by_aes {{{
get_trans_by_aes <- function (aes) {
get(paste0(aes, "_trans"))
}
# }}}
# get_scale_by_aes {{{
get_scale_by_aes <- function (aes) {
get(paste0("scale_", aes))
}
# }}}
# abort_unit_waiver {{{
abort_unit_waiver <- function (object_name) {
stop(sprintf("%s: 'units' cannot be 'waiver()' when adding to a non-ggpsychro plot.",
gsub("()", "", object_name, fixed = TRUE)),
call. = FALSE
)
}
# }}}
# compute_mask_data {{{
compute_mask_data <- function (layer, coord, scale_drybulb) {
# get drybulb limits in the transformed range
lim_drybulb <- scale_drybulb$transform(coord$limits$x)
# get x based on axis x limits
x <- seq(lim_drybulb[[1L]], lim_drybulb[[2L]], length.out = layer$aes_params$n)
new_data_frame(list(x = x, y = rep(0.0, length(x)), relhum = rep(1.0, length(x))))
}
# }}}
# compute_grid_data {{{
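# Build the plotting data for one grid layer: expand the grid scale's minor breaks across
# `n` dry-bulb values, attach labels only to the major breaks above the lower bound, and,
# for wet-bulb grids, make sure every grid line starts on the saturation line.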
compute_grid_data <- function (layer, coord, scale_drybulb, scale_grid, keep_bounds) {
# get drybulb limits in the transformed range
lim_drybulb <- scale_drybulb$transform(coord$limits$x)
# get major breaks
breaks <- stats::na.omit(scale_grid$get_breaks())
if (is.null(breaks) || !length(breaks)) {
empty <- new_data_frame(list(value = numeric(), x = numeric(), y = numeric(), label = character()))
names(empty)[1L] <- scale_grid$aesthetics
return(empty)
}
# get minor breaks
breaks_minor <- scale_grid$get_breaks_minor(2)
if (!length(breaks_minor)) breaks_minor <- breaks
# make sure labels and breaks have the same length
labels <- character(length(breaks_minor))
# exclude the bounds
idx <- breaks > min(breaks)
labs <- scale_grid$get_labels(breaks[idx])
if (!is.null(labs)) labels[breaks_minor %in% breaks[idx]] <- labs
# combine in a data frame
data <- new_data_frame(list(value = breaks_minor, label = labels))
names(data)[1L] <- scale_grid$aesthetics
data <- rep_dataframe(data, layer$aes_params$n)
# get x based on axis x limits
x <- seq(lim_drybulb[[1L]], lim_drybulb[[2L]], length.out = layer$aes_params$n)
data$x <- rep(x, each = length(breaks_minor))
data$y <- 0
if (scale_grid$scale_name == "wetbulb") {
# make sure there is a point on the saturation line
data <- do.call(rbind, lapply(split(data, data$wetbulb), function (d) {
if (any(d$x == min(d$wetbulb))) return(d)
sat <- d[1L, ]
sat$x <- min(d$wetbulb)
rbind(sat, d)
}))
}
data
}
# }}}
|
/R/plot-build.R
|
permissive
|
hongyuanjia/ggpsychro
|
R
| false | false | 7,457 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_analysis.R
\name{wbt_image_slider}
\alias{wbt_image_slider}
\title{Image slider}
\usage{
wbt_image_slider(
input1,
input2,
output,
palette1 = "grey",
reverse1 = FALSE,
label1 = "",
palette2 = "grey",
reverse2 = FALSE,
label2 = "",
height = 600,
wd = NULL,
verbose_mode = FALSE,
compress_rasters = FALSE
)
}
\arguments{
\item{input1}{Name of the left input image file.}
\item{input2}{Name of the right input image file.}
\item{output}{Name of the output HTML file (*.html).}
\item{palette1}{Left image palette; options are 'grey', 'atlas', 'high_relief', 'arid', 'soft', 'muted', 'purple', 'viridi', 'gn_yl', 'pi_y_g', 'bl_yl_rd', 'deep', and 'rgb'.}
\item{reverse1}{Reverse left image palette?}
\item{label1}{Left image label (leave blank for none).}
\item{palette2}{Right image palette; options are 'grey', 'atlas', 'high_relief', 'arid', 'soft', 'muted', 'purple', 'viridi', 'gn_yl', 'pi_y_g', 'bl_yl_rd', 'deep', and 'rgb'.}
\item{reverse2}{Reverse right image palette?}
\item{label2}{Right image label (leave blank for none).}
\item{height}{Image height, in pixels.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
\item{compress_rasters}{Sets the flag used by WhiteboxTools to determine whether to use compression for output rasters.}
}
\value{
Returns the tool text outputs.
}
\description{
This tool creates an image slider from two input images.
}
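% A minimal usage sketch (not part of the original documentation; the file names are
% hypothetical and a working WhiteboxTools installation is assumed):
\examples{
\dontrun{
wbt_image_slider(input1 = "before.tif", input2 = "after.tif",
                 output = "slider.html",
                 label1 = "Before", label2 = "After")
}
}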
|
/man/wbt_image_slider.Rd
|
permissive
|
dondealban/whiteboxR
|
R
| false | true | 1,578 |
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/graphModels.R
\name{connectNodes}
\alias{connectNodes}
\title{Connect two nodes}
\usage{
connectNodes(dotModel, node1, node2, connectionType)
}
\arguments{
\item{dotModel}{The basic model}
\item{node1}{The starting node}
\item{node2}{The ending node}
\item{connectionType}{The type of connection to add between nodes 1 and 2}
}
\description{
To do: add details
}
\keyword{internal}
|
/man/connectNodes.Rd
|
no_license
|
clbustos/MplusAutomation
|
R
| false | false | 494 |
rd
|
library(multicon)
### Name: partwhole
### Title: Part-Whole Correlation
### Aliases: partwhole
### Keywords: part-whole correlation composite
### ** Examples
data(bfi.set)
# Imagine we want to find the best two-item composite that correlates
# highest with the full 8 items available to measure extraversion.
# Three (of the extraversion) items need to be reverse scored
sBFI6r <- 6 - bfi.set$sBFI6
sBFI21r <- 6 - bfi.set$sBFI21
sBFI31r <- 6 - bfi.set$sBFI31
# Now put them all into one data.frame
ext.vars <- data.frame(bfi.set$sBFI1, sBFI6r, bfi.set$sBFI11,
bfi.set$sBFI16, sBFI21r, bfi.set$sBFI26, sBFI31r, bfi.set$sBFI36)
head(ext.vars) # Looks good
# Now compute the part-whole correlation for all possible 2-item composites
partwhole(ext.vars, 2)
|
/data/genthat_extracted_code/multicon/examples/partwhole.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 771 |
r
|
testlist <- list(Beta = 0, CVLinf = 0, FM = 3.81959243159313e-313, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 7.74603141657821e-304, SL95 = 1.1955509896925e+30, nage = 771751936L, nlen = 0L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615829785-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 388 |
r
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:7, WORD_NUM:94">
</head>
<body bgcolor="white">
<a href="#0" id="0">Trushin.</a>
<a href="#1" id="1">Resign!"</a>
<a href="#2" id="2">; If they had been only personal enemies, Gorbachev might have been undone by them.</a>
<a href="#3" id="3">"; He called the crisis "one of the most difficult trials of the entire history" of his six-year rule.</a>
<a href="#4" id="4">; Despite the force brought to bear by the coup leaders, they were unable to dislodge Yeltsin from the Russian parliament, where he rallied the opposition, to enforce the curfew, or prevent large demonstrations nationwide.</a>
<a href="#5" id="5">"Let me sign a decree suspending the activity of the Russian Communist Party.</a>
<a href="#6" id="6">"I said I wouldn't make any deals," he said then.</a>
</body>
</html>
|
/DUC-Dataset/Summary_m100_R/D101.M.100.html.R
|
no_license
|
Angela7126/SLNSumEval
|
R
| false | false | 843 |
r
|
# packages this scratch script relies on
library(gsheet)    # gsheet2tbl()
library(dplyr)
library(lubridate) # parse_date_time(), minute(), second()
library(plotly)
raw_apex_df <- gsheet2tbl("https://docs.google.com/spreadsheets/d/1v40AgpaoRrA3v5eEjztB5Rw_OGKRHw3L56hhJdzdqAs/edit#gid=13184181")
# initial cleaning: get rid of rows with all NAs, clean up survival time, etc.
apex_df <- filter(raw_apex_df, rowSums(is.na(raw_apex_df)) != ncol(raw_apex_df)) %>%
mutate(Timestamp_raw = parse_date_time(Timestamp, orders = "mdy HMS", tz = "EST"),
Timestamp = as.character(Timestamp_raw),
survival_time_dt = strptime(`Survival Time`, format = "%M:%S"),
`Survival Time (min)` = round(minute(survival_time_dt) + second(survival_time_dt) / 60, digits = 2)) %>%
select(-c(X9, X10, survival_time_dt, `Survival Time`)) # %>%
# arrange(desc(Timestamp))
# summary stats df
summary_stats_df <- apex_df %>%
group_by(Player) %>%
summarize(`Num Games Played` = n(),
`Num Wins` = sum(ifelse(`Squad Placed` == 1, TRUE, FALSE)),
`Total Damage` = sum(Damage),
`Total Kills` = sum(Kills),
`Total Assists` = sum(Assists),
`Total Knocks` = sum(Knocks),
`Total Survival Time` = sum(`Survival Time (min)`),
`KDR` = round(`Total Kills` / `Num Games Played`, 2),
`Damage Per Game` = round(`Total Damage` / `Num Games Played`, 0),
`Damage Per Minute` = round(`Total Damage` / `Total Survival Time`, 1)) %>%
arrange(desc(`Num Games Played`))
leaderboard_df <- rbind(top_n(apex_df, 1, Damage) %>%
mutate(Statistic = "Most Damage"),
top_n(apex_df, 1, Kills) %>%
mutate(Statistic = "Most Kills"),
top_n(apex_df, 1, Assists) %>%
mutate(Statistic = "Most Assists"),
top_n(apex_df, 1, Knocks) %>%
mutate(Statistic = "Most Knocks")) %>%
select(-c(Timestamp_raw))
test <- apex_df %>%
filter(Player == "Oliver") %>%
mutate(`Game Number` = row_number())
test <- cbind(test, "Cumulative Damage" = cumsum(test$Damage))
damage_over_time <- plot_ly(test, x = ~`Timestamp_raw`, y = ~`Cumulative Damage`,
type = 'scatter', mode = 'lines') %>%
layout(title = paste("Damage over time by Oliver"), showlegend = T,
xaxis = list(title = "Date"),
yaxis = list(title = "Cumulative Damage"))
damage_over_time
oliver_df <- apex_df %>%
filter(Player == "Oliver") %>%
mutate(`Game Number` = row_number())
oliver_df <- cbind(oliver_df, "Cumulative Damage" = cumsum(oliver_df$Damage))
connor_df <- apex_df %>%
filter(Player == "Connor") %>%
mutate(`Game Number` = row_number())
connor_df <- cbind(connor_df, "Cumulative Damage" = cumsum(connor_df$Damage))
isaac_df <- apex_df %>%
filter(Player == "Isaac") %>%
mutate(`Game Number` = row_number())
isaac_df <- cbind(isaac_df, "Cumulative Damage" = cumsum(isaac_df$Damage))
nat_df <- apex_df %>%
filter(Player == "Nat") %>%
mutate(`Game Number` = row_number())
nat_df <- cbind(nat_df, "Cumulative Damage" = cumsum(nat_df$Damage))
thomas_df <- apex_df %>%
filter(Player == "Thomas") %>%
mutate(`Game Number` = row_number())
thomas_df <- cbind(thomas_df, "Cumulative Damage" = cumsum(thomas_df$Damage))
final_df <- rbind(oliver_df, connor_df, isaac_df, nat_df, thomas_df)
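# A more compact alternative sketch (assumes dplyr is loaded and apex_df has the columns
# used above; "final_df_alt" is a new name, not part of the original script): compute each
# player's game number and cumulative damage in one grouped pass instead of one block per
# player, leaving final_df above untouched.
final_df_alt <- apex_df %>%
  filter(Player %in% c("Oliver", "Connor", "Isaac", "Nat", "Thomas")) %>%
  group_by(Player) %>%
  mutate(`Game Number` = row_number(),
         `Cumulative Damage` = cumsum(Damage)) %>%
  ungroup()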
|
/scratch.R
|
no_license
|
Oliver-BE/visualizing-areas-of-interest
|
R
| false | false | 3,390 |
r
|
testlist <- list(Beta = 0, CVLinf = 0, FM = 2.00877667922349e-139, L50 = 0, L95 = 0, LenBins = numeric(0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = -6.24349738065159e+144, SL95 = 2.0091296205884e-139, nage = 587202560L, nlen = 0L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615829853-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 390 |
r
|
rm(list = ls())
library(dplyr)
library(BayesianTools)
library(minpack.lm)
library(LianaHydro)
library(ggplot2)
library(reshape2)
library(zoo)
library(stringr)
library(ggstance)
filePV <- "/home/femeunier/Documents/projects/LianaHydro/data/PV.all.csv"
dataPV <- read.csv(filePV,stringsAsFactors = FALSE,na.strings=c("","NA")) %>% filter(Organ == "Leaf") %>% dplyr::select(Species,GrowthForm,p0,tlp,rwc.tlp,epsil,Cft,wd,sla,MAP,MAT,Reference,Biome,Long,Lat,Organ) %>%
mutate(kl=NA,ksat=NA,Al.As=NA,ax=NA,p50=NA,Pmd=NA,Ppd=NA,Id=NA,Function=NA)
fileVC <- "/home/femeunier/Documents/projects/LianaHydro/data/VC.all.csv"
dataVC <- read.csv(fileVC,stringsAsFactors = FALSE,na.strings=c("","NA")) %>% dplyr::select(Species,GrowthForm,kl,ksat,Al.As,ax,p50,Pmd,Ppd,wd,sla,MAP,MAT,Reference,Biome,Long,Lat,Id,Function) %>% mutate(
p0=NA,tlp=NA,rwc.tlp=NA,epsil=NA,Cft=NA,Organ=NA)
data.all <- rbind(dataPV,dataVC)
data.all <- correct.obs(data.all)
data.all <- add.properties.data(data.all)
Cols <- c(rgb(0,0,139/255),rgb(0.10,0.50,0.00))
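# Fit a power law ax = a * (-P50)^b separately for lianas and trees; the fitted
# coefficients are used below (data_added) to impute missing slope (ax) values from P50.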
mLianas <- nlsLM(data = dataVC %>% filter(GrowthForm == "Liana"),
ax ~ a*(-p50)**b,
start=list(a=54.4, b=-1.17), control = nls.control(maxiter = 500, tol = 1e-05, minFactor = 1/1024/10,
printEval = TRUE, warnOnly = TRUE))
mTrees <- nlsLM(data = dataVC %>% filter(GrowthForm == "Tree"),
ax ~ a*(-p50)**b,
start=list(a=54.4, b=-1.17), control = nls.control(maxiter = 500, tol = 1e-05, minFactor = 1/1024/10,
printEval = TRUE, warnOnly = TRUE))
data_added <- dataVC %>% mutate(ax.mod = case_when(
GrowthForm == "Liana" & is.na(ax) ~ coef(mLianas)[1]*(-p50)** coef(mLianas)[2],
GrowthForm == "Tree" & is.na(ax) ~ coef(mTrees)[1]*(-p50)** coef(mTrees)[2],
TRUE ~ ax))
N <- nrow(dataVC)
FN <- c("weibull","sigmoidal","polynomial","polynomial2","cumnorm")
Ps <- c(12,50,88)
psi <- seq(-10,0,length.out = 100)
lianas <- trees <- summary <- c()
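# For each study: when raw PLC data exist (finite Id), refit the candidate functions and
# keep the best one; otherwise rebuild curve parameters from the reported P50, slope and
# function name. Either way, store the predicted PLC curve in the liana or tree matrix.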
for (i in seq(1,N)){
currentId <- data_added$Id[i]
currentGF <- data_added$GrowthForm[i]
currentP50 <- data_added$p50[i]
currentax <- data_added$ax[i]
currentfun <- data_added$Function[i]
if(is.finite(currentId)){
file <- paste0("./data/",tolower(currentGF),"rawdata.csv")
data <- read.csv(file,header = TRUE) %>% mutate(psi = - abs(psi)) %>% dplyr::select(Id,psi,PLC) %>% filter(Id == currentId)
models <- opt.data(data = data, function.names = FN)
models <- add.properties(models,x = Ps)
best.model <- find.best.model(models)[[1]]
current <- data.frame(Id = i,
GF = currentGF,
model = best.model$name,
P50 = best.model$invert[which(names(best.model$invert) == "P50")],
ax50 = best.model$slopes[which(names(best.model$slopes) == "ax50")],
RMSE = best.model$RMSE,
r2 = best.model$r.squared)
summary <- rbind(summary,
current)
function2call <- match.fun(best.model$name)
PLC <- do.call(function2call,list(psi,best.model$best.params[1],best.model$best.params[2]))
if (currentGF == "Liana"){
lianas <- rbind(lianas,PLC)
} else {
trees <- rbind(trees,PLC)
}
} else if (!is.na(currentP50) & !is.na(currentax) & !is.na(currentfun) & str_length(currentfun)>0){
params <- obs2params(ax50=-abs(currentax),P50=currentP50,funbest = currentfun)
current <-
data.frame(Id = i,
GF = currentGF,
model = currentfun,
P50 = currentP50,
ax50 = -abs(currentax),
RMSE = NA,
r2 = NA)
summary <- rbind(summary,current)
function2call <- match.fun(currentfun)
PLC <- do.call(function2call,list(psi,params[1],params[2]))
if (currentGF == "Liana"){
lianas <- rbind(lianas,PLC)
} else {
trees <- rbind(trees,PLC)
}
}
}
########################################################################################################################
# Bootstrap
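# Each iteration resamples the per-study PLC curves of each growth form with replacement,
# refits the candidate vulnerability functions to the pooled resample, and scales a
# resampled mean saturated conductivity by (1 - PLC/100) to obtain a conductivity curve.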
Nbootstrap = 30
Nliana <- nrow(lianas)
Ntree <- nrow(trees)
ksat_liana <- dataVC %>% filter(!is.na(kl) & GrowthForm == "Liana")%>%pull(kl)
Nliana_K <- length(ksat_liana)
ksat_tree <- dataVC %>% filter(!is.na(kl) & GrowthForm == "Tree")%>%pull(kl)
Ntree_K <- length(ksat_tree)
ksat_liana <- dataVC %>% filter(!is.na(ksat) & GrowthForm == "Liana")%>%pull(ksat)
Nliana_K <- length(ksat_liana)
ksat_tree <- dataVC %>% filter(!is.na(ksat) & GrowthForm == "Tree")%>%pull(ksat)
Ntree_K <- length(ksat_tree)
bootstrap <- data.summary <- data.frame()
for (i in seq(1,Nbootstrap)){
print(i/Nbootstrap)
# Liana
sample <- sample.int(Nliana, size = Nliana, replace = TRUE)
liana_sample <- lianas[sample,]
data.liana <- data.frame(psi = rep(psi,Nliana),
PLC = as.vector(t(liana_sample)))
models <- opt.data(data = data.liana,
function.names = c("weibull","sigmoidal","polynomial","polynomial2","cumnorm"))
models <- add.properties(models,x = 50)
best.modelL <- find.best.model(models)[[1]]
sampleK <- sample.int(Nliana_K, size = Nliana_K, replace = TRUE)
KsampleL <- mean(ksat_liana[sampleK])
PLC_L <- best.modelL$PLC.predict.all
K_L <- KsampleL*(1-PLC_L/100)
bootstrap <- rbind(bootstrap,
data.frame(id = i,
psi = best.modelL$psi.all,
PLC = PLC_L,
k = K_L,GF = "Liana"))
# Tree
sample <- sample.int(Ntree, size = Ntree, replace = TRUE)
tree_sample <- trees[sample,]
data.tree <- data.frame(psi = rep(psi,Ntree),
PLC = as.vector(t(tree_sample)))
models <- opt.data(data = data.tree,
function.names = c("weibull","sigmoidal","polynomial","polynomial2"))
models <- add.properties(models,x = 50)
best.modelT <- find.best.model(models)[[1]]
sampleK <- sample.int(Ntree_K, size = Ntree_K, replace = TRUE)
KsampleT <- mean(ksat_tree[sampleK])
PLC_T <- best.modelT$PLC.predict.all
K_T <- KsampleT*(1-PLC_T/100)
bootstrap <- rbind(bootstrap,
data.frame(id = i,
psi = best.modelT$psi.all,
PLC = PLC_T,
k = K_T,
GF = "Tree"))
# Summary
data.summary <- rbind(data.summary,
data.frame(P50 = best.modelL$invert,
ax50 = best.modelL$slopes,
GF = "Liana"),
data.frame(P50 = best.modelT$invert,
ax50 = best.modelT$slopes,
GF = "Tree"))
}
N = 1
signif.all <- data.frame()
for (i in seq(1,N)){
print(i)
signif <- bootstrap %>% group_by(psi,GF) %>% ungroup() %>% group_by(psi) %>% summarise(PLC = kruskal.test(formula = PLC ~ GF)$p.value,
k = kruskal.test(formula = k ~ GF)$p.value) %>% mutate(num = i)
signif.all <- rbind(signif.all,
signif)
}
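# Average the per-psi Kruskal-Wallis p-values across iterations and smooth them with a
# 100-point rolling mean; these smoothed p-values drive the significance shading below.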
signif <- signif.all %>% group_by(psi) %>% summarise(alpha_PLC = mean(PLC),
alpha_k = mean(k)) %>% mutate(alpha_PLC = rollapply(alpha_PLC,100,mean,na.rm=TRUE,partial=TRUE),
alpha_k = rollapply(alpha_k,100,mean,na.rm=TRUE,partial=TRUE))
bootstrap_sum <- bootstrap %>% filter(k > 0) %>% group_by(GF,psi) %>% summarise(PLC_m = mean(PLC),
PLC_low = quantile(PLC,0.025),
PLC_high = quantile(PLC,0.975),
k_m = mean(k),
k_low = quantile(k,0.025),
k_high = quantile(k,0.975))
pos <- bootstrap_sum %>% group_by(GF) %>% summarise(P50low = psi[which.min(abs(PLC_low - 50))],
P50high = psi[which.min(abs(PLC_high - 50))],
P50m = psi[which.min(abs(PLC_m - 50))])
P50bootstrap <- bootstrap %>% group_by(GF,id) %>% filter((abs(PLC - 50)) == min(abs(PLC - 50)))
P50l <- pos %>% filter(GF == "Liana")
P50t <- pos %>% filter(GF == "Tree")
slopes <- data.summary %>% group_by(GF) %>% summarise(ax50m = mean(ax50))
intercept <- c(50-slopes$ax50m[1]*P50l$P50m,50-slopes$ax50m[2]*P50t$P50m)
psi_x <- c(0.5,1.5)
y = slopes$ax50m[1]*psi_x*c(P50l$P50m)+intercept[1]
psi_x2 <- c(0.6,1.4)
y2 = slopes$ax50m[2]*psi_x2*c(P50t$P50m)+intercept[2]
w = 0.8; Top = 101.1 ; bottom = 1
# PLC curves
ggplot() +
geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
fill = as.factor(GF),ymin = PLC_low,ymax = PLC_high),alpha = 0.1,colour = NA) +
geom_boxploth(data = P50bootstrap %>% filter(GF == "Liana"),aes(x = psi,y= 3,fill = GF),alpha = 0.2,width = 5,outlier.shape = NA) +
geom_boxploth(data = P50bootstrap %>% filter(GF == "Tree"),aes(x = psi,y= 3,fill = GF),alpha = 0.2,width = 5,outlier.shape = NA) +
geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.01],
ymin = Top -w,
ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.05],
ymin = Top -w,
ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
geom_segment(aes(x = psi_x[1]*P50l$P50m,xend = psi_x[2]*P50l$P50m,
y = y[1], yend = y[2]), colour = Cols[1],linetype = 2) +
geom_segment(aes(x = psi_x2[1]*P50t$P50m,xend = psi_x2[2]*P50t$P50m,
y = y2[1], yend = y2[2]), colour = Cols[2],linetype = 2) +
geom_line(data = bootstrap_sum,aes(x = psi,y = PLC_m,color = as.factor(GF))) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
scale_x_continuous(expand = c(0,0)) +
scale_y_continuous(limits = c(0,102),expand = c(0.0,0.0)) +
theme_bw() + theme(legend.position = "none",
text = element_text(size = 16)) + labs(x = "",y = "")
ggsave(plot = last_plot(),
filename = "./Figures/VC.png", dpi = 300, width = 15, height = 8)
# # PLC curves
# ggplot() +
# geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
# fill = as.factor(GF),ymin = PLC_low,ymax = PLC_high),alpha = 0.1,colour = NA) +
# geom_boxploth(data = data.all %>% filter(GrowthForm == "Liana"),aes(x = p50,y= 6,fill = GrowthForm),alpha = 0.2,width = 3,outlier.shape = NA) +
# geom_boxploth(data = data.all %>% filter(GrowthForm == "Tree"),aes(x = p50,y= 3,fill = GrowthForm),alpha = 0.2,width = 3,outlier.shape = NA) +
# geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.01],
# ymin = Top -w,
# ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
# geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.05],
# ymin = Top -w,
# ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
# geom_segment(aes(x = psi_x[1]*P50l$P50m,xend = psi_x[2]*P50l$P50m,
# y = y[1], yend = y[2]), colour = Cols[1],linetype = 2) +
# geom_segment(aes(x = psi_x2[1]*P50t$P50m,xend = psi_x2[2]*P50t$P50m,
# y = y2[1], yend = y2[2]), colour = Cols[2],linetype = 2) +
# geom_line(data = bootstrap_sum,aes(x = psi,y = PLC_m,color = as.factor(GF))) +
# scale_color_manual(values = Cols) +
# scale_fill_manual(values = Cols) +
# scale_x_continuous(expand = c(0,0)) +
# scale_y_continuous(limits = c(0,102),expand = c(0.0,0.0)) +
# theme_bw() + theme(legend.position = "none") + labs(x = "",y = "")
ggplot(data = dataVC, aes(x = GrowthForm,y = ksat,
fill = as.factor(GrowthForm)))+
geom_boxplot(alpha = 0.3) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
scale_y_log10() +
labs(x = "",y= "") +
theme_bw() + theme(legend.position = "none",
axis.text.x = element_blank(),text = element_text(size = 16))
ggsave(plot = last_plot(),
filename = "./Figures/ksatboxplot.png", dpi = 300, width = 5, height = 5)
# k curves
Top = 35 ; w = 4 ; bottom = 0.003 ; wbot = 0.0002 ; bottom2 = bottom - 2*wbot;
bottom3 = 0.1; wbot2 = 0.01
bottom4=0.08; wbot2 = 0.01
yminS05 <- yminS01 <- signif$psi^0*+Top - w
ymaxS05 <- ymaxS01 <- signif$psi^0*+Top + w
yminS05[signif$alpha_k>0.05] <- ymaxS05[signif$alpha_k>0.05] <- NA
ymaxS01[signif$alpha_k>0.01] <- yminS01[signif$alpha_k>0.01] <- NA
ggplot() +
geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
fill = as.factor(GF),ymin = k_low,ymax = k_high),alpha = 0.1,colour = NA) +
geom_line(data = bootstrap_sum,aes(x = psi,y = k_m,color = as.factor(GF))) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS01,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS05,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
geom_boxploth(data = data.all,aes(x = Pmd,y= 0.03,fill = GrowthForm,color = GrowthForm),alpha = 0.2, outlier.shape = NA,width = 0.4) +
geom_boxploth(data = data.all,aes(x = Ppd,y= 0.3,fill = GrowthForm,color = GrowthForm),alpha = 0.2, outlier.shape = NA,width = 0.4) +
geom_boxploth(data = data.all,aes(x = tlp ,y= 0.003,fill = GrowthForm,color = GrowthForm),alpha = 0.2, outlier.shape = NA,width = 0.4) +
scale_x_continuous(expand = c(0,0)) +
scale_y_log10(expand = c(0.01,0.01)) +
theme_bw() + theme(legend.position = "none") +
labs(x = "Water potential",y="Stem conductivity")
data_trait <- data.all %>% dplyr::select(GrowthForm, p50,Pmd,Ppd,tlp) %>% pivot_longer(cols = c(p50,Pmd,Ppd,tlp),names_to = "Trait", values_to = "value") %>% filter(!is.na(value)) %>%
mutate(Trait = as.factor(Trait))
data_trait$Trait <- factor(data_trait$Trait,levels(data_trait$Trait)[c(3,2,1,4)])
ggplot() +
geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
fill = as.factor(GF),ymin = k_low,ymax = k_high),alpha = 0.1,colour = NA) +
geom_line(data = bootstrap_sum,aes(x = psi,y = k_m,color = as.factor(GF))) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS01,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS05,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
geom_boxploth(data = data_trait %>% filter(GrowthForm == "Tree"),aes(x = value,y= 0.03, group = Trait),alpha = 0.2, outlier.shape = NA,width = 0.5,
fill = Cols[2]) +
geom_boxploth(data = data_trait %>% filter(GrowthForm == "Liana"),aes(x = value,y= 0.005, group = Trait),alpha = 0.2, outlier.shape = NA,width = 0.5,
fill = Cols[1]) +
scale_x_continuous(expand = c(0,0)) +
scale_y_log10(expand = c(0.01,0.01)) +
theme_bw() + theme(legend.position = "none",
text = element_text(size = 16)) +
labs(x = "",y="")
ggsave(plot = last_plot(),
filename = "./Figures/ksatcurve.png", dpi = 300, width = 10, height = 8)
data_trait_fac <- data_trait %>% mutate(t = paste(GrowthForm,Trait,sep = '|'))
data_trait_tree <- data_trait_fac %>% filter(GrowthForm == "Tree")
data_trait_liana <- data_trait_fac %>% filter(GrowthForm == "Liana")
all.pw <- pairwise.wilcox.test(data_trait_fac$value, data_trait_fac$t)$p.value
all.pw[(all.pw < 0.05)] <- NA
tree.pw <- pairwise.wilcox.test(data_trait_tree$value, data_trait_tree$t)$p.value
tree.pw[(tree.pw < 0.01)] <- NA
liana.pw <- pairwise.wilcox.test(data_trait_liana$value, data_trait_liana$t)$p.value
liana.pw[(liana.pw < 0.01)] <- NA
data_trait_fac %>% group_by(t) %>% summarise(v = mean(value,na.rm=TRUE)) %>% arrange(desc(v))
|
/scripts/VC_K_curves.R
|
no_license
|
femeunier/LianaHydro
|
R
| false | false | 17,357 |
r
|
rm(list = ls())
library(dplyr)
library(BayesianTools)
library(minpack.lm)
library(LianaHydro)
library(ggplot2)
library(reshape2)
library(zoo)
library(stringr)
library(ggstance)
filePV <- "/home/femeunier/Documents/projects/LianaHydro/data/PV.all.csv"
dataPV <- read.csv(filePV,stringsAsFactors = FALSE,na.strings=c("","NA")) %>% filter(Organ == "Leaf") %>% dplyr::select(Species,GrowthForm,p0,tlp,rwc.tlp,epsil,Cft,wd,sla,MAP,MAT,Reference,Biome,Long,Lat,Organ) %>%
mutate(kl=NA,ksat=NA,Al.As=NA,ax=NA,p50=NA,Pmd=NA,Ppd=NA,Id=NA,Function=NA)
fileVC <- "/home/femeunier/Documents/projects/LianaHydro/data/VC.all.csv"
dataVC <- read.csv(fileVC,stringsAsFactors = FALSE,na.strings=c("","NA")) %>% dplyr::select(Species,GrowthForm,kl,ksat,Al.As,ax,p50,Pmd,Ppd,wd,sla,MAP,MAT,Reference,Biome,Long,Lat,Id,Function) %>% mutate(
p0=NA,tlp=NA,rwc.tlp=NA,epsil=NA,Cft=NA,Organ=NA)
data.all <- rbind(dataPV,dataVC)
data.all <- correct.obs(data.all)
data.all <- add.properties.data(data.all)
Cols <- c(rgb(0,0,139/255),rgb(0.10,0.50,0.00))
mLianas <- nlsLM(data = dataVC %>% filter(GrowthForm == "Liana"),
ax ~ a*(-p50)**b,
start=list(a=54.4, b=-1.17), control = nls.control(maxiter = 500, tol = 1e-05, minFactor = 1/1024/10,
printEval = TRUE, warnOnly = TRUE))
mTrees <- nlsLM(data = dataVC %>% filter(GrowthForm == "Tree"),
ax ~ a*(-p50)**b,
start=list(a=54.4, b=-1.17), control = nls.control(maxiter = 500, tol = 1e-05, minFactor = 1/1024/10,
printEval = TRUE, warnOnly = TRUE))
data_added <- dataVC %>% mutate(ax.mod = case_when(
GrowthForm == "Liana" & is.na(ax) ~ coef(mLianas)[1]*(-p50)** coef(mLianas)[2],
GrowthForm == "Tree" & is.na(ax) ~ coef(mTrees)[1]*(-p50)** coef(mTrees)[2],
TRUE ~ ax))
N <- nrow(dataVC)
FN <- c("weibull","sigmoidal","polynomial","polynomial2","cumnorm")
Ps <- c(12,50,88)
psi <- seq(-10,0,length.out = 100)
lianas <- trees <- summary <- c()
for (i in seq(1,N)){
currentId <- data_added$Id[i]
currentGF <- data_added$GrowthForm[i]
currentP50 <- data_added$p50[i]
currentax <- data_added$ax[i]
currentfun <- data_added$Function[i]
if(is.finite(currentId)){
file <- paste0("./data/",tolower(currentGF),"rawdata.csv")
data <- read.csv(file,header = TRUE) %>% mutate(psi = - abs(psi)) %>% dplyr::select(Id,psi,PLC) %>% filter(Id == currentId)
models <- opt.data(data = data, function.names = FN)
models <- add.properties(models,x = Ps)
best.model <- find.best.model(models)[[1]]
current <- data.frame(Id = i,
GF = currentGF,
model = best.model$name,
P50 = best.model$invert[which(names(best.model$invert) == "P50")],
ax50 = best.model$slopes[which(names(best.model$slopes) == "ax50")],
RMSE = best.model$RMSE,
r2 = best.model$r.squared)
summary <- rbind(summary,
current)
function2call <- match.fun(best.model$name)
PLC <- do.call(function2call,list(psi,best.model$best.params[1],best.model$best.params[2]))
if (currentGF == "Liana"){
lianas <- rbind(lianas,PLC)
} else {
trees <- rbind(trees,PLC)
}
} else if (!is.na(currentP50) & !is.na(currentax) & !is.na(currentfun) & str_length(currentfun)>0){
params <- obs2params(ax50=-abs(currentax),P50=currentP50,funbest = currentfun)
current <-
data.frame(Id = i,
GF = currentGF,
model = currentfun,
P50 = currentP50,
ax50 = -abs(currentax),
RMSE = NA,
r2 = NA)
summary <- rbind(summary,current)
function2call <- match.fun(currentfun)
PLC <- do.call(function2call,list(psi,params[1],params[2]))
if (currentGF == "Liana"){
lianas <- rbind(lianas,PLC)
} else {
trees <- rbind(trees,PLC)
}
}
}
########################################################################################################################
# Bootstrap
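# Resample the fitted liana and tree PLC curves (rows of `lianas`/`trees`) and the
# observed saturated conductivities with replacement, refit the vulnerability-curve
# models on each bootstrap sample, and convert PLC to conductivity with
# K = Ksat * (1 - PLC/100). Every bootstrap trajectory is stored so that mean
# curves and 95% envelopes can be summarised afterwards.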
Nbootstrap = 30
Nliana <- nrow(lianas)
Ntree <- nrow(trees)
ksat_liana <- dataVC %>% filter(!is.na(kl) & GrowthForm == "Liana")%>%pull(kl)
Nliana_K <- length(ksat_liana)
ksat_tree <- dataVC %>% filter(!is.na(kl) & GrowthForm == "Tree")%>%pull(kl)
Ntree_K <- length(ksat_tree)
ksat_liana <- dataVC %>% filter(!is.na(ksat) & GrowthForm == "Liana")%>%pull(ksat)
Nliana_K <- length(ksat_liana)
ksat_tree <- dataVC %>% filter(!is.na(ksat) & GrowthForm == "Tree")%>%pull(ksat)
Ntree_K <- length(ksat_tree)
bootstrap <- data.summary <- data.frame()
for (i in seq(1,Nbootstrap)){
print(i/Nbootstrap)
# Liana
sample <- sample.int(Nliana, size = Nliana, replace = TRUE)
liana_sample <- lianas[sample,]
data.liana <- data.frame(psi = rep(psi,Nliana),
PLC = as.vector(t(liana_sample)))
models <- opt.data(data = data.liana,
function.names = c("weibull","sigmoidal","polynomial","polynomial2","cumnorm"))
models <- add.properties(models,x = 50)
best.modelL <- find.best.model(models)[[1]]
sampleK <- sample.int(Nliana_K, size = Nliana_K, replace = TRUE)
KsampleL <- mean(ksat_liana[sampleK])
PLC_L <- best.modelL$PLC.predict.all
K_L <- KsampleL*(1-PLC_L/100)
bootstrap <- rbind(bootstrap,
data.frame(id = i,
psi = best.modelL$psi.all,
PLC = PLC_L,
k = K_L,GF = "Liana"))
# Tree
sample <- sample.int(Ntree, size = Ntree, replace = TRUE)
tree_sample <- trees[sample,]
data.tree <- data.frame(psi = rep(psi,Ntree),
PLC = as.vector(t(tree_sample)))
models <- opt.data(data = data.tree,
function.names = c("weibull","sigmoidal","polynomial","polynomial2"))
models <- add.properties(models,x = 50)
best.modelT <- find.best.model(models)[[1]]
sampleK <- sample.int(Ntree_K, size = Ntree_K, replace = TRUE)
KsampleT <- mean(ksat_tree[sampleK])
PLC_T <- best.modelT$PLC.predict.all
K_T <- KsampleT*(1-PLC_T/100)
bootstrap <- rbind(bootstrap,
data.frame(id = i,
psi = best.modelT$psi.all,
PLC = PLC_T,
k = K_T,
GF = "Tree"))
# Summary
data.summary <- rbind(data.summary,
data.frame(P50 = best.modelL$invert,
ax50 = best.modelL$slopes,
GF = "Liana"),
data.frame(P50 = best.modelT$invert,
ax50 = best.modelT$slopes,
GF = "Tree"))
}
N = 1
signif.all <- data.frame()
for (i in seq(1,N)){
print(i)
signif <- bootstrap %>% group_by(psi,GF) %>% ungroup() %>% group_by(psi) %>% summarise(PLC = kruskal.test(formula = PLC ~ GF)$p.value,
k = kruskal.test(formula = k ~ GF)$p.value) %>% mutate(num = i)
signif.all <- rbind(signif.all,
signif)
}
signif <- signif.all %>% group_by(psi) %>% summarise(alpha_PLC = mean(PLC),
alpha_k = mean(k)) %>% mutate(alpha_PLC = rollapply(alpha_PLC,100,mean,na.rm=TRUE,partial=TRUE),
alpha_k = rollapply(alpha_k,100,mean,na.rm=TRUE,partial=TRUE))
bootstrap_sum <- bootstrap %>% filter(k > 0) %>% group_by(GF,psi) %>% summarise(PLC_m = mean(PLC),
PLC_low = quantile(PLC,0.025),
PLC_high = quantile(PLC,0.975),
k_m = mean(k),
k_low = quantile(k,0.025),
k_high = quantile(k,0.975))
pos <- bootstrap_sum %>% group_by(GF) %>% summarise(P50low = psi[which.min(abs(PLC_low - 50))],
P50high = psi[which.min(abs(PLC_high - 50))],
P50m = psi[which.min(abs(PLC_m - 50))])
P50bootstrap <- bootstrap %>% group_by(GF,id) %>% filter((abs(PLC - 50)) == min(abs(PLC - 50)))
P50l <- pos %>% filter(GF == "Liana")
P50t <- pos %>% filter(GF == "Tree")
slopes <- data.summary %>% group_by(GF) %>% summarise(ax50m = mean(ax50))
intercept <- c(50-slopes$ax50m[1]*P50l$P50m,50-slopes$ax50m[2]*P50t$P50m)
psi_x <- c(0.5,1.5)
y = slopes$ax50m[1]*psi_x*c(P50l$P50m)+intercept[1]
psi_x2 <- c(0.6,1.4)
y2 = slopes$ax50m[2]*psi_x2*c(P50t$P50m)+intercept[2]
w = 0.8; Top = 101.1 ; bottom = 1
# PLC curves
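# Ribbons: 95% bootstrap envelopes of PLC; horizontal boxplots: bootstrap P50
# distributions; grey bands near the top of the panel: water potentials where
# liana and tree PLC differ (dark grey p < 0.01, light grey p < 0.05);
# dashed segments: slope of each curve at its P50.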
ggplot() +
geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
fill = as.factor(GF),ymin = PLC_low,ymax = PLC_high),alpha = 0.1,colour = NA) +
geom_boxploth(data = P50bootstrap %>% filter(GF == "Liana"),aes(x = psi,y= 3,fill = GF),alpha = 0.2,width = 5,outlier.shape = NA) +
geom_boxploth(data = P50bootstrap %>% filter(GF == "Tree"),aes(x = psi,y= 3,fill = GF),alpha = 0.2,width = 5,outlier.shape = NA) +
geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.01],
ymin = Top -w,
ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.05],
ymin = Top -w,
ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
geom_segment(aes(x = psi_x[1]*P50l$P50m,xend = psi_x[2]*P50l$P50m,
y = y[1], yend = y[2]), colour = Cols[1],linetype = 2) +
geom_segment(aes(x = psi_x2[1]*P50t$P50m,xend = psi_x2[2]*P50t$P50m,
y = y2[1], yend = y2[2]), colour = Cols[2],linetype = 2) +
geom_line(data = bootstrap_sum,aes(x = psi,y = PLC_m,color = as.factor(GF))) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
scale_x_continuous(expand = c(0,0)) +
scale_y_continuous(limits = c(0,102),expand = c(0.0,0.0)) +
theme_bw() + theme(legend.position = "none",
text = element_text(size = 16)) + labs(x = "",y = "")
ggsave(plot = last_plot(),
filename = "./Figures/VC.png", dpi = 300, width = 15, height = 8)
# # PLC curves
# ggplot() +
# geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
# fill = as.factor(GF),ymin = PLC_low,ymax = PLC_high),alpha = 0.1,colour = NA) +
# geom_boxploth(data = data.all %>% filter(GrowthForm == "Liana"),aes(x = p50,y= 6,fill = GrowthForm),alpha = 0.2,width = 3,outlier.shape = NA) +
# geom_boxploth(data = data.all %>% filter(GrowthForm == "Tree"),aes(x = p50,y= 3,fill = GrowthForm),alpha = 0.2,width = 3,outlier.shape = NA) +
# geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.01],
# ymin = Top -w,
# ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
# geom_ribbon(data = data.frame(x = signif$psi[signif$alpha_PLC<0.05],
# ymin = Top -w,
# ymax = Top + w),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
# geom_segment(aes(x = psi_x[1]*P50l$P50m,xend = psi_x[2]*P50l$P50m,
# y = y[1], yend = y[2]), colour = Cols[1],linetype = 2) +
# geom_segment(aes(x = psi_x2[1]*P50t$P50m,xend = psi_x2[2]*P50t$P50m,
# y = y2[1], yend = y2[2]), colour = Cols[2],linetype = 2) +
# geom_line(data = bootstrap_sum,aes(x = psi,y = PLC_m,color = as.factor(GF))) +
# scale_color_manual(values = Cols) +
# scale_fill_manual(values = Cols) +
# scale_x_continuous(expand = c(0,0)) +
# scale_y_continuous(limits = c(0,102),expand = c(0.0,0.0)) +
# theme_bw() + theme(legend.position = "none") + labs(x = "",y = "")
ggplot(data = dataVC, aes(x = GrowthForm,y = ksat,
fill = as.factor(GrowthForm)))+
geom_boxplot(alpha = 0.3) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
scale_y_log10() +
labs(x = "",y= "") +
theme_bw() + theme(legend.position = "none",
axis.text.x = element_blank(),text = element_text(size = 16))
ggsave(plot = last_plot(),
filename = "./Figures/ksatboxplot.png", dpi = 300, width = 5, height = 5)
# k curves
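# Same bootstrap envelopes expressed as conductivities, K = Ksat * (1 - PLC/100),
# plotted on a log scale, with boxplots of the water-potential traits
# (P50, midday and predawn water potentials, turgor loss point) overlaid for
# each growth form.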
Top = 35 ; w = 4 ; bottom = 0.003 ; wbot = 0.0002 ; bottom2 = bottom - 2*wbot;
bottom3 = 0.1; wbot2 = 0.01
bottom4=0.08; wbot2 = 0.01
yminS05 <- yminS01 <- signif$psi^0*+Top - w
ymaxS05 <- ymaxS01 <- signif$psi^0*+Top + w
yminS05[signif$alpha_k>0.05] <- ymaxS05[signif$alpha_k>0.05] <- NA
ymaxS01[signif$alpha_k>0.01] <- yminS01[signif$alpha_k>0.01] <- NA
ggplot() +
geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
fill = as.factor(GF),ymin = k_low,ymax = k_high),alpha = 0.1,colour = NA) +
geom_line(data = bootstrap_sum,aes(x = psi,y = k_m,color = as.factor(GF))) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS01,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS05,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
geom_boxploth(data = data.all,aes(x = Pmd,y= 0.03,fill = GrowthForm,color = GrowthForm),alpha = 0.2, outlier.shape = NA,width = 0.4) +
geom_boxploth(data = data.all,aes(x = Ppd,y= 0.3,fill = GrowthForm,color = GrowthForm),alpha = 0.2, outlier.shape = NA,width = 0.4) +
geom_boxploth(data = data.all,aes(x = tlp ,y= 0.003,fill = GrowthForm,color = GrowthForm),alpha = 0.2, outlier.shape = NA,width = 0.4) +
scale_x_continuous(expand = c(0,0)) +
scale_y_log10(expand = c(0.01,0.01)) +
theme_bw() + theme(legend.position = "none") +
labs(x = "Water potential",y="Stem conductivity")
data_trait <- data.all %>% dplyr::select(GrowthForm, p50,Pmd,Ppd,tlp) %>% pivot_longer(cols = c(p50,Pmd,Ppd,tlp),names_to = "Trait", values_to = "value") %>% filter(!is.na(value)) %>%
mutate(Trait = as.factor(Trait))
data_trait$Trait <- factor(data_trait$Trait,levels(data_trait$Trait)[c(3,2,1,4)])
ggplot() +
geom_ribbon(data = bootstrap_sum,aes(x = psi,color = as.factor(GF),
fill = as.factor(GF),ymin = k_low,ymax = k_high),alpha = 0.1,colour = NA) +
geom_line(data = bootstrap_sum,aes(x = psi,y = k_m,color = as.factor(GF))) +
scale_color_manual(values = Cols) +
scale_fill_manual(values = Cols) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS01,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "darkgrey", colour = NA,alpha = 0.5) +
geom_ribbon(data = data.frame(x = signif$psi,
ymin = yminS05,
ymax = ymaxS05),aes(x = x,ymin = ymin, ymax = ymax), fill = "lightgrey", colour = NA,alpha = 0.5) +
geom_boxploth(data = data_trait %>% filter(GrowthForm == "Tree"),aes(x = value,y= 0.03, group = Trait),alpha = 0.2, outlier.shape = NA,width = 0.5,
fill = Cols[2]) +
geom_boxploth(data = data_trait %>% filter(GrowthForm == "Liana"),aes(x = value,y= 0.005, group = Trait),alpha = 0.2, outlier.shape = NA,width = 0.5,
fill = Cols[1]) +
scale_x_continuous(expand = c(0,0)) +
scale_y_log10(expand = c(0.01,0.01)) +
theme_bw() + theme(legend.position = "none",
text = element_text(size = 16)) +
labs(x = "",y="")
ggsave(plot = last_plot(),
filename = "./Figures/ksatcurve.png", dpi = 300, width = 10, height = 8)
data_trait_fac <- data_trait %>% mutate(t = paste(GrowthForm,Trait,sep = '|'))
data_trait_tree <- data_trait_fac %>% filter(GrowthForm == "Tree")
data_trait_liana <- data_trait_fac %>% filter(GrowthForm == "Liana")
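# Pairwise Wilcoxon tests between all growth-form x trait combinations;
# p-values below the chosen threshold are blanked (NA), so the remaining
# entries flag the non-significant pairs.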
all.pw <- pairwise.wilcox.test(data_trait_fac$value, data_trait_fac$t)$p.value
all.pw[(all.pw < 0.05)] <- NA
tree.pw <- pairwise.wilcox.test(data_trait_tree$value, data_trait_tree$t)$p.value
tree.pw[(tree.pw < 0.01)] <- NA
liana.pw <- pairwise.wilcox.test(data_trait_liana$value, data_trait_liana$t)$p.value
liana.pw[(liana.pw < 0.01)] <- NA
data_trait_fac %>% group_by(t) %>% summarise(v = mean(value,na.rm=TRUE)) %>% arrange(desc(v))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracking-runs.R
\name{mlflow_set_tag}
\alias{mlflow_set_tag}
\title{Set Tag}
\usage{
mlflow_set_tag(key, value, run_id = NULL, client = NULL)
}
\arguments{
\item{key}{Name of the tag. Maximum size is 255 bytes. This field is required.}
\item{value}{String value of the tag being logged. Maximum size is 500 bytes. This field is required.}
\item{run_id}{Run ID.}
\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}.
If specified, MLflow will use the tracking server associated with the passed-in client. If
unspecified (the common case),
MLflow will use the tracking server associated with the current tracking URI.}
}
\description{
Sets a tag on a run. Tags are run metadata that can be updated during a run and
after a run completes.
}
|
/mlflow/R/mlflow/man/mlflow_set_tag.Rd
|
permissive
|
mlflow/mlflow
|
R
| false | true | 859 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tracking-runs.R
\name{mlflow_set_tag}
\alias{mlflow_set_tag}
\title{Set Tag}
\usage{
mlflow_set_tag(key, value, run_id = NULL, client = NULL)
}
\arguments{
\item{key}{Name of the tag. Maximum size is 255 bytes. This field is required.}
\item{value}{String value of the tag being logged. Maximum size is 500 bytes. This field is required.}
\item{run_id}{Run ID.}
\item{client}{(Optional) An MLflow client object returned from \link[mlflow]{mlflow_client}.
If specified, MLflow will use the tracking server associated with the passed-in client. If
unspecified (the common case),
MLflow will use the tracking server associated with the current tracking URI.}
}
\description{
Sets a tag on a run. Tags are run metadata that can be updated during a run and
after a run completes.
}
|
#' bindMatches
#'
#' Given 2 data frames (dfA and dfB) and an nx2 matrix of matched indices, will return a dataframe containing
#' both dataframes bound by index. If there are identical names in both dataframes, the result
#' will differentiate by adding idA (default is "A.") to the duplicate names in dfA, and idB (default is "B.") to those in dfB.
#'
#' @usage bindMatches(dfA, dfB, inds, idA, idB)
#'
#' @param dfA First data frame to bind
#' @param dfB Second data frame to bind
#' @param inds A nx2 index pair matrix to bind on
#' @param idA What to concatenate to a column name in dfA if there exists an identical column in dfB
#' @param idB What to concatenate to a column name in dfB if there exists an identical column in dfB
#'
#'
#' @return \code{bindMatches} returns a dataframe of the original dataframes bound by index.
#'
#' @author Sam Murray<slmurray@andrew.cmu.edu>
#' @export
#' @import tidyverse
bindMatches <- function(dfA, dfB, inds, idA = "A.", idB = "B.") {
namesA = names(dfA)
namesB = names(dfB)
namesAB = intersect(namesA, namesB)
  if(length(namesAB) != 0){
dup_inds_A <- namesA %in% namesAB
namesA[dup_inds_A] <- paste0(idA,namesA[dup_inds_A])
names(dfA) <- namesA
dup_inds_B <- namesB %in% namesAB
namesB[dup_inds_B] <- paste0(idB,namesB[dup_inds_B])
names(dfB) <- namesB
}
return(cbind(dfA[inds[,1], ], dfB[inds[,2], ]))
}
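
# Usage sketch (toy data, new to this file, for illustration only). Both data
# frames share the column names "id" and "name", so the bound result carries
# the "A."/"B." prefixes. Wrapped in `if (FALSE)` so sourcing the file does not
# execute it:
if (FALSE) {
  dfA <- data.frame(id = 1:3, name = c("ann", "bob", "cat"))
  dfB <- data.frame(id = 7:9, name = c("anne", "rob", "kat"))
  inds <- cbind(1:3, 1:3)      # row i of dfA is matched to row i of dfB
  bindMatches(dfA, dfB, inds)  # columns: A.id, A.name, B.id, B.name
}
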
#' bindMatches_p
#'
#' Same as bindMatches, but also binds an extra column for the probability of each match
#'
#' @usage bindMatches_p(dfA, dfB, p, inds, idA, idB)
#'
#' @param dfA First data frame to bind
#' @param dfB Second data frame to bind
#' @param p A nx1 vector of the probability of each pair
#' @param inds A nx2 index pair matrix to bind on
#' @param idA What to concatenate to a column name in dfA if there exists an identical column in dfB
#' @param idB What to concatenate to a column name in dfB if there exists an identical column in dfB
#'
#'
#' @return \code{bindMatches_p} returns a dataframe of the original dataframes bound by index, plus a column with the probability of each match.
#'
#' @author Sam Murray<slmurray@andrew.cmu.edu>
#' @export
#' @import tidyverse
bindMatches_p <- function(dfA, dfB, p , inds, idA = "A.", idB = "B.") {
namesA = names(dfA)
namesB = names(dfB)
namesAB = intersect(namesA, namesB)
  if(length(namesAB) != 0){
dup_inds_A <- namesA %in% namesAB
namesA[dup_inds_A] <- paste0(idA,namesA[dup_inds_A])
names(dfA) <- namesA
dup_inds_B <- namesB %in% namesAB
namesB[dup_inds_B] <- paste0(idB,namesB[dup_inds_B])
names(dfB) <- namesB
}
return(cbind(dfA[inds[,1], ], dfB[inds[,2], ],p))
}
|
/.Rproj.user/70865A01/sources/per/t/a1202cfe/6D8DA4E2-contents
|
no_license
|
Sam-Murray/RecordLinkUtil
|
R
| false | false | 2,618 |
#' bindMatches
#'
#' Given 2 data frames(dfA and dfB) and a nx2 matrix of matched indices, will return a dataframe containing
#' both dataframes bound by index. If there are identical names in both dataframes, the result
#' will differentiate by adding idA(default is "A.") to the duplicate names in dfA, and idB(default is "B.") to those in dfB.
#'
#' @usage bindMatches(dfA, dfB, inds, idA, idB)
#'
#' @param dfA First data frame to bind
#' @param dfB Second data frame to bind
#' @param inds A nx2 index pair matrix to bind on
#' @param idA What to concatenate to a column name in dfA if there exists a identical column in dfB
#' @param idB What to concatenate to a column name in dfB if there exists a identical column in dfB
#'
#'
#' @return \code{bindMatches} returns a dataframe of the original dataframes bound by index.
#'
#' @author Sam Murray<slmurray@andrew.cmu.edu>
#' @export
#' @import tidyverse
bindMatches <- function(dfA, dfB, inds, idA = "A.", idB = "B.") {
namesA = names(dfA)
namesB = names(dfB)
namesAB = intersect(namesA, namesB)
  if(length(namesAB) != 0){
dup_inds_A <- namesA %in% namesAB
namesA[dup_inds_A] <- paste0(idA,namesA[dup_inds_A])
names(dfA) <- namesA
dup_inds_B <- namesB %in% namesAB
namesB[dup_inds_B] <- paste0(idB,namesB[dup_inds_B])
names(dfB) <- namesB
}
return(cbind(dfA[inds[,1], ], dfB[inds[,2], ]))
}
#' bindMatches_p
#'
#' Same as bindMatches, but also binds an extra column for the probability of each match
#'
#' @usage bindMatches_p(dfA, dfB, p, inds, idA, idB)
#'
#' @param dfA First data frame to bind
#' @param dfB Second data frame to bind
#' @param p A nx1 vector of the probability of each pair
#' @param inds A nx2 index pair matrix to bind on
#' @param idA What to concatenate to a column name in dfA if there exists an identical column in dfB
#' @param idB What to concatenate to a column name in dfB if there exists an identical column in dfB
#'
#'
#' @return \code{bindMatches_p} returns a dataframe of the original dataframes bound by index, plus a column with the probability of each match.
#'
#' @author Sam Murray<slmurray@andrew.cmu.edu>
#' @export
#' @import tidyverse
bindMatches_p <- function(dfA, dfB, p , inds, idA = "A.", idB = "B.") {
namesA = names(dfA)
namesB = names(dfB)
namesAB = intersect(namesA, namesB)
  if(length(namesAB) != 0){
dup_inds_A <- namesA %in% namesAB
namesA[dup_inds_A] <- paste0(idA,namesA[dup_inds_A])
names(dfA) <- namesA
dup_inds_B <- namesB %in% namesAB
namesB[dup_inds_B] <- paste0(idB,namesB[dup_inds_B])
names(dfB) <- namesB
}
return(cbind(dfA[inds[,1], ], dfB[inds[,2], ],p))
}
|
|
#' Add numbers, ignoring NAs
#'
#' @param x numeric vector
#' @param na.rm logical; if \code{TRUE} (the default), missing values are dropped before summing
#' @param digits for \code{round}
#' @param signif for \code{signif}
#' @param verbose logical; if \code{TRUE}, print progress messages
#'
#' @importFrom stringr str_trim
#' @importFrom units set_units
#' @importFrom unittools has_units restore_units
#'
#' @return sum of (non-missing) values in \code{x}
#'
#' @export
total <- function (
x,
na.rm = TRUE,
digits = Inf,
signif = Inf,
verbose = getOption("verbose")
) {
msg <- function (...) if(isTRUE(verbose)) message("[total] ", ...)
if (!is.numeric(x)) {
stop("input must be numeric")
}
# as.numeric() helps prevent integer overflows
summed <- sum(as.numeric(x), na.rm = na.rm)
if (unittools::has_units(x)) {
summed <- unittools::restore_units(summed, from = x)
}
if (isTRUE(is.finite(digits)) && isTRUE(is.finite(signif))) {
msg("WARNING: `digits` takes precedence over `signif`")
}
if (is.finite(digits)) {
msg("rounding to ", digits, " digits")
summed <- base::round(summed, digits = digits)
} else if (is.finite(signif)) {
msg("rounding to ", digits, " significant digits")
summed <- base::signif(summed, digits = signif)
}
# try to restore class (but see integer overflow protection above)
handle_error <- function (e) {
msg("WARNING: ", stringr::str_trim(as.character(e)))
msg('promoting result to class "numeric" instead of "', class(x), '"')
return(summed)
}
tryCatch(
class(summed) <- class(x),
warning = handle_error,
error = handle_error)
return(summed)
}
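
# Usage sketch (new to this file, for illustration only; assumes the package
# dependencies, notably `unittools`, are installed). Wrapped in `if (FALSE)` so
# sourcing the file does not execute it:
if (FALSE) {
  total(c(1.2, NA, 3.4))            # 4.6; NAs are dropped by default
  total(c(1.23, 4.56), digits = 1)  # 5.8, rounded to one decimal place
  total(c(1234, 5678), signif = 2)  # 6900, rounded to two significant digits
}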
|
/R/total.R
|
no_license
|
BAAQMD/qtytools
|
R
| false | false | 1,517 |
r
|
#' Add numbers, ignoring NAs
#'
#' @param x numeric vector
#' @param digits for \code{round}
#' @param signif for \code{signif}
#'
#' @importFrom stringr str_trim
#' @importFrom units set_units
#' @importFrom unittools has_units restore_units
#'
#' @return sum of (non-missing) values in \code{x}
#'
#' @export
total <- function (
x,
na.rm = TRUE,
digits = Inf,
signif = Inf,
verbose = getOption("verbose")
) {
msg <- function (...) if(isTRUE(verbose)) message("[total] ", ...)
if (!is.numeric(x)) {
stop("input must be numeric")
}
# as.numeric() helps prevent integer overflows
summed <- sum(as.numeric(x), na.rm = na.rm)
if (unittools::has_units(x)) {
summed <- unittools::restore_units(summed, from = x)
}
if (isTRUE(is.finite(digits)) && isTRUE(is.finite(signif))) {
msg("WARNING: `digits` takes precedence over `signif`")
}
if (is.finite(digits)) {
msg("rounding to ", digits, " digits")
summed <- base::round(summed, digits = digits)
} else if (is.finite(signif)) {
msg("rounding to ", digits, " significant digits")
summed <- base::signif(summed, digits = signif)
}
# try to restore class (but see integer overflow protection above)
handle_error <- function (e) {
msg("WARNING: ", stringr::str_trim(as.character(e)))
msg('promoting result to class "numeric" instead of "', class(x), '"')
return(summed)
}
tryCatch(
class(summed) <- class(x),
warning = handle_error,
error = handle_error)
return(summed)
}
|
#' Choose folder interactively.
#'
#' On Windows, will use 'choose.dir' on Mac will use 'tk_choose.dir'.
#'
#' @return A pathname.
#' @export
choose_directory <- function() {
if (exists('choose.dir')) {
utils::choose.dir()
} else {
tcltk::tk_choose.dir()
}
}
|
/R/choose_directory.R
|
no_license
|
SPI-Birds/pipelines
|
R
| false | false | 274 |
r
|
#' Choose folder interactively.
#'
#' On Windows, will use 'choose.dir' on Mac will use 'tk_choose.dir'.
#'
#' @return A pathname.
#' @export
choose_directory <- function() {
if (exists('choose.dir')) {
utils::choose.dir()
} else {
tcltk::tk_choose.dir()
}
}
|
\name{MetaLandSim-package}
\alias{MetaLandSim-package}
\alias{MetaLandSim}
\docType{package}
\title{
Landscape And Range Expansion Simulation
}
\description{
The package MetaLandSim is a simulation environment, allowing the generation of random landscapes, represented as graphs, the simulation of landscape dynamics, metapopulation dynamics and range expansion.\cr
The package was developed as part of the Ph.D. thesis of Frederico Mestre (SFRH/BD/73768/2010), funded by European Social Funds and the Portuguese Foundation for Science and Technology, and included in the project NETPERSIST (PTDC/AAG-MAA/3227/2012), funded by European Regional Development Fund (ERDF) through COMPETE programme and Portuguese national funds through the Portuguese Foundation for Science and Technology.\cr
It is intended to provide a virtual environment, enabling the experimentation and simulation of processes at two scales: landscape and range. The simulation approach, taken by MetaLandSim, presents several advantages, like allowing the test of several alternatives and the knowledge of the full system (Peck, 2004; Zurell et al. 2009). The role of simulation in landscape ecology is fundamental due to the spatial and temporal scale of the studied phenomena, which frequently hinders experimentation (Ims, 2005).\cr
Here, graph and metapopulation theories are combined, which is a broadly accepted strategy to provide a modelling framework for metapopulation dynamics (Cantwell & Forman, 1993; Bunn et al. 2000; Ricotta et al. 2000; Minor & Urban, 2008; Galpern et al. 2011). Also, several graph-based connectivity metrics can be computed from the landscape graphs. This set of metrics have been proven useful elsewhere (Urban & Keitt, 2001; Calabrese & Fagan, 2004). The graph representation of landscape has one major advantage: it effectively summarizes spatial relationships between elements and facilitates a multi-scale analysis integrating patch and landscape level analysis (Calabrese & Fagan, 2004).\cr
MetaLandSim operates at two scales, providing researchers with the possibility of:
\itemize{
\item Landscape scale - Simulation of metapopulation occupation on a dynamic landscape, computation of connectivity metrics.
\item Range scale - Computes dispersal model and range expansion scenario simulation.
}
The landscape unit, an object of class \code{\link{landscape}}, is the basic simulation unit at both these scales. At the landscape scale, the persistence of the metapopulation in a dynamic landscape is evaluated through the simulation of landscape dynamics using the function \code{\link{iterate.graph}} or \code{\link{manage_landscape_sim}}.
At the range scale the metapopulation is allowed to expand to other, empty, landscape units using \code{\link{range_expansion}}, producing an object of class \code{\link{expansion}}. The function \code{\link{range_raster}} allows the conversion of the dispersal model obtained with the previous function into a raster. Finally, also at the range scale, the user can analyse the outcome of several alternative landscapes in range expansion speed and maximum dispersal distance, using the function \code{\link{manage_expansion_sim}}.\cr
Since version 1.0, new IFM parameter estimation capabilities are available, based upon Bayesian statistics and using the functions first developed for the paper by Risk et al. (2011).\cr\cr
We thank Dr. Santiago Saura (Universidad Politecnica de Madrid) for the very useful inputs and for the R script which greatly improved the connectivity metrics capabilities of MetaLandSim.\cr \cr
After version 2.0.0 MetaLandSim had a few major changes: 1) There is no graphical user interface; the
user has to resort solely to the usual R interface; 2) It does not use GRASS, resorting uniquely to R packages to conduct the simulations (mainly terra); 3) It depends on far fewer packages
(after removing rgrass7, maptools, rgeos, raster, tcltk and fgui); 4) There were some major changes to
the functions \code{\link{range_raster}} and \code{\link{range_expansion}}. Concerning
\code{\link{range_expansion}}, the output, rather than considering distinct dispersal probabilities
in all four cardinal directions (as in previous versions), considers the same probability of dispersal
from a current presence in all directions. This has implications for the \code{\link{range_raster}}
function, which converts the dispersal probability to a raster. However, this does not change the results
in any meaningful way, given that these kinds of simulations require many iterations in which
the distinctions between dispersal to all four directions were diluted.
}
\details{
\tabular{ll}{
Package: \tab MetaLandSim\cr
Type: \tab Package\cr
Version: \tab 2.0.0\cr
Date: \tab 2022-01-12\cr
License: \tab GPL (>=2)\cr
}
}
\author{
Frederico Mestre, Fernando Canovas, Benjamin Risk, Ricardo Pita, Antonio Mira and Pedro Beja.
Maintainer: Frederico Mestre <mestre.frederico@gmail.com>
}
\references{
Bunn, A. G., Urban, D. L. and Keitt, T. H. (2000). Landscape connectivity: a conservation application of graph theory. Journal of Environmental Management, 59(4), 265-278.
Calabrese, J. M. and Fagan, W. F. (2004). A comparison-shopper's guide to connectivity metrics. Frontiers in Ecology and the Environment, 2(10), 529-536.
Cantwell, M. D. and Forman, R. T. (1993). Landscape graphs: ecological modelling with graph theory to detect configurations common to diverse landscapes. Landscape Ecology, 8(4), 239-255.
Galpern, P., Manseau, M. and Fall, A. (2011). Patch-based graphs of landscape connectivity: a guide to construction, analysis and application for conservation. Biological Conservation, 144(1), 44-55.
Ims, R.A. (2005). The role of experiments in landscape ecology. In: Wiens, J.A., and Moss, M.R. (eds.). Issues and Perspectives in Landscape Ecology. Cambridge University Press. pp. 70-78.
Mestre, F., Pita, R., Pauperio, J., Martins, F. M., Alves, P. C., Mira, A., & Beja, P. (2015). Combining distribution modelling and non-invasive genetics to improve range shift forecasting. Ecological Modelling, 297, 171-179.
Mestre, F., Risk, B. B., Mira, A., Beja, P., & Pita, R. (2017). A metapopulation approach to predict species range shifts under different climate change and landscape connectivity scenarios. Ecological Modelling, 359, 406-414.
Mestre, F., Pita, R., Mira, A., Beja, P. (2020). Species traits, patch turnover and successional dynamics: When does intermediate disturbance favour metapopulation occupancy?. BMC Ecology.
Minor, E. S. and Urban, D. L. (2008). A Graph Theory Framework for Evaluating Landscape Connectivity and Conservation Planning. Conservation Biology, 22(2), 297-307.
Peck, S. L. (2004). Simulation as experiment: a philosophical reassessment for biological modelling. Trends in Ecology & Evolution, 19(10), 530-534.
Ricotta, C., Stanisci, A., Avena, G. C., and Blasi, C. (2000). Quantifying the network connectivity of landscape mosaics: a graph-theoretical approach. Community Ecology, 1(1), 89-94.
Risk, B. B., De Valpine, P., Beissinger, S. R. (2011). A robust design formulation of the incidence function model of metapopulation dynamics applied to two species of rails. Ecology, 92(2), 462-474.
Urban, D. and Keitt, T. (2001). Landscape connectivity: a graph-theoretic perspective. Ecology, 82(5), 1205-1218.
Zurell, D., Berger, U., Cabral, J.S., Jeltsch, F., Meynard, C.N., Munkemuller, T., Nehrbass, N., Pagel, J., Reineking, B., Schroder, B. and Grimm, V. (2009). The virtual ecologist approach: simulating data and observers. Oikos, 119(4), 622-635.
}
|
/man/MetaLandSim-package.Rd
|
no_license
|
cran/MetaLandSim
|
R
| false | false | 7,672 |
rd
|
\name{MetaLandSim-package}
\alias{MetaLandSim-package}
\alias{MetaLandSim}
\docType{package}
\title{
Landscape And Range Expansion Simulation
}
\description{
The package MetaLandSim is a simulation environment, allowing the generation of random landscapes, represented as graphs, the simulation of landscape dynamics, metapopulation dynamics and range expansion.\cr
The package was developed as part of the Ph.D. thesis of Frederico Mestre (SFRH/BD/73768/2010), funded by European Social Funds and the Portuguese Foundation for Science and Technology, and included in the project NETPERSIST (PTDC/AAG-MAA/3227/2012), funded by European Regional Development Fund (ERDF) through COMPETE programme and Portuguese national funds through the Portuguese Foundation for Science and Technology.\cr
It is intended to provide a virtual environment, enabling the experimentation and simulation of processes at two scales: landscape and range. The simulation approach, taken by MetaLandSim, presents several advantages, like allowing the test of several alternatives and the knowledge of the full system (Peck, 2004; Zurell et al. 2009). The role of simulation in landscape ecology is fundamental due to the spatial and temporal scale of the studied phenomena, which frequently hinders experimentation (Ims, 2005).\cr
Here, graph and metapopulation theories are combined, which is a broadly accepted strategy to provide a modelling framework for metapopulation dynamics (Cantwell & Forman, 1993; Bunn et al. 2000; Ricotta et al. 2000; Minor & Urban, 2008; Galpern et al. 2011). Also, several graph-based connectivity metrics can be computed from the landscape graphs. This set of metrics have been proven useful elsewhere (Urban & Keitt, 2001; Calabrese & Fagan, 2004). The graph representation of landscape has one major advantage: it effectively summarizes spatial relationships between elements and facilitates a multi-scale analysis integrating patch and landscape level analysis (Calabrese & Fagan, 2004).\cr
MetaLandSim operates at two scales, providing researchers with the possibility of:
\itemize{
\item Landscape scale - Simulation of metapopulation occupation on a dynamic landscape, computation of connectivity metrics.
\item Range scale - Computes dispersal model and range expansion scenario simulation.
}
The landscape unit, an object of class \code{\link{landscape}}, is the basic simulation unit at both these scales. At the landscape scale, the persistence of the metapopulation in a dynamic landscape is evaluated through the simulation of landscape dynamics using the function \code{\link{iterate.graph}} or \code{\link{manage_landscape_sim}}.
At the range scale the metapopulation is allowed to expand to other, empty, landscape units using \code{\link{range_expansion}}, producing an object of class \code{\link{expansion}}. The function \code{\link{range_raster}} allows the conversion of the dispersal model obtained with the previous function into a raster. Finally, also at the range scale, the user can analyse the outcome of several alternative landscapes in range expansion speed and maximum dispersal distance, using the function \code{\link{manage_expansion_sim}}.\cr
Since version 1.0, new IFM parameter estimation capabilities are available, based upon Bayesian statistics and using the functions first developed for the paper by Risk et al. (2011).\cr\cr
We thank Dr. Santiago Saura (Universidad Politecnica de Madrid) for the very useful inputs and for the R script which greatly improved the connectivity metrics capabilities of MetaLandSim.\cr \cr
After version 2.0.0 MetaLandSim had a few major changes: 1) There is no graphical user interface; the
user has to resort solely to the usual R interface; 2) It does not use GRASS, resorting uniquely to R packages to conduct the simulations (mainly terra); 3) It depends on far fewer packages
(after removing rgrass7, maptools, rgeos, raster, tcltk and fgui); 4) There were some major changes to
the functions \code{\link{range_raster}} and \code{\link{range_expansion}}. Concerning
\code{\link{range_expansion}}, the output, rather than considering distinct dispersal probabilities
in all four cardinal directions (as in previous versions), considers the same probability of dispersal
from a current presence in all directions. This has implications for the \code{\link{range_raster}}
function, which converts the dispersal probability to a raster. However, this does not change the results
in any meaningful way, given that these kinds of simulations require many iterations in which
the distinctions between dispersal to all four directions were diluted.
}
\details{
\tabular{ll}{
Package: \tab MetaLandSim\cr
Type: \tab Package\cr
Version: \tab 2.0.0\cr
Date: \tab 2022-01-12\cr
License: \tab GPL (>=2)\cr
}
}
\author{
Frederico Mestre, Fernando Canovas, Benjamin Risk, Ricardo Pita, Antonio Mira and Pedro Beja.
Maintainer: Frederico Mestre <mestre.frederico@gmail.com>
}
\references{
Bunn, A. G., Urban, D. L. and Keitt, T. H. (2000). Landscape connectivity: a conservation application of graph theory. Journal of Environmental Management, 59(4), 265-278.
Calabrese, J. M. and Fagan, W. F. (2004). A comparison-shopper's guide to connectivity metrics. Frontiers in Ecology and the Environment, 2(10), 529-536.
Cantwell, M. D. and Forman, R. T. (1993). Landscape graphs: ecological modelling with graph theory to detect configurations common to diverse landscapes. Landscape Ecology, 8(4), 239-255.
Galpern, P., Manseau, M. and Fall, A. (2011). Patch-based graphs of landscape connectivity: a guide to construction, analysis and application for conservation. Biological Conservation, 144(1), 44-55.
Ims, R.A. (2005). The role of experiments in landscape ecology. In: Wiens, J.A., and Moss, M.R. (eds.). Issues and Perspectives in Landscape Ecology. Cambridge University Press. pp. 70-78.
Mestre, F., Pita, R., Pauperio, J., Martins, F. M., Alves, P. C., Mira, A., & Beja, P. (2015). Combining distribution modelling and non-invasive genetics to improve range shift forecasting. Ecological Modelling, 297, 171-179.
Mestre, F., Risk, B. B., Mira, A., Beja, P., & Pita, R. (2017). A metapopulation approach to predict species range shifts under different climate change and landscape connectivity scenarios. Ecological Modelling, 359, 406-414.
Mestre, F., Pita, R., Mira, A., Beja, P. (2020). Species traits, patch turnover and successional dynamics: When does intermediate disturbance favour metapopulation occupancy?. BMC Ecology.
Minor, E. S. and Urban, D. L. (2008). A Graph Theory Framework for Evaluating Landscape Connectivity and Conservation Planning. Conservation Biology, 22(2), 297-307.
Peck, S. L. (2004). Simulation as experiment: a philosophical reassessment for biological modelling. Trends in Ecology & Evolution, 19(10), 530-534.
Ricotta, C., Stanisci, A., Avena, G. C., and Blasi, C. (2000). Quantifying the network connectivity of landscape mosaics: a graph-theoretical approach. Community Ecology, 1(1), 89-94.
Risk, B. B., De Valpine, P., Beissinger, S. R. (2011). A robust design formulation of the incidence function model of metapopulation dynamics applied to two species of rails. Ecology, 92(2), 462-474.
Urban, D. and Keitt, T. (2001). Landscape connectivity: a graph-theoretic perspective. Ecology, 82(5), 1205-1218.
Zurell, D., Berger, U., Cabral, J.S., Jeltsch, F., Meynard, C.N., Munkemuller, T., Nehrbass, N., Pagel, J., Reineking, B., Schroder, B. and Grimm, V. (2009). The virtual ecologist approach: simulating data and observers. Oikos, 119(4), 622-635.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monthly.R
\name{islamic_interbank_rate}
\alias{islamic_interbank_rate}
\title{Islamic Interbank Rate}
\source{
https://api.bnm.gov.my/
}
\usage{
islamic_interbank_rate(date, year, month)
}
\arguments{
\item{date}{Character string or ... in ISO-8601 format (YYYY-MM-DD).
If specified, return the Islamic interbank rate for the
specified date. If left blank, return today's rate.}
\item{year, month}{Year and month as integers.}
}
\description{
This function allows you to ... from the BNM API.
}
\examples{
islamic_interbank_rate()
}
\keyword{...}
|
/man/islamic_interbank_rate.Rd
|
no_license
|
DanielYuo/bnmr
|
R
| false | true | 626 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/monthly.R
\name{islamic_interbank_rate}
\alias{islamic_interbank_rate}
\title{Islamic Interbank Rate}
\source{
https://api.bnm.gov.my/
}
\usage{
islamic_interbank_rate(date, year, month)
}
\arguments{
\item{date}{Character string or ... in ISO-8601 format (YYYY-MM-DD).
If specified, return the Islamic interbank rate for the
specified date. If left blank, return today's rate.}
\item{year, month}{Year and month as integers.}
}
\description{
This function allows you to ... from the BNM API.
}
\examples{
islamic_interbank_rate()
}
\keyword{...}
|
# Assignment 2
library(tidytext)
library(dplyr)
library(tidyr)
library(stringr)
library(lubridate)
library(ggplot2)
sentiments %>% group_by(lexicon) %>% summarise(n = n())
table(sentiments$lexicon)
sentiments %>% count(lexicon, sort = T)
count(sentiments, lexicon)
summarise(sentiments, n=n())
sejong <- "Sejong reinforced Confucian policies and executed major ‘legal amendments’ (공법; 貢法). He also personally created and promulgated the Korean alphabet Hangul,[2][3] encouraged advancements of scientific technology, and instituted many other efforts to stabilize and improve prosperity. He dispatched military campaigns to the north and instituted the Samin policy (사민정책; 徙民政策) to attract new settlers to the region. To the south, he subjugated Japanese pirates and captured Tsushima Island (also known as Daema Island in the Korean language)."
str_length(unlist(str_split(sejong, " ")))
length(unlist(str_split(sejong, "\\.")))
length(str_split(sejong, " "))
length(unlist(str_extract_all(sejong, boundary("word"))))
str_length(str_split(sejong, " "))
sejong_sents <- str_extract_all(sejong, boundary("sentence")) %>%
unlist()
class(sejong_sents)
sejong_sents
sejong_sents <- str_extract_all(sejong, boundary("sentence"))
class(sejong_sents)
sejong_sents <- unlist(str_extract(sejong, boundary("sentence")))
class(sejong_sents)
sejong_sents <- str_extract_all(sejong, boundary("word"))
class(sejong_sents)
sejong_sents <- str_split(sejong, " ") %>% unlist()
class(sejong_sents)
sejong_sents
sejong_sents <- unlist(str_extract_all(sejong, boundary("sentence"))) # re-split into the 4 sentences before building the data frame
sejong_sents_df <- data_frame(line=1:4, text=sejong_sents)
sejong_tidy <- sejong_sents_df %>% unnest_tokens(word, text, token = "words")
sejong_tidy
# tokenise the sentences again, this time splitting on anything that is not a word character, '#' or '@'
sejong_words <- sejong_sents_df %>% unnest_tokens(word, text, token = "regex", pattern="[^[:word:]#@]")
sejong_words
|
/workspace/final1~11.R
|
no_license
|
inhyeokk/textmining
|
R
| false | false | 1,790 |
r
|
# Assignment 2
library(tidytext)
library(dplyr)
library(tidyr)
library(stringr)
library(lubridate)
library(ggplot2)
sentiments %>% group_by(lexicon) %>% summarise(n = n())
table(sentiments$lexicon)
sentiments %>% count(lexicon, sort = T)
count(sentiments, lexicon)
summarise(sentiments, n=n())
sejong <- "Sejong reinforced Confucian policies and executed major ‘legal amendments’ (공법; 貢法). He also personally created and promulgated the Korean alphabet Hangul,[2][3] encouraged advancements of scientific technology, and instituted many other efforts to stabilize and improve prosperity. He dispatched military campaigns to the north and instituted the Samin policy (사민정책; 徙民政策) to attract new settlers to the region. To the south, he subjugated Japanese pirates and captured Tsushima Island (also known as Daema Island in the Korean language)."
str_length(unlist(str_split(sejong, " ")))
length(unlist(str_split(sejong, "\\.")))
length(str_split(sejong, " "))
length(unlist(str_extract_all(sejong, boundary("word"))))
str_length(str_split(sejong, " "))
sejong_sents <- str_extract_all(sejong, boundary("sentence")) %>%
unlist()
class(sejong_sents)
sejong_sents
sejong_sents <- str_extract_all(sejong, boundary("sentence"))
class(sejong_sents)
sejong_sents <- unlist(str_extract(sejong, boundary("sentence")))
class(sejong_sents)
sejong_sents <- str_extract_all(sejong, boundary("word"))
class(sejong_sents)
sejong_sents <- str_split(sejong, " ") %>% unlist()
class(sejong_sents)
sejong_sents
sejong_sents <- unlist(str_extract_all(sejong, boundary("sentence"))) # re-split into the 4 sentences before building the data frame
sejong_sents_df <- data_frame(line=1:4, text=sejong_sents)
sejong_tidy <- sejong_sents_df %>% unnest_tokens(word, text, token = "words")
sejong_tidy
# tokenise the sentences again, this time splitting on anything that is not a word character, '#' or '@'
sejong_words <- sejong_sents_df %>% unnest_tokens(word, text, token = "regex", pattern="[^[:word:]#@]")
sejong_words
|
#!/usr/bin/env Rscript
packages <- c(
'docopt',
'data.table',
'ggplot2',
'ggrepel',
'arules',
'CLONETv2'
)
repository <- 'https://cloud.r-project.org/'
install.packages(packages, repos = repository)
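# Exit with status 1 if any of the packages fails to load after installation, 0 otherwise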
rr <- as.integer(!all(sapply(packages, function (x) require(x, character.only = TRUE))))
quit(save = 'no', status = rr)
|
/docker/clonetv2/rootfs/build/bin/install_r_packages.R
|
no_license
|
demichelislab/SPICE-pipeline-CWL
|
R
| false | false | 342 |
r
|
#!/usr/bin/env Rscript
packages <- c(
'docopt',
'data.table',
'ggplot2',
'ggrepel',
'arules',
'CLONETv2'
)
repository <- 'https://cloud.r-project.org/'
install.packages(packages, repos = repository)
rr <- as.integer(!all(sapply(packages, function (x) require(x, character.only = TRUE))))
quit(save = 'no', status = rr)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defunct.R
\name{taxonsearch}
\alias{taxonsearch}
\title{Search for taxa in GBIF.}
\usage{
taxonsearch(...)
}
\description{
This function is defunct.
}
\seealso{
occ_search
}
\keyword{internal}
|
/man/taxonsearch.Rd
|
no_license
|
cran/rgbif
|
R
| false | true | 286 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defunct.R
\name{taxonsearch}
\alias{taxonsearch}
\title{Search for taxa in GBIF.}
\usage{
taxonsearch(...)
}
\description{
This function is defunct.
}
\seealso{
occ_search
}
\keyword{internal}
|
#Read in consumer complaint data.
#Original source:
# https://catalog.data.gov/dataset/consumer-complaint-database#topic=consumer_navigation
setwd("C:/Users/Owner/Documents")
consumer_complaints<-read.csv("Consumer_Complaints.csv",stringsAsFactors = FALSE)
head(consumer_complaints)
str(consumer_complaints)
consumer_complaints_banksOnly<-consumer_complaints[consumer_complaints$Product %in%
c("Consumer Loan","Bank account or service"),]
str(consumer_complaints_banksOnly)#108,764 obs
library(dplyr)
min(consumer_complaints_banksOnly$Date.received)#min date is 1/1/13
max(consumer_complaints_banksOnly$Date.received)#max date is 9/9/16
#Next read in the equity capital information for the banks. The purpose is to
#assign a size to the banks so that we can scale their complaint counts accordingly
#Source: http://www.usbanklocations.com/bank-rank/total-equity-capital.html?d=2016-09-30
Ranked_Banks_12_31_11<-read.csv('Ranked Banks 12-31-11.csv',stringsAsFactors=FALSE)
Ranked_Banks_3_31_12<-read.csv('Ranked Banks 3-31-12.csv',stringsAsFactors=FALSE)
Ranked_Banks_6_30_12<-read.csv('Ranked Banks 6-30-12.csv',stringsAsFactors=FALSE)
Ranked_Banks_9_30_12<-read.csv('Ranked Banks 9-30-12.csv',stringsAsFactors=FALSE)
Ranked_Banks_12_31_12<-read.csv('Ranked Banks 12-31-12.csv',stringsAsFactors=FALSE)
Ranked_Banks_3_31_13<-read.csv('Ranked Banks 3-31-13.csv',stringsAsFactors=FALSE)
Ranked_Banks_6_30_13<-read.csv('Ranked Banks 6-30-13.csv',stringsAsFactors=FALSE)
Ranked_Banks_9_30_13<-read.csv('Ranked Banks 9-30-13.csv',stringsAsFactors=FALSE)
Ranked_Banks_12_31_13<-read.csv('Ranked Banks 12-31-13.csv',stringsAsFactors=FALSE)
Ranked_Banks_3_31_14<-read.csv('Ranked Banks 3-31-14.csv',stringsAsFactors=FALSE)
Ranked_Banks_6_30_14<-read.csv('Ranked Banks 6-30-14.csv',stringsAsFactors=FALSE)
Ranked_Banks_9_30_14<-read.csv('Ranked Banks 9-30-14.csv',stringsAsFactors=FALSE)
Ranked_Banks_12_31_14<-read.csv('Ranked Banks 12-31-14.csv',stringsAsFactors=FALSE)
Ranked_Banks_3_31_15<-read.csv('Ranked Banks 3-31-15.csv',stringsAsFactors=FALSE)
Ranked_Banks_6_30_15<-read.csv('Ranked Banks 6-30-15.csv',stringsAsFactors=FALSE)
Ranked_Banks_9_30_15<-read.csv('Ranked Banks 9-30-15.csv',stringsAsFactors=FALSE)
Ranked_Banks_12_31_15<-read.csv('Ranked Banks 12-31-15.csv',stringsAsFactors=FALSE)
Ranked_Banks_3_31_16<-read.csv('Ranked Banks 3-31-16.csv',stringsAsFactors=FALSE)
Ranked_Banks_6_30_16<-read.csv('Ranked Banks 6-30-16.csv',stringsAsFactors=FALSE)
Ranked_Banks_9_30_16<-read.csv('Ranked Banks 9-30-16.csv',stringsAsFactors=FALSE)
#Stack the ranked banks into one set
all_ranked_banks <- rbind(Ranked_Banks_12_31_11,
Ranked_Banks_3_31_12,
Ranked_Banks_6_30_12,
Ranked_Banks_9_30_12,
Ranked_Banks_12_31_12,
Ranked_Banks_3_31_13,
Ranked_Banks_6_30_13,
Ranked_Banks_9_30_13,
Ranked_Banks_12_31_13,
Ranked_Banks_3_31_14,
Ranked_Banks_6_30_14,
Ranked_Banks_9_30_14,
Ranked_Banks_12_31_14,
Ranked_Banks_3_31_15,
Ranked_Banks_6_30_15,
Ranked_Banks_9_30_15,
Ranked_Banks_12_31_15,
Ranked_Banks_3_31_16,
Ranked_Banks_6_30_16,
Ranked_Banks_9_30_16)
rm(Ranked_Banks_12_31_11,
Ranked_Banks_3_31_12,
Ranked_Banks_6_30_12,
Ranked_Banks_9_30_12,
Ranked_Banks_12_31_12,
Ranked_Banks_3_31_13,
Ranked_Banks_6_30_13,
Ranked_Banks_9_30_13,
Ranked_Banks_12_31_13,
Ranked_Banks_3_31_14,
Ranked_Banks_6_30_14,
Ranked_Banks_9_30_14,
Ranked_Banks_12_31_14,
Ranked_Banks_3_31_15,
Ranked_Banks_6_30_15,
Ranked_Banks_9_30_15,
Ranked_Banks_12_31_15,
Ranked_Banks_3_31_16,
Ranked_Banks_6_30_16,
Ranked_Banks_9_30_16)
#Reformat the dates
all_ranked_banks$Date2<-as.Date(all_ranked_banks$Date,format="%m/%d/%Y")
head(all_ranked_banks)
str(all_ranked_banks)
all_ranked_banks2<-subset(all_ranked_banks,select=-Date)
rm(all_ranked_banks)
str(all_ranked_banks2)#133880 obs
#write.csv(all_ranked_banks2, file = "banks_ranked_by_equity_capital.csv")
max(all_ranked_banks2$Rank) #7366
maxrank<-50
all_ranked_banks2_top<-all_ranked_banks2[all_ranked_banks2$Rank<=maxrank,]
str(all_ranked_banks2_top)
head(all_ranked_banks2_top)
#Extract unique bank names. Bank names will be merge keys. We want to experiment with
#some "fuzzy" merge techniques.
unique_banks_from_complaints<-unique(consumer_complaints_banksOnly$Company)
length(unique_banks_from_complaints)#1130
unique_banks_from_complaints[1:20]
write.csv(unique_banks_from_complaints,file="unique_banks_from_complaints.csv")
unique_banks_from_ranked_banks_top<-unique(all_ranked_banks2_top$Bank.Name)
length(unique_banks_from_ranked_banks_top)#67 banks
unique_banks_from_ranked_banks_top
write.csv(unique_banks_from_ranked_banks_top,file="top ranked banks.csv")
#Create the Cartesian product of the two sets of unique bank names. When we get that
#done we'll find the Levenstein distances for all bank name pairs.
library(dplyr)
huge_table<-full_join(unique_banks_from_complaints,unique_banks_from_ranked_banks,by=NULL)
#full_join isn't working. I get this error message:
#Error in UseMethod("full_join") :
# no applicable method for 'full_join' applied to an object of class "character"
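#A working alternative for the Cartesian product: full_join() expects data frames,
#so the cross of the two name vectors can be built with base expand.grid() instead.
#(bank_name_pairs is a new, illustrative name; the pairwise distance scoring itself
#is done in the loop further below.)
bank_name_pairs<-expand.grid(bank1=unique_banks_from_complaints,
                             bank2=unique_banks_from_ranked_banks_top,
                             stringsAsFactors=FALSE)
str(bank_name_pairs)#1130 x 67 = 75,710 candidate pairs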
#Read in banks with numbers of branches
#Original source http://www.usbanklocations.com/bank-rank/number-of-branches.html
RankedBanksByNumBranch_12_31_11<-read.csv('RankedBanksByNumBranch 12-31-11.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_3_31_12<-read.csv('RankedBanksByNumBranch 3-31-12.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_6_30_12<-read.csv('RankedBanksByNumBranch 6-30-12.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_9_30_12<-read.csv('RankedBanksByNumBranch 9-30-12.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_12_31_12<-read.csv('RankedBanksByNumBranch 12-31-12.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_3_31_13<-read.csv('RankedBanksByNumBranch 3-31-13.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_6_30_13<-read.csv('RankedBanksByNumBranch 6-30-13.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_9_30_13<-read.csv('RankedBanksByNumBranch 9-30-13.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_12_31_13<-read.csv('RankedBanksByNumBranch 12-31-13.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_3_31_14<-read.csv('RankedBanksByNumBranch 3-31-14.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_6_30_14<-read.csv('RankedBanksByNumBranch 6-30-14.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_9_30_14<-read.csv('RankedBanksByNumBranch 9-30-14.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_12_31_14<-read.csv('RankedBanksByNumBranch 12-31-14.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_3_31_15<-read.csv('RankedBanksByNumBranch 3-31-15.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_6_30_15<-read.csv('RankedBanksByNumBranch 6-30-15.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_9_30_15<-read.csv('RankedBanksByNumBranch 9-30-15.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_12_31_15<-read.csv('RankedBanksByNumBranch 12-31-15.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_3_31_16<-read.csv('RankedBanksByNumBranch 3-31-16.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_6_30_16<-read.csv('RankedBanksByNumBranch 6-30-16.csv',stringsAsFactors=FALSE)
RankedBanksByNumBranch_9_30_16<-read.csv('RankedBanksByNumBranch 9-30-16.csv',stringsAsFactors=FALSE)
#Stack the ranked (by number of branches) banks into one set
all_RankedBanksByNumBranch <- rbind(RankedBanksByNumBranch_12_31_11,
RankedBanksByNumBranch_3_31_12,
RankedBanksByNumBranch_6_30_12,
RankedBanksByNumBranch_9_30_12,
RankedBanksByNumBranch_12_31_12,
RankedBanksByNumBranch_3_31_13,
RankedBanksByNumBranch_6_30_13,
RankedBanksByNumBranch_9_30_13,
RankedBanksByNumBranch_12_31_13,
RankedBanksByNumBranch_3_31_14,
RankedBanksByNumBranch_6_30_14,
RankedBanksByNumBranch_9_30_14,
RankedBanksByNumBranch_12_31_14,
RankedBanksByNumBranch_3_31_15,
RankedBanksByNumBranch_6_30_15,
RankedBanksByNumBranch_9_30_15,
RankedBanksByNumBranch_12_31_15,
RankedBanksByNumBranch_3_31_16,
RankedBanksByNumBranch_6_30_16,
RankedBanksByNumBranch_9_30_16)
rm(RankedBanksByNumBranch_12_31_11,
RankedBanksByNumBranch_3_31_12,
RankedBanksByNumBranch_6_30_12,
RankedBanksByNumBranch_9_30_12,
RankedBanksByNumBranch_12_31_12,
RankedBanksByNumBranch_3_31_13,
RankedBanksByNumBranch_6_30_13,
RankedBanksByNumBranch_9_30_13,
RankedBanksByNumBranch_12_31_13,
RankedBanksByNumBranch_3_31_14,
RankedBanksByNumBranch_6_30_14,
RankedBanksByNumBranch_9_30_14,
RankedBanksByNumBranch_12_31_14,
RankedBanksByNumBranch_3_31_15,
RankedBanksByNumBranch_6_30_15,
RankedBanksByNumBranch_9_30_15,
RankedBanksByNumBranch_12_31_15,
RankedBanksByNumBranch_3_31_16,
RankedBanksByNumBranch_6_30_16,
RankedBanksByNumBranch_9_30_16)
#Reformat the dates
all_RankedBanksByNumBranch$Date2<-as.Date(all_RankedBanksByNumBranch$Date,format="%m/%d/%Y")
head(all_RankedBanksByNumBranch)
str(all_RankedBanksByNumBranch)
all_RankedBanksByNumBranch2<-subset(all_RankedBanksByNumBranch,select=-Date)
rm(all_RankedBanksByNumBranch)
install.packages("stringdist")
library(stringdist)
#We will compare every bank name in the set of banks from the complaints data to the bank names in the
#set of bank sizes. We will keep every pair of bank names that is "close", that is, every
#pair of bank names with string distance <= threshold.
distances <-data.frame(bank1=character(),bank2=character(),cleanedUpBank1=character(),
cleanedUpBank2=character(), d=integer(),stringsAsFactors = FALSE)
threshold<-0.31
for (i in 1:length(unique_banks_from_complaints)){
for (j in 1:length(unique_banks_from_ranked_banks_top)){
#Here we remove words from the bank names that give no information about the bank's identity.
#These words are sometimes called "stop words"
CleanedUpBankName1<-gsub(' BANKS ',' ',toupper(unique_banks_from_complaints[i]))
CleanedUpBankName1<-gsub('BANKS ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' BANKS','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' BANK ',' ',CleanedUpBankName1)
CleanedUpBankName1<-gsub('BANK ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' BANK','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('BANK','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('SAVINGS AND LOAN','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('SAVINGS & LOAN','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' INC.','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('COMPANY','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' CO[.]','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('BANCO','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('FINANCIAL','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('FINANCE','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('CAPITAL','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('SERVICES','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('CORPORATION','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' CITIZENS ',' ',CleanedUpBankName1)
CleanedUpBankName1<-gsub('CITIZENS ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' CITIZENS','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('FIRST ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' TRUST ',' ',CleanedUpBankName1)
CleanedUpBankName1<-gsub('TRUST ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' TRUST','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(',','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('&','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('[.]','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' PUERTO RICO ',' ',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' PUERTO RICO','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('PUERTO RICO ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' OF THE ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' OF ',' ',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' THE ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub(' AND ','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('NATIONAL','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('COMMUNITY','',CleanedUpBankName1)
CleanedUpBankName1<-gsub('ASSOCIATION','',CleanedUpBankName1)
CleanedUpBankName2<-gsub(' BANKS ',' ',toupper(unique_banks_from_ranked_banks_top[j]))
CleanedUpBankName2<-gsub('BANKS ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' BANKS','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' BANK ',' ',CleanedUpBankName2)
CleanedUpBankName2<-gsub('BANK ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' BANK','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('BANK','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('SAVINGS AND LOAN','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('SAVINGS & LOAN','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' INC.','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('COMPANY','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' CO[.]','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('BANCO','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('FINANCIAL','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('FINANCE','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('CAPITAL','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('SERVICES','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('CORPORATION','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' CITIZENS ',' ',CleanedUpBankName2)
CleanedUpBankName2<-gsub('CITIZENS ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' CITIZENS','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('FIRST ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' TRUST ',' ',CleanedUpBankName2)
CleanedUpBankName2<-gsub('TRUST ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' TRUST','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(',','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('&','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('[.]','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' PUERTO RICO ',' ',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' PUERTO RICO','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('PUERTO RICO ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' OF THE ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' OF ',' ',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' THE ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub(' AND ','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('NATIONAL','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('COMMUNITY','',CleanedUpBankName2)
CleanedUpBankName2<-gsub('ASSOCIATION','',CleanedUpBankName2)
#Distance between 2 bank names is defined as Levenshtein ("lv") distance,
#normalized by the length of the shorter cleaned-up bank name
#(left unnormalized when the longer cleaned-up name has 4 or fewer characters)
if (max(nchar(CleanedUpBankName1),nchar(CleanedUpBankName2))<=4){
dist<-stringdist(CleanedUpBankName1,CleanedUpBankName2,method="lv")
}
else {
dist<-stringdist(CleanedUpBankName1,CleanedUpBankName2,method="lv")/min(nchar(CleanedUpBankName1),nchar(CleanedUpBankName2))
}
if (dist <= threshold){
newrow<-data.frame(bank1=unique_banks_from_complaints[i],
bank2=unique_banks_from_ranked_banks_top[j],
cleanedUpBank1=CleanedUpBankName1,cleanedUpBank2=CleanedUpBankName2,
d=dist, stringsAsFactors = FALSE)
distances<-rbind(distances,newrow)
}
}
}
write.csv(distances,file="Bank name pairs and distances2.csv")
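#Quick illustrative check of the normalized distance (example strings only, not
#taken from the data): identical cleaned-up names score 0, while an extra word
#pushes the score well past the 0.31 threshold used above.
stringdist("WELLS FARGO", "WELLS FARGO", method="lv")                            #0
stringdist("WELLS FARGO", "WELLS FARGO HOME", method="lv")/nchar("WELLS FARGO")  #5/11, about 0.45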
################################################################################################################################
###########################################################################################################################################
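#Manually map each top-ranked bank name to a standardized name that matches the
#company names used in the complaints data; names without a match get an empty string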
for (i in 1:nrow(all_ranked_banks2_top))
{
if (all_ranked_banks2_top$Bank.Name[i]=='Raymond James Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Raymond James Bank, N. A.'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Regions Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Regions Financial Corporation'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Reliant Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Reliant Financial Corporation'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Ally Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Ally Financial Inc.'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Commerce Bank & Trust Company'){all_ranked_banks2_top$Bank.Name_std[i]<-'Commerce Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Meridian Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Meridian Financial Services, Inc.'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Chemical Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Chemical Financial Corporation'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Wells Fargo Bank, Ltd.'){all_ranked_banks2_top$Bank.Name_std[i]<-'Wells Fargo & Company'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Wells Fargo Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Wells Fargo & Company'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Fifth Third Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Fifth Third Financial Corporation'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Santander Bank, N.A.'){all_ranked_banks2_top$Bank.Name_std[i]<-'Santander Bank US'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First PREMIER Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'First Premier Financial, Inc.'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Synchrony Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Synchrony Financial'}
else if (all_ranked_banks2_top$Bank.Name[i]=='JPMorgan Chase Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'JPMorgan Chase & Co.'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Wells Fargo Financial National Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Wells Fargo & Company'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Bank of America'){all_ranked_banks2_top$Bank.Name_std[i]<-'Bank of America'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First Tennessee Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'First Tennessee Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=="'People's United Bank'"){all_ranked_banks2_top$Bank.Name_std[i]<-"'People's United Bank'"}
else if (all_ranked_banks2_top$Bank.Name[i]=='TCF National Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'TCF National Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Citibank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Citibank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Bank of the West'){all_ranked_banks2_top$Bank.Name_std[i]<-'Bank of the West'}
else if (all_ranked_banks2_top$Bank.Name[i]=='SunTrust Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'SunTrust Banks, Inc.'}
else if (all_ranked_banks2_top$Bank.Name[i]=='BMO Harris Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'BMO Harris'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Capital One'){all_ranked_banks2_top$Bank.Name_std[i]<-'Capital One'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Firstmerit Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'FirstMerit Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='The Huntington National Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'The Huntington National Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Union Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Union Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Union Bank and Trust Company'){all_ranked_banks2_top$Bank.Name_std[i]<-'Union Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='BankUnited'){all_ranked_banks2_top$Bank.Name_std[i]<-'BankUnited'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Morgan Stanley Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Morgan Stanley'}
else if (all_ranked_banks2_top$Bank.Name[i]=='BNY Mellon'){all_ranked_banks2_top$Bank.Name_std[i]<-'BNY Mellon'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Astoria Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Astoria Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='New York Community Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'New York Community Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Comerica Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Comerica'}
else if (all_ranked_banks2_top$Bank.Name[i]=='USAA Savings Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'USAA Savings'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First National Bank of Omaha'){all_ranked_banks2_top$Bank.Name_std[i]<-'First National Bank of Omaha'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First Citizens Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'First Citizens'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Discover Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Discover'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Banco Popular de Puerto Rico'){all_ranked_banks2_top$Bank.Name_std[i]<-'Banco Popular de Puerto Rico'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Commerce Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Commerce Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='E*TRADE Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'E*Trade Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='BancorpSouth Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'BancorpSouth Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Valley National Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Valley National Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Frost Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Frost Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='UMB Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'UMB Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Associated Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Associated Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='FirstBank'){all_ranked_banks2_top$Bank.Name_std[i]<-'FirstBank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'FirstBank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Firstbank'){all_ranked_banks2_top$Bank.Name_std[i]<-'FirstBank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='East West Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'East West Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Webster Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Webster Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First Hawaiian Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'First Hawaiian Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Bank of Hawaii'){all_ranked_banks2_top$Bank.Name_std[i]<-'Bank of Hawaii'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Washington Federal'){all_ranked_banks2_top$Bank.Name_std[i]<-'Washington Federal'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Sallie Mae Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Sallie Mae'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Whitney Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Whitney Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Charles Schwab Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Charles Schwab Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Synovus Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Synovus Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Banco Popular North America'){all_ranked_banks2_top$Bank.Name_std[i]<-'Banco Popular North America'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Arvest Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Arvest Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Banco Santander Puerto Rico'){all_ranked_banks2_top$Bank.Name_std[i]<-'Banco Santander Puerto Rico'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Investors Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Investors Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='EverBank'){all_ranked_banks2_top$Bank.Name_std[i]<-'EverBank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First Republic Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'First Republic Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Iberiabank'){all_ranked_banks2_top$Bank.Name_std[i]<-'IBERIABANK'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Trustmark National Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Trustmark Corporation'}
else if (all_ranked_banks2_top$Bank.Name[i]=='City National Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'City National Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Goldman Sachs Bank USA'){all_ranked_banks2_top$Bank.Name_std[i]<-'Goldman Sachs Bank USA'}
else if (all_ranked_banks2_top$Bank.Name[i]=='The PrivateBank and Trust Company'){all_ranked_banks2_top$Bank.Name_std[i]<-'The PrivateBank and Trust Company'}
else if (all_ranked_banks2_top$Bank.Name[i]=='State Street Bank and Trust Company'){all_ranked_banks2_top$Bank.Name_std[i]<-'State Street Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='The Northern Trust Company'){all_ranked_banks2_top$Bank.Name_std[i]<-'The Northern Trust Company'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Rabobank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Rabobank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Firstbank of Puerto Rico'){all_ranked_banks2_top$Bank.Name_std[i]<-'FirstBank of Puerto Rico'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First National Bank of Pennsylvania'){all_ranked_banks2_top$Bank.Name_std[i]<-'First National Bank of Pennsylvania'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Scottrade Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Scottrade Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Old National Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Old National Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Blackhawk Bank & Trust'){all_ranked_banks2_top$Bank.Name_std[i]<-'Blackhawk Finance'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Cathay Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'Cathay Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='Pacific Western Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'PACIFIC WESTERN BANK'}
else if (all_ranked_banks2_top$Bank.Name[i]=='MidFirst Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'MidFirst Bank'}
else if (all_ranked_banks2_top$Bank.Name[i]=='First Niagara Bank'){all_ranked_banks2_top$Bank.Name_std[i]<-'First Niagara Bank'}
else {all_ranked_banks2_top$Bank.Name_std[i]<-''}
}
write.csv(all_ranked_banks2_top,file="Bank names with standardized names.csv")
|
/Capstone project.R
|
no_license
|
DoctorC1968/Capstone-Project
|
R
| false | false | 27,896 |
r
|
|
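# Quick demos of base R surface plots (contour, image, persp) for
# f(x,y) = cos(y)/(1+x^2) and its antisymmetric part fa = (f - t(f))/2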
x=seq(-pi,pi,length=50)
y=x
f=outer(x,y,function(x,y)cos(y)/(1+x^2))
fa=(f-t(f))/2
contour(x,y,f)
contour(x,y,f,nlevels=45,add=T)
contour(x,y,fa,nlevels=15)
image(x,y,fa)
persp(x,y,fa)
persp(x,y,fa,theta=30)
persp(x,y,fa,theta=30,phi=20)
persp(x,y,fa,theta=30,phi=70)
persp(x,y,fa,theta=30,phi=40)
|
/src/r_script/test_r.R
|
no_license
|
tuanxhbk/learn_R
|
R
| false | false | 298 |
r
|
|
#' crisp: A package for fitting a model that partitions the covariate space into blocks in a data-adaptive way.
#'
#' This package is called crisp for "Convex Regression with Interpretable Sharp Partitions",
#' which considers the problem of predicting an outcome variable on the basis of two covariates,
#' using an interpretable yet non-additive model. CRISP partitions the covariate space into
#' blocks in a data-adaptive way, and fits a mean model within each block.
#' Unlike other partitioning methods, CRISP is fit using a non-greedy approach by solving a
#' convex optimization problem, resulting in low-variance fits. More details are provided
#' in Petersen, A., Simon, N., and Witten, D. (2016). Convex Regression with Interpretable
#' Sharp Partitions. Journal of Machine Learning Research, 17(94): 1-31 <http://jmlr.org/papers/volume17/15-344/15-344.pdf>.
#'
#' The main functions are: (1)\code{\link{crisp}} and (2)\code{\link{crispCV}}. The first function
#' \code{\link{crisp}} fits CRISP for a sequence of tuning parameters and provides the fits
#' for this entire sequence of tuning parameters. The second function \code{\link{crispCV}} considers
#' a sequence of tuning parameters and provides the fits, but also returns the optimal tuning parameter,
#' as chosen using K-fold cross-validation.
#'
#' @examples
#' \dontrun{
#' #general example illustrating all functions
#' #see specific function help pages for details of using each function
#'
#' #generate data (using a very small 'n' for illustration purposes)
#' set.seed(1)
#' data <- sim.data(n = 15, scenario = 2)
#' #plot the mean model for the scenario from which we generated data
#' plot(data)
#'
#' #fit model for a range of tuning parameters, i.e., lambda values
#' #lambda sequence is chosen automatically if not specified
#' crisp.out <- crisp(X = data$X, y = data$y)
#' #or fit model and select lambda using 2-fold cross-validation
#' #note: use larger 'n.fold' (e.g., 10) in practice
#' crispCV.out <- crispCV(X = data$X, y = data$y, n.fold = 2)
#'
#' #summarize all of the fits
#' summary(crisp.out)
#' #or just summarize a single fit
#' #we examine the fit with an index of 25. that is, lambda of
#' crisp.out$lambda.seq[25]
#' summary(crisp.out, lambda.index = 25)
#' #lastly, we can summarize the fit chosen using cross-validation
#' summary(crispCV.out)
#' #and also plot the cross-validation error
#' plot(summary(crispCV.out))
#' #the lambda chosen by cross-validation is also available using
#' crispCV.out$lambda.cv
#'
#' #plot the estimated relationships between two predictors and outcome
#' #do this for a specific fit
#' plot(crisp.out, lambda.index = 25)
#' #or for the fit chosen using cross-validation
#' plot(crispCV.out)
#'
#' #we can make predictions for a covariate matrix with new observations
#' #new.X with 20 observations
#' new.data <- sim.data(n = 20, scenario = 2)
#' new.X <- new.data$X
#' #these will give the same predictions:
#' yhat1 <- predict(crisp.out, new.X = new.X, lambda.index = crispCV.out$index.cv)
#' yhat2 <- predict(crispCV.out, new.X = new.X)
#' }
#' @docType package
#' @name crisp-package
#' @aliases crisp-package
NULL
|
/R/crisp-package.R
|
no_license
|
cran/crisp
|
R
| false | false | 3,155 |
r
|
|
# library to read netcdf files
library(ncdf4)
#library to plot maps with legend easily
library(fields)
# set wd to HadGEM2ES_Soil_Ancil.nc location
#setwd("C://Users//CBE//OneDrive - University of Exeter//soildata_hadgem")
# open netcdf
anci <- nc_open("HadGEM2ES_Soil_Ancil.nc")
# coordinates from where you want the data, example some site in amazon
lat <- -1.74
lon <- -51.46
# check variable names, units and file structure
# anci
# field342 - SATURATED SOIL WATER SUCTION
# field332 - VOL SMC AT SATURATION AFTER TIMESTEP
# field1381 - CLAPP-HORNBERGER B COEFFICIENT
# The longitude in the HadGEM file uses a 0-360 convention:
# the Greenwich meridian is 0 and longitude increases eastward to 360, see
b <- ncvar_get(anci, varid = "field1381")
image.plot(anci$dim$longitude$vals,
anci$dim$latitude$vals,
b)
# Function to convert the longitude from -180:180 to 0:360
conv_lon<-function(longitude) {
x <- c(seq(181,360,by=0.5),seq(0,180,by=0.5))
y <- seq(-180,180,by=0.5)
return(x[which.min(abs(y - longitude))])
}
lon360<-conv_lon(lon)
# extract the data from the nearest gridcell for your example site
b <- ncvar_get(anci, varid = "field1381",count = c(1,1),
start= c(which.min(abs(anci$dim$longitude$vals - lon360)),
which.min(abs(anci$dim$latitude$vals - lat))))
b<-c(b)
Psi_sat <- ncvar_get(anci, varid = "field342",count = c(1,1),
start= c(which.min(abs(anci$dim$longitude$vals - lon360)),
which.min(abs(anci$dim$latitude$vals - lat))))
Psi_sat_cm<-c(Psi_sat*100)# transform m to cm
smc_sat <- ncvar_get(anci, varid = "field332",count = c(1,1),
start= c(which.min(abs(anci$dim$longitude$vals - lon360)),
which.min(abs(anci$dim$latitude$vals - lat))))
smc_sat<-c(smc_sat)
# Example using Clapp & Hornberger (1978) equations to convert smc to Psi_soil
smc<-seq(0.1*smc_sat,smc_sat,length.out = 1000)# example smc vector
Psi_soil_cm<-Psi_sat_cm*(smc/smc_sat)^(-b)
Psi_soil<--Psi_soil_cm*98.04*1e-6# convert Psi_soil from cm to MPa
plot(Psi_soil~smc)
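# Illustrative check of how steeply suction rises as the soil dries (hypothetical
# b value, independent of the values read from the ancillary file above): at half
# of saturation the suction is (smc/smc_sat)^(-b) = 0.5^(-b) times its saturated
# value, e.g. a 64-fold increase for b = 6
0.5^(-6) # = 64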
|
/Hidraulica/soil_dat.R
|
no_license
|
gabisophia/fenologia
|
R
| false | false | 2,137 |
r
|
|
context("resource-schedule")
test_that("capacity & queue size change", {
inf_sch <- schedule(c(8, 16, 24), c(1, 2, 3), Inf)
fin_sch <- schedule(c(8, 16, 24), c(1, 2, 3), 24)
expect_output(print(inf_sch))
limits <- simmer(verbose=TRUE) %>%
add_resource("dummy", inf_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24))
expect_equal(limits$server, c(1, 2, 3))
limits <- simmer() %>%
add_resource("dummy", fin_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24, 32, 40, 48))
expect_equal(limits$server, c(1, 2, 3, 1, 2, 3))
})
test_that("queue size changes", {
inf_sch <- schedule(c(8, 16, 24), c(1, 2, 3), Inf)
fin_sch <- schedule(c(8, 16, 24), c(1, 2, 3), 24)
limits <- simmer() %>%
add_resource("dummy", 1, inf_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24))
expect_equal(limits$queue, c(1, 2, 3))
limits <- simmer() %>%
add_resource("dummy", 1, fin_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24, 32, 40, 48))
expect_equal(limits$queue, c(1, 2, 3, 1, 2, 3))
})
test_that("arrivals 1) are dequeued when resource's capacity increases and 2) remain in server when it decreases", {
t <- create_trajectory() %>%
seize("dummy", 1) %>%
timeout(2) %>%
release("dummy", 1)
inf_sch <- schedule(c(0, 1, 2), c(1, 3, 1), Inf)
arrivals <- simmer() %>%
add_resource("dummy", inf_sch) %>%
add_generator("asdf", t, at(0, 0, 0)) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(2, 3, 3))
expect_equal(arrivals$activity_time, c(2, 2, 2))
})
test_that("arrivals 1) are dequeued when resource's capacity increases and 2) remain in server when it decreases", {
t <- create_trajectory() %>%
seize("dummy", 1) %>%
timeout(2) %>%
release("dummy", 1)
inf_sch <- schedule(c(0, 1, 2), c(1, 3, 1), Inf)
arrivals <- simmer() %>%
add_resource("dummy", inf_sch) %>%
add_generator("asdf", t, at(0, 0, 0)) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(2, 3, 3))
expect_equal(arrivals$activity_time, c(2, 2, 2))
})
test_that("arrivals are preempted when resource's capacity decreases", {
t <- create_trajectory() %>%
seize("dummy", 1) %>%
timeout(2) %>%
release("dummy", 1)
inf_sch <- schedule(c(0, 1, 2), c(1, 3, 1), Inf)
arrivals <- simmer() %>%
add_resource("dummy", inf_sch, preemptive=TRUE) %>%
add_generator("asdf", t, at(0, 0, 0), restart=TRUE) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(2, 3, 5))
expect_equal(arrivals$activity_time, c(2, 2, 3))
})
test_that("resource's capacity decreases before post-release tasks", {
t <- create_trajectory() %>%
seize("t-rex") %>%
timeout(5) %>%
release("t-rex")
arrivals <- simmer() %>%
add_resource("t-rex", capacity = schedule(timetable = c(5,10,15),
period=Inf,
values = c(1,0,1))) %>%
add_generator("piggy", t, at(0,0,0)) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(10, 20, 25))
expect_equal(arrivals$activity_time, c(5, 5, 5))
})
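# Illustrative note (not one of the original tests): based on the calls above,
# a daily-repeating capacity schedule could be written, for example, as
#   office_hours <- schedule(c(8, 16), c(2, 0), period = 24)
# i.e. capacity 2 from t = 8 to t = 16 and 0 otherwise, repeating every 24 time units.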
|
/tests/testthat/test-simmer-resource-schedule.R
|
no_license
|
pitercius/simmer
|
R
| false | false | 3,477 |
r
|
context("resource-schedule")
test_that("capacity & queue size change", {
inf_sch <- schedule(c(8, 16, 24), c(1, 2, 3), Inf)
fin_sch <- schedule(c(8, 16, 24), c(1, 2, 3), 24)
expect_output(print(inf_sch))
limits <- simmer(verbose=TRUE) %>%
add_resource("dummy", inf_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24))
expect_equal(limits$server, c(1, 2, 3))
limits <- simmer() %>%
add_resource("dummy", fin_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24, 32, 40, 48))
expect_equal(limits$server, c(1, 2, 3, 1, 2, 3))
})
test_that("queue size changes", {
inf_sch <- schedule(c(8, 16, 24), c(1, 2, 3), Inf)
fin_sch <- schedule(c(8, 16, 24), c(1, 2, 3), 24)
limits <- simmer() %>%
add_resource("dummy", 1, inf_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24))
expect_equal(limits$queue, c(1, 2, 3))
limits <- simmer() %>%
add_resource("dummy", 1, fin_sch) %>%
run(16) %>% reset() %>% run(48) %>%
get_mon_resources("limits")
expect_equal(limits$time, c(8, 16, 24, 32, 40, 48))
expect_equal(limits$queue, c(1, 2, 3, 1, 2, 3))
})
test_that("arrivals 1) are dequeued when resource's capacity increases and 2) remain in server when it decreases", {
t <- create_trajectory() %>%
seize("dummy", 1) %>%
timeout(2) %>%
release("dummy", 1)
inf_sch <- schedule(c(0, 1, 2), c(1, 3, 1), Inf)
arrivals <- simmer() %>%
add_resource("dummy", inf_sch) %>%
add_generator("asdf", t, at(0, 0, 0)) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(2, 3, 3))
expect_equal(arrivals$activity_time, c(2, 2, 2))
})
test_that("arrivals 1) are dequeued when resource's capacity increases and 2) remain in server when it decreases", {
t <- create_trajectory() %>%
seize("dummy", 1) %>%
timeout(2) %>%
release("dummy", 1)
inf_sch <- schedule(c(0, 1, 2), c(1, 3, 1), Inf)
arrivals <- simmer() %>%
add_resource("dummy", inf_sch) %>%
add_generator("asdf", t, at(0, 0, 0)) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(2, 3, 3))
expect_equal(arrivals$activity_time, c(2, 2, 2))
})
test_that("arrivals are preempted when resource's capacity decreases", {
t <- create_trajectory() %>%
seize("dummy", 1) %>%
timeout(2) %>%
release("dummy", 1)
inf_sch <- schedule(c(0, 1, 2), c(1, 3, 1), Inf)
arrivals <- simmer() %>%
add_resource("dummy", inf_sch, preemptive=TRUE) %>%
add_generator("asdf", t, at(0, 0, 0), restart=TRUE) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(2, 3, 5))
expect_equal(arrivals$activity_time, c(2, 2, 3))
})
test_that("resource's capacity decreases before post-release tasks", {
t <- create_trajectory() %>%
seize("t-rex") %>%
timeout(5) %>%
release("t-rex")
arrivals <- simmer() %>%
add_resource("t-rex", capacity = schedule(timetable = c(5,10,15),
period=Inf,
values = c(1,0,1))) %>%
add_generator("piggy", t, at(0,0,0)) %>%
run() %>%
get_mon_arrivals()
expect_equal(arrivals$end_time, c(10, 20, 25))
expect_equal(arrivals$activity_time, c(5, 5, 5))
})
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0,family="gaussian",standardize=FALSE)
sink('./bone_002.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
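# Optional follow-up (not part of the original script): the cross-validated penalty
# is also available directly from the cv.glmnet object, e.g. glm$lambda.min or
# glm$lambda.1se, in addition to the full fit path printed above.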
|
/Model/EN/ReliefF/bone/bone_002.R
|
no_license
|
esbgkannan/QSMART
|
R
| false | false | 340 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.sagemaker_operations.R
\name{create_algorithm}
\alias{create_algorithm}
\title{Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace}
\usage{
create_algorithm(AlgorithmName, AlgorithmDescription = NULL,
TrainingSpecification, InferenceSpecification = NULL,
ValidationSpecification = NULL, CertifyForMarketplace = NULL)
}
\arguments{
\item{AlgorithmName}{[required] The name of the algorithm.}
\item{AlgorithmDescription}{A description of the algorithm.}
\item{TrainingSpecification}{[required] Specifies details about training jobs run by this algorithm, including the following:
\itemize{
\item The Amazon ECR path of the container and the version digest of the algorithm.
\item The hyperparameters that the algorithm supports.
\item The instance types that the algorithm supports for training.
\item Whether the algorithm supports distributed training.
\item The metrics that the algorithm emits to Amazon CloudWatch.
\item Which metrics that the algorithm emits can be used as the objective metric for hyperparameter tuning jobs.
\item The input channels that the algorithm supports for training data. For example, an algorithm might support \code{train}, \code{validation}, and \code{test} channels.
}}
\item{InferenceSpecification}{Specifies details about inference jobs that the algorithm runs, including the following:
\itemize{
\item The Amazon ECR paths of containers that contain the inference code and model artifacts.
\item The instance types that the algorithm supports for transform jobs and real-time endpoints used for inference.
\item The input and output content formats that the algorithm supports for inference.
}}
\item{ValidationSpecification}{Specifies configurations for one or more training jobs that Amazon SageMaker runs to test the algorithm's training code and, optionally, one or more batch transform jobs that Amazon SageMaker runs to test the algorithm's inference code.}
\item{CertifyForMarketplace}{Whether to certify the algorithm so that it can be listed in AWS Marketplace.}
}
\description{
Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace.
}
\section{Accepted Parameters}{
\preformatted{create_algorithm(
AlgorithmName = "string",
AlgorithmDescription = "string",
TrainingSpecification = list(
TrainingImage = "string",
TrainingImageDigest = "string",
SupportedHyperParameters = list(
list(
Name = "string",
Description = "string",
Type = "Integer"|"Continuous"|"Categorical"|"FreeText",
Range = list(
IntegerParameterRangeSpecification = list(
MinValue = "string",
MaxValue = "string"
),
ContinuousParameterRangeSpecification = list(
MinValue = "string",
MaxValue = "string"
),
CategoricalParameterRangeSpecification = list(
Values = list(
"string"
)
)
),
IsTunable = TRUE|FALSE,
IsRequired = TRUE|FALSE,
DefaultValue = "string"
)
),
SupportedTrainingInstanceTypes = list(
"ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"
),
SupportsDistributedTraining = TRUE|FALSE,
MetricDefinitions = list(
list(
Name = "string",
Regex = "string"
)
),
TrainingChannels = list(
list(
Name = "string",
Description = "string",
IsRequired = TRUE|FALSE,
SupportedContentTypes = list(
"string"
),
SupportedCompressionTypes = list(
"None"|"Gzip"
),
SupportedInputModes = list(
"Pipe"|"File"
)
)
),
SupportedTuningJobObjectiveMetrics = list(
list(
Type = "Maximize"|"Minimize",
MetricName = "string"
)
)
),
InferenceSpecification = list(
Containers = list(
list(
ContainerHostname = "string",
Image = "string",
ImageDigest = "string",
ModelDataUrl = "string",
ProductId = "string"
)
),
SupportedTransformInstanceTypes = list(
"ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"
),
SupportedRealtimeInferenceInstanceTypes = list(
"ml.t2.medium"|"ml.t2.large"|"ml.t2.xlarge"|"ml.t2.2xlarge"|"ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.large"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.large"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"
),
SupportedContentTypes = list(
"string"
),
SupportedResponseMIMETypes = list(
"string"
)
),
ValidationSpecification = list(
ValidationRole = "string",
ValidationProfiles = list(
list(
ProfileName = "string",
TrainingJobDefinition = list(
TrainingInputMode = "Pipe"|"File",
HyperParameters = list(
"string"
),
InputDataConfig = list(
list(
ChannelName = "string",
DataSource = list(
S3DataSource = list(
S3DataType = "ManifestFile"|"S3Prefix"|"AugmentedManifestFile",
S3Uri = "string",
S3DataDistributionType = "FullyReplicated"|"ShardedByS3Key",
AttributeNames = list(
"string"
)
)
),
ContentType = "string",
CompressionType = "None"|"Gzip",
RecordWrapperType = "None"|"RecordIO",
InputMode = "Pipe"|"File",
ShuffleConfig = list(
Seed = 123
)
)
),
OutputDataConfig = list(
KmsKeyId = "string",
S3OutputPath = "string"
),
ResourceConfig = list(
InstanceType = "ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge",
InstanceCount = 123,
VolumeSizeInGB = 123,
VolumeKmsKeyId = "string"
),
StoppingCondition = list(
MaxRuntimeInSeconds = 123
)
),
TransformJobDefinition = list(
MaxConcurrentTransforms = 123,
MaxPayloadInMB = 123,
BatchStrategy = "MultiRecord"|"SingleRecord",
Environment = list(
"string"
),
TransformInput = list(
DataSource = list(
S3DataSource = list(
S3DataType = "ManifestFile"|"S3Prefix"|"AugmentedManifestFile",
S3Uri = "string"
)
),
ContentType = "string",
CompressionType = "None"|"Gzip",
SplitType = "None"|"Line"|"RecordIO"|"TFRecord"
),
TransformOutput = list(
S3OutputPath = "string",
Accept = "string",
AssembleWith = "None"|"Line",
KmsKeyId = "string"
),
TransformResources = list(
InstanceType = "ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge",
InstanceCount = 123,
VolumeKmsKeyId = "string"
)
)
)
)
),
CertifyForMarketplace = TRUE|FALSE
)
}
}
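% A minimal call sketch added for illustration; it is not generated from the
% package. Only the required arguments are shown, and the algorithm name,
% image URI, instance type and channel definition are placeholder values.
\examples{
\dontrun{
create_algorithm(
  AlgorithmName = "my-algorithm",
  TrainingSpecification = list(
    TrainingImage = "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest",
    SupportedTrainingInstanceTypes = list("ml.m5.xlarge"),
    SupportsDistributedTraining = FALSE,
    TrainingChannels = list(
      list(
        Name = "train",
        SupportedContentTypes = list("text/csv"),
        SupportedInputModes = list("File")
      )
    )
  )
)
}
}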
|
/service/paws.sagemaker/man/create_algorithm.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false | true | 9,078 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.sagemaker_operations.R
\name{create_algorithm}
\alias{create_algorithm}
\title{Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace}
\usage{
create_algorithm(AlgorithmName, AlgorithmDescription = NULL,
TrainingSpecification, InferenceSpecification = NULL,
ValidationSpecification = NULL, CertifyForMarketplace = NULL)
}
\arguments{
\item{AlgorithmName}{[required] The name of the algorithm.}
\item{AlgorithmDescription}{A description of the algorithm.}
\item{TrainingSpecification}{[required] Specifies details about training jobs run by this algorithm, including the following:
\itemize{
\item The Amazon ECR path of the container and the version digest of the algorithm.
\item The hyperparameters that the algorithm supports.
\item The instance types that the algorithm supports for training.
\item Whether the algorithm supports distributed training.
\item The metrics that the algorithm emits to Amazon CloudWatch.
\item Which metrics that the algorithm emits can be used as the objective metric for hyperparameter tuning jobs.
\item The input channels that the algorithm supports for training data. For example, an algorithm might support \code{train}, \code{validation}, and \code{test} channels.
}}
\item{InferenceSpecification}{Specifies details about inference jobs that the algorithm runs, including the following:
\itemize{
\item The Amazon ECR paths of containers that contain the inference code and model artifacts.
\item The instance types that the algorithm supports for transform jobs and real-time endpoints used for inference.
\item The input and output content formats that the algorithm supports for inference.
}}
\item{ValidationSpecification}{Specifies configurations for one or more training jobs that Amazon SageMaker runs to test the algorithm's training code and, optionally, one or more batch transform jobs that Amazon SageMaker runs to test the algorithm's inference code.}
\item{CertifyForMarketplace}{Whether to certify the algorithm so that it can be listed in AWS Marketplace.}
}
\description{
Create a machine learning algorithm that you can use in Amazon SageMaker and list in the AWS Marketplace.
}
\section{Accepted Parameters}{
\preformatted{create_algorithm(
AlgorithmName = "string",
AlgorithmDescription = "string",
TrainingSpecification = list(
TrainingImage = "string",
TrainingImageDigest = "string",
SupportedHyperParameters = list(
list(
Name = "string",
Description = "string",
Type = "Integer"|"Continuous"|"Categorical"|"FreeText",
Range = list(
IntegerParameterRangeSpecification = list(
MinValue = "string",
MaxValue = "string"
),
ContinuousParameterRangeSpecification = list(
MinValue = "string",
MaxValue = "string"
),
CategoricalParameterRangeSpecification = list(
Values = list(
"string"
)
)
),
IsTunable = TRUE|FALSE,
IsRequired = TRUE|FALSE,
DefaultValue = "string"
)
),
SupportedTrainingInstanceTypes = list(
"ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"
),
SupportsDistributedTraining = TRUE|FALSE,
MetricDefinitions = list(
list(
Name = "string",
Regex = "string"
)
),
TrainingChannels = list(
list(
Name = "string",
Description = "string",
IsRequired = TRUE|FALSE,
SupportedContentTypes = list(
"string"
),
SupportedCompressionTypes = list(
"None"|"Gzip"
),
SupportedInputModes = list(
"Pipe"|"File"
)
)
),
SupportedTuningJobObjectiveMetrics = list(
list(
Type = "Maximize"|"Minimize",
MetricName = "string"
)
)
),
InferenceSpecification = list(
Containers = list(
list(
ContainerHostname = "string",
Image = "string",
ImageDigest = "string",
ModelDataUrl = "string",
ProductId = "string"
)
),
SupportedTransformInstanceTypes = list(
"ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"
),
SupportedRealtimeInferenceInstanceTypes = list(
"ml.t2.medium"|"ml.t2.large"|"ml.t2.xlarge"|"ml.t2.2xlarge"|"ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.large"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.large"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"
),
SupportedContentTypes = list(
"string"
),
SupportedResponseMIMETypes = list(
"string"
)
),
ValidationSpecification = list(
ValidationRole = "string",
ValidationProfiles = list(
list(
ProfileName = "string",
TrainingJobDefinition = list(
TrainingInputMode = "Pipe"|"File",
HyperParameters = list(
"string"
),
InputDataConfig = list(
list(
ChannelName = "string",
DataSource = list(
S3DataSource = list(
S3DataType = "ManifestFile"|"S3Prefix"|"AugmentedManifestFile",
S3Uri = "string",
S3DataDistributionType = "FullyReplicated"|"ShardedByS3Key",
AttributeNames = list(
"string"
)
)
),
ContentType = "string",
CompressionType = "None"|"Gzip",
RecordWrapperType = "None"|"RecordIO",
InputMode = "Pipe"|"File",
ShuffleConfig = list(
Seed = 123
)
)
),
OutputDataConfig = list(
KmsKeyId = "string",
S3OutputPath = "string"
),
ResourceConfig = list(
InstanceType = "ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge",
InstanceCount = 123,
VolumeSizeInGB = 123,
VolumeKmsKeyId = "string"
),
StoppingCondition = list(
MaxRuntimeInSeconds = 123
)
),
TransformJobDefinition = list(
MaxConcurrentTransforms = 123,
MaxPayloadInMB = 123,
BatchStrategy = "MultiRecord"|"SingleRecord",
Environment = list(
"string"
),
TransformInput = list(
DataSource = list(
S3DataSource = list(
S3DataType = "ManifestFile"|"S3Prefix"|"AugmentedManifestFile",
S3Uri = "string"
)
),
ContentType = "string",
CompressionType = "None"|"Gzip",
SplitType = "None"|"Line"|"RecordIO"|"TFRecord"
),
TransformOutput = list(
S3OutputPath = "string",
Accept = "string",
AssembleWith = "None"|"Line",
KmsKeyId = "string"
),
TransformResources = list(
InstanceType = "ml.m4.xlarge"|"ml.m4.2xlarge"|"ml.m4.4xlarge"|"ml.m4.10xlarge"|"ml.m4.16xlarge"|"ml.c4.xlarge"|"ml.c4.2xlarge"|"ml.c4.4xlarge"|"ml.c4.8xlarge"|"ml.p2.xlarge"|"ml.p2.8xlarge"|"ml.p2.16xlarge"|"ml.p3.2xlarge"|"ml.p3.8xlarge"|"ml.p3.16xlarge"|"ml.c5.xlarge"|"ml.c5.2xlarge"|"ml.c5.4xlarge"|"ml.c5.9xlarge"|"ml.c5.18xlarge"|"ml.m5.large"|"ml.m5.xlarge"|"ml.m5.2xlarge"|"ml.m5.4xlarge"|"ml.m5.12xlarge"|"ml.m5.24xlarge",
InstanceCount = 123,
VolumeKmsKeyId = "string"
)
)
)
)
),
CertifyForMarketplace = TRUE|FALSE
)
}
}
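% A minimal call sketch added for illustration; it is not generated from the
% package. Only the required arguments are shown, and the algorithm name,
% image URI, instance type and channel definition are placeholder values.
\examples{
\dontrun{
create_algorithm(
  AlgorithmName = "my-algorithm",
  TrainingSpecification = list(
    TrainingImage = "123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest",
    SupportedTrainingInstanceTypes = list("ml.m5.xlarge"),
    SupportsDistributedTraining = FALSE,
    TrainingChannels = list(
      list(
        Name = "train",
        SupportedContentTypes = list("text/csv"),
        SupportedInputModes = list("File")
      )
    )
  )
)
}
}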
|
#Query SVDBS
library(RODBC); library(data.table)
if(Sys.info()['sysname']=="Windows"){
data.dir <- "L:\\Rworkspace\\RSurvey\\"
out.dir <- "L:\\EcoAP\\Data\\survey\\"
memory.limit(4000)
}
if(Sys.info()['sysname']=="Linux"){
data.dir <- "slucey/Rworkspace/RSurvey/"
out.dir <- "slucey/EcoAP/Data/survey/"
uid <- 'slucey'
cat('Oracle Password:')
pwd <- readLines(n=1) #If reading from source, need to manually add pwd here
}
if(Sys.info()['sysname']=="Windows"){
channel <- odbcDriverConnect()
} else {
channel <- odbcConnect('sole', uid, pwd)
}
tables <- as.data.table(sqlTables(channel))
SVDBS <- tables[TABLE_SCHEM == 'SVDBS', ]
svspp <- as.data.table(sqlQuery(channel, 'select * from SVSPECIES_LIST'))
|
/Query_survey.R
|
no_license
|
slucey/RSurvey
|
R
| false | false | 756 |
r
|
#Query SVDBS
library(RODBC); library(data.table)
if(Sys.info()['sysname']=="Windows"){
data.dir <- "L:\\Rworkspace\\RSurvey\\"
out.dir <- "L:\\EcoAP\\Data\\survey\\"
memory.limit(4000)
}
if(Sys.info()['sysname']=="Linux"){
data.dir <- "slucey/Rworkspace/RSurvey/"
out.dir <- "slucey/EcoAP/Data/survey/"
uid <- 'slucey'
cat('Oracle Password:')
pwd <- readLines(n=1) #If reading from source, need to manually add pwd here
}
if(Sys.info()['sysname']=="Windows"){
channel <- odbcDriverConnect()
} else {
channel <- odbcConnect('sole', uid, pwd)
}
tables <- as.data.table(sqlTables(channel))
SVDBS <- tables[TABLE_SCHEM == 'SVDBS', ]
svspp <- as.data.table(sqlQuery(channel, 'select * from SVSPECIES_LIST'))
|
diabetesRisk <- function(glucose) glucose/200
shinyServer(
function(input,output){
output$inputValue <- renderPrint({input$glucose})
output$prediction <- renderPrint({diabetesRisk(input$glucose)})
}
)
|
/diabetes_prediction/server.R
|
no_license
|
cottalucas/Data_Products
|
R
| false | false | 211 |
r
|
diabetesRisk <- function(glucose) glucose/200
shinyServer(
function(input,output){
output$inputValue <- renderPrint({input$glucose})
output$prediction <- renderPrint({diabetesRisk(input$glucose)})
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataObj.r
\name{replic8}
\alias{replic8}
\title{Enlarge (replicate) a DLM data object to create an additional dimension for
simulation / sensitivity testing}
\usage{
replic8(Data, nrep)
}
\arguments{
\item{Data}{A data-limited methods data object}
\item{nrep}{The number of positions to expand the DLM object to}
}
\description{
Replicates position 1 data to multiple positions for sensitivity testing etc
}
\author{
T. Carruthers
}
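% Illustrative only (not part of the original help page): `dat` stands for any
% DLM data object already loaded in the session.
\examples{
\dontrun{
# Expand a single-position data object to 10 positions for sensitivity runs
dat10 <- replic8(Data = dat, nrep = 10)
}
}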
|
/man/replic8.Rd
|
no_license
|
Lijiuqi/DLMtool
|
R
| false | true | 512 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataObj.r
\name{replic8}
\alias{replic8}
\title{Enlarge (replicate) a DLM data object to create an additional dimension for
simulation / sensitivity testing}
\usage{
replic8(Data, nrep)
}
\arguments{
\item{Data}{A data-limited methods data object}
\item{nrep}{The number of positions to expand the DLM object to}
}
\description{
Replicates position 1 data to multiple positions for sensitivity testing etc
}
\author{
T. Carruthers
}
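% Illustrative only (not part of the original help page): `dat` stands for any
% DLM data object already loaded in the session.
\examples{
\dontrun{
# Expand a single-position data object to 10 positions for sensitivity runs
dat10 <- replic8(Data = dat, nrep = 10)
}
}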
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_functions.R
\name{clusterIDassign}
\alias{clusterIDassign}
\title{Automatic assigner of cluster name.}
\usage{
clusterIDassign(
object,
clusters.names,
clusters.column = "SCT_snn_res.0.1",
output = NULL,
new_colName = "cell_type"
)
}
\arguments{
\item{object}{Seurat object}
\item{clusters.names}{list of names for each cluster in order}
\item{clusters.column}{metadata column containing the clusters to be named. Must have numbers. The names will be given in the order of the number/factors}
\item{output}{if an output file with the cell information, including cluster name and number, is desired, please indicate the output file}
}
\value{
Seurat object but with the cluster names assigned.
}
\description{
Automatic assigner of cluster name.
}
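% Illustrative only (not from the package): `seu` is assumed to be a Seurat
% object whose "SCT_snn_res.0.1" column holds four clusters.
\examples{
\dontrun{
seu <- clusterIDassign(seu,
                       clusters.names = c("T cells", "B cells", "NK cells", "Monocytes"),
                       clusters.column = "SCT_snn_res.0.1",
                       new_colName = "cell_type")
}
}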
|
/PBMCtools.ER/man/clusterIDassign.Rd
|
no_license
|
genomelias/PBMCtools.ER
|
R
| false | true | 834 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_functions.R
\name{clusterIDassign}
\alias{clusterIDassign}
\title{Automatic assigner of cluster name.}
\usage{
clusterIDassign(
object,
clusters.names,
clusters.column = "SCT_snn_res.0.1",
output = NULL,
new_colName = "cell_type"
)
}
\arguments{
\item{object}{Seurat object}
\item{clusters.names}{list of names for each cluster in order}
\item{clusters.column}{metadata column containing the clusters to be named. Must have numbers. The names will be given in the order of the number/factors}
\item{output}{if an output file with the cell information, including cluster name and number, is desired, please indicate the output file}
}
\value{
Seurat object but with the cluster names assigned.
}
\description{
Automatic assigner of cluster name.
}
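% Illustrative only (not from the package): `seu` is assumed to be a Seurat
% object whose "SCT_snn_res.0.1" column holds four clusters.
\examples{
\dontrun{
seu <- clusterIDassign(seu,
                       clusters.names = c("T cells", "B cells", "NK cells", "Monocytes"),
                       clusters.column = "SCT_snn_res.0.1",
                       new_colName = "cell_type")
}
}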
|
## This is a mix of illustrations/simulations for learning purposes and actual examples
## of how to use R for regression analysis of real data (and some ideas for plotting)
## At the end of this file a summary of the most important R-commands for regression analysis
## is repeated
x=c(168,161,167,179,184,166,198,187,191,179)
y=c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
plot(x, y, xlab="Height", ylab="Weight", cex=1.5, col="blue", type="n")
text(x, y, 1:10, cex=1.5, col="blue")
x=c(168,161,167,179,184,166,198,187,191,179)
y=c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
plot(x, y, xlab="Height", ylab="Weight", cex=1.5, col="blue", type="n")
text(x, y, 1:10, cex=1.5, col="blue")
abline(lm(y~x))
hwdata <- data.frame(cbind(x, y))
myfit <- lm(y ~ x, data = hwdata)
hwdata_and_fit <- data.frame(hwdata, predict(myfit, interval="confidence"))
suppressWarnings(hwdata_and_fit <- data.frame(hwdata_and_fit, predict(myfit, interval="prediction")))
plot(x, y, xlab="Height", ylab="Weight", cex=1.5, col="blue", type="n")
text(x, y, 1:10, cex=1.5, col="blue")
abline(lm(y~x))
with(hwdata_and_fit, lines(sort(x), sort(lwr), col = 2))
with(hwdata_and_fit, lines(sort(x), sort(upr), col = 2))
with(hwdata_and_fit, lines(sort(x), sort(lwr.1), col = 3))
with(hwdata_and_fit, lines(sort(x), sort(upr.1), col = 3))
summary(lm(y ~ x))
################################################################
## Simulate a linear model with normally distributed
## errors and estimate the parameters
## FIRST MAKE DATA:
## Generates x
x <- runif(n=20, min=-2, max=4)
## Simulate y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## FROM HERE: as in a real data analysis, we have the data in x and y:
## A scatter plot of x and y
plot(x, y)
## Find the least squares estimates, use Theorem 5.4
(beta1hat <- sum( (y-mean(y))*(x-mean(x)) ) / sum( (x-mean(x))^2 ))
(beta0hat <- mean(y) - beta1hat*mean(x))
## Use lm() to find the estimates
lm(y ~ x)
## Plot the fitted line
abline(lm(y ~ x), col="red")
################################################################
## See how the parameter estimates are distributed
## Number of repeats
nRepeat <- 1000
## Two vectors to save the estimates in
Beta0Hat <- numeric(nRepeat)
Beta1Hat <- numeric(nRepeat)
## Repeat the simulation and the estimation nRepeat times
for(i in 1:nRepeat){
## Generate x
x <- runif(n=20, min=-2, max=4)
## Simulate the linear regression model
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## Save the estimates
Beta0Hat[i] <- fit$coefficients[1]
Beta1Hat[i] <- fit$coefficients[2]
}
## See their empirical distribution
par(mfrow=c(1,2))
hist(Beta0Hat, probability=TRUE)
hist(Beta1Hat, probability=TRUE)
## Go back to the slides and run the next slide
## See the estimate of the standard deviation of the parameter estimates
summary(fit)
################################################################
## Hypothesis tests for significant parameters
## Generate x
x <- runif(n=20, min=-2, max=4)
## Simulate Y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## See summary - what we need
summary(fit)
################################################################
## Make confidence intervals for the parameters
## number of repeats
nRepeat <- 100
## Did we catch the correct parameter
TrueValInCI <- logical(nRepeat)
## Repeat the simulation and estimation nRepeat times:
for(i in 1:nRepeat){
## Generate x
x <- runif(n=20, min=-2, max=4)
## Simulate y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## Luckily R can compute the confidence interval (level=1-alpha)
(ci <- confint(fit, "(Intercept)", level=0.95))
## Was the correct parameter value "caught" by the interval? (covered)
(TrueValInCI[i] <- ci[1] < beta0 & beta0 < ci[2])
}
## How often did this happen?
sum(TrueValInCI) / nRepeat
################################################################
## Example of confidence interval for the line
## Make a sequence of x values
xval <- seq(from=-2, to=6, length.out=100)
## Use the predict function
CI <- predict(fit, newdata=data.frame(x=xval),
interval="confidence",
level=.95)
## Check what we got
head(CI)
## Plot the data, model fit and intervals
plot(x, y, pch=20)
abline(fit)
lines(xval, CI[, "lwr"], lty=2, col="red", lwd=2)
lines(xval, CI[, "upr"], lty=2, col="red", lwd=2)
################################################################
## Example with prediction interval
## Make a sequence of x values
xval <- seq(from=-2, to=6, length.out=100)
## Use the predict function
PI <- predict(fit, newdata=data.frame(x=xval),
interval="prediction",
level=.95)
## Check what we got
head(PI)
## Plot the data, model fit and intervals
plot(x, y, pch=20)
abline(fit)
lines(xval, PI[, "lwr"], lty=2, col="blue", lwd=2)
lines(xval, PI[, "upr"], lty=2, col="blue", lwd=2)
################################################################
## Generates x
x <- runif(n=20, min=-2, max=4)
## Simulate y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Scatter plot
plot(x,y)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## The "true" line
abline(beta0, beta1)
## Plot of fit
abline(fit, col="red")
## See summary
summary(fit)
## Correlation between x and y
cor(x,y)
## Squared becomes the "Multiple R-squared" from summary(fit)
cor(x,y)^2
# Height data:
cor(x, y)^2
## A summary of the most important R-commands for regression analysis
## The Height-weight data:
x=c(168,161,167,179,184,166,198,187,191,179)
y=c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
## Scatter plot
plot(x,y)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## Plot of fit
abline(fit, col="red")
## See summary (and tests)
summary(fit)
## See confidence intervals for parameters:
confint(fit)
## "Manually" found for slope:
ssx <- sum((x-mean(x))^2)
se_beta1 <- 3.881 * sqrt(1/ssx)
1.1127 - qt(0.975, 8) * se_beta1
1.1127 + qt(0.975, 8) * se_beta1
## See line confidence intervals for given x-values
predict(fit, interval=c("confidence"))
## See line prediction interval for given x-values
predict(fit, interval=c("prediction"))
## See confidence interval for x0=180:
new <- data.frame(x = 180)
predict(fit, new, interval=c("confidence"))
## See prediction interval for x0=180:
predict(fit, new, interval=c("prediction"))
## Residuals:
resid(fit)
|
/snippets/week8.R
|
no_license
|
andreasharmuth/statistics
|
R
| false | false | 6,757 |
r
|
## This is a mix of illustrations/simulations for learning purposes and actual examples
## of how to use R for regression analysis of real data (and some ideas for plotting)
## At the end of this file a summary of the most important R-commands for regression analysis
## is repeated
x=c(168,161,167,179,184,166,198,187,191,179)
y=c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
plot(x, y, xlab="Height", ylab="Weight", cex=1.5, col="blue", type="n")
text(x, y, 1:10, cex=1.5, col="blue")
x=c(168,161,167,179,184,166,198,187,191,179)
y=c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
plot(x, y, xlab="Height", ylab="Weight", cex=1.5, col="blue", type="n")
text(x, y, 1:10, cex=1.5, col="blue")
abline(lm(y~x))
hwdata <- data.frame(cbind(x, y))
myfit <- lm(y ~ x, data = hwdata)
hwdata_and_fit <- data.frame(hwdata, predict(myfit, interval="confidence"))
suppressWarnings(hwdata_and_fit <- data.frame(hwdata_and_fit, predict(myfit, interval="prediction")))
plot(x, y, xlab="Height", ylab="Weight", cex=1.5, col="blue", type="n")
text(x, y, 1:10, cex=1.5, col="blue")
abline(lm(y~x))
with(hwdata_and_fit, lines(sort(x), sort(lwr), col = 2))
with(hwdata_and_fit, lines(sort(x), sort(upr), col = 2))
with(hwdata_and_fit, lines(sort(x), sort(lwr.1), col = 3))
with(hwdata_and_fit, lines(sort(x), sort(upr.1), col = 3))
summary(lm(y ~ x))
################################################################
## Simulate a linear model with normally distributed
## errors and estimate the parameters
## FIRST MAKE DATA:
## Generates x
x <- runif(n=20, min=-2, max=4)
## Simulate y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## FROM HERE: as in a real data analysis, we have the data in x and y:
## A scatter plot of x and y
plot(x, y)
## Find the least squares estimates, use Theorem 5.4
(beta1hat <- sum( (y-mean(y))*(x-mean(x)) ) / sum( (x-mean(x))^2 ))
(beta0hat <- mean(y) - beta1hat*mean(x))
## Use lm() to find the estimates
lm(y ~ x)
## Plot the fitted line
abline(lm(y ~ x), col="red")
################################################################
## See how the parameter estimates are distributed
## Number of repeats
nRepeat <- 1000
## Two vectors to save the estimates in
Beta0Hat <- numeric(nRepeat)
Beta1Hat <- numeric(nRepeat)
## Repeat the simulation and the estimation nRepeat times
for(i in 1:nRepeat){
## Generate x
x <- runif(n=20, min=-2, max=4)
## Simulate the linear regression model
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## Save the estimates
Beta0Hat[i] <- fit$coefficients[1]
Beta1Hat[i] <- fit$coefficients[2]
}
## See their empirical distribution
par(mfrow=c(1,2))
hist(Beta0Hat, probability=TRUE)
hist(Beta1Hat, probability=TRUE)
## Go back to the slides and run the next slide
## See the estimate of the standard deviation of the parameter estimates
summary(fit)
################################################################
## Hypothesis tests for significant parameters
## Generate x
x <- runif(n=20, min=-2, max=4)
## Simulate Y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## See summary - what we need
summary(fit)
################################################################
## Make confidence intervals for the parameters
## number of repeats
nRepeat <- 100
## Did we catch the correct parameter
TrueValInCI <- logical(nRepeat)
## Repeat the simulation and estimation nRepeat times:
for(i in 1:nRepeat){
## Generate x
x <- runif(n=20, min=-2, max=4)
## Simulate y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## Luckily R can compute the confidence interval (level=1-alpha)
(ci <- confint(fit, "(Intercept)", level=0.95))
## Was the correct parameter value "caught" by the interval? (covered)
(TrueValInCI[i] <- ci[1] < beta0 & beta0 < ci[2])
}
## How often did this happen?
sum(TrueValInCI) / nRepeat
################################################################
## Example of confidence interval for the line
## Make a sequence of x values
xval <- seq(from=-2, to=6, length.out=100)
## Use the predict function
CI <- predict(fit, newdata=data.frame(x=xval),
interval="confidence",
level=.95)
## Check what we got
head(CI)
## Plot the data, model fit and intervals
plot(x, y, pch=20)
abline(fit)
lines(xval, CI[, "lwr"], lty=2, col="red", lwd=2)
lines(xval, CI[, "upr"], lty=2, col="red", lwd=2)
################################################################
## Example with prediction interval
## Make a sequence of x values
xval <- seq(from=-2, to=6, length.out=100)
## Use the predict function
PI <- predict(fit, newdata=data.frame(x=xval),
interval="prediction",
level=.95)
## Check what we got
head(PI)
## Plot the data, model fit and intervals
plot(x, y, pch=20)
abline(fit)
lines(xval, PI[, "lwr"], lty=2, col="blue", lwd=2)
lines(xval, PI[, "upr"], lty=2, col="blue", lwd=2)
################################################################
## Generates x
x <- runif(n=20, min=-2, max=4)
## Simulate y
beta0=50; beta1=200; sigma=90
y <- beta0 + beta1 * x + rnorm(n=length(x), mean=0, sd=sigma)
## Scatter plot
plot(x,y)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## The "true" line
abline(beta0, beta1)
## Plot of fit
abline(fit, col="red")
## See summary
summary(fit)
## Correlation between x and y
cor(x,y)
## Squared becomes the "Multiple R-squared" from summary(fit)
cor(x,y)^2
# Height data:
cor(x, y)^2
## A summary of the most important R-commands for regression analysis
## The Height-weight data:
x=c(168,161,167,179,184,166,198,187,191,179)
y=c(65.5, 58.3, 68.1, 85.7, 80.5, 63.4, 102.6, 91.4, 86.7, 78.9)
## Scatter plot
plot(x,y)
## Use lm() to find the estimates
fit <- lm(y ~ x)
## Plot of fit
abline(fit, col="red")
## See summary (and tests)
summary(fit)
## See confidence intervals for parameters:
confint(fit)
## "Manually" found for slope:
ssx <- sum((x-mean(x))^2)
se_beta1 <- 3.881 * sqrt(1/ssx)
1.1127 - qt(0.975, 8) * se_beta1
1.1127 + qt(0.975, 8) * se_beta1
## See line confidence intervals for given x-values
predict(fit, interval=c("confidence"))
## See line prediction interval for given x-values
predict(fit, interval=c("prediction"))
## See confidence interval for x0=180:
new <- data.frame(x = 180)
predict(fit, new, interval=c("confidence"))
## See prediction interval for x0=180:
predict(fit, new, interval=c("prediction"))
## Residuals:
resid(fit)
|
library(mecgen)
library(SDMTools)
# Read the micro cells
micros <- read.csv(file="fixed-cells/cobo-calleja/micro-cells.csv", header=TRUE,
sep = ' ')
micros$type <- rep('micro', nrow(micros))
# Generate with random uniform the pico cells
repulsion <- 50
cobo.bl <- c(40.253541,-3.775409)
cobo.br <- c(40.253541,-3.737324)
cobo.tr <- c(40.276686,-3.737324)
cobo.tl <- c(40.276686,-3.775409)
# Insert the first pico cell
curr_pico.lon <- runif(1, min = cobo.bl[2], max = cobo.br[2])
curr_pico.lat <- runif(1, min = cobo.br[1], max = cobo.tr[1])
pico_cells <- data.frame(lon = curr_pico.lon, lat = curr_pico.lat, type='pico')
while(nrow(pico_cells) < 40 - 1) {
curr_pico.lon <- runif(1, min = cobo.bl[2], max = cobo.br[2])
curr_pico.lat <- runif(1, min = cobo.br[1], max = cobo.tr[1])
no_overlap <- TRUE
for (row in 1:nrow(pico_cells)) {
pico <- pico_cells[row,]
dis <- SDMTools::distance(lat1 = curr_pico.lat, lon1 = curr_pico.lon,
lat2 = pico$lat, lon2 = pico$lon)$distance
if (dis < repulsion) {
no_overlap <- FALSE
break
}
}
if (no_overlap) {
pico_cells <- rbind(pico_cells,
data.frame(lon=curr_pico.lon, lat=curr_pico.lat,
type='pico'))
}
}
# Obtain the cells of Cobo Calleja
cobo_cells <- rbind(micros, data.frame(head(pico_cells, 10)))
assocs <- build5GScenario(lats = cobo_cells$lat, lons = cobo_cells$lon)
# Obtain the link and nodes frames
m1Assoc <- assocs[[1]]
m1Coords <- assocs[[2]]
m1AccAssocs <- assocs[[3]]
accCentCoords <- assocs[[4]]
m2Assocs <- assocs[[5]]
m2Switches <- assocs[[6]]
m2AggAssocs <- assocs[[7]]
aggCentCoords <- assocs[[8]]
m3Assocs <- assocs[[9]]
m3Switches <- assocs[[10]]
# Attach remaining pico cells to the M1 nodes where other pico cells that are
# close to them attach to
orphan_picos <- data.frame()
child_picos <- data.frame()
for (row in 1:nrow(pico_cells)) {
pico <- pico_cells[row,]
orphan <- length(which(m1Assoc$lon == pico$lon &
m1Assoc$lat == pico$lat)) == 0
if (orphan)
orphan_picos <- rbind(orphan_picos, data.frame(pico))
else
child_picos <- rbind(child_picos, data.frame(pico))
}
orphan_picos$assoc_m1 <- rep(-1, nrow(orphan_picos))
# Store the used M1 switches
used_m1s <- data.frame()
for (gr in unique(m1Assoc$group)) {
idx <- which(m1Coords$group == gr)
used_m1s <- rbind(used_m1s, data.frame(m1Coords[idx,]))
}
# Store how many pico cells are associated to them
used_m1s$num_picos <- rep(0, nrow(used_m1s))
for (row in 2:nrow(used_m1s)) {
used_m1 <- used_m1s[row,]
cells_of_m1 <- subset(m1Assoc, group == used_m1$group)
for (row2 in 1:nrow(child_picos)) {
child_pico <- child_picos[row2,]
filtered <- subset(cells_of_m1, lon==child_pico$lon &
lat==child_pico$lat)
if(nrow(subset(cells_of_m1, lon==child_pico$lon &
lat==child_pico$lat)) > 0) {
used_m1s[row,]$num_picos = used_m1s[row,]$num_picos + 1
}
}
}
# Store how many more pico cells they can hold (x4 pico = x1 macro)
for(row in 1:nrow(used_m1s)) {
used_m1s[row,]$num_picos <- used_m1s[row,]$num_picos * 4
used_m1s[row,]$num_picos <- used_m1s[row,]$num_picos -
(used_m1s[row,]$num_picos / 4)
}
# Find the closest orphan for each used M1
m1 <- 1
while (length(which(orphan_picos$assoc_m1 == -1)) > 0) {
if (used_m1s[m1,]$num_picos == 0) {
m1 <- which(used_m1s$num_picos > 0)[1]
}
m1_switch <- used_m1s[m1,]
min_dis <- Inf
min_orphan <- -1
for (row in 1:nrow(orphan_picos)) {
orphan_pico <- orphan_picos[row,]
if (orphan_pico$assoc_m1 == -1) {
dis <- SDMTools::distance(lat1 = m1_switch$lat, lon1 = m1_switch$lon,
lat2 = orphan_pico$lat,
lon2 = orphan_pico$lon)$distance
if (dis < min_dis) {
min_dis <- dis
min_orphan <- row
}
}
}
# do the assignment
orphan_picos[min_orphan,]$assoc_m1 = m1_switch$group
used_m1s[m1,]$num_picos = used_m1s[m1,]$num_picos - 1
# Choose next M1 switch to assign antennas
m1 <- (m1 + 1) %% (nrow(used_m1s) + 1)
m1 <- ifelse(m1 == 0, yes=1, no=m1)
}
# Append the M1 switches
orphan_picos$type <- NULL
orphan_picos$group <- orphan_picos$assoc_m1
orphan_picos$assoc_m1 <- NULL
m1Assoc <- rbind(m1Assoc, orphan_picos)
# Create the frames
frames <- graphFrames(m1Assoc, m1Coords, m1AccAssocs, accCentCoords,
m2Assocs, m2Switches, m2AggAssocs, aggCentCoords,
m3Assocs, m3Switches)
# Add node property for the cell type
pico_nodes <- c()
micro_nodes <- c()
cell_nodes <- subset(frames$nodes, type=='cell')
for (i in 1:nrow(cell_nodes)) {
if (nrow(subset(pico_cells, lon == cell_nodes[i,]$lon &
lat == cell_nodes[i,]$lat)) > 0)
pico_nodes <- c(pico_nodes, as.character(cell_nodes[i,]$id))
else
micro_nodes <- c(micro_nodes, as.character(cell_nodes[i,]$id))
}
newNodes <- addNodeProps(nodes = frames$nodes, id_ = pico_nodes,
properties = list(size=rep('pico', length(pico_nodes)),
coverageRadius=
rep(100, length(pico_nodes)),
delay=rep(2.5, length(pico_nodes)),
cost=rep(1.6, length(pico_nodes))))
newNodes <- addNodeProps(nodes = newNodes, id_ = micro_nodes,
properties = list(size=rep('micro', length(micro_nodes)),
coverageRadius=
rep(400, length(micro_nodes)),
delay=rep(5, length(micro_nodes)),
                                           cost=rep(34.6, length(micro_nodes))))
frames$nodes <- newNodes
# Attach edge servers
attachFrames <- attachServers(nodes = frames$nodes, links = frames$links,
numServers = 6,
bandwidth = 12,
bandwidthUnits = "Mbps",
distance = 0,
distanceUnits = "meter",
switchType = "m1",
properties = list(cpu=2, mem=20, disk=100,
cost=5.83),
idPrefix = "edge_server")
# Attach cloud servers
attachFrames <- attachServers(nodes = frames$nodes, links = frames$links,
numServers = 2,
bandwidth = 12,
bandwidthUnits = "Mbps",
distance = 0,
distanceUnits = "meter",
switchType = "m3",
properties = list(cpu=20, mem=200, disk=1000,
cost=2.48),
idPrefix = "cloud_server")
######### THE TWO SQUARES WHERE FOG NODES APPEAR #########
# # left square
# tl = list(lat=40.266662, lon=-3.756308)
# br = list(lat=40.262594, lon=-3.751914)
# # right square
# tl2 = list(lat=40.264600, lon=-3.751753)
# br2 = list(lat=40.260469, lon=-3.748170)
square1 <- data.frame(tl_lat=40.266662, tl_lon=-3.756308,
br_lat=40.262594, br_lon=-3.751914)
square2 <- data.frame(tl_lat=40.264600, tl_lon=-3.751753,
br_lat=40.260469, br_lon=-3.748170)
squares <- rbind(square1, square2) # TODO DEPRECATED, just using one
squares <- square1
######## GENERATE THE FOG NODES #######
robots_per_square <- 10
mesh_robot_connections <- ncol(combn(rep(0,robots_per_square), 2))
from_sqs <- data.frame(matrix(0, ncol = nrow(squares),
nrow = mesh_robot_connections))
to_sqs <- data.frame(matrix(0, ncol = nrow(squares),
nrow = mesh_robot_connections))
for (i in 1:nrow(squares)) {
tos <- c()
froms <- c()
square <- squares[i,]
prefix <- paste("robot_sq", i, sep="")
attachFrames <- attachFogNodes(nodes = attachFrames$nodes,
links = attachFrames$links,
latB = square$br_lat, latT = square$tl_lat,
lonL = square$tl_lon, lonR = square$br_lon,
numNodes = robots_per_square,
properties = list(cpu = 1, mem = 1, disk=10,
cost=15.27),
bandwidth = 20, bandwidthUnits = "Mpbs",
idPrefix = prefix)
# Remove the links of the fog nodes
attachFrames$links <- head(attachFrames$links,
nrow(attachFrames$links) - robots_per_square)
# Connect fog nodes among them
last_robot_ids <- as.vector(tail(attachFrames$nodes, robots_per_square)$id)
robot_pairs <- combn(last_robot_ids, 2)
for (c in 1:ncol(robot_pairs)) {
link <- tail(attachFrames$links, 1)
link$from <- robot_pairs[1,c]
link$to <- robot_pairs[2,c]
# Include the robot link in the connectivity data.frame
froms <- c(froms, link$from)
tos <- c(tos, link$to)
# Get robot coordinates
from_row <- as.numeric(rownames(subset(attachFrames$nodes, id==link$from)))
from_lon <- attachFrames$nodes[from_row,]$lon
from_lat <- attachFrames$nodes[from_row,]$lat
to_row <- as.numeric(rownames(subset(attachFrames$nodes, id==link$to)))
to_lon <- attachFrames$nodes[to_row,]$lon
to_lat <- attachFrames$nodes[to_row,]$lat
# Attach the robot with its distances
link$distance <- SDMTools::distance(lat1 = to_lat, lon1 = to_lon,
lat2 = from_lat,
lon2 = from_lon)$distance
attachFrames$links <- rbind(attachFrames$links, link)
}
# Specify the meshed connections delays of robots inside the square
from_sqs[,i] <- froms
to_sqs[,i] <- tos
# Attach endpoint node
square_endpoint <- paste("endpoint_sq", i, sep="")
last_node <- tail(attachFrames$nodes, 1)
last_robot <- tail(attachFrames$nodes, 1)
last_link <- tail(attachFrames$links, 1)
#
last_node$id <- square_endpoint
last_node$type <- 'endpoint'
last_node$cpu <- 0
last_node$mem <- 0
last_node$disk <- 0
attachFrames$nodes <- rbind(attachFrames$nodes, last_node)
#
last_link$from <- square_endpoint
last_link$to <- last_robot$id
attachFrames$links <- rbind(attachFrames$links, last_link)
}
######### SET THE D2D DELAY BETWEEN ROBOTS #########
d2d_delay <- 0.2 # ms
for (i in 1:nrow(squares)) {
newLinks <- addLinkProps(links = attachFrames$links, from_ = from_sqs[,i],
to_ = to_sqs[,i],
properties = list(delay=rep(d2d_delay,
nrow(from_sqs))))
attachFrames$links <- newLinks
}
######### SET FIXED INFRA DELAYS ASSUMING FIBER #########
froms <- c()
tos <- c()
delays <- c()
for (row in 1:nrow(attachFrames$links)) {
from_id <- as.character(attachFrames$links[row,]$from)
to_id <- as.character(attachFrames$links[row,]$to)
distance <- as.numeric(attachFrames$links[row,]$distance)
is_src_robot <- grepl("robot", from_id)
is_dst_robot <- grepl("robot", to_id)
if (!is_src_robot && !is_dst_robot) {
froms <- c(froms, from_id)
tos <- c(tos, to_id)
    delays <- c(delays, distance / 3e5) # delay in ms: distance in metres / light speed (3e5 m per ms)
}
}
newLinks <- addLinkProps(links = attachFrames$links, from_ = froms, to_ = tos,
properties = list(delay=delays))
attachFrames$links <- newLinks
# Generate the fiber hop delays, which include the processing delay of switches
fiber_delays <- read.csv('fiber_delays.csv', header = FALSE)
froms <- c()
tos <- c()
delays <- c()
for (row in 1:nrow(attachFrames$links)) {
from <- as.character(attachFrames$links[row,]$from)
to <- as.character(attachFrames$links[row,]$to)
is_src_cell <- grepl("cell", from)
is_src_m1 <- grepl("m1", from)
is_dst_m1 <- grepl("m1", to)
is_src_m2 <- grepl("m2", from)
is_dst_m2 <- grepl("m2", to)
is_src_m3 <- grepl("m3", from)
is_dst_m3 <- grepl("m3", to)
if ((is_src_cell && is_dst_m1) ||
(is_src_m1 && is_dst_m1) ||
(is_src_m1 && is_dst_m2) ||
(is_src_m2 && is_dst_m2) ||
(is_src_m2 && is_dst_m2) ||
(is_src_m3 && is_dst_m3)) {
print('in')
froms <- c(froms, from)
tos <- c(tos, to)
rand_delay <- runif(1, min=1, max=nrow(fiber_delays))
delays <- c(delays, fiber_delays[rand_delay, ]) # delay is in ms
}
}
newLinks <- addLinkProps(links = attachFrames$links, from_ = froms, to_ = tos,
properties = list(delay=delays))
attachFrames$links <- newLinks
# Store the generated graph
links <- attachFrames$links
nodes <- attachFrames$nodes
g = igraph::graph_from_data_frame(links, vertices = nodes, directed = FALSE)
igraph::write_graph(graph = g, file = "/tmp/infra.gml", format = "gml")
|
/placement/constructive_mapper/graphs/infra-gen.R
|
no_license
|
MartinPJorge/placement
|
R
| false | false | 13,239 |
r
|
library(mecgen)
library(SDMTools)
# Read the micro cells
micros <- read.csv(file="fixed-cells/cobo-calleja/micro-cells.csv", header=TRUE,
sep = ' ')
micros$type <- rep('micro', nrow(micros))
# Generate with random uniform the pico cells
repulsion <- 50
cobo.bl <- c(40.253541,-3.775409)
cobo.br <- c(40.253541,-3.737324)
cobo.tr <- c(40.276686,-3.737324)
cobo.tl <- c(40.276686,-3.775409)
# Insert the first pico cell
curr_pico.lon <- runif(1, min = cobo.bl[2], max = cobo.br[2])
curr_pico.lat <- runif(1, min = cobo.br[1], max = cobo.tr[1])
pico_cells <- data.frame(lon = curr_pico.lon, lat = curr_pico.lat, type='pico')
while(nrow(pico_cells) < 40 - 1) {
curr_pico.lon <- runif(1, min = cobo.bl[2], max = cobo.br[2])
curr_pico.lat <- runif(1, min = cobo.br[1], max = cobo.tr[1])
no_overlap <- TRUE
for (row in 1:nrow(pico_cells)) {
pico <- pico_cells[row,]
dis <- SDMTools::distance(lat1 = curr_pico.lat, lon1 = curr_pico.lon,
lat2 = pico$lat, lon2 = pico$lon)$distance
if (dis < repulsion) {
no_overlap <- FALSE
break
}
}
if (no_overlap) {
pico_cells <- rbind(pico_cells,
data.frame(lon=curr_pico.lon, lat=curr_pico.lat,
type='pico'))
}
}
# Obtain the cells of Cobo Calleja
cobo_cells <- rbind(micros, data.frame(head(pico_cells, 10)))
assocs <- build5GScenario(lats = cobo_cells$lat, lons = cobo_cells$lon)
# Obtain the link and nodes frames
m1Assoc <- assocs[[1]]
m1Coords <- assocs[[2]]
m1AccAssocs <- assocs[[3]]
accCentCoords <- assocs[[4]]
m2Assocs <- assocs[[5]]
m2Switches <- assocs[[6]]
m2AggAssocs <- assocs[[7]]
aggCentCoords <- assocs[[8]]
m3Assocs <- assocs[[9]]
m3Switches <- assocs[[10]]
# Attach remaining pico cells to the M1 nodes where other pico cells that are
# close to them attach to
orphan_picos <- data.frame()
child_picos <- data.frame()
for (row in 1:nrow(pico_cells)) {
pico <- pico_cells[row,]
orphan <- length(which(m1Assoc$lon == pico$lon &
m1Assoc$lat == pico$lat)) == 0
if (orphan)
orphan_picos <- rbind(orphan_picos, data.frame(pico))
else
child_picos <- rbind(child_picos, data.frame(pico))
}
orphan_picos$assoc_m1 <- rep(-1, nrow(orphan_picos))
# Store the used M1 switches
used_m1s <- data.frame()
for (gr in unique(m1Assoc$group)) {
idx <- which(m1Coords$group == gr)
used_m1s <- rbind(used_m1s, data.frame(m1Coords[idx,]))
}
# Store how many pico cells are associated to them
used_m1s$num_picos <- rep(0, nrow(used_m1s))
for (row in 2:nrow(used_m1s)) {
used_m1 <- used_m1s[row,]
cells_of_m1 <- subset(m1Assoc, group == used_m1$group)
for (row2 in 1:nrow(child_picos)) {
child_pico <- child_picos[row2,]
filtered <- subset(cells_of_m1, lon==child_pico$lon &
lat==child_pico$lat)
if(nrow(subset(cells_of_m1, lon==child_pico$lon &
lat==child_pico$lat)) > 0) {
used_m1s[row,]$num_picos = used_m1s[row,]$num_picos + 1
}
}
}
# Store how many more pico cells they can hold (x4 pico = x1 macro)
for(row in 1:nrow(used_m1s)) {
used_m1s[row,]$num_picos <- used_m1s[row,]$num_picos * 4
used_m1s[row,]$num_picos <- used_m1s[row,]$num_picos -
(used_m1s[row,]$num_picos / 4)
}
# Find the closest orphan for each used M1
m1 <- 1
while (length(which(orphan_picos$assoc_m1 == -1)) > 0) {
if (used_m1s[m1,]$num_picos == 0) {
m1 <- which(used_m1s$num_picos > 0)[1]
}
m1_switch <- used_m1s[m1,]
min_dis <- Inf
min_orphan <- -1
for (row in 1:nrow(orphan_picos)) {
orphan_pico <- orphan_picos[row,]
if (orphan_pico$assoc_m1 == -1) {
dis <- SDMTools::distance(lat1 = m1_switch$lat, lon1 = m1_switch$lon,
lat2 = orphan_pico$lat,
lon2 = orphan_pico$lon)$distance
if (dis < min_dis) {
min_dis <- dis
min_orphan <- row
}
}
}
# do the assignment
orphan_picos[min_orphan,]$assoc_m1 = m1_switch$group
used_m1s[m1,]$num_picos = used_m1s[m1,]$num_picos - 1
# Choose next M1 switch to assign antennas
m1 <- (m1 + 1) %% (nrow(used_m1s) + 1)
m1 <- ifelse(m1 == 0, yes=1, no=m1)
}
# Append the M1 switches
orphan_picos$type <- NULL
orphan_picos$group <- orphan_picos$assoc_m1
orphan_picos$assoc_m1 <- NULL
m1Assoc <- rbind(m1Assoc, orphan_picos)
# Create the frames
frames <- graphFrames(m1Assoc, m1Coords, m1AccAssocs, accCentCoords,
m2Assocs, m2Switches, m2AggAssocs, aggCentCoords,
m3Assocs, m3Switches)
# Add node property for the cell type
pico_nodes <- c()
micro_nodes <- c()
cell_nodes <- subset(frames$nodes, type=='cell')
for (i in 1:nrow(cell_nodes)) {
if (nrow(subset(pico_cells, lon == cell_nodes[i,]$lon &
lat == cell_nodes[i,]$lat)) > 0)
pico_nodes <- c(pico_nodes, as.character(cell_nodes[i,]$id))
else
micro_nodes <- c(micro_nodes, as.character(cell_nodes[i,]$id))
}
newNodes <- addNodeProps(nodes = frames$nodes, id_ = pico_nodes,
properties = list(size=rep('pico', length(pico_nodes)),
coverageRadius=
rep(100, length(pico_nodes)),
delay=rep(2.5, length(pico_nodes)),
cost=rep(1.6, length(pico_nodes))))
newNodes <- addNodeProps(nodes = newNodes, id_ = micro_nodes,
properties = list(size=rep('micro', length(micro_nodes)),
coverageRadius=
rep(400, length(micro_nodes)),
delay=rep(5, length(micro_nodes)),
                                           cost=rep(34.6, length(micro_nodes))))
frames$nodes <- newNodes
# Attach edge servers
attachFrames <- attachServers(nodes = frames$nodes, links = frames$links,
numServers = 6,
bandwidth = 12,
bandwidthUnits = "Mbps",
distance = 0,
distanceUnits = "meter",
switchType = "m1",
properties = list(cpu=2, mem=20, disk=100,
cost=5.83),
idPrefix = "edge_server")
# Attach cloud servers
attachFrames <- attachServers(nodes = frames$nodes, links = frames$links,
numServers = 2,
bandwidth = 12,
bandwidthUnits = "Mbps",
distance = 0,
distanceUnits = "meter",
switchType = "m3",
properties = list(cpu=20, mem=200, disk=1000,
cost=2.48),
idPrefix = "cloud_server")
######### THE TWO SQUARES WHERE FOG NODES APPEAR #########
# # left square
# tl = list(lat=40.266662, lon=-3.756308)
# br = list(lat=40.262594, lon=-3.751914)
# # right square
# tl2 = list(lat=40.264600, lon=-3.751753)
# br2 = list(lat=40.260469, lon=-3.748170)
square1 <- data.frame(tl_lat=40.266662, tl_lon=-3.756308,
br_lat=40.262594, br_lon=-3.751914)
square2 <- data.frame(tl_lat=40.264600, tl_lon=-3.751753,
br_lat=40.260469, br_lon=-3.748170)
squares <- rbind(square1, square2) # TODO DEPRECATED, just using one
squares <- square1
######## GENERATE THE FOG NODES #######
robots_per_square <- 10
mesh_robot_connections <- ncol(combn(rep(0,robots_per_square), 2))
from_sqs <- data.frame(matrix(0, ncol = nrow(squares),
nrow = mesh_robot_connections))
to_sqs <- data.frame(matrix(0, ncol = nrow(squares),
nrow = mesh_robot_connections))
for (i in 1:nrow(squares)) {
tos <- c()
froms <- c()
square <- squares[i,]
prefix <- paste("robot_sq", i, sep="")
attachFrames <- attachFogNodes(nodes = attachFrames$nodes,
links = attachFrames$links,
latB = square$br_lat, latT = square$tl_lat,
lonL = square$tl_lon, lonR = square$br_lon,
numNodes = robots_per_square,
properties = list(cpu = 1, mem = 1, disk=10,
cost=15.27),
bandwidth = 20, bandwidthUnits = "Mpbs",
idPrefix = prefix)
# Remove the links of the fog nodes
attachFrames$links <- head(attachFrames$links,
nrow(attachFrames$links) - robots_per_square)
# Connect fog nodes among them
last_robot_ids <- as.vector(tail(attachFrames$nodes, robots_per_square)$id)
robot_pairs <- combn(last_robot_ids, 2)
for (c in 1:ncol(robot_pairs)) {
link <- tail(attachFrames$links, 1)
link$from <- robot_pairs[1,c]
link$to <- robot_pairs[2,c]
# Include the robot link in the connectivity data.frame
froms <- c(froms, link$from)
tos <- c(tos, link$to)
# Get robot coordinates
from_row <- as.numeric(rownames(subset(attachFrames$nodes, id==link$from)))
from_lon <- attachFrames$nodes[from_row,]$lon
from_lat <- attachFrames$nodes[from_row,]$lat
to_row <- as.numeric(rownames(subset(attachFrames$nodes, id==link$to)))
to_lon <- attachFrames$nodes[to_row,]$lon
to_lat <- attachFrames$nodes[to_row,]$lat
# Attach the robot with its distances
link$distance <- SDMTools::distance(lat1 = to_lat, lon1 = to_lon,
lat2 = from_lat,
lon2 = from_lon)$distance
attachFrames$links <- rbind(attachFrames$links, link)
}
# Specify the meshed connections delays of robots inside the square
from_sqs[,i] <- froms
to_sqs[,i] <- tos
# Attach endpoint node
square_endpoint <- paste("endpoint_sq", i, sep="")
last_node <- tail(attachFrames$nodes, 1)
last_robot <- tail(attachFrames$nodes, 1)
last_link <- tail(attachFrames$links, 1)
#
last_node$id <- square_endpoint
last_node$type <- 'endpoint'
last_node$cpu <- 0
last_node$mem <- 0
last_node$disk <- 0
attachFrames$nodes <- rbind(attachFrames$nodes, last_node)
#
last_link$from <- square_endpoint
last_link$to <- last_robot$id
attachFrames$links <- rbind(attachFrames$links, last_link)
}
######### SET THE D2D DELAY BETWEEN ROBOTS #########
d2d_delay <- 0.2 # ms
for (i in 1:nrow(squares)) {
newLinks <- addLinkProps(links = attachFrames$links, from_ = from_sqs[,i],
to_ = to_sqs[,i],
properties = list(delay=rep(d2d_delay,
nrow(from_sqs))))
attachFrames$links <- newLinks
}
######### SET FIXED INFRA DELAYS ASSUMING FIBER #########
froms <- c()
tos <- c()
delays <- c()
for (row in 1:nrow(attachFrames$links)) {
from_id <- as.character(attachFrames$links[row,]$from)
to_id <- as.character(attachFrames$links[row,]$to)
distance <- as.numeric(attachFrames$links[row,]$distance)
is_src_robot <- grepl("robot", from_id)
is_dst_robot <- grepl("robot", to_id)
if (!is_src_robot && !is_dst_robot) {
froms <- c(froms, from_id)
tos <- c(tos, to_id)
    delays <- c(delays, distance / 3e5) # delay in ms: distance in metres / light speed (3e5 m per ms)
}
}
newLinks <- addLinkProps(links = attachFrames$links, from_ = froms, to_ = tos,
properties = list(delay=delays))
attachFrames$links <- newLinks
# Generate the fiber hop delays, which include the processing delay of switches
fiber_delays <- read.csv('fiber_delays.csv', header = FALSE)
froms <- c()
tos <- c()
delays <- c()
for (row in 1:nrow(attachFrames$links)) {
from <- as.character(attachFrames$links[row,]$from)
to <- as.character(attachFrames$links[row,]$to)
is_src_cell <- grepl("cell", from)
is_src_m1 <- grepl("m1", from)
is_dst_m1 <- grepl("m1", to)
is_src_m2 <- grepl("m2", from)
is_dst_m2 <- grepl("m2", to)
is_src_m3 <- grepl("m3", from)
is_dst_m3 <- grepl("m3", to)
if ((is_src_cell && is_dst_m1) ||
(is_src_m1 && is_dst_m1) ||
(is_src_m1 && is_dst_m2) ||
(is_src_m2 && is_dst_m2) ||
(is_src_m2 && is_dst_m2) ||
(is_src_m3 && is_dst_m3)) {
print('in')
froms <- c(froms, from)
tos <- c(tos, to)
rand_delay <- runif(1, min=1, max=nrow(fiber_delays))
delays <- c(delays, fiber_delays[rand_delay, ]) # delay is in ms
}
}
newLinks <- addLinkProps(links = attachFrames$links, from_ = froms, to_ = tos,
properties = list(delay=delays))
attachFrames$links <- newLinks
# Store the generated graph
links <- attachFrames$links
nodes <- attachFrames$nodes
g = igraph::graph_from_data_frame(links, vertices = nodes, directed = FALSE)
igraph::write_graph(graph = g, file = "/tmp/infra.gml", format = "gml")
|
library(shinyjs)
library(googleAuthR)
library(googleID)
options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/userinfo.email",
"https://www.googleapis.com/auth/userinfo.profile"))
options("googleAuthR.webapp.client_id" = "769713801246-qk2qhqpqt1k0g8rurm0jkomg73kggj1i.apps.googleusercontent.com")
options("googleAuthR.webapp.client_secret" = "VTMwnOGWKame7JZPFlV4G7v0")
ui <- navbarPage(
title = "App Name",
windowTitle = "Browser window title",
tabPanel("Tab 1",
useShinyjs(),
sidebarLayout(
sidebarPanel(
p("Welcome!"),
googleAuthUI("gauth_login")
),
mainPanel(
textOutput("display_username"),
textOutput("display_username2")
)
)
),
tabPanel("Tab 2",
p("Layout for tab 2")
)
)
server <- function(input, output, session) {
## Global variables needed throughout the app
rv <- reactiveValues(
login = FALSE
)
## Authentication
accessToken <- callModule(googleAuth, "gauth_login",
login_class = "btn btn-primary",
logout_class = "btn btn-primary")
userDetails <- reactive({
validate(
need(accessToken(), "not logged in")
)
rv$login <- TRUE
with_shiny(get_user_info, shiny_access_token = accessToken())
})
  ## Users: check the logged-in Google account against the whitelist,
  ## evaluated reactively once the user details are available
  the_list <- reactive({
    whitelist(userDetails(), c("mcknightalan@gmail.com", "another@email.com", "yet@anotheremail.com"))
  })
output$display_username <- renderText({
validate(
need(userDetails(), "getting user details")
)
    if (the_list()) {
      a <- "You are on the list."
    } else {
      a <- "If you're not on the list, you're not getting in."
    }
a
})
## Display user's Google display name after successful login
output$display_username2 <- renderText({
validate(
need(userDetails(), "getting user details")
)
userDetails()$displayName
})
  ## Workaround to avoid shinyapps.io URL problems
observe({
if (rv$login) {
shinyjs::onclick("gauth_login-googleAuthUi",
shinyjs::runjs("window.location.href = 'https://yourdomain.shinyapps.io/appName';"))
}
})
}
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
alanmcknight/LoginLearning1
|
R
| false | false | 2,447 |
r
|
# Interleave two equal-length vectors element by element:
# conCat(c(1, 3), c(2, 4)) returns c(1, 2, 3, 4).
conCat <- function(vec1, vec2){
  if(length(vec1) == 0){
    return(c())
  }
  if(length(vec1) != length(vec2)){
    # mismatched lengths would silently recycle and corrupt downstream results
    stop("Two vectors must be the same length to concat in this pipeline.")
  }
  res <- c()
  for(i in seq_along(vec1)){
    res <- c(res, vec1[i], vec2[i])
  }
  return(res)
}
|
/scripts/helper.R
|
no_license
|
zhangyuqing/batch_prediction_pipeline
|
R
| false | false | 288 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printers.R
\name{knit_print.flextable}
\alias{knit_print.flextable}
\title{Render flextable in rmarkdown}
\usage{
\method{knit_print}{flextable}(x, ...)
}
\arguments{
\item{x}{a \code{flextable} object}
\item{...}{further arguments, not used.}
}
\description{
Function used to render flextable in knitr/rmarkdown documents.
HTML, Word and PowerPoint outputs are supported.
Function \code{htmltools_value} returns an HTML version of the flextable;
this function is to be used within Shiny applications with \code{renderUI()}.
}
\note{
For Word (docx) output, if pandoc version >= 2.0 is used, a raw XML block
with the table code will be inserted. If pandoc version < 2.0 is used, an
error will be raised. Insertion of images is not supported
with rmarkdown for Word documents (use the package officedown instead).
For PowerPoint (pptx) output, if pandoc version < 2.4 is used, an error
will be raised.
}
\section{Word chunk options}{
Result can be aligned with chunk option \code{ft.align} that
accepts values 'left', 'center' and 'right'.
Word option 'Allow row to break across pages' can be
activated with chunk option \code{ft.split} set to TRUE.
}
\section{PowerPoint chunk options}{
Position should be defined with options \code{ft.left}
and \code{ft.top}. Theses are the top left coordinates
of the placeholder that will contain the table. They
default to \code{{r ft.left=1, ft.left=2}}.
}
\seealso{
Other flextable print function: \code{\link{as_raster}},
\code{\link{docx_value}}, \code{\link{format.flextable}},
\code{\link{htmltools_value}},
\code{\link{plot.flextable}},
\code{\link{print.flextable}},
\code{\link{save_as_html}}, \code{\link{save_as_image}}
}
\author{
Maxim Nazarov
}
\concept{flextable print function}
|
/man/knit_print.flextable.Rd
|
no_license
|
mnazarov/flextable
|
R
| false | true | 1,825 |
rd
|
library(CARBayes)
library(sp)
library(spdep)
library(nnet)
library(MASS)
library(coda)
library(readr)
library(dplyr)
library(gclus)
library(ggplot2)
library(sf)  # needed for st_as_sf() in the prediction and mapping sections
options(scipen = 999)
# For multinomial (no spatial effects): https://stats.idre.ucla.edu/r/dae/multinomial-logistic-regression/
# For spatial effects: https://cran.r-project.org/web/packages/CARBayes/vignettes/CARBayes.pdf
# Starts with alldata object from 03_getvars.R.
#
# Prepare ------------------------------------------------------------------------------------------
#
# Read
rundata <- read_rds("./rivanna_data/working/gentri/alldata.Rds")
rundata <- rundata %>% arrange(GEOID)
# Create sp object
rundata <- as(rundata, "Spatial")
# Create other objects
Wnb <- poly2nb(rundata) # turn data into a neighborhood (nb) object
W <- nb2mat(Wnb, style = "B") # create neighborhood matrix
#
# Test for spatial autocorrelation ----------------------------------------------------------------------------------
#
# https://rpubs.com/chrisbrunsdon/part2
# turn data into a listw object, the required form of binary spatial adjacency information
# (based on border sharing) used by the moran.mc and jointcount.mc functions
Wlist <- nb2listw(Wnb, style = "B")
# A permutation test for same colour join count statistics calculated by using nsim random
# permutations of fx for the given spatial weighting scheme, to establish the ranks of the
# observed statistics (for each colour) in relation to the nsim simulated values.
# Null hypothesis: no clustering.
joincount.mc(as.factor(rundata@data$type1218), Wlist, nsim = 1000)
# test for spatial autocorrelation using a spatial weights matrix in weights list form for
# testing whether same-colour joins occur more frequently than would be expected if the zones
# were labelled in a spatially random way. The assumptions underlying the test are sensitive
# to the form of the graph of neighbour relationships and other factors, and results may be
# checked against those of joincount.mc permutations.
# Null hypothesis: no clustering.
joincount.test(as.factor(rundata@data$type1218), Wlist)
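# Added sanity check (sketch, not part of the original analysis): randomly permuting the
# tract labels removes any spatial structure, so the same test on shuffled labels
# should come back non-significant.
set.seed(42)
shuffled_labels <- as.factor(sample(as.character(rundata@data$type1218)))
joincount.mc(shuffled_labels, Wlist, nsim = 1000)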
#
# Inspect ---------------------------------------------------------------------------------
#
cormatdata <- rundata@data %>% select(chg1218_tct_withba,
chg1218_tct_hhinc_pct,
chg1218_tct_nonhispwh,
chg1218_tct_medrent_pct,
chg1218_tct_medhome_pct,
chg1218_tct_singfam,
chg1218_tct_nonfam,
chg1218_tct_popdens,
chg1218_tct_housdens,
chg1218_tct_popgrowth,
chg1218_tct_renters)
cormat <- cor(cormatdata)  # pairwise correlations used to order and colour the scatterplot matrix
cpairs(cormatdata, order.single(cormat), dmat.color(cormat))
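# Added view (sketch): a plain numeric version of the same pairwise correlations,
# easier to scan for collinearity than the coloured scatterplot matrix.
round(cor(cormatdata), 2)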
#
# Try MVS.CARleroux -----------------------------------------------------------------------------------------------
#
# Create trials variable: one Bernoulli trial per tract
modeltrials <- rep(1, nrow(cormatdata))
# Create outcome variable
rundata@data <- rundata@data %>% mutate(c1_notvul = ifelse(type1218 == "Not vulnerable", 1, 0),
c2_vulnotg = ifelse(type1218 == "Vulnerable, did not gentrify", 1, 0),
c3_vulg = ifelse(type1218 == "Vulnerable, gentrified", 1, 0))
rundata@data$c4_vul = rundata@data$c2_vulnotg + rundata@data$c3_vulg
# binomial outcomes: c3_vulg, c4_vul
# Model
# Inference for this model is based on 3 parallel Markov chains
# each chain: 210,000 samples, 10,000 burn-in, thin every 100 (2,000 retained samples per chain)
# NOTE: with a binomial outcome we can use MALA=TRUE and get potentially much faster mixing (Langevin updates)
# the MCMC does do adaptive tuning as it runs, so no need to estimate the proposal s.d.
#data2 <- rundata@data %>% dplyr::select(tct_diffhou12,tct_newbuild18,tct_multunit12,tct_transit12,
# chg1218_tct_renters,chg1218_tct_medhome_pct,chg1218_tct_medrent_pct,
# chg1218_tct_singfam,chg1218_tct_popgrowth,chg1218_tct_housdens
#)
##cpairs(data2)
#cov.mean <- apply(data2,2,mean)
#cov.sd <- apply(data2,2,sd)
#for(j in 1:ncol(data2)){
# data2[,j] <- (data2[,j]-cov.mean[j])/cov.sd[j]
#}
# try again with normalized betas (mean 0, sd 1)
#chain2.norm <- MVS.CARleroux(formula = as.matrix(rundata@data[ , c("c2_vulnotg", "c1_notvul", "c3_vulg")]) ~
# tct_diffhou12 + tct_newbuild18 + tct_multunit12 + tct_transit12 +
# chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct + chg1218_tct_singfam +
# chg1218_tct_popgrowth + chg1218_tct_housdens,
# data = data2,
# family = "multinomial", W = W, burnin = 1e5, n.sample = 2.1e6, thin = 500, trials = modeltrials)
# Outcome = Gentrified
chain1.binG <- S.CARleroux(formula = c3_vulg ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
chain2.binG <- S.CARleroux(formula = c3_vulg ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
chain3.binG <- S.CARleroux(formula = c3_vulg ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
# Outcome = vulnerable
chain1.binV <- S.CARleroux(formula = c4_vul ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
chain2.binV <- S.CARleroux(formula = c4_vul ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
chain3.binV <- S.CARleroux(formula = c4_vul ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
# Outcome = vulnerable not gentrified
chain1.binVng <- S.CARleroux(formula = c2_vulnotg ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
chain2.binVng <- S.CARleroux(formula = c2_vulnotg ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
chain3.binVng <- S.CARleroux(formula = c2_vulnotg ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
MALA = TRUE,
family = "binomial", W = W, burnin = 1e4, n.sample = 2.1e5, thin = 100, trials = modeltrials)
# save.image("~/git/fairfax/src/gentri/mcmcoutBinomial.RData")
# Output:
# (i) posterior median (Median);
# (ii) 95% credible intervals (2.5%, 97.5%);
# (iii) the effective number of independent samples (n.effective);
# (iv) the convergence diagnostic proposed by Geweke (1992) (Geweke.diag) as a Z-score (should be <1.96)
print(chain1.binG)
print(chain2.binG)
print(chain3.binG)
print(chain1.binVng)
print(chain2.binVng)
print(chain3.binVng)
# Combine results: Vulnerable gentrified
beta.samples.matrix <- rbind(chain1.binG$samples$beta, chain2.binG$samples$beta, chain3.binG$samples$beta)
colnames(beta.samples.matrix) <- colnames(chain1.binG$X)
# Then posterior medians and 95% credible intervals can be computed as follows:
round(t(apply(beta.samples.matrix, 2, quantile, c(0.5, 0.025, 0.975))),5)
# Combine results: Vulnerable not gentrified
beta.samples.matrix <- rbind(chain1.binVng$samples$beta, chain2.binVng$samples$beta, chain3.binVng$samples$beta)
colnames(beta.samples.matrix) <- colnames(chain1.binVng$X)
# Then posterior medians and 95% credible intervals can be computed as follows:
round(t(apply(beta.samples.matrix, 2, quantile, c(0.5, 0.025, 0.975))),5)
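# Added sketch: the coefficients are on the log-odds scale, so exponentiating the same
# summaries gives odds ratios per unit change in each covariate.
round(exp(t(apply(beta.samples.matrix, 2, quantile, c(0.5, 0.025, 0.975)))), 3)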
#
# Model diagnostics -----------------------------------------------------------------------------------------------
#
# http://www.math.kit.edu/stoch/lehre/abib2010w/media/coda.pdf
# http://sbfnk.github.io/mfiidd/slides/mcmc_slides2.pdf
# http://sbfnk.github.io/mfiidd/mcmc_diagnostics.html
# https://theoreticalecology.wordpress.com/2011/12/09/mcmc-chain-analysis-and-convergence-diagnostics-with-coda-in-r/
# http://patricklam.org/teaching/convergence_print.pdf
# http://www.johnmyleswhite.com/notebook/2010/08/29/mcmc-diagnostics-in-r-with-the-coda-package/
# http://wlm.userweb.mwn.de/R/wlmRcoda.htm
# Fit indices
chain1.binG$modelfit
chain2.binG$modelfit
chain3.binG$modelfit
# Assess MCMC sample convergence
beta.samplesG <- mcmc.list(chain1.binG$samples$beta, chain2.binG$samples$beta, chain3.binG$samples$beta)
beta.samplesV <- mcmc.list(chain1.binV$samples$beta, chain2.binV$samples$beta, chain3.binV$samples$beta)
# each chain: 2000 retained samples x 19 coefficients (intercept + 18 covariates)
#plot(chain1$samples$beta[,13],type="l")
# plots of posterior distribution for each of the three chains
plot(beta.samplesG[,10])
plot(beta.samplesV[,10])
library(coda)
beta.samplesGall <- mcmc(rbind(chain1.binG$samples$beta, chain2.binG$samples$beta, chain3.binG$samples$beta))
beta.samplesVall <- mcmc(rbind(chain1.binV$samples$beta, chain2.binV$samples$beta, chain3.binV$samples$beta))
HPDinterval(beta.samplesGall)
HPDinterval(beta.samplesVall)
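# Added sketch: keep only the coefficients whose 95% HPD interval excludes zero.
hpdG <- HPDinterval(beta.samplesGall)
hpdG[hpdG[, "lower"] > 0 | hpdG[, "upper"] < 0, , drop = FALSE]
hpdV <- HPDinterval(beta.samplesVall)
hpdV[hpdV[, "lower"] > 0 | hpdV[, "upper"] < 0, , drop = FALSE]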
#rho.samples <- mcmc.list(chain1.bin$samples$rho, chain2.bin$samples$rho, chain3.bin$samples$rho)
#plot(rho.samples[,1]) # values >0 show the best fit has a significant spatial correlation
# Potential scale reduction factor
# A total value less than 1.1 is suggestive of convergence.
# For covariates, approximate convergence is diagnosed when the upper limit is close to 1.
# A factor of 1 means that between-chain variance and within-chain variance are equal;
# larger values mean that there is still a notable difference between chains.
# The gelman plot shows the development of the scale-reduction over time (chain steps).
gelman.diag(beta.samplesG)
gelman.plot(beta.samplesG) #these should not be going back up
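# Added check (sketch): effective sample size per coefficient across the pooled chains;
# values far below the 6,000 retained draws indicate strong autocorrelation.
effectiveSize(beta.samplesG)
effectiveSize(beta.samplesV)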
#
# Output ------------------------------------------------------------------------------------------
#
# trace plots of (beta.samples) for each coefficient for the two models
# posterior medians + HPD credible intervals for each coefficient for the two models (tables)
# show ability to do model predictions under new covariates
#
# Compare to nonspatial models ------------------------------------------------------------------------------------------
#
chain3.binG.nonspatial <- glm(formula = c3_vulg ~
tct_diffhou12 + tct_newbuild18 + tct_multunit12 + tct_transit12 +
chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct + chg1218_tct_singfam +
chg1218_tct_popgrowth + chg1218_tct_housdens,
data = rundata@data,
family = "binomial")
confint(chain3.binG.nonspatial)
data.frame( coef(chain3.binG.nonspatial) )
chain3.binV.nonspatial <- glm(formula = c4_vul ~
tct_diffhou12 + tct_newbuild18 + tct_multunit12 + tct_transit12 +
chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct + chg1218_tct_singfam +
chg1218_tct_popgrowth + chg1218_tct_housdens,
data = rundata@data,
family = "binomial")
confint(chain3.binV.nonspatial)
data.frame( coef(chain3.binV.nonspatial) )
#
# Trace plots ------------------------------------------------------------------------------------------
#
pdf("~/git/fairfax/src/gentri/trace_binG.pdf",height=6,width=4)
par(mar=c(0,0,0,0))
plot(beta.samplesG[,1:11])
dev.off()
pdf("~/git/fairfax/src/gentri/trace_binV.pdf",height=6,width=4)
par(mar=c(0,0,0,0))
plot(beta.samplesV[,1:11])
dev.off()
#
# AUC, GOF ------------------------------------------------------------------------------------------
#
library(pROC)
pdf("~/git/fairfax/src/gentri/auc.pdf",width=8,height=4)
par(mfrow=c(1,2))
plot( roc(response=rundata@data$c3_vulg, predictor=chain1.binG$fitted.values), main="Outcome:Gentrify" )
text(x=0.4,y=0.4,labels="AUC=0.83")
plot( roc(response=rundata@data$c4_vul, predictor=chain1.binV$fitted.values), main="Outcome:Vulnerable or Gentrify" )
text(x=0.4,y=0.4,labels="AUC=0.84")
dev.off()
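# Added check (sketch): simple confusion counts at a 0.5 probability cut-off,
# complementing the AUC curves above.
table(observed = rundata@data$c3_vulg, predicted = as.integer(chain1.binG$fitted.values > 0.5))
table(observed = rundata@data$c4_vul,  predicted = as.integer(chain1.binV$fitted.values > 0.5))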
#
# Model predictions ------------------------------------------------------------------------------------------
#
chain1.binG$fitted.values # model estimated probability of each tract to gentrify
# predict( chain1.binG ) # no 'predict' method for CARBayes; need to do it manually...
# Run non-spatial
predmodel <- glm(formula = c3_vulg ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
family = "binomial")
confint(predmodel)
data.frame( coef(predmodel) )
# Create new DF holding constant and manipulating key var
predmodelV <- glm(formula = c4_vul ~
tct_multunit12 + tct_vacant12 + tct_newbuild18 +
chg1218_tct_singfam + chg1218_tct_renters + chg1218_tct_medhome_pct + chg1218_tct_medrent_pct +
chg1218_tct_housdens + chg1218_tct_popgrowth +
tct_rentburd12 + tct_diffhou12 + tct_transit12 + tct_unemp12 + tct_inpov12 +
chg1218_tct_withba + chg1218_tct_nonhispwh + chg1218_tct_nonfam + chg1218_tct_hhinc_pct,
data = rundata@data,
family = "binomial")
confint(predmodelV)
data.frame( coef(predmodelV) )
#
# REVISED Model predictions: Housing price ------------------------------------------------------
#
# reduce the change in home prices by X percentage points (0 to 25 in steps of 2.5)
reduce <- seq(0,25,by=2.5)
pred_probs <- matrix(NA,nrow=nrow(rundata@data),ncol=length(reduce))
for(i in 1:length(reduce)){
newdata_houseprice <- rundata@data
newdata_houseprice$chg1218_tct_medhome_pct <- newdata_houseprice$chg1218_tct_medhome_pct - reduce[i]
pred_probs[,i] <- predict(predmodel, newdata = newdata_houseprice, type = "response")
}
# do the same but for vulnerable tracts
pred_probsV <- matrix(NA,nrow=nrow(rundata@data),ncol=length(reduce))
for(i in 1:length(reduce)){
newdata_houseprice <- rundata@data
newdata_houseprice$chg1218_tct_medhome_pct <- newdata_houseprice$chg1218_tct_medhome_pct - reduce[i]
pred_probsV[,i] <- predict(predmodelV, newdata = newdata_houseprice, type = "response")
}
# look at the intervention effects on fitted probabilities: 10% reduction in housing costs (reduce[5]=10)
# see which tracts that would gentrify will no longer gentrify
sum( pred_probs[,1] > .5 ) # 35 tracts
sum( pred_probs[,5] > .5 ) # 19 tracts
which(pred_probs[,1] > .5 & pred_probs[,5] <= .5 ) # 16 no longer gentrify
hist( pred_probs[,5] - pred_probs[,1] )
sum( pred_probsV[,1] > .5 ) # 87 tracts
sum( pred_probsV[,5] > .5 ) # 72 tracts
which(pred_probsV[,1] > .5 & pred_probsV[,5] <= .5) # 15 no longer vulnerable
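# Added summary (sketch): how the number of tracts above the 0.5 cut-off changes across
# the whole grid of housing-cost reductions, not just the 10% column.
data.frame(reduction_pct = reduce,
           n_gentrify    = colSums(pred_probs  > 0.5),
           n_vulnerable  = colSums(pred_probsV > 0.5))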
# create outcome variables
data <- st_as_sf(rundata)
threshold <- 0.4
data$type1218_predicted <- "Not vulnerable"
data$type1218_predicted[ pred_probsV[,1] > threshold ] <- "Vulnerable, did not gentrify"
data$type1218_predicted[ pred_probs[,1] > threshold ] <- "Vulnerable, gentrified"
data$type1218_intervention <- "Not vulnerable"
data$type1218_intervention[ pred_probsV[,5] > threshold ] <- "Vulnerable, did not gentrify"
data$type1218_intervention[ pred_probs[,5] > threshold ] <- "Vulnerable, gentrified"
table(data$type1218)
table(data$type1218_predicted)
table(data$type1218_intervention)
#
# Choropleth plots: 10% housing price reduction ------------------------------------------------------
#
library(tigris)
library(ggplot2)
library(ggthemes)
ffxgeo <- tracts(state = "51", county = "059", year = 2018)
ffxgeo <- st_as_sf(ffxgeo)
ffxgeo <- ffxgeo %>% select(GEOID, geometry)
p1 <- ggplot(data = data) +
geom_sf(data = ffxgeo, size = 0.2, fill = "#F0F0F0") +
geom_sf(aes(fill = type1218), size = 0.2) +
labs(title = "True Outcomes\nFairfax County Tract-Level Gentrification\n2008/12 to 2014/18") +
theme_map() +
theme(plot.title = element_text(size = 13, face = "bold", hjust = 0.5),
legend.title = element_text(size = 11, face = "bold"),
legend.text = element_text(size = 11)) +
  scale_fill_manual(name = "Status", guide = "legend", values = c("#FCFDBF", "#FEC98D", "#F1605D"), na.value = "#FFFFFF")
p2 <- ggplot(data = data) +
geom_sf(data = ffxgeo, size = 0.2, fill = "#F0F0F0") +
geom_sf(aes(fill = type1218_predicted), size = 0.2) +
labs(title = "Model Predicted Outcomes\nFairfax County Tract-Level Gentrification\n2008/12 to 2014/18") +
theme_map() +
theme(plot.title = element_text(size = 13, face = "bold", hjust = 0.5),
legend.title = element_text(size = 11, face = "bold"),
legend.text = element_text(size = 11)) +
  scale_fill_manual(name = "Status", guide = "legend", values = c("#FCFDBF", "#FEC98D", "#F1605D"), na.value = "#FFFFFF")
p3 <- ggplot(data = data) +
geom_sf(data = ffxgeo, size = 0.2, fill = "#F0F0F0") +
geom_sf(aes(fill = type1218_intervention), size = 0.2) +
labs(title = "Intervention, 10% Housing Cost Reduction\nFairfax County Tract-Level Gentrification\n2008/12 to 2014/18") +
theme_map() +
theme(plot.title = element_text(size = 13, face = "bold", hjust = 0.5),
legend.title = element_text(size = 11, face = "bold"),
legend.text = element_text(size = 11)) +
  scale_fill_manual(name = "Status", guide = "legend", values = c("#FCFDBF", "#FEC98D", "#F1605D"), na.value = "#FFFFFF")
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
pdf("~/git/fairfax/src/gentri/interventionExample.pdf",width=12,height=5)
multiplot(p1,p2,p3,cols=3)
dev.off()
#
# Model predictions: Housing price ------------------------------------------------------
#
newdata_houseprice <- with(rundata@data,
data.frame(tct_multunit12 = mean(tct_multunit12),
tct_vacant12 = mean(tct_vacant12),
tct_newbuild18 = mean(tct_newbuild18),
chg1218_tct_singfam = mean(chg1218_tct_singfam),
chg1218_tct_renters = mean(chg1218_tct_renters),
chg1218_tct_medhome_pct = seq(0, 80, by = 5),
chg1218_tct_medrent_pct = mean(chg1218_tct_medrent_pct),
chg1218_tct_housdens = mean(chg1218_tct_housdens),
chg1218_tct_popgrowth = mean(chg1218_tct_popgrowth),
tct_rentburd12 = mean(tct_rentburd12),
tct_diffhou12 = mean(tct_diffhou12),
tct_transit12 = mean(tct_transit12),
tct_unemp12 = mean(tct_unemp12),
tct_inpov12 = mean(tct_inpov12),
chg1218_tct_withba = mean(chg1218_tct_withba),
chg1218_tct_nonhispwh = mean(chg1218_tct_nonhispwh),
chg1218_tct_nonfam = mean(chg1218_tct_nonfam),
chg1218_tct_hhinc_pct = mean(chg1218_tct_hhinc_pct)
))
newdata_houseprice$housepriceP <- predict(predmodel, newdata = newdata_houseprice, type = "response")
# Get SE
newdata_houseprice <- cbind(newdata_houseprice, predict(predmodel, newdata = newdata_houseprice, type = "link",
se = TRUE))
newdata_houseprice <- within(newdata_houseprice, {
PredictedProb <- plogis(fit)
LL <- plogis(fit - (1.96 * se.fit))
UL <- plogis(fit + (1.96 * se.fit))
})
# Plot
ggplot(newdata_houseprice, aes(x = chg1218_tct_medhome_pct, y = PredictedProb)) +
labs(title = "Predicted probability of tract gentrification by percent increase in median property value",
x = "% increase in median property value",
y = "Predicted probability") +
geom_ribbon(aes(ymin = LL, ymax = UL), alpha = 0.2) +
geom_line(size = 1) +
scale_x_continuous(breaks = seq(0, 80, 10), limits = c(0, 80)) +
scale_y_continuous(breaks = seq(0, 1, 0.20), limits = c(0, 1))
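# Added refactoring sketch (not wired into the blocks below, which stay explicit):
# the "predict on the link scale, then back-transform a Wald 95% interval with plogis()"
# pattern is repeated for several covariates; a small helper captures it once.
add_pred_ci <- function(model, newdata) {
  p <- predict(model, newdata = newdata, type = "link", se.fit = TRUE)
  newdata$PredictedProb <- plogis(p$fit)
  newdata$LL <- plogis(p$fit - 1.96 * p$se.fit)
  newdata$UL <- plogis(p$fit + 1.96 * p$se.fit)
  newdata
}
# e.g. newdata_unempl <- add_pred_ci(predmodel, newdata_unempl)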
#
# Model predictions: Unemployment ------------------------------------------------------
#
newdata_unempl <- with(rundata@data,
data.frame(tct_multunit12 = mean(tct_multunit12),
tct_vacant12 = mean(tct_vacant12),
tct_newbuild18 = mean(tct_newbuild18),
chg1218_tct_singfam = mean(chg1218_tct_singfam),
chg1218_tct_renters = mean(chg1218_tct_renters),
chg1218_tct_medhome_pct = mean(chg1218_tct_medhome_pct),
chg1218_tct_medrent_pct = mean(chg1218_tct_medrent_pct),
chg1218_tct_housdens = mean(chg1218_tct_housdens),
chg1218_tct_popgrowth = mean(chg1218_tct_popgrowth),
tct_rentburd12 = mean(tct_rentburd12),
tct_diffhou12 = mean(tct_diffhou12),
tct_transit12 = mean(tct_transit12),
tct_unemp12 = seq(0, 15, by = 5),
tct_inpov12 = mean(tct_inpov12),
chg1218_tct_withba = mean(chg1218_tct_withba),
chg1218_tct_nonhispwh = mean(chg1218_tct_nonhispwh),
chg1218_tct_nonfam = mean(chg1218_tct_nonfam),
chg1218_tct_hhinc_pct = mean(chg1218_tct_hhinc_pct)
))
newdata_unempl$unemplP <- predict(predmodel, newdata = newdata_unempl, type = "response")
# Get SE
newdata_unempl <- cbind(newdata_unempl, predict(predmodel, newdata = newdata_unempl, type = "link",
se = TRUE))
newdata_unempl <- within(newdata_unempl, {
PredictedProb <- plogis(fit)
LL <- plogis(fit - (1.96 * se.fit))
UL <- plogis(fit + (1.96 * se.fit))
})
# Plot
ggplot(newdata_unempl, aes(x = tct_unemp12, y = PredictedProb)) +
labs(title = "Predicted probability of tract gentrification by percent unemployed",
x = "% unemployed",
y = "Predicted probability") +
geom_ribbon(aes(ymin = LL, ymax = UL), alpha = 0.2) +
geom_line(size = 1) +
scale_x_continuous(breaks = seq(0, 15, 5), limits = c(0, 15)) +
scale_y_continuous(breaks = seq(0, 1, 0.20), limits = c(0, 1))
#
# Model predictions: With BA ------------------------------------------------------
#
newdata_withba <- with(rundata@data,
data.frame(tct_multunit12 = mean(tct_multunit12),
tct_vacant12 = mean(tct_vacant12),
tct_newbuild18 = mean(tct_newbuild18),
chg1218_tct_singfam = mean(chg1218_tct_singfam),
chg1218_tct_renters = mean(chg1218_tct_renters),
chg1218_tct_medhome_pct = mean(chg1218_tct_medhome_pct),
chg1218_tct_medrent_pct = mean(chg1218_tct_medrent_pct),
chg1218_tct_housdens = mean(chg1218_tct_housdens),
chg1218_tct_popgrowth = mean(chg1218_tct_popgrowth),
tct_rentburd12 = mean(tct_rentburd12),
tct_diffhou12 = mean(tct_diffhou12),
tct_transit12 = mean(tct_transit12),
tct_unemp12 = mean(tct_unemp12),
tct_inpov12 = mean(tct_inpov12),
chg1218_tct_withba = seq(0, 25, by = 5),
chg1218_tct_nonhispwh = mean(chg1218_tct_nonhispwh),
chg1218_tct_nonfam = mean(chg1218_tct_nonfam),
chg1218_tct_hhinc_pct = mean(chg1218_tct_hhinc_pct)
))
newdata_withba$withbaP <- predict(predmodel, newdata = newdata_withba, type = "response")
# Get SE
newdata_withba <- cbind(newdata_withba, predict(predmodel, newdata = newdata_withba, type = "link",
se = TRUE))
newdata_withba <- within(newdata_withba, {
PredictedProb <- plogis(fit)
LL <- plogis(fit - (1.96 * se.fit))
UL <- plogis(fit + (1.96 * se.fit))
})
# Plot
ggplot(newdata_withba, aes(x = chg1218_tct_withba, y = PredictedProb)) +
labs(title = "Predicted probability of tract gentrification by percent change in population with BA",
x = "% change in population with BA",
y = "Predicted probability") +
geom_ribbon(aes(ymin = LL, ymax = UL), alpha = 0.2) +
geom_line(size = 1) +
scale_x_continuous(breaks = seq(0, 25, 5), limits = c(0, 25)) +
scale_y_continuous(breaks = seq(0, 1, 0.20), limits = c(0, 1))
#
# Model predictions: Non Hispanic white ------------------------------------------------------
#
newdata_nonhispw <- with(rundata@data,
data.frame(tct_multunit12 = mean(tct_multunit12),
tct_vacant12 = mean(tct_vacant12),
tct_newbuild18 = mean(tct_newbuild18),
chg1218_tct_singfam = mean(chg1218_tct_singfam),
chg1218_tct_renters = mean(chg1218_tct_renters),
chg1218_tct_medhome_pct = mean(chg1218_tct_medhome_pct),
chg1218_tct_medrent_pct = mean(chg1218_tct_medrent_pct),
chg1218_tct_housdens = mean(chg1218_tct_housdens),
chg1218_tct_popgrowth = mean(chg1218_tct_popgrowth),
tct_rentburd12 = mean(tct_rentburd12),
tct_diffhou12 = mean(tct_diffhou12),
tct_transit12 = mean(tct_transit12),
tct_unemp12 = mean(tct_unemp12),
tct_inpov12 = mean(tct_inpov12),
chg1218_tct_withba = mean(chg1218_tct_withba),
chg1218_tct_nonhispwh = seq(0, 20, by = 5),
chg1218_tct_nonfam = mean(chg1218_tct_nonfam),
chg1218_tct_hhinc_pct = mean(chg1218_tct_hhinc_pct)
))
newdata_nonhispw$nonhispP <- predict(predmodel, newdata = newdata_nonhispw, type = "response")
# Get SE
newdata_nonhispw <- cbind(newdata_nonhispw, predict(predmodel, newdata = newdata_nonhispw, type = "link",
se = TRUE))
newdata_nonhispw <- within(newdata_nonhispw, {
PredictedProb <- plogis(fit)
LL <- plogis(fit - (1.96 * se.fit))
UL <- plogis(fit + (1.96 * se.fit))
})
# Plot
ggplot(newdata_nonhispw, aes(x = chg1218_tct_nonhispwh, y = PredictedProb)) +
labs(title = "Predicted probability of tract gentrification by percent change in non-Hispanic white population",
x = "% change in non-Hispanic white population",
y = "Predicted probability") +
geom_ribbon(aes(ymin = LL, ymax = UL), alpha = 0.2) +
geom_line(size = 1) +
scale_x_continuous(breaks = seq(0, 20, 5), limits = c(0, 20)) +
scale_y_continuous(breaks = seq(0, 1, 0.20), limits = c(0, 1))
|
/src/gentri/05_spatialBinomial.R
|
permissive
|
uva-bi-sdad/fairfax
|
R
| false | false | 34,805 |
r
|
chg1218_tct_medrent_pct = mean(chg1218_tct_medrent_pct),
chg1218_tct_housdens = mean(chg1218_tct_housdens),
chg1218_tct_popgrowth = mean(chg1218_tct_popgrowth),
tct_rentburd12 = mean(tct_rentburd12),
tct_diffhou12 = mean(tct_diffhou12),
tct_transit12 = mean(tct_transit12),
tct_unemp12 = mean(tct_unemp12),
tct_inpov12 = mean(tct_inpov12),
chg1218_tct_withba = mean(chg1218_tct_withba),
chg1218_tct_nonhispwh = seq(0, 20, by = 5),
chg1218_tct_nonfam = mean(chg1218_tct_nonfam),
chg1218_tct_hhinc_pct = mean(chg1218_tct_hhinc_pct)
))
newdata_nonhispw$nonhispP <- predict(predmodel, newdata = newdata_nonhispw, type = "response")
# Get SE
newdata_nonhispw <- cbind(newdata_nonhispw, predict(predmodel, newdata = newdata_nonhispw, type = "link",
se = TRUE))
newdata_nonhispw <- within(newdata_nonhispw, {
PredictedProb <- plogis(fit)
LL <- plogis(fit - (1.96 * se.fit))
UL <- plogis(fit + (1.96 * se.fit))
})
# Plot
ggplot(newdata_nonhispw, aes(x = chg1218_tct_nonhispwh, y = PredictedProb)) +
labs(title = "Predicted probability of tract gentrification by percent change in non-Hispanic white population",
x = "% change in non-Hispanic white population",
y = "Predicted probability") +
geom_ribbon(aes(ymin = LL, ymax = UL), alpha = 0.2) +
geom_line(size = 1) +
scale_x_continuous(breaks = seq(0, 20, 5), limits = c(0, 20)) +
scale_y_continuous(breaks = seq(0, 1, 0.20), limits = c(0, 1))
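#
# Added sketch (not part of the original analysis): the four prediction blocks above repeat the
# same newdata -> predict -> plogis() confidence-band pattern. Assuming predmodel is the fitted
# logistic model used above, a small helper could factor this out:
predict_with_ci <- function(model, newdata, level = 0.95) {
  # predictions and standard errors on the link scale
  pred <- predict(model, newdata = newdata, type = "link", se.fit = TRUE)
  z <- qnorm(1 - (1 - level) / 2)
  # back-transform to probabilities with a Wald-type interval
  newdata$PredictedProb <- plogis(pred$fit)
  newdata$LL <- plogis(pred$fit - z * pred$se.fit)
  newdata$UL <- plogis(pred$fit + z * pred$se.fit)
  newdata
}
# e.g. newdata_houseprice <- predict_with_ci(predmodel, newdata_houseprice)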
|
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
navbarPage("Groundwater Data", id = 'nav', collapsible = TRUE, position = "fixed-top",
# Sidebar with a slider input for number of bins
tabPanel("Interactive map",
div(class="outer",
tags$head(
# Include custom CSS
includeCSS("styles.css")
),
absolutePanel(id = "controls", class = "panel panel-default",
fixed = TRUE, draggable = TRUE, style="z-index:500;",
top = 60, right = "auto", left = 60, bottom = "auto", width = 500,
h2("Data explorer"), height = 650,
sliderInput("bins",
"Number of bins:",
min = 1,
max = 50,
value = 30),
plotOutput("plot.bounds", height = "200px"),
plotOutput("plot.clicked", height = "200px")
),
                        # Leaflet map
leafletOutput("map", height = "100%", width = "100%")
)
),
tabPanel("Data",
dataTableOutput("data")
)
)
))
|
/ui.R
|
no_license
|
p-ortega/hydro-webmap
|
R
| false | false | 2,003 |
r
|
|
library(plyr)
##the following run.analysis function will work through the full script to:
##1.data download
##2. merging training and test datasets & returning merged dataframes
##3. extracting the mean and standard deviation of each measurement
##4. naming activities with descriptive names
##5. descriptive column name for subjects
##6. combining dataframes into one
##7. creating a tidy dataset
##8. writing the tidy dataset into a csv
run.analysis = function() {
##1.data download
download.data()
##2. merging training and test datasets & returning merged dataframes
merged <- merge.datasets()
  ##3. extracting the mean and standard deviation of each measurement
cx <- extract.mean.and.std(merged$x)
  ##4. naming activities with descriptive names
cy <- name.activities(merged$y)
##5. descriptive column name for subjects
colnames(merged$subject) <- c("subject")
  ##6. combining dataframes into one
combined <- bind.data(cx, cy, merged$subject)
##7. creating a tidy dataset
tidy <- create.tidy.dataset(combined)
##8. writing the tidy dataset into a csv
write.csv(tidy, "UCI_HAR_tidy.csv", row.names=FALSE)
}
download.data = function() {
  ##the following function creates the folder GettingAndCleaningData after checking whether it already exists
if (!file.exists("GettingAndCleaningData")) {
message("Creating data directory")
dir.create("GettingAndCleaningData")
}
  ##the following function downloads the data into GettingAndCleaningData and unzips it into the folder UCI HAR Dataset
if (!file.exists("GettingAndCleaningData/UCI HAR Dataset")) {
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
message("Downloading data")
download.file(fileURL, destfile="GettingAndCleaningData/UCI_HAR_data.zip", method="libcurl")
unzip("./GettingAndCleaningData/UCI_HAR_data.zip", exdir="./GettingAndCleaningData")
}
list.files(path="GettingAndCleaningData")
list.files(path = "GettingAndCleaningData/UCI HAR Dataset")
}
merge.datasets = function() {
"Merge training and test datasets"
  ##the following calls read and store the data in variables for processing
message("reading X_train.txt")
training.x <- read.table("GettingAndCleaningData/UCI HAR Dataset/train/X_train.txt")
message("reading y_train.txt")
training.y <- read.table("GettingAndCleaningData/UCI HAR Dataset/train/y_train.txt")
message("reading subject_train.txt")
training.subject <- read.table("GettingAndCleaningData/UCI HAR Dataset/train/subject_train.txt")
message("reading X_test.txt")
test.x <- read.table("GettingAndCleaningData/UCI HAR Dataset/test/X_test.txt")
message("reading y_test.txt")
test.y <- read.table("GettingAndCleaningData/UCI HAR Dataset/test/y_test.txt")
message("reading subject_test.txt")
test.subject <- read.table("GettingAndCleaningData/UCI HAR Dataset/test/subject_test.txt")
##the following function merges the X,Y and subject of both the training and test datasets
merged.x <- rbind(training.x, test.x)
merged.y <- rbind(training.y, test.y)
merged.subject <- rbind(training.subject, test.subject)
  ## the following returns the merged datasets as a list
list(x=merged.x, y=merged.y, subject=merged.subject)
}
extract.mean.and.std = function(df) {
  ##this function reads the features table and returns the mean and standard deviation columns of each measurement when given the dataset (x values)
features <- read.table("GettingAndCleaningData/UCI HAR Dataset/features.txt")
mean.col <- sapply(features[,2], function(x) grepl("mean()", x, fixed=T))
std.col <- sapply(features[,2], function(x) grepl("std()", x, fixed=T))
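  # Added note: with fixed=TRUE the patterns match the literal strings "mean()" and "std()",
  # so a feature such as "tBodyAcc-mean()-X" is kept while "fBodyAcc-meanFreq()-X" is excluded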
edf <- df[, (mean.col | std.col)]
colnames(edf) <- features[(mean.col | std.col), 2]
edf
}
name.activities = function(df) {
  ##function to give each activity in the dataset a descriptive name
colnames(df) <- "activity"
df$activity[df$activity == 1] = "WALKING"
df$activity[df$activity == 2] = "WALKING_UPSTAIRS"
df$activity[df$activity == 3] = "WALKING_DOWNSTAIRS"
df$activity[df$activity == 4] = "SITTING"
df$activity[df$activity == 5] = "STANDING"
df$activity[df$activity == 6] = "LAYING"
df
}
bind.data <- function(x, y, subjects) {
##function to combine mean-std values (x), activities (y) and subjects into one dataframe
cbind(x, y, subjects)
}
create.tidy.dataset = function(df) {
  ##this function takes the x, y and subject data and creates a separate dataset with the average of each variable for each activity and subject
tidy <- ddply(df, .(subject, activity), function(x) colMeans(x[,1:60]))
tidy
}
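# Usage note (added): source this file and call run.analysis(); it downloads the data if needed
# and writes UCI_HAR_tidy.csv to the working directory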
|
/run_analysis.R
|
no_license
|
LaithAdass/GettingAndCleaningData
|
R
| false | false | 4,588 |
r
|
|
my.RCircos.Set.Cytoband.data<-function(cyto.band.info)
{
	# Reset colors for chromosome bands. Use yellow color for unknown
# ______________________________________________________________
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
stain2color <- as.character(cyto.band.info$Stain);
band.color <- rep(colors()[652], length(stain2color));
stains <- c("gneg", "acen", "stalk", "gvar", "gpos", "gpos100",
"gpos75", "gpos66", "gpos50", "gpos33", "gpos25");
color.index <- c(1, 552, 615, 418, 24, 24, 193, 203, 213, 223, 233);
for(a.stain in 1:length(stains))
{
bands <- which(stain2color==stains[a.stain]);
if(length(bands)>0)
{ band.color[bands] <- colors()[color.index[a.stain]]; }
}
cyto.band.info["BandColor"] <- band.color;
	# Assign colors to chromosome highlights. There are 50 colors in
	# total and the last 26 are reserved for future use.
# ___________________________________________________________
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
chrom.color <- c(552, 574, 645, 498, 450, 81, 26, 584, 524, 472,
32, 57, 615, 635, 547, 254, 100, 72, 630, 589,
8, 95, 568, 52);
chrom2color <- as.character(cyto.band.info$Chromosome);
chromosomes <- unique(chrom2color);
# In case of multiple ideogram plot, recycle the colors
# __________________________________________________________
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
num.chrom <- length(chromosomes);
num.color <- length(chrom.color);
if(num.chrom>num.color)
{
recycle.time <- floor(num.chrom/num.color);
if(recycle.time>1)
{ chrom.color <- rep(chrom.color, recycle.time); }
remains <- num.chrom%%num.color
if(remains > 0)
{ chrom.color <- c(chrom.color, chrom.color[1:remains]); }
}
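	# Added example: with 30 chromosomes and the 24 colors above, recycle.time is 1 and
	# remains is 6, so the first six colors are reused to give every chromosome a color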
for(a.chr in 1:length(chromosomes))
{
rows <- which(chrom2color==chromosomes[a.chr]);
if(length(rows)>0)
{ chrom2color[rows] <- colors()[chrom.color[a.chr]]; }
}
cyto.band.info["ChrColor"] <- chrom2color;
# Total base pairs and relative length of each band
# __________________________________________________________
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
plot.par <- RCircos.Get.Plot.Parameters();
cyto.band.info$ChromStart <- as.numeric(cyto.band.info$ChromStart);
cyto.band.info$ChromEnd <- as.numeric(cyto.band.info$ChromEnd);
band.len <- cyto.band.info$ChromEnd - cyto.band.info$ChromStart;
cyto.band.info["Length"] <- band.len;
cyto.band.info["Unit"]<- round(band.len/plot.par$base.per.unit, digits=0);
# Relative locations of each band in clockwise
# __________________________________________________________
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Relative.Loc <- cyto.band.info$Unit;
for(i in 2:length(Relative.Loc))
{ Relative.Loc[i] <- Relative.Loc[i] + Relative.Loc[i-1]; }
cyto.band.info["Location"] <- Relative.Loc;
if( plot.par$chrom.paddings>0)
{
chroms <- unique(cyto.band.info$Chromosome);
chroms <- chroms[(chroms==chroms[1])==F];
num.pad <- plot.par$chrom.paddings;
for(a.chr in 1:length(chroms))
{
index <- grep(paste(chroms[a.chr], "$", sep=""), cyto.band.info$Chromosome);
cyto.band.info$Location[index] <- num.pad + cyto.band.info$Location[index];
num.pad <- num.pad + plot.par$chrom.paddings;
}
}
print(cyto.band.info["Location"])
# Put the cyto.band.info data in RCircos environment
# ______________________________________________________
# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
RCircosEnvironment <- NULL;
RCircosEnvironment <- get("RCircos.Env", envir=globalenv());
RCircosEnvironment[["RCircos.Cytoband"]] <- cyto.band.info;
}
|
/RCircos.Set.Cytoband.data.R
|
no_license
|
wangpanqiao/GenomeCircos
|
R
| false | false | 3,683 |
r
|
|
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 550062336L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827175-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false | false | 487 |
r
|
|
## Percentage of stations by number of available bikes
require(ggplot2)
require(ggpubr)
d <- read.csv("MEVO_DAILY_BIKES.csv", sep = ';', header=T, na.string="NA");
##rains <- read.csv("mevo_rains_daily.csv", sep = ';', header=T, na.string="NA");
mstat <- 100 - d$sstat;
d["mstat"] <- mstat;
##d["rains"] <- rains$opad
p1 <- ggplot(d, aes(x = as.Date(day))) +
ggtitle("MEVO stacje: liczba dostępnych rowerów (zstat=0, sstat<2, mstat>1)") +
geom_line(aes(y = zstat, colour = 'zstat'), size=.25) +
geom_line(aes(y = sstat, colour = 'sstat'), size=.25) +
geom_line(aes(y = mstat, colour = 'mstat'), size=.25) +
geom_smooth(aes(x = as.Date(day), y=zstat, colour='zstat'), method="loess", size=.5) +
geom_smooth(aes(x = as.Date(day), y=sstat, colour='sstat'), method="loess", size=.5) +
geom_smooth(aes(x = as.Date(day), y=mstat, colour='mstat'), method="loess", size=.5) +
##geom_line(aes(y = rains, colour = 'rains'), size=1) +
ylab(label="%") +
##theme(legend.title=element_blank()) +
labs(colour = "Rowery: ") +
theme(legend.position="top") +
theme(legend.text=element_text(size=10));
p2 <- ggplot(d, aes(x = as.Date(day))) +
ggtitle("MEVO stacje: 0 dostępnych rowerów (ga =gdynia, gd =gdańsk)") +
geom_line(aes(y = gd0p, colour = 'gd0p'), size=.25) +
geom_line(aes(y = ga0p, colour = 'ga0p'), size=.25) +
geom_smooth(aes(x = as.Date(day), y=gd0p, colour='gd0p'), method="loess", size=.5) +
geom_smooth(aes(x = as.Date(day), y=ga0p, colour='ga0p'), method="loess", size=.5) +
ylab(label="%") +
##theme(legend.title=element_blank()) +
labs(colour = "Rowery: ") +
theme(legend.position="top") +
theme(legend.text=element_text(size=10));
p3 <- ggplot(d, aes(x = as.Date(day))) +
ggtitle("MEVO stacje: max 1 dostępnych rowerów (ga=gdynia, gd=gdańsk)") +
geom_line(aes(y = gd1p, colour = 'gd1p'), size=.25) +
geom_line(aes(y = ga1p, colour = 'ga1p'), size=.25) +
geom_smooth(aes(x = as.Date(day), y=gd1p, colour='gd1p'), method="loess", size=.5) +
geom_smooth(aes(x = as.Date(day), y=ga1p, colour='ga1p'), method="loess", size=.5) +
ylab(label="%") +
##theme(legend.title=element_blank()) +
labs(colour = "Rowery: ") +
theme(legend.position="top") +
theme(legend.text=element_text(size=10));
p4 <- ggplot(d, aes(x = as.Date(day))) +
ggtitle("MEVO stacje (Sopot): liczba dostępnych rowerów (sop0p=0 / sop1p <2)") +
geom_line(aes(y = sop1p, colour = 'sop1p'), size=0.25) +
geom_line(aes(y = sop0p, colour = 'sop0p'), size=0.25) +
geom_smooth(aes(x = as.Date(day), y=sop1p, colour='sop1p'), method="loess", size=.5) +
geom_smooth(aes(x = as.Date(day), y=sop0p, colour='sop0p'), method="loess", size=.5) +
ylab(label="%") +
##theme(legend.title=element_blank()) +
labs(colour = "Rowery: ") +
theme(legend.position="top") +
theme(legend.text=element_text(size=10));
ggarrange(p1, p2, p3, p4, ncol = 2, nrow = 2)
ggsave(file="mevo_daily_zstats.pdf", width=12)
|
/Mevo/mevo_daily_zstats.R
|
no_license
|
hrpunio/Data
|
R
| false | false | 2,952 |
r
|
|
library(tidyverse)
# load raw data
d0 <- read.csv("../data/raw/Baby mental life: Study 1_August 9, 2018_07.29.csv")
# get rid of repeating GPS coordinates
repeat_gps <- d0 %>%
select(starts_with("Location"), ResponseId) %>%
mutate_at(vars(starts_with("Location")),
funs(as.numeric(as.character(.)))) %>%
filter(!is.na(LocationLatitude), !is.na(LocationLongitude)) %>%
count(LocationLatitude, LocationLongitude) %>%
filter(n > 1) %>%
mutate(latlong = paste(LocationLatitude, LocationLongitude, sep = ", "))
d0_norep <- d0 %>%
mutate_at(vars(starts_with("Location")),
funs(as.numeric(as.character(.)))) %>%
mutate(latlong = paste(LocationLatitude, LocationLongitude, sep = ", "),
duplicateGPS = ifelse(latlong %in% repeat_gps$latlong,
TRUE, FALSE)) %>%
select(-latlong)
# remove identifying variables
d1 <- d0_norep %>%
select(-c(IPAddress, starts_with("Recipient"), ExternalReference,
starts_with("Location"), DistributionChannel, MTurkCode)) %>%
data.frame() %>%
mutate(ResponseId = 999:(998+nrow(d0_norep)))
write.csv(d1, "../data/deidentified/baby_mental_life_s1_data.csv")
|
/code/deidentify_s1.R
|
no_license
|
kgweisman/baby_mental_life_ms
|
R
| false | false | 1,185 |
r
|
|
# Functions for BAT 2.1
# By Mans Thulin
# mans@statistikkonsult.com
############
# Format time data
# Switch to next day if machine runs for more than 24 hours
fixTime<-function(timeString)
{
hour<-as.numeric(substring(timeString,1,2))-24
if(hour<10){hourString<-paste(0,hour,sep="")} else { hourString<-as.character(hour) }
return(hourString)
}
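# Added example: fixTime("25:13:42") reads hour 25, subtracts 24 and returns "01"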
# Adjust measurements to control for increasing OD in blank wells
adjustBlanks<-function(OD,adjustAlt,blankID)
{
totalMeanBlank<-0
diffTimes<-OD$Time
# Alternative 1: adjust using the mean of all blanks
if(adjustAlt==1)
{
for(a in blankID)
{
totalMeanBlank<-totalMeanBlank+mean(eval(parse(text=paste("OD$",as.character(a),sep=""))))
}
totalMeanBlank<-totalMeanBlank/length(blankID)
}
# Alternative 2: adjust using only the blanks at the same timepoint
if(adjustAlt==2)
{
matrixOfBlanks<-matrix(NA,dim(OD)[1],length(blankID))
for(a in 1:length(blankID))
{
matrixOfBlanks[,a]<-eval(parse(text=paste("OD$",as.character(blankID[a]),sep="")))
}
totalMeanBlank<-matrix(c(diffTimes,rep(rowMeans(matrixOfBlanks),dim(OD)[2]-1)),dim(OD))
}
OD<-OD-totalMeanBlank
OD$Time<-diffTimes
OD[OD<0]<-0.0001
return(OD)
}
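# Added usage sketch (the well names "A1" and "H12" are hypothetical column names of OD):
# OD <- adjustBlanks(OD, adjustAlt = 2, blankID = c("A1", "H12"))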
referencePlotter<-function(namesList,OD,maskingLower,maskingUpper,calculatedValues,ncols,maskType=1,maskingLowerX=1,maskingUpperX=2)
{
x<-OD$Time
par(mfrow=c(1,ncols),cex=1.1)
# Step through all reference wells:
for(i in 1:length(namesList)){
# Get the name of the well:
whichWell<-namesList[i]
# Mask:
if(maskType==1)
{
# Vertical masking interval:
ODmasked<-eval(parse(text=paste("subset(OD,",maskingLower,"<OD$",as.character(whichWell),")",sep="")))
ODmasked<-eval(parse(text=paste("subset(ODmasked,",maskingUpper,">ODmasked$",as.character(whichWell),")",sep="")))
} else
{
# Horizontal masking interval:
ODmasked<-eval(parse(text=paste("subset(OD,",maskingLowerX,"<=Time)",sep="")))
ODmasked<-eval(parse(text=paste("subset(ODmasked,",maskingUpperX,">=Time)",sep="")))
}
###################################################
# Fit curve
maskedy<-log(eval(parse(text=paste("ODmasked$",as.character(whichWell),sep=""))))
# if(nrow(ODmasked)>0) { m<-lm(maskedy~ODmasked$Time) }
Rnow<-round(cor(ODmasked$Time,maskedy),4)
pointsUsedNow<-dim(ODmasked)[1]
# Save plot for diagnostics
y<-log(eval(parse(text=paste("OD$",as.character(whichWell),sep=""))))
plot(x,y,type="l",lwd=2,main=paste("R=",as.character(Rnow),"in",as.character(whichWell),sep=" "),xlab="Time",ylab="ln(OD)",sub=paste(pointsUsedNow,"points used for estimate",sep=" "))
grid()
points(x,y,type="l",lwd=3)
if(nrow(ODmasked)>0) { points(ODmasked$Time,maskedy,col=2,type="l",lwd=2) }
if(maskType==1) { axis(2,at=log(c(maskingLower,maskingUpper)),col=4,tck=1,labels=rep("",2),lwd=2,lty=3) } else { axis(1,at=c(maskingLowerX,maskingUpperX),col=4,tck=1,labels=rep("",2),lwd=2,lty=3) }
}
}
###################################################
referencePlotter2<-function(refNamesList,namesList,OD,maskingLower,maskingUpper,numRep,calculatedValues)
{
fullNamesList<-c(refNamesList,namesList)
x<-OD$Time
# Step through all reference wells:
for(i in 1:length(fullNamesList)){
if(i>nrow(calculatedValues)) { calculatedValues<-rbind(calculatedValues,rep(NA,nrow(calculatedValues))) }
# Get the name of the well:
whichWell<-fullNamesList[i]
# Add well name to data frame:
calculatedValues$Well[i]<-whichWell
# Mask:
ODmasked<-eval(parse(text=paste("subset(OD,",maskingLower,"<OD$",as.character(whichWell),")",sep="")))
ODmasked<-eval(parse(text=paste("subset(ODmasked,",maskingUpper,">ODmasked$",as.character(whichWell),")",sep="")))
###################################################
# Fit curve
if(nrow(ODmasked)>0)
{
maskedy<-log(eval(parse(text=paste("ODmasked$",as.character(whichWell),sep=""))))
m<-lm(maskedy~ODmasked$Time)
calculatedValues$fittedValue[i]<-m$coefficients[2]
calculatedValues$R[i]<-cor(ODmasked$Time,maskedy)
calculatedValues$pointsUsed[i]<-dim(ODmasked)[1]
# Calculate doubling time
calculatedValues$doubTime[i]<-log(2)/m$coefficients[2]
# Add masking interval to data frame
calculatedValues$mask1[i]<-maskingLower
calculatedValues$mask2[i]<-maskingUpper
calculatedValues$masktype[i]<-1
} else { calculatedValues$R[i]<-10; calculatedValues$warningMessages[i]<- "No growth detected - the well appears to be empty or blank." }
}
# Relative growth rate
refMean<-mean(calculatedValues$fittedValue[1:length(refNamesList)])
calculatedValues$growthRate<-calculatedValues$fittedValue/refMean
# Group averages
calculatedValues$groupMeanDoubTime<-calculatedValues$groupMeanGrowthRate<-calculatedValues$groupGrowthRateSD<-" "
calculatedValues$groupMeanDoubTime[1:length(refNamesList)]<-mean(calculatedValues$doubTime[1:length(refNamesList)])
calculatedValues$groupMeanGrowthRate[1:length(refNamesList)]<-1
calculatedValues$groupGrowthRateSD[1:length(refNamesList)]<-sd(calculatedValues$growthRate[1:length(refNamesList)])
j<-length(refNamesList)
for(i in 1:length(namesList))
{
if((i+j) %% numRep == 0)
{
calculatedValues$groupMeanDoubTime[i+j]<-mean(calculatedValues$doubTime[(i+j-numRep+1):(i+j)],na.rm=TRUE)
calculatedValues$groupMeanGrowthRate[i+j]<-mean(calculatedValues$growthRate[(i+j-numRep+1):(i+j)],na.rm=TRUE)
calculatedValues$groupGrowthRateSD[i+j]<-sd(calculatedValues$growthRate[(i+j-numRep+1):(i+j)],na.rm=TRUE)
}
}
return(calculatedValues)
}
###################################################
referencePlotter3<-function(whichWell,refNamesList,namesList,OD,maskingLower,maskingUpper,numRep,calculatedValues,maskType=1,maskingLowerX=1,maskingUpperX=2)
{
x<-OD$Time
i<-which(calculatedValues$Well==whichWell)
# Mask:
if(maskType<1.5)
{
# Vertical masking interval:
ODmasked<-eval(parse(text=paste("subset(OD,",maskingLower,"<OD$",as.character(whichWell),")",sep="")))
ODmasked<-eval(parse(text=paste("subset(ODmasked,",maskingUpper,">ODmasked$",as.character(whichWell),")",sep="")))
} else
{
# Horizontal masking interval:
ODmasked<-eval(parse(text=paste("subset(OD,",maskingLowerX,"<=Time)",sep="")))
ODmasked<-eval(parse(text=paste("subset(ODmasked,",maskingUpperX,">=Time)",sep="")))
}
###################################################
# Fit curve
maskedy<-log(eval(parse(text=paste("ODmasked$",as.character(whichWell),sep=""))))
m<-lm(maskedy~ODmasked$Time)
calculatedValues$fittedValue[i]<-m$coefficients[2]
calculatedValues$R[i]<-cor(ODmasked$Time,maskedy)
calculatedValues$pointsUsed[i]<-dim(ODmasked)[1]
# Calculate doubling time
calculatedValues$doubTime[i]<-log(2)/m$coefficients[2]
# Add masking interval to data frame
if(maskType==1)
{ calculatedValues$mask1[i]<-maskingLower; calculatedValues$mask2[i]<-maskingUpper } else
{ calculatedValues$mask1[i]<-maskingLowerX; calculatedValues$mask2[i]<-maskingUpperX; calculatedValues$masktype[i]<-2 }
# Relative growth rate
refMean<-mean(calculatedValues$fittedValue[1:length(refNamesList)])
calculatedValues$growthRate<-calculatedValues$fittedValue/refMean
# Group averages
calculatedValues$groupMeanDoubTime[1:length(refNamesList)]<-mean(calculatedValues$doubTime[1:length(refNamesList)])
calculatedValues$groupMeanGrowthRate[1:length(refNamesList)]<-1
calculatedValues$groupGrowthRateSD[1:length(refNamesList)]<-sd(calculatedValues$growthRate[1:length(refNamesList)])
j<-length(refNamesList)
for(k in 1:length(namesList))
{
if((k+j) %% numRep == 0)
{
calculatedValues$groupMeanDoubTime[k+j]<-mean(calculatedValues$doubTime[(k+j-numRep+1):(k+j)])
calculatedValues$groupMeanGrowthRate[k+j]<-mean(calculatedValues$growthRate[(k+j-numRep+1):(k+j)])
calculatedValues$groupGrowthRateSD[k+j]<-sd(calculatedValues$growthRate[(k+j-numRep+1):(k+j)])
}
}
return(calculatedValues)
}
#######################
finalPlotter<-function(namesList,OD,calculatedValues)
{
x<-OD$Time
# Step through all wells:
for(i in 1:length(namesList)){
# Get the name of the well:
whichWell<-namesList[i]
maskingLower<-calculatedValues$mask1[i]
maskingUpper<-calculatedValues$mask2[i]
if(is.na(maskingLower)) { maskingLower<- 10}
if(is.na(maskingUpper)) { maskingUpper<- 11}
# Mask:
if(calculatedValues$masktype[i]<1.5)
{
# Vertical masking interval:
ODmasked<-eval(parse(text=paste("subset(OD,",maskingLower,"<OD$",as.character(whichWell),")",sep="")))
ODmasked<-eval(parse(text=paste("subset(ODmasked,",maskingUpper,">ODmasked$",as.character(whichWell),")",sep="")))
} else
{
# Horizontal masking interval:
ODmasked<-eval(parse(text=paste("subset(OD,",maskingLower,"<=Time)",sep="")))
ODmasked<-eval(parse(text=paste("subset(ODmasked,",maskingUpper,">=Time)",sep="")))
}
###################################################
# Fit curve
maskedy<-log(eval(parse(text=paste("ODmasked$",as.character(whichWell),sep=""))))
# if(nrow(ODmasked)>0) { m<-lm(maskedy~ODmasked$Time) }
Rnow<-round(cor(ODmasked$Time,maskedy),4)
pointsUsedNow<-dim(ODmasked)[1]
# Save plot for diagnostics
y<-log(eval(parse(text=paste("OD$",as.character(whichWell),sep=""))))
plot(x,y,type="l",lwd=2,main=paste("R=",as.character(Rnow),"in",as.character(whichWell),sep=" "),xlab="Time",ylab="ln(OD)",sub=paste(pointsUsedNow,"points used for estimate",sep=" "))
grid()
points(x,y,type="l",lwd=3)
if(nrow(ODmasked)>0) { points(ODmasked$Time,maskedy,col=2,type="l",lwd=2) }
#axis(2,at=log(c(maskingLower,maskingUpper)),col=4,tck=0.025,labels=rep("",2),lwd=2)
}
}
|
/bat-helpers.R
|
permissive
|
mthulin/BAT
|
R
| false | false | 10,994 |
r
|
|
#5 How have emissions from motor vehicle sources changed from 1999–2008 in Baltimore City?
#usage: source("plot5.R")
#output: plot5.png
#clear workspace
rm(list=ls())
#load libraries
library(dplyr)
library(tidyr)
library(ggplot2)
library(magrittr)
#make sure requried packages are installed and loaded
if(!require(dplyr) | !require(ggplot2) | !require(tidyr) | !require(magrittr)){
stop('The required packages not installed')
}
#check if the data files are located in the current working directory
if (!file.exists("summarySCC_PM25.rds")){
stop("Error, data file summarySCC_PM25.rds not found in the current directory")}
if (!file.exists("Source_Classification_Code.rds")){
stop("Error, data file Source_Classification_Code.rds not found in the current directory")}
#read data files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Extract SCC codes for on-road vehicles emissions from EI.Sector
df.vh<-select(SCC,SCC,EI.Sector)%>%
filter(grepl("Vehicles",EI.Sector))
#Extract all Baltomore city emissions data
#Then use SCC codes in "df.vh" to extract Baltimore city vehicle emissions data
#Both subsetting the data and plotting are done via chaining in the code below.
#
#build the plot first, then save it (ggsave must be called after the plot exists)
p5 <- NEI%>%
        filter(fips == "24510")%>%
        .[.$SCC %in% df.vh$SCC,]%>%
        group_by(year)%>%
        summarise(BAL=sum(Emissions))%>%
        ggplot(data = .,aes(as.character(year),BAL))+
        geom_bar(stat="identity")+
        theme_bw() +
        xlab("Year")+
        ylab("Total emissions (in tons)")+
        ggtitle("Emissions from motor vehicle sources in Baltimore city")
print(p5)
ggsave("plot5.png", plot = p5)
|
/plot5.R
|
no_license
|
dynamics77/exdata-data-NEI_data
|
R
| false | false | 1,605 |
r
|
|
# data clean up
data <- read.csv("file.csv")
|
/data_clean.R
|
no_license
|
jonmasingale/VIP_test
|
R
| false | false | 45 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation.R
\name{sim_coin_tosses}
\alias{sim_coin_tosses}
\title{Simulate a sequence of coin tosses}
\usage{
sim_coin_tosses(n_sims, n, prob)
}
\arguments{
\item{n_sims}{The number of simulations}
\item{n}{The length of the coin toss sequence}
\item{prob}{the probability of a head (1)}
}
\value{
A list of length \code{n_sims}, where each
element is a vector of length \code{n} containing a
coin toss sequence: 0 (tails), 1 (heads).
}
\description{
Simulate a sequence of coin tosses
}
\examples{
sim_coin_tosses(n_sims = 2, n = 4, prob = 1)
}
|
/man/sim_coin_tosses.Rd
|
no_license
|
ST541-Fall2018/cointoss
|
R
| false | true | 629 |
rd
|
|
#Creating elegant data visualisations using the "Grammar of Graphics": the case of ggplot2 📈
#install.packages() is a function which downloads and installs packages from CRAN-like repositories or from local files
#The first package to install is raster, which makes it possible to read, write, manipulate, analyze and model spatial data. The package implements basic and high-level functions for raster data and for vector data operations such as intersections:
install.packages("raster")
#The second package to install is RStoolbox, a toolbox for remote sensing image processing and analysis such as calculating spectral indices, principal component transformation, unsupervised and supervised classification or fractional cover analyses:
install.packages("RStoolbox")
#The third package to install is ggplot2, which is a system for declaratively creating graphics, based on "The Grammar of Graphics". The user provides the data, tells 'ggplot2' how to map variables to aesthetics and what graphical primitives to use, and it takes care of the details:
install.packages("ggplot2")
#The fourth package to install is gridExtra, which provides a number of user-level functions to work with "grid" graphics, notably to arrange multiple grid-based plots on a page, and draw tables:
install.packages("gridExtra")
#library() is a function which loads and attaches add-on packages, in this case the previous raster, RStoolbox, ggplot2 and gridExtra packages:
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
#On my computer R must work in the folder named lab, so I use the setwd function to set it as the working directory:
setwd("C:/lab/")
#I use the brick function to import the data from the lab folder (external storage) into R (working memory):
p224r63 <- brick("p224r63_2011_masked.grd")
#Important: the brick function creates a RasterBrick object, a multi-layer raster object typically built from a multi-layer (band) file. The raster function, by contrast, creates a RasterLayer object from scratch, a file, an Extent object, a matrix, an 'image' object, or from a Raster*, Spatial*, im (spatstat), asc or kasc (adehabitat), grf (geoR) or kde object!
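#A minimal sketch (not in the original script) contrasting the two import functions on the same multi-layer file: raster() pulls out a single layer, brick() keeps them all; the band index below is an illustrative assumption:
single_layer <- raster("p224r63_2011_masked.grd", band = 1)
nlayers(p224r63)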
#ggRGB() is a function that calculates RGB color composite RasterLayers for plotting with ggplot2. Optional values for clipping and stretching can be used to enhance the imagery
#Since LANDSAT is equipped with true colour, thermal and multispectral sensors in a complex acquisition system, the graphical visualization of p224r63 in true colors is obtained from the visible spectrum, to which I associate the B1_sre:blue, B2_sre:green and B3_sre:red spectral bands of the initial object
ggRGB(p224r63,3,2,1, stretch="lin")
#This true-color visualization is therefore in RGB and independent of the user's selection of a palette through the colorRampPalette function, because the three layers of interest chosen by the user from B1_sre to B7_sre are combined so that they feed exclusively the red, green and blue channels, matching the true colour sensors of the LANDSAT satellite
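#For contrast, a hedged single-band sketch (my addition; the layer name B1_sre is assumed from the comments above): one layer plotted with a user-chosen palette, which is exactly the manual colour choice the RGB composite makes unnecessary:
clb <- colorRampPalette(c("darkblue", "blue", "lightblue"))(100)
plot(p224r63$B1_sre, col = clb)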
#Since the human eye lacks the thermal and multispectral sensors of a complex acquisition system, the graphical visualization of p224r63 in false color is obtained by substituting the B1_sre:blue spectral band with the B4_sre:near-infrared, B5_sre:mid-infrared, B6_sre:far-infrared or B7_sre:other mid-infrared band, chosen case by case in remote sensing analysis
ggRGB(p224r63,4,3,2, stretch="lin")
#This is the graphical visualization of p224r63_2011 in false color, with the vegetation cover of the Amazon rainforest highlighted through the red channel
#The r, g and b components in the syntax of the plotRGB() or ggRGB() function can be omitted and replaced by the numbers that identify the bands to be displayed through those channels (B1_sre:blue up to B7_sre:other mid-infrared when the satellite is again Landsat) in a single satellite image!
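#A small illustrative sketch of the point above (same data, my addition): the two calls are equivalent, one with positional band numbers and one with the explicit r, g and b channel arguments:
ggRGB(p224r63, 4, 3, 2, stretch = "lin")
ggRGB(p224r63, r = 4, g = 3, b = 2, stretch = "lin")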
#arrangeGrob() sets up a gtable layout to place multiple grobs on a page. In particular grid.arrange() draws on the current device and is useful for organizing the ggRGB elements after simply renaming them:
p1 <- ggRGB(p224r63,3,2,1, stretch="lin")
p2 <- ggRGB(p224r63,4,3,2, stretch="lin")
grid.arrange(p1, p2, nrow = 2)
#Sequence of informatic commands for R_code_ggplot2.r
install.packages("raster")
install.packages("RStoolbox")
install.packages("ggplot2")
install.packages("gridExtra")
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
setwd("C:/lab/")
p224r63 <- brick("p224r63_2011_masked.grd")
ggRGB(p224r63,3,2,1, stretch="lin")
ggRGB(p224r63,4,3,2, stretch="lin")
p1 <- ggRGB(p224r63,3,2,1, stretch="lin")
p2 <- ggRGB(p224r63,4,3,2, stretch="lin")
grid.arrange(p1, p2, nrow = 2)
|
/R_code_ggplot2.r
|
no_license
|
AndreaCapponi/telerilevamento_2021
|
R
| false | false | 4,866 |
r
|
#Creating elegant data visualisations using the "Grammar of Graphics": the case of ggplot2 📈
#install.packages() is a function which downloads and installs packages from CRAN-like repositories or from local files
#The first package to install is raster, which makes it possible to read, write, manipulate, analyze and model spatial data. The package implements basic and high-level functions for raster data and for vector data operations such as intersections:
install.packages("raster")
#The second package to install is RStoolbox, a toolbox for remote sensing image processing and analysis such as calculating spectral indices, principal component transformation, unsupervised and supervised classification or fractional cover analyses:
install.packages("RStoolbox")
#The third package to install is ggplot2, which is a system for declaratively creating graphics, based on "The Grammar of Graphics". The user provides the data, tells 'ggplot2' how to map variables to aesthetics and what graphical primitives to use, and it takes care of the details:
install.packages("ggplot2")
#The fourth package to install is gridExtra, which provides a number of user-level functions to work with "grid" graphics, notably to arrange multiple grid-based plots on a page, and draw tables:
install.packages("gridExtra")
#library() is a function which loads and attaches add-on packages, in this case the previously installed raster, RStoolbox, ggplot2 and gridExtra packages:
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
#On my computer R must use the folder simply named lab, so I use the setwd() function to associate R with the lab folder:
setwd("C:/lab/")
#I use the brick() function to import the data from the lab folder (external) into R (internal):
p224r63 <- brick("p224r63_2011_masked.grd")
#Important: the brick function creates a RasterBrick object, a multi-layer raster object typically built from a multi-layer (band) file. The raster function, by contrast, creates a RasterLayer object from scratch, a file, an Extent object, a matrix, an 'image' object, or from a Raster*, Spatial*, im (spatstat), asc or kasc (adehabitat), grf (geoR) or kde object!
#ggRGB() is a function that calculates RGB color composite RasterLayers for plotting with ggplot2. Optional values for clipping and stretching can be used to enhance the imagery
#Since LANDSAT is equipped with true colour, thermal and multispectral sensors in a complex acquisition system, the graphical visualization of p224r63 in true colors is obtained from the visible spectrum, to which I associate the B1_sre:blue, B2_sre:green and B3_sre:red spectral bands of the initial object
ggRGB(p224r63,3,2,1, stretch="lin")
#This true-color visualization is therefore in RGB and independent of the user's selection of a palette through the colorRampPalette function, because the three layers of interest chosen by the user from B1_sre to B7_sre are combined so that they feed exclusively the red, green and blue channels, matching the true colour sensors of the LANDSAT satellite
#Since the human eye lacks the thermal and multispectral sensors of a complex acquisition system, the graphical visualization of p224r63 in false color is obtained by substituting the B1_sre:blue spectral band with the B4_sre:near-infrared, B5_sre:mid-infrared, B6_sre:far-infrared or B7_sre:other mid-infrared band, chosen case by case in remote sensing analysis
ggRGB(p224r63,4,3,2, stretch="lin")
#This is the graphical visualization of p224r63_2011 in false color, with the vegetation cover of the Amazon rainforest highlighted through the red channel
#The r, g and b components in the syntax of the plotRGB() or ggRGB() function can be omitted and replaced by the numbers that identify the bands to be displayed through those channels (B1_sre:blue up to B7_sre:other mid-infrared when the satellite is again Landsat) in a single satellite image!
#arrangeGrob() sets up a gtable layout to place multiple grobs on a page. In particular grid.arrange() draws on the current device and is useful for organizing the ggRGB elements after simply renaming them:
p1 <- ggRGB(p224r63,3,2,1, stretch="lin")
p2 <- ggRGB(p224r63,4,3,2, stretch="lin")
grid.arrange(p1, p2, nrow = 2)
#Sequence of informatic commands for R_code_ggplot2.r
install.packages("raster")
install.packages("RStoolbox")
install.packages("ggplot2")
install.packages("gridExtra")
library(raster)
library(RStoolbox)
library(ggplot2)
library(gridExtra)
setwd("C:/lab/")
p224r63 <- brick("p224r63_2011_masked.grd")
ggRGB(p224r63,3,2,1, stretch="lin")
ggRGB(p224r63,4,3,2, stretch="lin")
p1 <- ggRGB(p224r63,3,2,1, stretch="lin")
p2 <- ggRGB(p224r63,4,3,2, stretch="lin")
grid.arrange(p1, p2, nrow = 2)
|