| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0–6.46M | large_string, lengths 3–331 | large_string, 2 classes | large_string, lengths 5–125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4–6.46M | large_string, 75 classes | string, lengths 0–6.46M |
#################################################################
## Calculate change in suitable area (%) for land-cover scenarios
#################################################################
library(raster)

# Input rasters: initial and scenario land cover, habitat suitability, and the species land-use mask
inimap <- raster('C:/Users/Vivienne Groner/Desktop/Vivienne/2020/SDM/Clivia_miniata_2020/predictors/MODIS/scen000/MODIS_LCType.grd')
endmap <- raster('C:/Users/Vivienne Groner/Desktop/Vivienne/2020/SDM/Clivia_miniata_2020/predictors/MODIS/scen002/MODIS_LCType_scen002_year_2045.grd')
HS_map <- raster("C:/Users/Vivienne Groner/Desktop/Vivienne/2020/SDM/Clivia_miniata_2020/output/scen000/output_SDM/Clivia_miniata_HSmap.asc")
HS_map1 <- HS_map > 0.264   # binarise habitat suitability at the threshold
species_mask <- raster('C:/Users/Vivienne Groner/Desktop/Vivienne/2020/SDM/Clivia_miniata_2020/predictors/MODIS/Clivia_all_LU_mask.grd')
species_mask[species_mask == 0] <- NA

# Total number of land cells in the study area
landcells <- cellStats(!is.na(HS_map1), stat = 'sum')

# Crop the land-cover maps to the habitat-suitability extent and keep suitable classes only
inimap_c <- crop(inimap, HS_map1)
endmap_c <- crop(endmap, HS_map1)
inimap_c[inimap_c > 0] <- 1
endmap_c[endmap_c == 0] <- NA

# Suitable area (cell counts and % of land cells) at the start and end of the scenario
inimap_sum <- cellStats(!is.na(inimap_c * species_mask), stat = 'sum')
inimap_sum
inimap_sum_rel <- inimap_sum * 100 / landcells
inimap_sum_rel
endmap_sum <- cellStats(!is.na(endmap_c * species_mask), stat = 'sum')
endmap_sum
endmap_sum_rel <- endmap_sum * 100 / landcells
endmap_sum_rel
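# Sketch (not in the original script): report the change in suitable area
# directly, using the percentages computed above.
area_change_pct <- endmap_sum_rel - inimap_sum_rel
area_change_pct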
#############################################################################################
#### END ###
#############################################################################################
| /calc_suitable_landcover.R | no_license | vgro/Groner_medicinal_plants | R | false | false | 1,452 | r |
\name{summary.OptimMCL.HCAR}
\alias{summary.OptimMCL.HCAR}
\alias{ranef.HCAR}
\title{Summarise the output from the iterative procedure of maximising the
Monte Carlo likelihood.
}
\description{The summary function summarises the output from the
function \code{\link{OptimMCL.HCAR}}, and \code{ranef.HCAR} calculates the empirical Bayesian estimates of the random effects given the Monte Carlo maximum likelihood estimates.
}
\usage{
\method{summary}{OptimMCL.HCAR}(object, trace.all = TRUE, mc.covar =
TRUE, ...)
ranef.HCAR(pars, data)
}
\arguments{
\item{object}{ an OptimMCL object returned by \code{\link{OptimMCL.HCAR}}.}
  \item{trace.all}{ a logical value indicating whether the input object returned
  by \code{\link{OptimMCL.HCAR}} contains results from all iterations or not.}
  \item{mc.covar}{ if TRUE, the estimated covariance matrix of the
  MC-MLE is returned.}
  \item{...}{arguments passed to or from other methods.}
  \item{pars}{the parameter values for calculating the empirical Bayesian estimates of the random effects; a list or environment of data, for example the same as described in \code{\link{sim.HCAR}}.} % FIXME
  \item{data}{A list or an environment containing the variables, the same as described in \code{\link{sim.HCAR}}.} % FIXME
}
\value{
The summary function returns a list containing the following objects:
\describe{
    \item{MC.mle}{ the final MC-MLE}
    \item{N.iter}{ the total number of iterations}
    \item{total.time}{ the total time elapsed}
    \item{convergence}{ if TRUE, the procedure has converged}
    \item{hessian}{ the Hessian at the MC-MLE if given; the default is NULL}
    \item{mc.covar}{ the estimated covariance matrix of the MC-MLE if
    given; the default is NULL}
    \item{mc.samples}{the Monte Carlo sample sizes used in the initial
    stage and after the first convergence.}
  }
The \code{ranef.HCAR} function returns a data frame object containing the estimated random effects and their corresponding standard deviations.
}
\author{
Zhe Sha \email{zhesha1006@gmail.com}
}
\seealso{
\code{\link{mcl.HCAR}}, \code{\link{sim.HCAR}}, \code{\link{OptimMCL.HCAR}}
}
\examples{
## See examples for OptimMCL
}
\keyword{HCAR}
\keyword{Monte Carlo likelihood}
\keyword{Spatial}
| /mclcar_0.1-8/man/summary.OptimMCL.HCAR.Rd | no_license | shazhe/mclcar | R | false | false | 2,230 | rd |
##overall goal here is simply to examine how household energy usage varies over a 2-day period
##in February, 2007. Your task is to reconstruct the following plots below, all of which were
##constructed using the base plotting system.
getwd()
setwd("~/JH Data Science/Exploratory Data Analysis/Week1 Peer Graded Assignment Course Project")
getwd()
##?read.table
household_power_consumption <- read.table("household_power_consumption.txt",
sep = ";",
header = TRUE)
household_power_consumption$Date <- as.Date(household_power_consumption$Date, format="%d/%m/%Y")
View(household_power_consumption)
## Summary of the data frame household_power_consumption
head(household_power_consumption)
tail(household_power_consumption)
nrow(household_power_consumption)
ncol(household_power_consumption)
names(household_power_consumption)
## Reading Subset of Data for two days
household_power_consumption1 <- household_power_consumption[(household_power_consumption$Date=="2007-02-01") | (household_power_consumption$Date=="2007-02-02"),]
## Summary of household_power_consumption1
head(household_power_consumption1)
tail(household_power_consumption1)
nrow(household_power_consumption1)
ncol(household_power_consumption1)
names(household_power_consumption1)
## Convert the global active power, global reactive power, sub-metering, and voltage columns to numeric
household_power_consumption1$Global_active_power <- as.numeric(as.character(household_power_consumption1$Global_active_power))
household_power_consumption1$Global_reactive_power <- as.numeric(as.character(household_power_consumption1$Global_reactive_power))
household_power_consumption1$Voltage <- as.numeric(as.character(household_power_consumption1$Voltage))
## Build a POSIXct timestamp from the Date and Time columns
household_power_consumption1 <- transform(household_power_consumption1, timestamp = as.POSIXct(paste(Date, Time)))
household_power_consumption1$Sub_metering_1 <- as.numeric(as.character(household_power_consumption1$Sub_metering_1))
household_power_consumption1$Sub_metering_2 <- as.numeric(as.character(household_power_consumption1$Sub_metering_2))
household_power_consumption1$Sub_metering_3 <- as.numeric(as.character(household_power_consumption1$Sub_metering_3))
### Code for Plot 2
plot2 <- function() {
plot(household_power_consumption1$timestamp,household_power_consumption1$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
cat("plot2.png has been saved in", getwd())
}
### Call the plot2() function
plot2()
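### Alternative sketch (not part of the original assignment): open the PNG device
### directly instead of copying the screen device, which avoids differences
### between on-screen and file rendering.
plot2_direct <- function() {
  png("plot2.png", width = 480, height = 480)
  plot(household_power_consumption1$timestamp, household_power_consumption1$Global_active_power,
       type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
  dev.off()
  cat("plot2.png has been saved in", getwd())
}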
| /plot2.R | no_license | kum8ar/Coursera_JHU_ExploratoryDS_Project1 | R | false | false | 2,665 | r |
# Load packages, reserve a core for OS and other programs
library(parallel)
library(doParallel)
library(tidyverse)
library(rvest)
library("jiebaR")
cl <- makeCluster(detectCores() - 1) # convention to leave 1 core for OS
registerDoParallel(cl)
# Scrape data
content<-sapply(PDWebpageList, function(url){
tryCatch(
url %>%
as.character() %>%
read_html() %>%
html_nodes(xpath='//*[contains(concat( " ", @class, " " ), concat( " ", "c_c", " " ))]') %>%
html_text(),
    finally = print(url),  # print each URL as it is processed
error = function(e){NA} # a function that returns NA regardless of what it's passed
)
})
# Clean scraped data
content <- gsub(".* \t\t\t\t ", "", content)
content <- gsub("\r\n\r\n", "", content)
content <- trimws(content)
content <- content[!is.na(content)]  # dplyr::filter() expects a data frame, so subset the character vector directly
#Graph Li Wenliang time trend data
liSearch<-read.csv("liwenliang_hits.csv")
liSearch <- filter(liSearch, is.na(search)==FALSE)
liSearch$date <- as.Date(liSearch$date, "%m/%d/%Y")
liSearch <- mutate(liSearch, logSearch = log(search))
ggplot(liSearch, aes(date, logSearch))+geom_line()+labs(x="Date since December 1, 2019", y="log(Number of Baidu Searches for Li Wenliang)")
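# Cleanup sketch (not in the original script): the cluster registered above is never
# used by sapply() and is left running; it should be shut down when the work is done.
parallel::stopCluster(cl)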
| /PDailyScratch.R | no_license | juan-qian/814-Political_Identity | R | false | false | 1,175 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Pwr.r
\name{Pwr}
\alias{Pwr}
\title{Power for single-stage parallel-group RCT.}
\usage{
Pwr(pc, pt, Nc, alp = 0.05)
}
\arguments{
\item{pc}{Response probability in control group.}
\item{pt}{Response probability in treatment group.}
\item{Nc}{Sample size per group.}
\item{alp}{Significance level (default: 0.05).}
}
\value{
The estimated power.
}
\description{
\code{Pwr} calculates the power in an equal-size group
two-arm randomized clinical trial with a binary response.
}
\examples{
Pwr(0.2,0.3,389,0.05)
}
\references{
Ahn, C., Heo, M. and Zhang, S. \emph{Sample Size Calculations for Clustered and Longitudinal Outcomes in Clinical Research}.
CRC Press, 2014.
}
\seealso{
\code{\link{Nct}}.
}
\author{
Arsenio Nhacolo
}
| /man/Pwr.Rd | no_license | arsenionhacolo/InferenceBEAGSD | R | false | true | 813 | rd |
# Assignment 6 Visualizing Data
# Accessing the Data API
library(wbstats)
library(ggplot2)
library(tidyr)
library(dplyr)
library(maps)
library(RColorBrewer)
options(scipen = 999)
# 1. Loading and Understanding the Data
#View(wbindicators())
top_10_co2_countries <- wb(country = "countries_only", indicator = c("EN.ATM.CO2E.KT"), mrv = 1) %>% # Access World Bank data
arrange(-value) %>%
head(10)
# 2. CO2 Emissions by Country
top_10_co2_plot <- ggplot(top_10_co2_countries) +
geom_col(mapping = aes(x = reorder(iso3c, value), y = value)) +
labs(title = "Top 10 Countries by CO2 Emissions", x = "Country (iso3)", y = top_10_co2_countries[["indicator"]])
############################################################################################################################
# 3. US Income Equality over Time
us_income_years <- wb(country = "USA", # Access World Bank data
indicator = c("SI.DST.FRST.20","SI.DST.04TH.20","SI.DST.10TH.10"),
mrv = 20, return_wide = TRUE) %>%
mutate(date = as.numeric(date), # Change dates into numeric value
"Top 10% of Pop." = SI.DST.10TH.10,
"Bottom 40% of Pop." = SI.DST.04TH.20 + SI.DST.FRST.20 ) %>% # Make column names readable
gather(key = category , value = "percent", c("Top 10% of Pop.", "Bottom 40% of Pop."))
us_wealth_plot <- ggplot(us_income_years, mapping = aes(x = date, y = percent, color = category)) +
geom_point() +
geom_line() +
labs(
title = "US Wealth Equality over Time", # Adding labels to the visualizations
x = "Year", # and changning legend title to group
y = "Percentage of income held",
color = "Group"
)
###################################################################################################################
# 4. Health Expenditures by Country
high_income_countries <- wbcountries() %>% # Accessing world bank to filter for
filter(income == "High income") %>% # high income countries
pull(iso3c)
updated_cache <- wbcache()
health_costs <- wb(country = high_income_countries, # Created data frame
indicator = c("SH.XPD.OOPC.PC.CD","SH.XPD.PVTD.PC.CD","SH.XPD.GHED.PC.CD","SH.XPD.CHEX.PC.CD"), # using the high income
mrv = 1, cache = updated_cache) %>% # data frame and updated cache
arrange(value)
health_costs[health_costs$indicatorID == "SH.XPD.OOPC.PC.CD", "indicatorID"] <- "Out of Pocket Costs" # Recoding the indicator
health_costs[health_costs$indicatorID == "SH.XPD.CHEX.PC.CD", "indicatorID"] <- "Total Spending"      # ID values in the rows
health_costs[health_costs$indicatorID == "SH.XPD.GHED.PC.CD", "indicatorID"] <- "Government Spending" # into readable values
health_costs[health_costs$indicatorID == "SH.XPD.PVTD.PC.CD", "indicatorID"] <- "Private Spending"
total_health_costs <- health_costs[health_costs$indicatorID == "Total Spending", ] # Data frame for only total spending
healthcare_costs_plots <- ggplot(health_costs, mapping = aes(x = reorder(iso3c, value), y = value , color = indicatorID)) +
geom_linerange(total_health_costs, mapping = aes(ymax = value, ymin = 0)) +
geom_point(mapping = aes(shape = indicatorID)) +
labs(
title = "Health Care Expenditures (per capita)",
x = "Country",
y = "Current US$",
color = "Type",
shape = "Type"
) +
theme(axis.text = element_text(size = 4, angle = 90), legend.position = c(.3, .75))
###################################################################################################################
# 5. Map: Changes in Forestation around the World
forest_area <- wb(country = "countries_only", indicator = c("AG.LND.FRST.ZS"), mrv = 20) %>%
spread(key = date, value = value)
forest_data <- forest_area %>%
mutate(forest_change = forest_area[["2016"]] - forest_area[["1997"]]) # Creating column with 2016 values minus
# the 1997 values
#map_data("world")
maps_df <- map_data("world") %>% # Creating data frame for map_data using iso3c value to match with the
mutate(iso3c = iso.alpha(region, n = 3)) # country name
forest_and_map <- left_join(maps_df, forest_data, by = "iso3c") # Left joining maps data frame with my forest data
world_forest_plot <- ggplot(forest_and_map) +
geom_polygon(mapping = aes(x = long, y = lat, group = group, fill = forest_change)) +
scale_fill_distiller(palette = "RdYlGn", direction = -1) +
coord_quickmap() +
theme_void() +
labs(
title = "Change in Forested Area 1997-2016"
)
###################################################################################################################
# 6. Access to Electricity
access_to_electricity <- wb(country = "countries_only", indicator = c("EG.ELC.ACCS.RU.ZS"), mrv = 20) %>%
  filter(value <= 75) %>% # Accessing the database and filtering for the countries
  arrange(-value) %>% # that have 75 percent or less access to electricity
spread(key = date, value = value) # then arranging by value
access_to_electricity_fifteen_years <- access_to_electricity %>%
mutate(change_in_access = access_to_electricity[["2015"]] - access_to_electricity[["2000"]]) # Creating column with 2015 values minus
# the 2000 values
maps_electricity_df <- map_data("world") %>%
mutate(iso3c = iso.alpha(region, n = 3))
access_and_map <- left_join(maps_electricity_df, access_to_electricity_fifteen_years, by = "iso3c")
global_access_to_electricity_plot <- ggplot(access_and_map) + # Creating map plot for the change in
geom_polygon(mapping = aes(x = long, y = lat, group = group, fill = change_in_access)) + # access to electricity from 2000-2015
scale_fill_distiller(palette = "Spectral", direction = -1) +
coord_quickmap() +
theme_void() +
labs(
title = "Growth in Access to Electricity 2000-2015"
)
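# Sketch (not part of the original assignment): the plots above are only assigned to
# variables; any of them could be written to disk with ggsave(), for example:
# ggsave("world_forest_plot.png", plot = world_forest_plot, width = 10, height = 6)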
| /A6_Visulization/analysis.R | no_license | BasedAbe/R-code | R | false | false | 6,778 | r |
#' @import partykit
# Internal helper (mirrors partykit's unexported .list.rules.party): builds the
# character-string rule path leading to the terminal node(s) i of a party tree.
.list.rules.party <- function(x, i = NULL, ...) {
if (is.null(i)) i <- partykit::nodeids(x, terminal = TRUE)
if (length(i) > 1) {
ret <- sapply(i, .list.rules.party, x = x)
names(ret) <- if (is.character(i)) i else names(x)[i]
return(ret)
}
if (is.character(i) && !is.null(names(x)))
i <- which(names(x) %in% i)
stopifnot(length(i) == 1 & is.numeric(i))
stopifnot(i <= length(x) & i >= 1)
i <- as.integer(i)
dat <- partykit::data_party(x, i)
if (!is.null(x$fitted)) {
findx <- which("(fitted)" == names(dat))[1]
fit <- dat[,findx:ncol(dat), drop = FALSE]
dat <- dat[,-(findx:ncol(dat)), drop = FALSE]
if (ncol(dat) == 0)
dat <- x$data
} else {
fit <- NULL
dat <- x$data
}
rule <- c()
recFun <- function(node) {
if (partykit::id_node(node) == i) return(NULL)
kid <- sapply(kids_node(node), partykit::id_node)
whichkid <- max(which(kid <= i))
split <- split_node(node)
ivar <- varid_split(split)
svar <- names(dat)[ivar]
index <- index_split(split)
if (is.factor(dat[, svar])) {
if ((is.null(index)) | (!is.null(breaks_split(split))))
index <- ((1:nlevels(dat[, svar])) > breaks_split(split)) + 1
slevels <- levels(dat[, svar])[index == whichkid]
srule <- paste(svar, " %in% c(\"",
paste(slevels, collapse = "\", \"", sep = ""), "\")",
sep = "")
} else {
if (is.null(index)) index <- 1:length(kid)
breaks <- cbind(c(-Inf, breaks_split(split)),
c(breaks_split(split), Inf))
sbreak <- breaks[index == whichkid,]
right <- right_split(split)
srule <- c()
if (is.finite(sbreak[1]))
srule <- c(srule,
paste(svar, ifelse(right, ">", ">="), sbreak[1]))
if (is.finite(sbreak[2]))
srule <- c(srule,
paste(svar, ifelse(right, "<=", "<"), sbreak[2]))
srule <- paste(srule, collapse = " & ")
}
rule <<- c(rule, srule)
return(recFun(node[[whichkid]]))
}
node <- recFun(partykit::node_party(x))
paste(rule, collapse = " & ")
}
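# Usage sketch (not from the original package; assumes only 'partykit' and the built-in
# iris data): fit a small conditional inference tree and list the rule path to each leaf.
# library(partykit)
# ct <- ctree(Species ~ ., data = iris)
# .list.rules.party(ct)   # named character vector, one rule string per terminal node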
| /R/partykitModFuns.R | no_license | wpihongzhang/fiveSTAR | R | false | false | 2,150 | r |
#' Get values from intersecting objects
#'
#' Extract values from intersecting sf objects
#'
#' @param x an sf object
#' @param y an sf object
#' @param x_id a column in x that uniquely identifies features
#' @param y_id a column in y that uniquely identifies features
#' @import sf
#' @import purrr
#' @import dplyr
#' @importFrom magrittr %>%
#' @return a tibble with nrows equal to nrows(x) containing the intersecting values
#' @export xy_intersection
xy_intersection <-
function(x, y, x_id, y_id) {
# Get the intersects
intersects <-
sf::st_intersects(x, y)
# Create a logical vector where the intersect is empty
    # This ensures that the operation that retrieves the values doesn't encounter empty indexes
empty_x <- purrr::map(intersects,
~ length(.x) == 0) %>% unlist()
# Simplify x and y to tables
x_table <- sf::st_drop_geometry(x)
y_table <- sf::st_drop_geometry(y)
# Map over the intersect and x to return all pairs of ids that intersect
x_y <-
purrr::map2(.x = intersects[!empty_x],
.y = x_table[!empty_x, x_id],
~ tibble(y = y_table[.x, y_id], x = .y)) %>%
dplyr::bind_rows()
# Rename the columns to match the input variable
colnames(x_y) <- c(y_id, x_id)
# Perform a final step to ensure that all values of x_id are present in the output
dplyr::left_join(x_table[x_id], x_y, by = x_id)
}
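# Usage sketch (hypothetical data, not from this package): pair each North Carolina
# county from sf's bundled demo data with the ids of grid cells it intersects.
# nc <- sf::st_read(system.file("shape/nc.shp", package = "sf"), quiet = TRUE)
# grid <- sf::st_sf(grid_id = seq_len(100), geometry = sf::st_make_grid(nc, n = c(10, 10)))
# xy_intersection(x = nc, y = grid, x_id = "NAME", y_id = "grid_id")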
| /R/xy_intersection.R | permissive | MatthewJWhittle/spatialutils | R | false | false | 1,423 | r |
###batting log wrangled in "substring.r"
###data was slightly edited from original. redo in R.
library(rethinking)
library(tidyverse)
###
###
set.seed(1234)
options(mc.cores = parallel::detectCores())
###
m3 <- map2stan(
alist(
rdiff ~ dnorm( mu, sigma),
#likelihood function
mu <- A + B + C,
#varying intercepts
A <- h[home] + a[away],
#batting predictors
B <- hits_h[home] * Hh + double_h[home] * B2Bh + triple_h[home] * B3Bh + HR_h[home] * BHRh + balls_h[home] * BBBh +
hits_a[away] * Ha + double_a[away] * B2Ba + triple_a[away] * B3Ba + HR_a[away] * BHRa + balls_a[away] * BBBa,
#pitching predictors
C <- hits_allowed_h[home] * HHAh + pballs_h[home] * PBBh + pstrikeouts_h[home] * PSOh + strikes_h[home] * Strh +
hits_allowed_a[away] * HHAa + pballs_a[away] * PBBa + pstrikeouts_a[away] * PSOa + strikes_a[away] * Stra,
###adaptive priors
c(h, hits_h, double_h, triple_h, HR_h, balls_h, hits_allowed_h, pballs_h, pstrikeouts_h, strikes_h)[home] ~ dmvnormNC(sigma_home,Rho_home),
c(a, hits_a, double_a, triple_a, HR_a, balls_a, hits_allowed_a, pballs_a, pstrikeouts_a, strikes_a)[away] ~ dmvnormNC(sigma_away,Rho_away),
###priors
sigma_home ~ dcauchy(0,2),
sigma_away ~ dcauchy(0,2),
Rho_home ~ dlkjcorr(4),
Rho_away ~ dlkjcorr(4),
sigma ~ dcauchy(0,2)),
data=newdata2,
iter=5000 , warmup=2000 , chains=1, cores=4, control = list(adapt_delta = 0.99, max_treedepth = 15))
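### Sketch (not in the original script): once sampling finishes, the posterior can be
### summarised with rethinking's precis(), e.g.
### precis(m3, depth = 2)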
| /Bayesian Baseball 2018/scripts/Modeling/Two Factor Models/battingpitching.r | no_license | blakeshurtz/Bayesian-Baseball | R | false | false | 1,471 | r |
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L, -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 299727231L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
| /IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609860123-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 728 | r |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/write.R
\name{write.cas}
\alias{write.cas}
\title{Write a SaTScan cas (case) file}
\usage{
write.cas(x, location, filename, userownames = FALSE)
}
\arguments{
\item{x}{Your data frame.}
\item{location}{Directory location where the file should be written}
\item{filename}{Name for the output file in the OS; .cas will be added.}
\item{userownames}{If TRUE, will write the row names into the file.}
}
\description{
Write a SaTScan cas (case) file
}
\details{
Writes the input data frame to the OS, using the .cas extension. Contents of the data
frame should be only what you want SaTScan to see.
This is a simple function that calls write.table, since SaTScan just needs ASCII files.
}
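A minimal usage sketch (hypothetical data frame and file name, not taken from the package documentation):
  # cases <- data.frame(locationid = 1:3, numcases = c(2, 5, 1))
  # write.cas(cases, location = tempdir(), filename = "mycases")
  # file.exists(file.path(tempdir(), "mycases.cas"))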
| /man/write.cas.Rd | no_license | skhan8/rsatscan | R | false | false | 775 | rd |
#Create sub-metering plot
library(lubridate)
library(grDevices)
elecpwconsump <- read.table("./household_power_consumption.txt",header = TRUE,sep = ";")
elecpwconsump$Date <- as.Date(as.character(elecpwconsump$Date),"%d/%m/%Y")
filteredelecdata <- subset(elecpwconsump,
as.POSIXlt(elecpwconsump$Date)$year == 107
& months(elecpwconsump$Date) == "February"
& as.POSIXlt(elecpwconsump$Date)$mday %in% c("1","2"))
filteredelecdata$Time <- ymd_hms(paste(filteredelecdata$Date,filteredelecdata$Time))
#Initialize the PNG graphic file device
png("plot3.png",width = 480, height = 480)
with(filteredelecdata,
plot(Time,
as.numeric(as.character(Sub_metering_1)),
ylab = "Energy sub metering",
type = "l"))
lines(filteredelecdata$Time,
as.numeric(as.character(filteredelecdata$Sub_metering_2)),
col="red")
lines(filteredelecdata$Time,
as.numeric(as.character(filteredelecdata$Sub_metering_3)),
col="blue")
legend("topright",
lty = c(1,1,1),
col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lwd = 1,cex = 0.7)
dev.off()
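# Equivalent subset sketch (not in the original script): since Date is already a Date
# column, the two target days can also be selected with a plain date comparison.
# filteredelecdata <- subset(elecpwconsump, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))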
| /plot3.R | no_license | viseshraj/ExData_Plotting1 | R | false | false | 1,228 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_WYD.R
\name{add_WYD}
\alias{add_WYD}
\title{Add Day of Year, Day of Water Year.}
\usage{
add_WYD(df, datecolumn)
}
\arguments{
\item{df}{a dataframe}
\item{datecolumn}{quoted, requires a POSIXct formatted date or datetime}
}
\description{
This function takes the name of a POSIXct formatted date column and
adds new columns to a dataframe for Day of Year (Julian),
Day of Water Year (starting Oct 1 and ending Sep 30),
and Water Year (from Oct 1 to Sep 30).
}
\details{
This function should be used as follows:
Reassign or create the dataframe you would like to add new Water Year columns to. This function will generate three new columns, \bold{DOY} (Day of Year), \bold{WY} (Water Year, starting Oct 1), and \bold{DOWY} (Day of Water Year, starting Oct 1). It relies on \code{dowy} and \code{wt_yr} functions, as well as the \code{lubridate} package.
}
\examples{
# get data
airq <- datasets::airquality
# add a year (this data from 1973)
airq$year <- 1973
# make a date col
airq$date <- with(airq, paste0(year, "-", Month, "-", Day))
head(airq)
# now format into POSIX with lubridate
require(lubridate)
airq$date <- lubridate::ymd(airq$date)
# now run function:
airq <- add_WYD(airq, "date")
}
| /man/add_WYD.Rd | permissive | ryanpeek/wateRshedTools | R | false | true | 1,271 | rd |
# Functions that use the 'magick' package (ImageMagick) to combine separate brainview images into one image.
# Currently 'magick' is an optional dependency only.
#' @title Combine several brainview images into a new figure.
#'
#' @description Create a new image from several image tiles, the exact layout depends on the number of given images.
#'
#' @param brainview_images vector of character strings, paths to the brainview images, usually in PNG format
#'
#' @param output_img path to output image that including the file extension
#'
#' @param colorbar_img path to the main image containing the separate colorbar, usually an image in PNG format
#'
#' @param silent logical, whether to suppress messages
#'
#' @param grid_like logical, whether to arrange the images in a grid-like fashion. If FALSE, they will all be merged horizontally.
#'
#' @param border_geometry string, a geometry string passed to \code{magick::image_border} to define the borders to add to each image tile. The default value adds 5 pixels, both horizontally and vertically.
#'
#' @param background_color hex color string, such as "#DDDDDD" or "#FFFFFF". The color to use when extending images (e.g., when creating the border). WARNING: Do not use color names (like 'gray'), as their interpretation differs between rgl and image magick!
#'
#' @param map_bg_to_transparency logical, whether to map the background_color to transparency for the final PNG export.
#'
#' @return named list with entries: 'brainview_images': vector of character strings, the paths to the input images. 'output_img_path': character string, path to the output image. 'merged_img': the magick image instance.
#'
#' @export
arrange.brainview.images <- function(brainview_images, output_img, colorbar_img=NULL, silent=TRUE, grid_like=TRUE, border_geometry="5x5", background_color = "white", map_bg_to_transparency = FALSE) {
if (requireNamespace("magick", quietly = TRUE)) {
# load image files
images = magick::image_read(brainview_images);
# trim images
images = magick::image_trim(images);
# Add tiny border back (to prevent them from touching each other)
images = magick::image_border(images, background_color, border_geometry);
num_img = length(images);
if(grid_like) {
if(num_img == 3 || num_img == 4) {
top_row = wrapped.image.append(images[1:2], background_color = background_color);
bottom_row = wrapped.image.append(images[3:num_img], background_color = background_color);
merged_img = wrapped.image.append(c(top_row, bottom_row), stack = TRUE, background_color = background_color);
} else if(num_img == 8 || num_img == 9) {
top_row = wrapped.image.append(images[1:3], background_color = background_color);
mid_row = wrapped.image.append(images[4:6], background_color = background_color);
top_and_mid = wrapped.image.append(c(top_row, mid_row), stack = TRUE, background_color = background_color);
if(num_img == 9) {
bottom_row = wrapped.image.append(images[7:num_img], background_color = background_color);
} else {
# For 8 images, it gets a bit trickier. It looks very bad if the two
# images are on the left and the right is empty, so we add an empty
# image of appropriate size between them.
width_top_and_mid = magick::image_info(top_and_mid)$width;
bottom_row_so_far = wrapped.image.append(images[7:num_img], background_color = background_color);
width_bottom_so_far = magick::image_info(images[7])$width + magick::image_info(images[8])$width;
width_missing = width_top_and_mid - width_bottom_so_far;
if(width_missing > 0L) {
mid = magick::image_blank(width_missing, magick::image_info(bottom_row_so_far)$height, background_color);
bottom_row = wrapped.image.append(c(images[7], mid, images[8]), background_color = background_color);
} else {
bottom_row = bottom_row_so_far;
}
}
merged_img = wrapped.image.append(c(top_and_mid, bottom_row), stack = TRUE, background_color = background_color);
} else {
if(num_img <= 10L) {
merged_img = wrapped.image.append(images, background_color = background_color);
} else {
# For more than 10 images, plot 10 per row.
num_per_row = 10L;
num_rows = as.integer(ceiling(num_img / num_per_row));
num_in_last_row = (num_rows * num_per_row) %% num_img;
start_img_idx = 1L;
for(row_idx in seq.int(num_rows)) {
img_row = wrapped.image.append(images[start_img_idx:min((start_img_idx+num_per_row-1L),num_img)], background_color = background_color);
if(row_idx == 1L) {
merged_img = img_row;
} else {
width_so_far = magick::image_info(merged_img)$width;
width_img_row = magick::image_info(img_row)$width;
width_missing = width_so_far - width_img_row;
if(width_missing > 0L) {
blank_right_img = magick::image_blank(width_missing, magick::image_info(img_row)$height, background_color);
img_row = wrapped.image.append(c(img_row, blank_right_img), background_color = background_color);
}
merged_img = wrapped.image.append(c(merged_img, img_row), stack = TRUE, background_color = background_color);
}
start_img_idx = start_img_idx + num_per_row;
}
}
}
} else {
merged_img = wrapped.image.append(images, background_color = background_color);
}
if(map_bg_to_transparency) {
merged_img = image.remap.color(merged_img, source_color=background_color, source_point = NULL);
}
magick::image_write(merged_img, path = output_img);
if(! silent) {
message(sprintf("Merged image written to '%s' (current working dir is '%s').\n", output_img, getwd()));
}
return(invisible(list('merged_img'=merged_img, 'brainview_images'=brainview_images, 'output_img_path'=output_img)));
} else {
warning("The 'magick' package must be installed to use this functionality. Merged image NOT written.");
return(invisible(NULL));
}
}
#' @title Remap a color in an image, typically used to set the background color to transparent.
#'
#' @description Offers 2 algorithms: remap a color by flood-filling from a given pixel, or remap a hardcoded color throughout the entire image. Provide one of 'source_color' or 'source_point' by setting the other to NULL. If both are given, source_color takes precedence and source_point is silently ignored.
#'
#' @param source_color the source color that should be replaced in the whole image. Set to NULL to disable.
#'
#' @param source_point the source pixel in which to start the flood filling. Set to NULL to disable.
#'
#' @param target_color an image magick color string, use 'none' for transparency. Only used with flood fill.
#'
#' @keywords internal
image.remap.color <- function(source_img, source_color=NULL, source_point="+1+1", target_color="none") {
if(is.null(source_color)) {
if(is.null(source_point)) {
stop("One of 'source_color' or 'source_point' must be provided.");
} else {
remapped_img = magick::image_fill(source_img, target_color, point = source_point, fuzz = 0);
}
} else {
remapped_img = magick::image_transparent(source_img, source_color, fuzz = 0);
}
return(remapped_img);
}
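# Usage sketch (hypothetical file name, not part of the package): make a white
# background transparent, either globally by color or by flood-filling from the
# top-left pixel.
# img <- magick::image_read("fsbrain_arranged.png")
# img_transparent <- image.remap.color(img, source_color = "#FFFFFF")
# img_flooded <- image.remap.color(img, source_color = NULL, source_point = "+1+1", target_color = "none")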
#' @title Combine several brainview images as a grid into a new figure.
#'
#' @description Create a new image from several image tiles, the exact layout is a grid with n per row.
#'
#' @inheritParams arrange.brainview.images
#'
#' @param output_img path to output image that including the file extension
#'
#' @param num_per_row positive integer, the number of image tiles per row.
#'
#' @param captions vector of character strings or NULL, the (optional) text annotations for the images. Useful to print the subject identifier onto the individual tiles. Length must match number of image tiles in 'brainview_images'.
#'
#' @return named list with entries: 'brainview_images': vector of character strings, the paths to the input images. 'output_img_path': character string, path to the output image. 'merged_img': the magick image instance.
#'
#' @note The tiles are written row-wise, in the order in which they occur in the parameter 'brainview_images'.
#'
#' @export
arrange.brainview.images.grid <- function(brainview_images, output_img, colorbar_img=NULL, silent=TRUE, num_per_row=10L, border_geometry="5x5", background_color = "white", captions=NULL) {
if (requireNamespace("magick", quietly = TRUE)) {
# load image files
images = magick::image_read(brainview_images);
# trim images
images = magick::image_trim(images);
# Add tiny border back (to prevent them from touching each other)
images = magick::image_border(images, background_color, border_geometry);
num_img = length(images);
images = images.rescale.to.max.canvas(images, background = background_color);
# annotate if requested
images = images.annotate(images, captions, background = background_color);
num_rows = as.integer(ceiling(num_img / num_per_row));
num_in_last_row = (num_rows * num_per_row) %% num_img;
start_img_idx = 1L;
for(row_idx in seq.int(num_rows)) {
row_start_idx = start_img_idx;
row_end_idx = min((start_img_idx+num_per_row-1L),num_img);
img_row = wrapped.image.append(images[row_start_idx:row_end_idx], background_color = background_color);
if(row_idx == 1L) {
merged_img = img_row;
} else {
width_so_far = magick::image_info(merged_img)$width;
width_img_row = magick::image_info(img_row)$width;
width_missing = width_so_far - width_img_row;
if(width_missing > 0L) {
blank_right_img = magick::image_blank(width_missing, magick::image_info(img_row)$height, background_color);
img_row = wrapped.image.append(c(img_row, blank_right_img), background_color = background_color);
}
merged_img = wrapped.image.append(c(merged_img, img_row), stack = TRUE, background_color = background_color);
}
start_img_idx = start_img_idx + num_per_row;
}
magick::image_write(merged_img, path = output_img);
if(! silent) {
message(sprintf("Merged image written to '%s' (current working dir is '%s').\n", output_img, getwd()));
}
return(invisible(list('merged_img'=merged_img, 'brainview_images'=brainview_images, 'output_img_path'=output_img)));
} else {
warning("The 'magick' package must be installed to use this functionality. Merged image NOT written.");
return(invisible(NULL));
}
}
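# Usage sketch (hypothetical file names and subject identifiers, not part of the
# package): merge per-subject tiles into a grid of 5 images per row, labelling each
# tile with its subject id.
# tile_pngs <- paste0("subject", 1:5, ".png")
# arrange.brainview.images.grid(tile_pngs, output_img = "group_overview.png",
#                               num_per_row = 5L, captions = paste0("subject", 1:5))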
#' @title Rescale all images canvas to match the largest one.
#'
#' @param images vector of magick images
#'
#' @param background color string, like 'white' or '#00FF00'
#'
#' @return vector of magick images, the rescaled images
#'
#' @keywords internal
images.rescale.to.max.canvas <- function(images, background="white") {
if (requireNamespace("magick", quietly = TRUE)) {
num_img = length(images);
# Determine max tile heigth and width to resize canvas of all tiles, so all images have the same width and height.
tile_widths = rep(NA, num_img);
tile_heights = rep(NA, num_img);
for(img_idx in seq.int(num_img)) {
tile_widths[img_idx] = magick::image_info(images[img_idx])$width;
tile_heights[img_idx] = magick::image_info(images[img_idx])$height;
}
max_tile_width = max(tile_widths);
max_tile_height = max(tile_heights);
#cat(sprintf("The min and max tile widths are %d and %d.\n", min(tile_widths), max_tile_width));
#cat(sprintf("The min and max tile heights are %d and %d.\n", min(tile_heights), max_tile_height));
geom_string = sprintf("%dx%d", max_tile_width, max_tile_height);
#cat(sprintf("Using geom string '%s' for the %d tiles.\n", geom_string, num_img));
imgs_rescaled = images;
for(img_idx in seq.int(num_img)) {
imgs_rescaled[img_idx] = magick::image_extent(images[img_idx], geom_string, color = background);
}
return(imgs_rescaled);
} else {
warning("The 'magick' package must be installed to use this functionality.");
return(invisible(NULL));
}
}
#' @title Annotate image with text.
#'
#' @inheritParams images.rescale.to.max.canvas
#'
#' @param annotations vector of character strings, the strings to print onto the tiles
#'
#' @param do_extend logical, whether to add the space for the annotation text below the existing image tile
#'
#' @return vector of magick images, the annotated images
#'
#' @keywords internal
images.annotate <- function(images, annotations, do_extend = TRUE, background = 'white') {
if (requireNamespace("magick", quietly = TRUE)) {
num_img = length(images);
if(is.null(annotations)) {
return(images);
}
annotations = recycle(annotations, num_img);
imgs_annotated = images;
for(img_idx in seq.int(num_img)) {
font_size = 30L;
if(do_extend) {
extend_height_by = font_size * 2L;
lower_extension_img = magick::image_blank(magick::image_info(images[img_idx])$width, extend_height_by, background);
merged_img = wrapped.image.append(c(images[img_idx], lower_extension_img), stack = TRUE, background_color = background);
} else {
merged_img = images[img_idx];
}
font_color = "black";
if(background == "black" || background == "#000000") {
font_color = "white";
}
imgs_annotated[img_idx] = magick::image_annotate(merged_img, annotations[img_idx], size = font_size, gravity = "south", color = font_color);
}
return(imgs_annotated);
} else {
warning("The 'magick' package must be installed to use this functionality.");
return(invisible(NULL));
}
}
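# Sketch of how the two internal helpers above fit together (hypothetical file
# names; both helpers require the 'magick' package): pad tiles of different
# sizes to a common canvas, then caption them.
#
# tiles = magick::image_read(c("lh_lateral.png", "rh_lateral.png"));
# tiles = images.rescale.to.max.canvas(tiles, background = "white");
# tiles = images.annotate(tiles, c("left hemisphere", "right hemisphere"), background = "white");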
#' @title Visualize coloredmeshes from several angles and combine the images into a new figure.
#'
#' @description Create a tight layout view of coloredmeshes from several angles. Creates separate `sd_<angle>` images, then crops and finally merges them into a single output image with image magick. The `coloredmeshes` to pass to this function are usually obtained by running any `vis*` function (like \code{\link[fsbrain]{vis.subject.morph.native}}, \code{\link[fsbrain]{vis.subject.morph.standard}}, \code{\link[fsbrain]{vis.subject.label}}, \code{\link[fsbrain]{vis.subject.annot}}, and others). That means you can use this function to visualize all kinds of data, e.g., morphometry data in native and standard space, labels, and brain atlases.
#'
#' @param coloredmeshes list of coloredmesh. A coloredmesh is a named list as returned by the `coloredmesh.from*` functions (like \code{\link{coloredmesh.from.morph.native}}). It has the entries 'mesh' of type tmesh3d, a 'col', which is a color specification for such a mesh. The `vis*` functions (like \code{\link[fsbrain]{vis.subject.morph.native}}) all return a list of coloredmeshes.
#'
#' @param view_angles list of strings. See \code{\link{get.view.angle.names}} for all valid strings.
#'
#' @param rgloptions option list passed to \code{\link{par3d}}. Example: \code{rgloptions = list("windowRect"=c(50,50,1000,1000))}.
#'
#' @param rglactions named list. A list in which the names are from a set of pre-defined actions. The values can be used to specify parameters for the action.
#'
#' @param style character string, a rendering style, e.g., 'default', 'shiny' or 'semitransparent'. Alternatively, a named list of style parameters (see \code{\link{material3d}}), e.g., \code{list("shininess"=50, specular="black", alpha=0.5)}. Use the magic word 'from_mesh' to use the 'style' field of each coloredmesh instead of a single, global style. In that case, you will have to make sure your meshes have such a field, if not, the style 'default' is used as a fallback for those which don't.
#'
#' @param output_img string, path to the output file. Defaults to "fsbrain_arranged.png"
#'
#' @param silent logical, whether to suppress all messages
#'
#' @param grid_like logical, whether to arrange the images in a grid-like fashion. If FALSE, they will all be merged horizontally. Passed to \code{\link[fsbrain]{arrange.brainview.images}}.
#'
#' @param background_color hex color string (like '#FFFFFF'), the color to use for the background. Ignored if 'transparency_color' is not NULL. To get a transparent background, use 'transparency_color' instead of this parameter. WARNING: Do not use color names (like 'gray'), as their interpretation differs between rgl and image magick!
#'
#' @param transparency_color hex color string (like '#FFFFFF'), the temporary background color that will get mapped to transparency, or NULL if you do not want a transparent background. If used, it can be any color that does not occur in the foreground. Try '#FFFFFF' (white) or '#000000' (black) if in doubt. WARNING: Do not use color names (like 'gray'), as their interpretation differs between rgl and image magick!
#'
#' @return named list, see \code{\link{arrange.brainview.images}} for details
#'
#' @examples
#' \dontrun{
#' fsbrain::download_optional_data();
#' subjects_dir = fsbrain::get_optional_data_filepath("subjects_dir");
#' # Use any vis function to get coloredmeshes. You can visualize morphometry,
#' # labels, an atlas, whatever. You can suppress the view unless you need it.
#' coloredmeshes = vis.subject.morph.native(subjects_dir, "subject1", "thickness",
#' cortex_only=TRUE, rglactions=list("clip_data"=c(0.05, 0.95)),
#' views=NULL);
#' # The meshes contain the surface, data, and color information and can be
#' # visualized. You could adapt the rendering style while doing so:
#' vislayout.from.coloredmeshes(coloredmeshes, style='shiny');
#' # You could change the rendering style on a per-mesh basis.
#' coloredmeshes[[1]]$style = list("shininess"=50, alpha=0.5);
#' vislayout.from.coloredmeshes(coloredmeshes, style='from_mesh');
#' }
#'
#'
#' @family visualization functions
#' @export
vislayout.from.coloredmeshes <- function(coloredmeshes, view_angles=get.view.angle.names(angle_set = "t4"), rgloptions = rglo(), rglactions=list(), style="default", output_img="fsbrain_arranged.png", silent=FALSE, grid_like=TRUE, background_color = "white", transparency_color=NULL) {
if (requireNamespace("magick", quietly = TRUE)) {
view_images = tempfile(view_angles, fileext = ".png"); # generate one temporary file name for each image
map_bg_to_transparency = FALSE;
if(! is.null(transparency_color)) {
map_bg_to_transparency = TRUE;
if(background_color != "white") {
warning("Parameter 'transparency_color' is set, ignoring custom value for parameter 'background_color'.");
}
background_color = transparency_color;
}
# Create the temporary images at the temp paths
for(view_idx in seq_len(length(view_angles))) {
view = view_angles[[view_idx]];
view_image = view_images[[view_idx]];
internal_rglactions = list("snapshot_png"=view_image);
if(rglactions.has.key(rglactions, "snapshot_png")) {
warning("The key 'snapshot_png' in the 'rglactions' parameter is not supported for this function, it will be ignored. Use 'output_img' instead.");
rglactions$snapshot_png = NULL;
}
final_rglactions = modifyList(rglactions, internal_rglactions);
brainviews(c(view), coloredmeshes, rgloptions = rgloptions, rglactions = final_rglactions, style = style, background = background_color);
}
# Now merge them into one
return(invisible(arrange.brainview.images(view_images, output_img, silent=silent, grid_like=grid_like, background_color = background_color, map_bg_to_transparency = map_bg_to_transparency)));
} else {
warning("The 'magick' package must be installed to use this functionality. Image with manual layout NOT written.");
return(invisible(NULL));
}
}
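# Sketch of a transparent-background export (assumes 'coloredmeshes' from any
# vis* call, as in the roxygen example above): render on a color that does not
# occur in the figure and let it be mapped to transparency in the merged PNG.
#
# vislayout.from.coloredmeshes(coloredmeshes, output_img = "brain_transparent.png",
#     transparency_color = "#FFFFFF");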
#' @title Export high-quality brainview image with a colorbar.
#'
#' @description This function serves as an easy (but slightly inflexible) way to export a high-quality, tight-layout, colorbar figure to disk. If no colorbar is required, one can use \code{vislayout.from.coloredmeshes} instead.
#'
#' @inheritParams vislayout.from.coloredmeshes
#'
#' @param colorbar_legend character string or NULL, the title for the colorbar.
#'
#' @param img_only logical, whether to return only the resulting image
#'
#' @param horizontal logical, whether to plot the colorbar horizontally (TRUE) or vertically (FALSE). Pass 'NULL' to force no colorbar at all.
#'
#' @param silent logical, whether to suppress messages
#'
#' @param quality integer, the rendering quality level. This is the resolution per tile before trimming, divided by 1000, in pixels. Example: 1L means 1000x1000 pixels per tile before trimming. Currently supported values: \code{1L..2L}. Note that the resolution you can get is also limited by your screen resolution.
#'
#' @param image.plot_extra_options named list, custom options for fields::image.plot. Overwrites those derived from the quality setting. If in doubt, leave this alone.
#'
#' @param large_legend logical, whether to plot extra large legend text, affects the font size of the colorbar_legend and the tick labels.
#'
#' @param style the rendering style, see \code{material3d} or use a predefined style like 'default' or 'shiny'.
#'
#' @param grid_like logical, passed to \code{vislayout.from.coloredmeshes}.
#'
#' @return magick image instance or named list, depending on the value of 'img_only'. If the latter, the list contains the fields 'res_vl', 'res_cb', and 'res_ex', which are the return values of the functions \code{vislayout.from.coloredmeshes}, \code{coloredmesh.plot.colorbar.separate}, and \code{combine.colorbar.with.brainview.image}, respectively.
#'
#' @note Note that your screen resolution has to be high enough to generate the final image in the requested resolution, see the 'fsbrain FAQ' vignette for details and solutions if you run into trouble.
#'
#' @examples
#' \dontrun{
#' rand_data = rnorm(327684, 5, 1.5);
#' cm = vis.data.on.fsaverage(morph_data_both=rand_data,
#' rglactions=list('no_vis'=T));
#' vis.export.from.coloredmeshes(cm, colorbar_legend='Random data',
#' output_img="~/fsbrain_arranged.png");
#' }
#'
#' @export
vis.export.from.coloredmeshes <- function(coloredmeshes, colorbar_legend=NULL, img_only=TRUE, horizontal=TRUE, silent = TRUE, quality=1L, output_img="fsbrain_arranged.png", image.plot_extra_options=NULL, large_legend=TRUE, view_angles = get.view.angle.names(angle_set = "t4"), style = 'default', grid_like = TRUE, background_color = "white", transparency_color=NULL) {
if (requireNamespace("magick", quietly = TRUE)) {
quality = as.integer(quality);
if(quality < 1L | quality > 2L) {
stop("The parameter 'quality' must be an integer in range 1-2.");
}
if(is.null(image.plot_extra_options)) {
if(quality == 1L) {
if(large_legend) {
font_s = 4;
image.plot_extra_options = list(horizontal = horizontal, legend.cex = font_s, legend.width = 4, legend.mar = 18, legend.line=-6, legend.lab=colorbar_legend, axis.args = list(cex.axis = font_s, mgp=c(3,(max(1.0, font_s -1)),0)));
} else {
font_s = 1.8;
image.plot_extra_options = list(horizontal = horizontal, legend.cex = font_s, legend.width = 2, legend.mar = 12, legend.line=-4, legend.lab=colorbar_legend, axis.args = list(cex.axis = font_s, mgp=c(3,(max(1.0, font_s -1)),0)));
}
} else { # quality 2
if(large_legend) {
font_s = 4;
image.plot_extra_options = list(horizontal = horizontal, legend.cex = font_s, legend.width = 4, legend.mar = 18, legend.line=-6, legend.lab=colorbar_legend, axis.args = list(cex.axis = font_s, mgp=c(3,(max(1.0, font_s -1)),0)));
} else {
font_s = 2.6;
image.plot_extra_options = list(horizontal = horizontal, legend.cex = font_s, legend.width = 4, legend.mar = 18, legend.line=-6, legend.lab=colorbar_legend, axis.args = list(cex.axis = font_s, mgp=c(3,(max(1.0, font_s -1)),0)));
}
}
}
rgloptions = list('windowRect'=c(50, 50, 1000 * quality, 1000 * quality));
if(can.plot.colorbar.from.coloredmeshes(coloredmeshes) && !(is.null(horizontal))) {
tmp_img = tempfile(fileext = ".png");
res_vl = vislayout.from.coloredmeshes(coloredmeshes, rgloptions = rgloptions, view_angles = view_angles, silent = silent, output_img = tmp_img, style = style, grid_like = grid_like, background_color = background_color);
png_options=list('filename'='fsbrain_cbar.png', 'width'=1400, 'height'=1400, 'bg'=background_color);
res_cb = coloredmesh.plot.colorbar.separate(coloredmeshes, image.plot_extra_options=image.plot_extra_options, silent = silent, png_options=png_options);
res_ex = combine.colorbar.with.brainview.image(horizontal = horizontal, silent = silent, brainview_img = tmp_img, output_img = output_img, background_color = background_color, transparency_color = transparency_color);
if(img_only) {
return(res_ex$merged_img);
}
} else {
res_vl = vislayout.from.coloredmeshes(coloredmeshes, rgloptions = rgloptions, view_angles = view_angles, silent = silent, output_img = output_img, style = style, grid_like = grid_like, background_color = background_color, transparency_color = transparency_color);
res_cb = NULL;
res_ex = NULL;
if(img_only) {
return(res_vl$merged_img);
}
}
return(invisible(list('res_vl'=res_vl, 'res_cb'=res_cb, 'res_ex'=res_ex)));
} else {
warning("The 'magick' package must be installed to use this functionality. Image NOT written.");
return(invisible(NULL));
}
}
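# Sketch of a higher-resolution export (assumes 'cm' holds coloredmeshes as in
# the roxygen example above; quality = 2L needs a screen that can fit a
# 2000x2000 pixel rgl window, see the note in the docs): vertical colorbar.
#
# vis.export.from.coloredmeshes(cm, colorbar_legend = 'Random data', quality = 2L,
#     horizontal = FALSE, output_img = "~/fsbrain_hq.png");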
#' @title Export high-quality brainview image with a colorbar.
#'
#' @description This function serves as an easy (but slightly inflexible) way to export a high-quality, tight-layout, colorbar figure to disk. If no colorbar is required, one can use \code{vislayout.from.coloredmeshes} instead. It is an alias for `vis.export.from.coloredmeshes` that requires less typing.
#'
#' @inheritParams vis.export.from.coloredmeshes
#'
#' @return magick image instance or named list, depending on the value of 'img_only'. If the latter, the list contains the fields 'res_vl', 'res_cb', and 'res_ex', which are the return values of the functions \code{vislayout.from.coloredmeshes}, \code{coloredmesh.plot.colorbar.separate}, and \code{combine.colorbar.with.brainview.image}, respectively.
#'
#' @note Note that your screen resolution has to be high enough to generate the final image in the requested resolution, see the 'fsbrain FAQ' vignette for details and solutions if you run into trouble.
#'
#' @examples
#' \dontrun{
#' rand_data = rnorm(327684, 5, 1.5);
#' cm = vis.data.on.fsaverage(morph_data_both=rand_data,
#' rglactions=list('no_vis'=T));
#' export(cm, colorbar_legend='Random data',
#' output_img="~/fsbrain_arranged.png");
#' }
#'
#' @export
export <- function(coloredmeshes, colorbar_legend=NULL, img_only=TRUE, horizontal=TRUE, silent = TRUE, quality=1L, output_img="fsbrain_arranged.png", image.plot_extra_options=NULL, large_legend=TRUE, view_angles = get.view.angle.names(angle_set = "t4"), style = 'default', grid_like = TRUE, background_color = "white", transparency_color=NULL) {
return(vis.export.from.coloredmeshes(coloredmeshes, colorbar_legend = colorbar_legend, img_only = img_only, horizontal = horizontal, silent = silent, quality=quality, output_img=output_img, image.plot_extra_options=image.plot_extra_options, large_legend=large_legend, view_angles = view_angles, style = style, grid_like = grid_like, background_color = background_color, transparency_color=transparency_color));
}
|
/R/brainview_magick.R
|
permissive
|
haitaoge/fsbrain
|
R
| false | false | 29,427 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deep.service.R
\name{deepAuction}
\alias{deepAuction}
\title{IEX Depth of Book}
\usage{
deepAuction(symbol)
}
\arguments{
\item{symbol}{a market symbol, one and only one symbol}
}
\description{
DEEP is used to receive real-time depth of book quotations direct from IEX.
The depth of book quotations received via DEEP provide an aggregated size
of resting displayed orders at a price and side, and do not indicate the
size or number of individual orders at any price level. Non-displayed orders
and non-displayed portions of reserve orders are not represented in DEEP.
}
\details{
DEEP also provides last trade price and size information.
Trades resulting from either displayed or non-displayed orders matching on IEX will be reported.
Routed executions will not be reported.
Data Weighting: FREE
Data Timing: REAL TIME
Data Schedule: Market hours 9:30am-4pm ET
Data Source(s): Consolidated Tape Investors Exchange
}
\examples{
deepAuction('AAPL')
}
\seealso{
Other deep service functions: \code{\link{deepOfficialPrice}},
\code{\link{deepTrades}}, \code{\link{deep}}
}
\concept{deep service functions}
|
/man/deepAuction.Rd
|
no_license
|
Sahanduiuc/iexcloudR
|
R
| false | true | 1,188 |
rd
|
# Install relevant libraries ---------------------------------------------------
library(igraph)
library(ggplot2)
library(grid)
library(sf)
library(tidyverse)
library(tigris)
# Import trip data -------------------------------------------------------------
dir <- "/home/marion/PVDResearch/Data/mobilityData/cleanData"
# dir <- "/Users/Alice/Documents"
filename <- "tripsYear1WithTracts"
path <- file.path(dir, paste(filename, ".csv", sep = ""))
assign(filename, read.csv(path))
# Import census tract data -----------------------------------------------------
dir <- "/home/marion/PVDResearch/PVDResearch/censusData"
# dir <- "/Users/Alice/Dropbox/pvd_summer/censusData"
filename <- "riData"
path <- file.path(dir, paste(filename, ".csv", sep = ""))
assign(filename, read.csv(path))
# Clean and organize trip data to cluster --------------------------------------
cleanData <- function(data, start_date, end_date) {
data %>%
filter(minutes >= 3) %>%
# Select time range
mutate(start_time = as.POSIXct(start_time, tz = "EST")) %>%
filter(start_time > start_date & start_time < end_date) %>%
# Round coordinates
mutate(start_latitude = 0.005*round(start_latitude/0.005, digits = 0),
start_longitude = 0.005*round(start_longitude/0.005, digits = 0),
end_latitude = 0.005*round(end_latitude/0.005, digits = 0),
end_longitude = 0.005*round(end_longitude/0.005, digits = 0)) %>%
# Create data frame of edges for clustering algorithm
mutate(from = paste("(", start_latitude, ", ", start_longitude, ")", sep = ""),
to = paste("(", end_latitude, ", ", end_longitude, ")", sep = "")) %>%
group_by(from, to) %>%
summarise(lat = start_latitude,
long = start_longitude,
weight = n())
}
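# Quick check of the grid rounding used above: 0.005 * round(x / 0.005) snaps a
# coordinate to the nearest 0.005 degrees (roughly 550 m in latitude), so nearby
# trip endpoints collapse onto the same node. Example with a made-up coordinate:
stopifnot(isTRUE(all.equal(0.005 * round(41.8236 / 0.005), 41.825)))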
dataWeek <- cleanData(tripsYear1WithTracts, start_date = "2019-07-01", end_date = "2019-07-08")
dataMonth <- cleanData(tripsYear1WithTracts, start_date = "2019-07-01", end_date = "2019-08-01")
dataYear <- cleanData(tripsYear1WithTracts, start_date = "2018-10-17", end_date = "2019-09-19")
# Create clusters using Infomap algorithm --------------------------------------
createClusters <- function(data){
# Create graph for clustering algorithm
g <- graph_from_data_frame(data, directed = FALSE)
# Create clusters
imc <- cluster_infomap(g)
clusters <- stack(membership(imc))
colnames(clusters) <- c("group", "coordinates")
# Map trip data to clusters
data$cluster <- NA
for (i in 1:nrow(clusters)) {
coord <- clusters[i,]$coordinates
group <- clusters[i,]$group
index <- which(data$from == coord)
data$cluster[index] <- group
}
# Track modularity, number of clusters, and number of nodes per cluster
modularity = round(modularity(imc), digits = 2)
numClusters = max(data$cluster)
numNodes <- data.frame(data) %>%
distinct(from, .keep_all = TRUE) %>%
group_by(cluster) %>%
summarise(count = n())
numNodes <- numNodes$count
list(data, modularity = modularity, numClusters = numClusters, numNodes = numNodes)
}
dataWeek <- createClusters(dataWeek)
dataMonth <- createClusters(dataMonth)
dataYear <- createClusters(dataYear)
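# Self-contained demo of the community-detection step on a toy graph with two
# planted blocks (demo objects only; not used below): infomap should recover
# the two blocks and report a positive modularity.
demo_g <- sample_islands(islands.n = 2, islands.size = 10, islands.pin = 0.8, n.inter = 1)
demo_imc <- cluster_infomap(demo_g)
demo_membership <- membership(demo_imc)                    # cluster id per node
demo_modularity <- round(modularity(demo_imc), digits = 2) # quality of the partition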
# Plot clusters ----------------------------------------------------------------
createPlot <- function(data, title){
# Get map of Providence County census tracts
censusTracts <- tracts("RI", class = "sf") %>%
select(GEOID) %>%
filter(GEOID %in% riData$GEOID)
# Plot clusters over map of census tracts
ggplot(censusTracts) +
geom_sf() +
# Plot clusters
geom_point(data = data.frame(data[1]), aes(x = long, y = lat, color = as.factor(cluster)), size = 2) +
# Label plot
scale_color_discrete(name = "Number of Nodes per Cluster", labels = data$numNodes) +
guides(color = guide_legend(ncol = 4)) +
labs(title = title,
subtitle = paste("Modularity:", data$modularity,
"\nClusters:", data$numClusters)) +
# Remove gray background
theme_bw() +
# Remove grid
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
# Rotate x axis labels
theme(axis.text.x = element_text(angle = 90))
}
plotWeek <- createPlot(dataWeek, "Infomap clusters on one week of data")
plotMonth <- createPlot(dataMonth, "Infomap clusters on one month of data")
plotYear <- createPlot(dataYear, "Infomap clusters on one year of data")
# Save plots -------------------------------------------------------------------
plots <- mget(ls(pattern="plot"))
dir <- "/home/marion/PVDResearch/Plots"
# dir <- "/Users/Alice/Dropbox/pvd_summer"
filenames <- c("Infomap_clusters_on_one_week", "Infomap_clusters_on_one_month", "Infomap_clusters_on_one_year")
paths <- file.path(dir, paste(filenames, ".png", sep = ""))
for(i in 1:length(plots)){
invisible(mapply(ggsave, file = paths[i], plot = plots[i]))
}
|
/scooterData/clusterInfomap.R
|
no_license
|
alicepaul/pvd_summer
|
R
| false | false | 4,878 |
r
|
## __________________________________________________________
##
## RepresentationXGroup
##
##  INPUT: x: Adjacency matrix
##         cluster: Vector of the classes
##
## Representation of the nodes and the corresponding groups after reorganization
## __________________________________________________________
RepresentationXGroup <- function(x, cluster){
x <- SortMatrix(cluster, x); # reorganize the adjacency matrix
dim(x)[1]->n;
m<-which(x==1,arr.ind=TRUE);
plot(1, 1, xlim = c(0, n + 1), ylim = c(n + 1, 0), type = "n", axes= FALSE,xlab="classes",ylab="classes",main="Reorganized Adjacency matrix")
rect(m[,1]-0.5,m[,2]-0.5,m[,1]+0.5,m[,2]+0.5,col=1);
table(cluster)->limits; # find the class limits
cumsum(limits)[1:(length(limits)-1)]+0.5->limits;
abline(v=c(0.5,limits,n+0.5),h=c(0.5,limits,n+0.5),col="red");
}
## __________________________________________________________
##
## RepresentationXY
##
##  INPUT: X: Adjacency matrix
##         Y: Similarity matrix
##         node.classes: Vector of the classes
##         DrawGroup: Logical, whether to draw the class limits on the plot
##
##  Representation of the variables Yij and the affiliation matrix
## __________________________________________________________
RepresentationXY <- function(X, Y, node.classes, DrawGroup=TRUE) {
OrderX <- SortMatrix(node.classes,X); #sorted X#
OrderY <- SortMatrix(node.classes,Y); #sorted Y#
image(OrderY);
size=length(node.classes);
Xlie <- which(OrderX==1,arr.ind=TRUE);
  points((Xlie-1)/(size-1), pch=20); # rescale indices to [0,1] so the points overlay image()
temp <- rep(0, (max(node.classes)[1]+1));
if (DrawGroup==TRUE){
for (i in 1:max(node.classes)){
mq <- table(node.classes);
size <- length(node.classes);
temp[i+1] <- mq[i];
sum <- sum(temp);
axe <- sum/size;
abline(v=axe,h=axe,col="blue");
}
}
}
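## __________________________________________________________
##
## Note: SortMatrix() is used above but not defined in this file. A minimal
## implementation consistent with its use here (reorder the rows and columns of
## a square matrix so that nodes of the same class become contiguous) would be:
##
## SortMatrix <- function(classes, M){
##   ord <- order(classes);
##   M[ord, ord];
## }
## __________________________________________________________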
|
/representations.R
|
no_license
|
docteurZ/cohsmix
|
R
| false | false | 1,770 |
r
|
library(dfped)
### Name: doseRange
### Title: Dose-range for the paediatric population according to adult
### clearance, adult doses and paediatric clearance.
### Aliases: doseRange
### ** Examples
## Not run:
##D
##D ########
##D # Note: For this example we are using a paediatric database that we have including data of
##D # children from 0 to 19 years old.
##D ########
##D
##D
##D # Doses of adults
##D doseAd <- data.frame("d1" = 100, "d2" = 150,"d3" = 200,"d4"= 250,"d5" =300)
##D
##D Cl_ad <- 3.95
##D children <- read.csv("/Users/artemistoumazi/paediatric_data_p3m/children_0_19.csv")
##D AGE <- children$Age
##D W <- children$Weight
##D W_ad <- 70
##D Cl_ad <- 3.95
##D F_ad <- 0.6
##D Eg <- 0
##D Eh <- 0.058
##D f_abs <- F_ad/((1 - Eh)*(1-Eg))
##D fu_ad <- 1
##D perc_CYPh <- data.frame("CYP3A4_5" = 0.7, "CYP1A2" = 0.3)
##D perc_CYPg <- data.frame("CYP3A4_5" = 1)
##D perc_alb <- 1
##D perc_alpha1AG <- 0
##D
##D data_molecule <- list(F_ad, f_abs, Eg, Eh, fu_ad, perc_CYPg, perc_CYPh, perc_alb,
##D perc_alpha1AG)
##D
##D # Compute the clearance of children using maturation adjustment via
##D # the function Clch.Mat().
##D Clch_mat <- Clch.Mat(AGE, W, Cl_ad, W_ad, data_molecule)
##D
##D doseRange(Clch_mat, Cl_ad, doseAd)
## End(Not run)
|
/data/genthat_extracted_code/dfped/examples/doseRange.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 1,414 |
r
|
# patient screening
# run MBO query:
# * Patients - by Medication (Generic)
# - Facility (Curr): HH HERMANN;HH Trans Care;HH Rehab;HH Clinics
# - Date Only - Admit (Start): 7/1/2012 12:00:00 AM # in 12 month increments
# - Date Only - Admit (End): 1/1/2017 12:00:00 AM
# - Medication (Generic): WARFARIN, warfarin, apixaban, rivaroxaban,
# dabigatran, edoxaban
library(tidyverse)
library(lubridate)
library(stringr)
library(edwr)
library(icd)
screen_pts <- read_data("data/raw", "patients", FALSE) %>%
as.patients() %>%
filter(age >= 18,
discharge.datetime <= mdy("12/31/2016", tz = "US/Central"),
!(visit.type %in% c("Emergency", "Day Surgery", "Bedded Outpatient")))
mbo_doac <- concat_encounters(screen_pts$millennium.id)
# run EDW query:
# * Identifiers
screen_id <- read_data("data/raw", "identifiers") %>%
as.id()
first_visit <- screen_pts %>%
left_join(screen_id, by = "millennium.id") %>%
group_by(person.id) %>%
arrange(discharge.datetime, .by_group = TRUE) %>%
distinct(person.id, .keep_all = TRUE)
visit_mbo <- concat_encounters(first_visit$millennium.id)
visit_pie <- concat_encounters(first_visit$pie.id)
# run EDW query:
# * Lookup - Zipcodes - Encounter
# * Lookup - Zipcodes - Current
# use zip code from the encounter; if missing, use current zip code
mhhs_zipcodes <- read_csv("data/external/mhhs_region_zipcodes.csv") %>%
rename(zipcode = `USPS Zip Code`) %>%
mutate_at("zipcode", as.character)
screen_zipcodes <- read_data("data/raw", "zipcodes") %>%
distinct() %>%
rename(pie.id = `PowerInsight Encounter Id`,
zipcode = `Person Address- Zip Code`) %>%
mutate(zip5 = str_extract(zipcode, "....."))
screen_zip_curr <- read_data("data/raw", "zip-curr") %>%
distinct() %>%
rename(pie.id = `PowerInsight Encounter Id`,
zip_curr = `Person Address- Zip Code-Curr`) %>%
mutate(zip5_curr = str_extract(zip_curr, "....."))
zips <- full_join(screen_zipcodes, screen_zip_curr, by = "pie.id") %>%
mutate(zip_pt = coalesce(zip5, zip5_curr))
local_pts <- zips %>%
  filter(zip_pt %in% mhhs_zipcodes$zipcode) %>%
distinct(pie.id)
local_pie <- concat_encounters(local_pts$pie.id)
# run EDW query:
# * Medications - Home and Discharge - All
meds_home <- read_data("data/raw", "meds-home") %>%
as.meds_home()
# remove pts on oac at home
oac <- c("warfarin", "apixaban", "rivaroxaban", "dabigatran", "edoxaban")
home_oac <- meds_home %>%
filter(med %in% oac,
med.type == "Recorded / Home Meds") %>%
distinct(pie.id)
new_oac <- anti_join(local_pts, home_oac, by = "pie.id")
# find pts with d/c Rx for oac
dc_oac <- meds_home %>%
semi_join(new_oac, by = "pie.id") %>%
filter(med %in% oac,
med.type == "Prescription / Discharge Order")
# remove pts with > 1 oac on d/c
dc_pts <- dc_oac %>%
distinct(pie.id, med) %>%
count(pie.id) %>%
filter(n == 1)
oac_med <- distinct(dc_oac, pie.id, med)
oac_pie <- concat_encounters(dc_pts$pie.id)
# run EDW query:
# * Diagnosis (ICD-9/10-CM) - All
# ICD-9-CM
# Afib/flutter: 427.31, 427.32
# DVT, acute: 453.82, 453.83, 453.84, 453.85, 453.86, 453.87, 453.89, 453.9
# PE, acute: 415.12, 415.13, 415.19
indications_9 <- c("427.31", "427.32", "453.82", "453.83", "453.84", "453.85",
"453.86", "453.87", "453.89", "453.9", "415.12", "415.13",
"415.19")
cols <- fwf_empty("data/external/2016_I9gem.txt", col_names = c("icd9", "icd10", "other"))
icd10_gem <- read_fwf("data/external/2016_I9gem.txt", cols) %>%
filter(icd10 != "NoDx")
icd9_nod <- str_replace_all(indications_9, "\\.", "")
icd10 <- filter(icd10_gem, icd9 %in% icd9_nod)
indications <- c(icd9_nod, icd10$icd10)
diagnosis <- read_data("data/raw", "diagnosis") %>%
as.diagnosis() %>%
tidy_data()
screen_icd <- diagnosis %>%
mutate(icd = str_replace_all(diag.code, "\\.", "")) %>%
filter(icd %in% indications,
diag.type == "Final")
eligible_pts <- distinct(screen_icd, pie.id) %>%
left_join(screen_id, by = "pie.id") %>%
left_join(oac_med, by = "pie.id")
eligible_edw <- concat_encounters(eligible_pts$pie.id)
eligible_mbo <- concat_encounters(eligible_pts$millennium.id)
write_rds(eligible_pts, "data/tidy/eligible_pts.Rds", "gz")
# run MBO queries:
# * Demographics
# * Measures
# * Warfarin Information
# run EDW queries:
# * Labs - Pregnancy
# * Labs - Renal
|
/src/01_screen-patients.R
|
no_license
|
bgulbis/DOAC_Warfarin_Readmission
|
R
| false | false | 4,483 |
r
|
testlist <- list(bytes1 = c(-1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), pmutation = 0)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802384-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 482 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prettify.R
\name{prettify_strings}
\alias{prettify_strings}
\alias{prettify_de}
\alias{prettify_en}
\title{Replace/prettify matching words/terms in one vector by another}
\usage{
prettify_strings(x, current = .trans_df$english,
new = .trans_df$english_pretty)
prettify_de(x)
prettify_en(x)
}
\arguments{
\item{x}{A character vector (or factor) that should be renamed.}
\item{current}{A vector of characters (possibly a subset of \code{x}).
Entries in \code{x} that match entries in \code{current} will be renamed
according to entries in \code{new}.}
\item{new}{A vector of characters that will replace entries in \code{x} which
have matches in \code{current}.}
}
\description{
The function searches for values of \code{x} that occur in \code{current}
and replaces them with the corresponding entries in \code{new}. Useful for quick
renaming/translation of survey column names, e.g. via the internal object
\code{.trans_df}.
}
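% Illustrative sketch using only the documented arguments; the input values
% are hypothetical and the default \code{.trans_df}-based translation is not shown.
\examples{
\dontrun{
prettify_strings(
  x = c("cdu_csu", "spd", "turnout"),
  current = c("cdu_csu", "spd"),
  new = c("CDU/CSU", "SPD")
)
}
}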
\keyword{internal}
|
/man/prettify.Rd
|
permissive
|
fsolt/coalitions
|
R
| false | true | 1,006 |
rd
|
#' Do a redundancy analysis
#'
#' Perform a redundancy analysis (RA) as proposed by \insertCite{Hair2016;textual}{cSEM}
#' with reference to \insertCite{Chin1998;textual}{cSEM}.
#'
#' RA is confined to PLS-PM, specifically PLS-PM with at least one construct
#' whose weights are obtained by mode B. In cSEM this is the case if the construct
#' is modeled as a composite or if argument `.PLS_modes` was explicitly set to
#' mode B for at least one construct.
#' Hence RA is only conducted if `.approach_weights = "PLS-PM"` and if at least
#' one construct's mode is mode B.
#'
#' The principal idea of RA is to take two different measures of the
#' same construct and regress the scores obtained for each measure on each
#' other. If they are similar they are likely to measure the same "thing"
#' which is then taken as evidence that both measurement models actually
#' measure what they are supposed to measure (validity).
#'
#' There are several issues with the terminology and the reasoning behind this logic.
#' RA is therefore only implemented since reviewers are likely to demand
#' its computation; however, its actual application for validity assessment
#' is discouraged.
#'
#' Currently, the function is not applicable to models containing second-order
#' constructs.
#'
#' @usage doRedundancyAnalysis(.object = NULL)
#'
#' @return A named numeric vector of correlations. If
#' the weighting approach used to obtain `.object` is not `"PLS-PM"` or
#' none of the PLS outer modes is mode B, the function silently returns `NA`.
#'
#' @inheritParams csem_arguments
#'
#' @seealso [cSEMResults]
#'
#' @references
#' \insertAllCited{}
#' @export
doRedundancyAnalysis <- function(.object = NULL) {
if(inherits(.object, "cSEMResults_multi")) {
out <- lapply(.object, doRedundancyAnalysis)
class(out) <- c("cSEMRedundancyAnalysis", "cSEMRedundancyAnalysis_multi")
return(out)
} else if(inherits(.object, "cSEMResults_2ndorder")) {
stop2("Currently, `doRedundancyAnalysis()` is not implemented for models",
"containing higher-order constructs.")
} else {
if(.object$Information$Arguments$.approach_weights == "PLS-PM") {
modes <- .object$Information$Weight_info$Modes
modesB <- modes[modes == "modeB"]
if(length(modesB) > 0) {
args <- .object$Information$Arguments
# Functions resampleData() and testMICOM require the .id argument.
# It is therefore present in the Arguments list although the
# data set in the Arguments list does not contain the id column anymore.
# Therefore .id needs to be set to NULL
args[[".id"]] <- NULL
new_modes <- as.list(modes)
beta <- c()
for(j in names(modesB)) {
new_modes_j <- new_modes
new_modes_j[j] <- "modeA"
args[[".PLS_modes"]] <- new_modes_j
res_reflective <- do.call(csem, args)
Y <- res_reflective$Estimates$Construct_scores
X <- .object$Estimates$Construct_scores
beta[j] <- c(solve(t(X[,j]) %*% X[,j]) %*% t(X[,j]) %*% Y[,j])
}
} else {
beta <- NA
}
} else {
beta <- NA
}
class(beta) <- "cSEMRedundancyAnalysis"
return(beta)
}
}
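# Illustrative usage sketch (kept in comments so the file has no side effects
# when sourced): assumes a model with at least one composite (mode B)
# construct fitted via csem(); the model string and `my_data` are placeholders,
# not objects shipped with the package.
#
# model <- "
#   eta1 <~ y11 + y12 + y13   # composite, estimated with mode B weights
#   eta2 =~ y21 + y22 + y23   # common factor
#   eta2 ~ eta1               # structural relation
# "
# res <- csem(.data = my_data, .model = model)
# doRedundancyAnalysis(res)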
|
/R/postestimate_doRedundancyAnalysis.R
|
no_license
|
RicoDiel/cSEM
|
R
| false | false | 3,348 |
r
|
context("im3d io")
test_that("we can read im3d files",{
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"),'im3d')
expect_is(d,'array')
expect_true(is.integer(d))
expect_equal(sum(d!=0), 28669)
expect_equal(read.im3d("testdata/nrrd/LHMask.nhdr"), d)
expect_is(d0<-read.im3d("testdata/nrrd/LHMask.nrrd", ReadData=FALSE),'im3d')
expect_equal(dim(d0), dim(d))
expect_equal(length(d0), 0L)
amfile="testdata/amira/AL-a_M.am"
expect_is(d<-read.im3d(amfile), 'im3d')
expect_is(d,'array')
expect_equivalent(dim(d), c(154L, 154L, 87L))
expect_is(d0<-read.im3d(amfile, ReadData=FALSE), 'im3d')
expect_equivalent(dim(d0), c(154L, 154L, 87L))
amfilenoam=tempfile()
file.copy(normalizePath(amfile),amfilenoam)
on.exit(unlink(amfilenoam))
expect_equal(d,read.im3d(amfilenoam))
expect_error(read.im3d("testdata/nrrd/LHMask.rhubarb"))
v3drawfile1ch='testdata/v3draw/L1DS1_crop_straight_crop_ch1.v3draw'
v3drawfile2ch='testdata/v3draw/L1DS1_crop_straight_crop.v3draw'
v3drawfile2chslice='testdata/v3draw/L1DS1_crop_straight_crop_slice.v3draw'
expect_error(read.im3d(v3drawfile2ch), "im3d is restricted to 3D")
expect_equal(x<-read.im3d(v3drawfile2ch, chan=1), y<-read.im3d(v3drawfile1ch))
expect_equal(x[,,1], read.im3d(v3drawfile2chslice)[,,1])
# nb we can't test for strict equality because read.im3d.vaa3draw adds a
# boundingbox in this case whereas read.nrrd does not
expect_equal(dim(read.im3d('testdata/v3draw/L1DS1_crop_straight_crop_ch1.nhdr')),
dim(y))
# check that we can read metadata only
expect_equal(boundingbox(read.im3d(v3drawfile1ch, ReadData = F)),
boundingbox(x))
})
test_that("round trip test for im3d is successful",{
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"),'im3d')
dir.create(td<-tempfile())
tf=tempfile(tmpdir = td, fileext='.nrrd')
on.exit(unlink(td, recursive = TRUE))
write.im3d(d, tf, dtype='byte')
expect_is(d2<-read.im3d(tf),'im3d')
expect_equal(d2, d, tol=1e-6)
tf2=tempfile(fileext='.rhubarb')
expect_error(write.im3d(d, tf2))
tf3=tempfile(tmpdir = td, fileext='.nhdr')
# also check detached nrrd
expect_is(write.im3d(d, tf3, dtype='byte'), 'character')
expect_equal(d3<-read.im3d(tf3), d, tol=1e-6)
expect_true(file.exists(sub("\\.nhdr$",".raw.gz",tf3)))
# check nrrd header fields as well in detail
h1=attr(d,'header')
expect_equal(attr(d3,'header')[names(h1)], h1[names(h1)], tol=1e-6)
# AmiraMesh round trip
tf4 <- tempfile(tmpdir = td, fileext='.am')
expect_equal(write.im3d(d, tf4, enc = 'hxzip'), tf4)
expect_equal(d2 <- read.im3d(tf4), d)
expect_equal(attr(d2, 'dataDef')[, 'HxType'], "HxZip")
})
context("im3d")
test_that("we can set bounding box",{
z=im3d(BoundingBox=c(0,1,0,2,0,4), dims=c(2,3,4))
z1=z
boundingbox(z1)<-boundingbox(z)
expect_equal(z, z1)
# set bounding box with an im3d object
z2=z
boundingbox(z2)<-z
expect_equal(z, z2)
boundingbox(z2)<-NULL
expect_true(is.null(attr(z2,'BoundingBox')))
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"),'im3d')
z3=z
boundingbox(z3)<-boundingbox(d)
expect_equal(boundingbox(z3), boundingbox(d))
z4=z
boundingbox(z4)<-boundingbox("testdata/nrrd/LHMask.nrrd")
expect_equal(boundingbox(z4), boundingbox(d))
})
test_that("we can use bounds to set im3d bounding box",{
expect_equal(im3d(dims=c(2,3,2), bounds=c(0,2,0,3,0,1)),
im3d(dims=c(2,3,2), voxdims=c(1,1,0.5), origin=c(0.5,0.5,0.25)))
})
test_that("we can construct an im3d using an im3d to supply attributes",{
d=rnorm(1000)
x=im3d(d, dims=c(10, 10, 10), BoundingBox=c(20,200,100,200,200,300))
expect_equal(x, im3d(x))
expect_equal(x, im3d(d, x))
x2=x
boundingbox(x2)=boundingbox(x)*2
# override bounding box
expect_equal(x2, im3d(x, BoundingBox=c(20,200,100,200,200,300)*2))
})
test_that("we can construct an im3d with additional attributes",{
d=rnorm(1000)
x=im3d(d, dims=c(10, 10, 10), BoundingBox=c(20,200,100,200,200,300),
units='microns',
materials=data.frame(name='Exterior',id=0,col=rgb(1,0,0)))
expect_is(x, "im3d")
expect_equal(attr(x, 'units'), 'microns')
})
context("materials")
test_that("we can read materials from an im3d or a file on disk",{
f='testdata/amira/LHMask.Labels.rle.am'
baseline = data.frame(
name = c("Exterior", "Inside"),
id = 1:2,
col = c("black", "#E02525"),
stringsAsFactors = F
)
rownames(baseline)=baseline$name
expect_equal(materials(f), baseline)
expect_equal(materials(read.im3d(f)), baseline)
})
context("converting points to volumes")
test_that("we can construct an im3d from a set of points",{
expect_is(im<-as.im3d(xyzmatrix(kcs20), voxdims=c(1,1,1)), "im3d")
dims=c(122, 100, 83)
expect_equivalent(dim(im), dims)
expect_equal(voxdims(im), c(1, 1, 1))
orig=apply(xyzmatrix(kcs20), 2, min)
expect_equal(boundingbox(im), structure(matrix(c(orig, orig+dims-1), ncol=3, byrow = T),
class='boundingbox'), tol=1e-6)
expect_is(im<-as.im3d(xyzmatrix(kcs20), voxdims=c(1, 1, 1),
BoundingBox=c(250, 410, 0, 130, 0, 120)), "im3d")
expect_equal(dim(im), c(161, 131, 121))
testim=im3d(dims = c(256, 128, 105),
voxdims = c(0.622087976539589, 0.622088062622309, 0.62208801843318))
expect_is(im2<-as.im3d(xyzmatrix(kcs20), testim), 'im3d')
expect_equal(boundingbox(im2), boundingbox(testim))
expect_equal(dim(im2), dim(testim))
expect_warning(as.im3d(xyzmatrix(kcs20), testim, origin = c(3,4,5)))
})
context("im3d boundingbox and friends")
test_that("dim, voxdims and boundingbox work",{
expect_equal(makeboundingbox(c(x0=0,x1=10,y0=0,y1=20,z0=0,z1=30), dims=c(1,2,3),
input='bounds'),
makeboundingbox(c(5, 5, 5, 15, 5, 25)))
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"), 'im3d')
expect_equal(dim(d),c(50,50,50))
expect_is(d0<-read.im3d("testdata/nrrd/LHMask.nrrd", ReadData=FALSE), 'im3d')
expect_equal(dim(d0),c(50,50,50))
expect_equal(voxdims(d), c(1.4, 1.4, 1.4))
bb_base=structure(c(0, 68.6, 0, 68.6, 0, 68.6), .Dim = 2:3, class='boundingbox')
expect_equal(boundingbox(d), bb_base)
expect_equal(boundingbox.character("testdata/nrrd/LHMask.nrrd"), bb_base)
bbdf=as.data.frame(unclass(bb_base))
expect_equal(boundingbox(bbdf),bb_base)
expect_is(am<-read.im3d("testdata/amira/VerySmallLabelField.am",
SimplifyAttributes=TRUE), 'im3d')
expect_equivalent(dim(am),c(2L,2L,1L))
expect_equal(voxdims(am),c(0.5,0.5,2))
# somewhat oddly, Amira decides that if dim=1 for any axis, the bounding
# box will not be 0 or infinite, but the size that would be expected for dim=2
expect_equal(boundingbox(am),structure(c(0, 0.5, 0, 0.5, 0, 2), .Dim = 2:3,
class='boundingbox'))
expect_is(nrrd<-read.im3d("testdata/amira/VerySmallLabelField.nrrd",
SimplifyAttributes=TRUE), 'im3d')
expect_equivalent(dim(am),c(2L,2L,1L))
expect_equal(voxdims(am),c(0.5,0.5,2))
# these should be equal when SimplifyAttributes=TRUE
expect_equal(nrrd, am)
expect_true(is.raw(nrrdraw<-read.im3d(ReadByteAsRaw=TRUE,
"testdata/amira/VerySmallLabelField.nrrd", SimplifyAttributes=TRUE)))
expect_true(is.raw(amraw<-read.im3d(ReadByteAsRaw=TRUE,
"testdata/amira/VerySmallLabelField.am", SimplifyAttributes=TRUE)))
# ... and again
expect_equal(nrrdraw, amraw)
kcs20bb=structure(c(284.594, 404.6951, 24.1869, 122.9557, 21.4379, 102.8015
), .Dim = 2:3, class = "boundingbox")
expect_equal(boundingbox(kcs20), kcs20bb, tol=1e-4)
})
context("im3d flip, slice and projection")
test_that("we can flip arrays",{
m=matrix(1:4, ncol=2, nrow=2, byrow=TRUE)
# NB the orientation is determined by matching x to
mf1=rbind(c(3,4),c(1,2))
mf2=rbind(c(2,1),c(4,3))
expect_equal(flip(m), mf1)
expect_equal(flip(m,flipdim=2), mf2)
expect_equal(flip(m,flipdim='y'), mf2)
expect_error(flip(m,flipdim='z'))
a6=array(1:6,1:3)
# singleton x dimension so flip has no effect
expect_equal(flip(a6), a6)
expect_equal(flip(a6, 2), array(c(2,1,4,3,6,5),1:3))
expect_equal(flip(a6, 3), array(c(5,6,3,4,1,2),1:3))
})
test_that("we can slice out subarray from image",{
i=im3d(array(1:6,1:3),voxdims=c(2,3,4))
i2=im3d(array(1:4,c(1,2,2)),voxdims=c(2,3,4))
expect_equal(imslice(i, 1:2, drop=FALSE), i2)
i4=im3d(array(1:6,2:3),dims=c(1,2,3),voxdims=c(2,3,4))
expect_equal(imslice(i, 1, 'x'), i4)
i3=im3d(array(1:4,c(2,2)),voxdims=c(2,3,4))
expect_equal(imslice(i, 1:2), i3)
# check that we can successfully extract the position of slice in new
# singleton dimension
i5=im3d(array(1:24, dim = c(2,3,4)),voxdims=c(2,3,4))
expect_equal(attr(imslice(i5,2),'z'), 4)
})
test_that("we can make projections",{
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"), 'im3d')
expect_equal(dim(d),c(50,50,50))
pd<-projection(d,projfun='sum')
sd=read.im3d("testdata/nrrd/LHMask_sum.nrrd")
expect_equal(pd, sd)
})
context("im3d unmask, mask, threshold")
test_that("unmask works",{
i=im3d(array(1:6,1:3),voxdims=c(2,3,4))
# unmask a vector of im3d contents by original im3d returns original
expect_equal(unmask(as.vector(i),i),i)
})
test_that("mask works",{
m=im3d(array(1:6,1:3),voxdims=c(2,3,4))
materials(m)<-data.frame(name=c('left','right'), id=2:3)
i=im3d(array(1:6,1:3), voxdims=c(2,3,4))
# unmask a vector of im3d contents by original im3d returns original
expect_is(mask(i, m, levels = 1), 'im3d')
expect_is(mask(i, m, levels = 1, rval='values'), 'integer')
expect_equal(sum(mask(i, m, levels = 1, invert = TRUE)), sum(2:6))
expect_equal(sum(mask(i, m, levels = c("left", "right"))), sum(1:2))
expect_equal(mask(i, m), i)
expect_warning(sum(mask(i, m, levels = c("rhubarb"))), "Dropping levels")
})
test_that("threshold works",{
i=im3d(array(rep(TRUE, 6), 1:3),voxdims=c(2, 3, 4))
# threshold a vector of logicals gives back the vector
expect_equal(threshold(i, 0), i)
# threshold a vector of integers gives appropriate logical vector
i2=im3d(array(1:6, 1:3), voxdims=c(2, 3, 4))
expect_equal(threshold(i2, 0), i)
  # also works with logical input
expect_equal(threshold(i2, i2>0), i)
# can also use integer or raw modes
iraw=i
mode(iraw)='raw'
expect_equal(threshold(i2, 0, mode='raw'), iraw)
iint=i
mode(iint)='integer'
expect_equal(threshold(i2, 0, mode='integer'), iint)
})
context("im3d coordinate utilities")
test_that("xyzpos, ijkpos and imexpand.grid work",{
d=im3d(dim=c(20,30,40),origin=c(10,20,30),voxdims=c(1,2,3))
o=origin(d)
expect_equal(ijkpos(d,o), c(1,1,1))
expect_equal(xyzpos(d,c(1,1,1)), o)
far_corner=boundingbox(d)[c(2,4,6)]
expect_equal(ijkpos(d,far_corner), dim(d))
expect_equal(xyzpos(d,dim(d)), far_corner)
# round trip for 10 random points
set.seed(42)
ijks=mapply(sample,dim(d),10)
expect_equal(ijkpos(d,xyzpos(d,ijks)), ijks)
# check that imexpand.grid coords match direct translation of all indices
# by xyzpos
all_ijks=arrayInd(seq.int(prod(dim(d))), dim(d))
expect_equal(imexpand.grid(d), xyzpos(d,all_ijks))
})
context("clampmax")
test_that("clampmax works",{
# basic tests
expect_is(cf<-clampmax(-10, 10),'function')
expect_equal(cf(10, 20, Inf), NA_real_)
expect_equal(cf(5, 10, 20, Inf, na.rm = TRUE), 10)
expect_equal(cf(c(5, 10, 20, Inf), na.rm = TRUE), 10)
expect_is(cf2<-clampmax(-10, 10, replace.infinite = FALSE),'function')
expect_equal(cf2(10, 20, Inf), 10)
expect_equal(cf2(10, 20, NA, Inf, na.rm=TRUE), 10)
expect_equal(cf2(10, 20, NA, Inf, na.rm=FALSE), NA_real_)
# in combination with projection
LHMask=read.im3d('testdata/nrrd/LHMask.nrrd')
d=unmask(rnorm(sum(LHMask),mean=5,sd=5),LHMask)
p=projection(d,projfun=clampmax(0,10))
expect_true(max(p, na.rm=T)<=10)
expect_true(min(p, na.rm=T)>=0)
})
context("im3d plotting")
test_that("image.im3d works",{
LHMask=read.im3d('testdata/nrrd/LHMask.nrrd')
op=par(no.readonly = TRUE)
layout(matrix(c(1, 2), ncol = 2L), widths = c(1, 0.2))
baseline=list(zlim = 0:1, nlevels.actual = 21L, nlevels.orig = 20,
levels = c(0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95,
1),
colors = c("#000080", "#002894", "#0050A8", "#0078BC",
"#00A1D0", "#00C9E4", "#00F1F8", "#1AFFE4", "#43FFBB",
"#6BFF93", "#93FF6B", "#BBFF43", "#E4FF1A", "#FFF100",
"#FFC900", "#FFA100", "#FF7800", "#FF5000", "#FF2800",
"#FF0000")
)
expect_equal(rval<-image(imslice(LHMask,10), asp=TRUE), baseline)
expect_null(imscalebar(rval))
par(op)
})
|
/tests/testthat/test-im3d.R
|
no_license
|
natverse/nat
|
R
| false | false | 12,974 |
r
|
context("im3d io")
test_that("we can read im3d files",{
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"),'im3d')
expect_is(d,'array')
expect_true(is.integer(d))
expect_equal(sum(d!=0), 28669)
expect_equal(read.im3d("testdata/nrrd/LHMask.nhdr"), d)
expect_is(d0<-read.im3d("testdata/nrrd/LHMask.nrrd", ReadData=FALSE),'im3d')
expect_equal(dim(d0), dim(d))
expect_equal(length(d0), 0L)
amfile="testdata/amira/AL-a_M.am"
expect_is(d<-read.im3d(amfile), 'im3d')
expect_is(d,'array')
expect_equivalent(dim(d), c(154L, 154L, 87L))
expect_is(d0<-read.im3d(amfile, ReadData=FALSE), 'im3d')
expect_equivalent(dim(d0), c(154L, 154L, 87L))
amfilenoam=tempfile()
file.copy(normalizePath(amfile),amfilenoam)
on.exit(unlink(amfilenoam))
expect_equal(d,read.im3d(amfilenoam))
expect_error(read.im3d("testdata/nrrd/LHMask.rhubarb"))
v3drawfile1ch='testdata/v3draw/L1DS1_crop_straight_crop_ch1.v3draw'
v3drawfile2ch='testdata/v3draw/L1DS1_crop_straight_crop.v3draw'
v3drawfile2chslice='testdata/v3draw/L1DS1_crop_straight_crop_slice.v3draw'
expect_error(read.im3d(v3drawfile2ch), "im3d is restricted to 3D")
expect_equal(x<-read.im3d(v3drawfile2ch, chan=1), y<-read.im3d(v3drawfile1ch))
expect_equal(x[,,1], read.im3d(v3drawfile2chslice)[,,1])
# nb we can't test for strict equality because read.im3d.vaa3draw adds a
# boundingbox in this case whereas read.nrrd does not
expect_equal(dim(read.im3d('testdata/v3draw/L1DS1_crop_straight_crop_ch1.nhdr')),
dim(y))
# check that we can read metadata only
expect_equal(boundingbox(read.im3d(v3drawfile1ch, ReadData = F)),
boundingbox(x))
})
test_that("round trip test for im3d is successful",{
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"),'im3d')
dir.create(td<-tempfile())
tf=tempfile(tmpdir = td, fileext='.nrrd')
on.exit(unlink(td, recursive = TRUE))
write.im3d(d, tf, dtype='byte')
expect_is(d2<-read.im3d(tf),'im3d')
expect_equal(d2, d, tol=1e-6)
tf2=tempfile(fileext='.rhubarb')
expect_error(write.im3d(d, tf2))
tf3=tempfile(tmpdir = td, fileext='.nhdr')
# also check detached nrrd
expect_is(write.im3d(d, tf3, dtype='byte'), 'character')
expect_equal(d3<-read.im3d(tf3), d, tol=1e-6)
expect_true(file.exists(sub("\\.nhdr$",".raw.gz",tf3)))
# check nrrd header fields as well in detail
h1=attr(d,'header')
expect_equal(attr(d3,'header')[names(h1)], h1[names(h1)], tol=1e-6)
# AmiraMesh round trip
tf4 <- tempfile(tmpdir = td, fileext='.am')
expect_equal(write.im3d(d, tf4, enc = 'hxzip'), tf4)
expect_equal(d2 <- read.im3d(tf4), d)
expect_equal(attr(d2, 'dataDef')[, 'HxType'], "HxZip")
})
context("im3d")
test_that("we can set bounding box",{
z=im3d(BoundingBox=c(0,1,0,2,0,4), dims=c(2,3,4))
z1=z
boundingbox(z1)<-boundingbox(z)
expect_equal(z, z1)
# set bounding box with an im3d object
z2=z
boundingbox(z2)<-z
expect_equal(z, z2)
boundingbox(z2)<-NULL
expect_true(is.null(attr(z2,'BoundingBox')))
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"),'im3d')
z3=z
boundingbox(z3)<-boundingbox(d)
expect_equal(boundingbox(z3), boundingbox(d))
z4=z
boundingbox(z4)<-boundingbox("testdata/nrrd/LHMask.nrrd")
expect_equal(boundingbox(z4), boundingbox(d))
})
test_that("we can use bounds to set im3d bounding box",{
expect_equal(im3d(dims=c(2,3,2), bounds=c(0,2,0,3,0,1)),
im3d(dims=c(2,3,2), voxdims=c(1,1,0.5), origin=c(0.5,0.5,0.25)))
})
test_that("we can construct an im3d using an im3d to supply attributes",{
d=rnorm(1000)
x=im3d(d, dims=c(10, 10, 10), BoundingBox=c(20,200,100,200,200,300))
expect_equal(x, im3d(x))
expect_equal(x, im3d(d, x))
x2=x
boundingbox(x2)=boundingbox(x)*2
# override bounding box
expect_equal(x2, im3d(x, BoundingBox=c(20,200,100,200,200,300)*2))
})
test_that("we can construct an im3d with additional attributes",{
d=rnorm(1000)
x=im3d(d, dims=c(10, 10, 10), BoundingBox=c(20,200,100,200,200,300),
units='microns',
materials=data.frame(name='Exterior',id=0,col=rgb(1,0,0)))
expect_is(x, "im3d")
expect_equal(attr(x, 'units'), 'microns')
})
context("materials")
test_that("we can read materials from an im3d or a file on disk",{
f='testdata/amira/LHMask.Labels.rle.am'
baseline = data.frame(
name = c("Exterior", "Inside"),
id = 1:2,
col = c("black", "#E02525"),
stringsAsFactors = F
)
rownames(baseline)=baseline$name
expect_equal(materials(f), baseline)
expect_equal(materials(read.im3d(f)), baseline)
})
context("converting points to volumes")
test_that("we can construct an im3d from a set of points",{
expect_is(im<-as.im3d(xyzmatrix(kcs20), voxdims=c(1,1,1)), "im3d")
dims=c(122, 100, 83)
expect_equivalent(dim(im), dims)
expect_equal(voxdims(im), c(1, 1, 1))
orig=apply(xyzmatrix(kcs20), 2, min)
expect_equal(boundingbox(im), structure(matrix(c(orig, orig+dims-1), ncol=3, byrow = T),
class='boundingbox'), tol=1e-6)
expect_is(im<-as.im3d(xyzmatrix(kcs20), voxdims=c(1, 1, 1),
BoundingBox=c(250, 410, 0, 130, 0, 120)), "im3d")
expect_equal(dim(im), c(161, 131, 121))
testim=im3d(dims = c(256, 128, 105),
voxdims = c(0.622087976539589, 0.622088062622309, 0.62208801843318))
expect_is(im2<-as.im3d(xyzmatrix(kcs20), testim), 'im3d')
expect_equal(boundingbox(im2), boundingbox(testim))
expect_equal(dim(im2), dim(testim))
expect_warning(as.im3d(xyzmatrix(kcs20), testim, origin = c(3,4,5)))
})
context("im3d boundingbox and friends")
test_that("dim, voxdims and boundingbox work",{
expect_equal(makeboundingbox(c(x0=0,x1=10,y0=0,y1=20,z0=0,z1=30), dims=c(1,2,3),
input='bounds'),
makeboundingbox(c(5, 5, 5, 15, 5, 25)))
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"), 'im3d')
expect_equal(dim(d),c(50,50,50))
expect_is(d0<-read.im3d("testdata/nrrd/LHMask.nrrd", ReadData=FALSE), 'im3d')
expect_equal(dim(d0),c(50,50,50))
expect_equal(voxdims(d), c(1.4, 1.4, 1.4))
bb_base=structure(c(0, 68.6, 0, 68.6, 0, 68.6), .Dim = 2:3, class='boundingbox')
expect_equal(boundingbox(d), bb_base)
expect_equal(boundingbox.character("testdata/nrrd/LHMask.nrrd"), bb_base)
bbdf=as.data.frame(unclass(bb_base))
expect_equal(boundingbox(bbdf),bb_base)
expect_is(am<-read.im3d("testdata/amira/VerySmallLabelField.am",
SimplifyAttributes=TRUE), 'im3d')
expect_equivalent(dim(am),c(2L,2L,1L))
expect_equal(voxdims(am),c(0.5,0.5,2))
# somewhat oddly, Amira decides that if dim=1 for any axis, the bounding
# box will not be 0 or infinite, but the size that would be expected for dim=2
expect_equal(boundingbox(am),structure(c(0, 0.5, 0, 0.5, 0, 2), .Dim = 2:3,
class='boundingbox'))
expect_is(nrrd<-read.im3d("testdata/amira/VerySmallLabelField.nrrd",
SimplifyAttributes=TRUE), 'im3d')
expect_equivalent(dim(am),c(2L,2L,1L))
expect_equal(voxdims(am),c(0.5,0.5,2))
# these should be equal when SimplifyAttributes=TRUE
expect_equal(nrrd, am)
expect_true(is.raw(nrrdraw<-read.im3d(ReadByteAsRaw=TRUE,
"testdata/amira/VerySmallLabelField.nrrd", SimplifyAttributes=TRUE)))
expect_true(is.raw(amraw<-read.im3d(ReadByteAsRaw=TRUE,
"testdata/amira/VerySmallLabelField.am", SimplifyAttributes=TRUE)))
# ... and again
expect_equal(nrrdraw, amraw)
kcs20bb=structure(c(284.594, 404.6951, 24.1869, 122.9557, 21.4379, 102.8015
), .Dim = 2:3, class = "boundingbox")
expect_equal(boundingbox(kcs20), kcs20bb, tol=1e-4)
})
context("im3d flip, slice and projection")
test_that("we can flip arrays",{
m=matrix(1:4, ncol=2, nrow=2, byrow=TRUE)
# NB the orientation is determined by matching x to
mf1=rbind(c(3,4),c(1,2))
mf2=rbind(c(2,1),c(4,3))
expect_equal(flip(m), mf1)
expect_equal(flip(m,flipdim=2), mf2)
expect_equal(flip(m,flipdim='y'), mf2)
expect_error(flip(m,flipdim='z'))
a6=array(1:6,1:3)
# singleton x dimension so flip has no effect
expect_equal(flip(a6), a6)
expect_equal(flip(a6, 2), array(c(2,1,4,3,6,5),1:3))
expect_equal(flip(a6, 3), array(c(5,6,3,4,1,2),1:3))
})
test_that("we can slice out subarray from image",{
i=im3d(array(1:6,1:3),voxdims=c(2,3,4))
i2=im3d(array(1:4,c(1,2,2)),voxdims=c(2,3,4))
expect_equal(imslice(i, 1:2, drop=FALSE), i2)
i4=im3d(array(1:6,2:3),dims=c(1,2,3),voxdims=c(2,3,4))
expect_equal(imslice(i, 1, 'x'), i4)
i3=im3d(array(1:4,c(2,2)),voxdims=c(2,3,4))
expect_equal(imslice(i, 1:2), i3)
# check that we can successfully extract the position of slice in new
# singleton dimension
i5=im3d(array(1:24, dim = c(2,3,4)),voxdims=c(2,3,4))
expect_equal(attr(imslice(i5,2),'z'), 4)
})
test_that("we can make projections",{
expect_is(d<-read.im3d("testdata/nrrd/LHMask.nrrd"), 'im3d')
expect_equal(dim(d),c(50,50,50))
pd<-projection(d,projfun='sum')
sd=read.im3d("testdata/nrrd/LHMask_sum.nrrd")
expect_equal(pd, sd)
})
context("im3d unmask, mask, threshold")
test_that("unmask works",{
i=im3d(array(1:6,1:3),voxdims=c(2,3,4))
# unmask a vector of im3d contents by original im3d returns original
expect_equal(unmask(as.vector(i),i),i)
})
test_that("mask works",{
m=im3d(array(1:6,1:3),voxdims=c(2,3,4))
materials(m)<-data.frame(name=c('left','right'), id=2:3)
i=im3d(array(1:6,1:3), voxdims=c(2,3,4))
# unmask a vector of im3d contents by original im3d returns original
expect_is(mask(i, m, levels = 1), 'im3d')
expect_is(mask(i, m, levels = 1, rval='values'), 'integer')
expect_equal(sum(mask(i, m, levels = 1, invert = TRUE)), sum(2:6))
expect_equal(sum(mask(i, m, levels = c("left", "right"))), sum(1:2))
expect_equal(mask(i, m), i)
expect_warning(sum(mask(i, m, levels = c("rhubarb"))), "Dropping levels")
})
test_that("threshold works",{
i=im3d(array(rep(TRUE, 6), 1:3),voxdims=c(2, 3, 4))
# threshold a vector of logicals gives back the vector
expect_equal(threshold(i, 0), i)
# threshold a vector of integers gives appropriate logical vector
i2=im3d(array(1:6, 1:3), voxdims=c(2, 3, 4))
expect_equal(threshold(i2, 0), i)
# also works with logica input
expect_equal(threshold(i2, i2>0), i)
# can also use integer or raw modes
iraw=i
mode(iraw)='raw'
expect_equal(threshold(i2, 0, mode='raw'), iraw)
iint=i
mode(iint)='integer'
expect_equal(threshold(i2, 0, mode='integer'), iint)
})
context("im3d coordinate utilities")
test_that("xyzpos, ijkpos and imexpand.grid work",{
d=im3d(dim=c(20,30,40),origin=c(10,20,30),voxdims=c(1,2,3))
o=origin(d)
expect_equal(ijkpos(d,o), c(1,1,1))
expect_equal(xyzpos(d,c(1,1,1)), o)
far_corner=boundingbox(d)[c(2,4,6)]
expect_equal(ijkpos(d,far_corner), dim(d))
expect_equal(xyzpos(d,dim(d)), far_corner)
# round trip for 10 random points
set.seed(42)
ijks=mapply(sample,dim(d),10)
expect_equal(ijkpos(d,xyzpos(d,ijks)), ijks)
# check that imexpand.grid coords match direct translation of all indices
# by xyzpos
all_ijks=arrayInd(seq.int(prod(dim(d))), dim(d))
expect_equal(imexpand.grid(d), xyzpos(d,all_ijks))
})
context("clampmax")
test_that("clampmax works",{
# basic tests
expect_is(cf<-clampmax(-10, 10),'function')
expect_equal(cf(10, 20, Inf), NA_real_)
expect_equal(cf(5, 10, 20, Inf, na.rm = TRUE), 10)
expect_equal(cf(c(5, 10, 20, Inf), na.rm = TRUE), 10)
expect_is(cf2<-clampmax(-10, 10, replace.infinite = FALSE),'function')
expect_equal(cf2(10, 20, Inf), 10)
expect_equal(cf2(10, 20, NA, Inf, na.rm=TRUE), 10)
expect_equal(cf2(10, 20, NA, Inf, na.rm=FALSE), NA_real_)
# in combination with projection
LHMask=read.im3d('testdata/nrrd/LHMask.nrrd')
d=unmask(rnorm(sum(LHMask),mean=5,sd=5),LHMask)
p=projection(d,projfun=clampmax(0,10))
expect_true(max(p, na.rm=T)<=10)
expect_true(min(p, na.rm=T)>=0)
})
context("im3d plotting")
test_that("image.im3d works",{
LHMask=read.im3d('testdata/nrrd/LHMask.nrrd')
op=par(no.readonly = TRUE)
layout(matrix(c(1, 2), ncol = 2L), widths = c(1, 0.2))
baseline=list(zlim = 0:1, nlevels.actual = 21L, nlevels.orig = 20,
levels = c(0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95,
1),
colors = c("#000080", "#002894", "#0050A8", "#0078BC",
"#00A1D0", "#00C9E4", "#00F1F8", "#1AFFE4", "#43FFBB",
"#6BFF93", "#93FF6B", "#BBFF43", "#E4FF1A", "#FFF100",
"#FFC900", "#FFA100", "#FF7800", "#FF5000", "#FF2800",
"#FF0000")
)
expect_equal(rval<-image(imslice(LHMask,10), asp=TRUE), baseline)
expect_null(imscalebar(rval))
par(op)
})
|
trackSpeed <- function(dist, bin=5, interval=1){
v <- c()
for (i in 1:(length(dist)-bin)){
v[i] <- sum(dist[i:(i+bin-1)])/bin
}
return(v/interval)
}
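# trackSpeed() returns a rolling mean of `bin` successive distances divided by
# the sampling interval, i.e. a smoothed speed trace. Illustrative use, kept in
# comments; `disp` is simulated here and stands in for real tracking output:
#
# disp <- abs(rnorm(100, mean = 2, sd = 0.5))    # frame-to-frame displacements
# v <- trackSpeed(disp, bin = 5, interval = 0.1) # distance units per time unit
# plot(v, type = "l", xlab = "frame", ylab = "speed")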
|
/RImageBook/R/trackSpeed.R
|
no_license
|
tkatsuki/rimagebook
|
R
| false | false | 166 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/efficiency_algorithms.R
\name{SeqKL}
\alias{SeqKL}
\title{Sequential Kullback-Leibler based algorithm for the MNL model.}
\usage{
SeqKL(
des = NULL,
cand.set,
n.alts,
par.draws,
alt.cte = NULL,
no.choice = NULL,
weights = NULL,
allow.rep = FALSE
)
}
\arguments{
\item{des}{A design matrix in which each row is a profile. If alternative
specific constants are present, those should be included as the first
column(s) of the design. Can be generated with \code{\link{Modfed}} or \code{\link{CEA}}.}
\item{cand.set}{A numeric matrix in which each row is a possible profile. The
\code{\link{Profiles}} function can be used to generate this matrix.}
\item{n.alts}{Numeric value indicating the number of alternatives per choice
set.}
\item{par.draws}{A matrix or a list, depending on \code{alt.cte}.}
\item{alt.cte}{A binary vector indicating for each alternative if an
alternative specific constant is desired.}
\item{no.choice}{An integer indicating the no choice alternative. The default
is \code{NULL}.}
\item{weights}{A vector containing the weights of the draws. Default is
\code{NULL}, See also \code{\link{ImpsampMNL}}.}
\item{allow.rep}{Logical value indicating whether repeated choice sets are
allowed in the design.}
}
\value{
\item{set}{Numeric matrix containing the choice set that maximizes the expected KL divergence.}
\item{kl}{Numeric value which is the Kullback-Leibler divergence.}
}
\description{
Selects the choice set that maximizes the Kullback-Leibler divergence between
the prior parameter values and the expected posterior, assuming a MNL model.
}
\details{
This algorithm is ideally used in an adaptive context. The algorithm selects
the choice set that maximizes the Kullback-Leibler
divergence between prior and expected posterior. Put differently, the
algorithm selects the choice set that maximizes the expected information
gain.
If \code{alt.cte = NULL}, \code{par.draws} should be a matrix in which each
row is a sample from the multivariate parameter distribution. In case that
\code{alt.cte} is not \code{NULL}, a list containing two matrices should be
provided to \code{par.draws}. The first matrix contains the parameter draws
for the alternative specific parameters; the second matrix contains the
draws for the rest of the parameters.
The list of potential choice sets is created using
\code{\link[utils]{combn}}. The \code{weights} argument can be used when the
\code{par.draws} have
weights. This is for example the case when parameter values are updated using
\code{\link{ImpsampMNL}}.
}
\examples{
# KL efficient choice set, given parameter draws.
# Candidate profiles
cs <- Profiles(lvls = c(3, 3), coding = c("E", "E"))
m <- c(0.3, 0.2, -0.3, -0.2) # Prior mean (4 parameters).
pc <- diag(length(m)) # Prior variance
set.seed(123)
ps <- MASS::mvrnorm(n = 10, mu = m, Sigma = pc) # 10 draws.
# Efficient choice set to add.
SeqKL(cand.set = cs, n.alts = 2, alt.cte = NULL, par.draws = ps, weights = NULL)
# KL efficient choice set, given parameter draws.
# Candidate profiles
cs <- Profiles(lvls = c(3, 3), coding = c("C", "E"), c.lvls = list(c(5,3,1)))
m <- c(0.7, 0.3, -0.3, -0.2) # Prior mean (4 parameters).
pc <- diag(length(m)) # Prior variance
set.seed(123)
ps <- MASS::mvrnorm(n = 10, mu = m, Sigma = pc) # 10 draws.
sample <- list(ps[ , 1], ps[ , 2:4])
ac <- c(1, 0) # Alternative specific constant.
# Efficient choice set to add.
SeqKL(cand.set = cs, n.alts = 2, alt.cte = ac, par.draws = sample, weights = NULL)
}
\references{
\insertRef{crabbe}{idefix}
}
|
/man/SeqKL.Rd
|
no_license
|
cran/idefix
|
R
| false | true | 3,718 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.configservice_package.R
\docType{package}
\name{paws.configservice-package}
\alias{paws.configservice}
\alias{paws.configservice-package}
\title{paws.configservice: AWS Config}
\description{
AWS Config provides a way to keep track of the
configurations of all the AWS resources associated with your AWS
account. You can use AWS Config to get the current and historical
configurations of each AWS resource and also to get information about
the relationship between the resources. An AWS resource can be an
Amazon Elastic Compute Cloud (Amazon EC2) instance, an Elastic Block Store
(EBS) volume, an elastic network interface (ENI), or a security group.
For a complete list of resources currently supported by AWS Config,
see [Supported AWS
Resources](http://docs.aws.amazon.com/config/latest/developerguide/resource-config-reference.html#supported-resources).
}
\author{
\strong{Maintainer}: David Kretch \email{david.kretch@gmail.com}
Authors:
\itemize{
\item Adam Banker \email{adam.banker39@gmail.com}
}
Other contributors:
\itemize{
\item Amazon.com, Inc. [copyright holder]
}
}
\keyword{internal}
|
/service/paws.configservice/man/paws.configservice-package.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false | true | 1,218 |
rd
|
library(ggplot2)
library(dplyr)
setwd("C:/source/bio/project") # should be altered
NAME <- 'vsm.hg38' # should be altered
OUT_DIR <- 'results/'
bed_df <- read.delim(paste0('data/', NAME, '.bed'), as.is = TRUE, header = FALSE)
colnames(bed_df) <- c('chrom', 'start', 'end', 'name', 'score')
bed_df$len <- bed_df$end - bed_df$start
head(bed_df)
ggplot(bed_df) +
aes(x = len) +
geom_histogram() +
ggtitle(NAME, subtitle = sprintf('Number of peaks = %s', nrow(bed_df))) +
theme_bw()
ggsave(paste0('len_hist.', NAME, '.pdf'), path = OUT_DIR)
|
/src/not_filtered_peaks.r
|
no_license
|
qwerty-Bk/hse21_H3K27me3_ZDNA_human
|
R
| false | false | 549 |
r
|
library(readr)
library(ggplot2)
library(jsonlite)
library(dplyr)
library(plyr)
library(tibble)
library(maps)
library(RCurl)
library(ggmap)
library(mapdata)
library(devtools)
library(lubridate)
library(DataExplorer)
library(ggpubr)
library(gridExtra)
library(tidyverse)
library(reshape2)
library(ggrepel)
library(viridis)
library(ggcorrplot)
library(gridExtra)
#
CS <- read_csv('Champions.csv')
View(CS)
#
dim(CS)
glimpse(CS)
#Most populated primary role by original and current Year_Released?
CS = CS[!(CS$Year_Released %in% c("N/A","2009","2019")),]
temp = subset(CS,select=c(Primary_Role,Year_Released,Name))
length(unique(temp$Primary_Role))
#
#x %>% f %>% g %>% h is equivalent to h(g(f(x)))
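# For example: c(4, 9) %>% sqrt %>% sum gives the same result as
# sum(sqrt(c(4, 9))), i.e. 5.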
temp = temp %>% group_by(Primary_Role) %>% summarise(count=n()) %>% arrange(desc(count))
ggplot(head(temp,10),aes(reorder(Primary_Role,count),count,fill=Primary_Role))+
geom_bar(stat="identity")+
theme(axis.text.x = element_text(angle=90,vjust=0.5),legend.position="none")+
ggtitle("Top 10 game Roles for OG and Current Year_Released Champions")+
coord_flip()+labs(x="Role",y="Count")
##Classes?
temp=CS %>% select(Class,Name) %>% group_by(Class) %>% summarise(count=n()) %>% arrange(desc(count))
temp$percentage= round((temp$count/sum(temp$count))*100,digits=2)
ggplot(temp,aes(Class,count,fill=Class))+geom_bar(stat="identity")+geom_label(aes(label=temp$percentage),size=2)+theme(axis.text.x = element_text(angle=90,vjust=0.3),plot.title = element_text(hjust=0.5,face='italic'),legend.position="none")+ggtitle("Class with most game releases")+labs(x="Class",y="Count")
#
temp = CS %>% select(Class,Year_Released)
require(plyr)
temp=ddply(temp,.(Class,Year_Released),transform,count=length(Class))
temp=unique(temp)
detach('package:plyr',TRUE)
ggplot(temp,aes(Year_Released,count,group=1,color=Class))+geom_line(size=1)+theme(axis.text.x = element_text(angle=90,vjust=0.3),plot.title = element_text(hjust=0.5,face='italic'),legend.position="none")+ggtitle("Trend of Class release by Year")+labs(x="Year",y="Count")+facet_wrap(~Class,scales='free_y',ncol=4)
#Most successful role?
temp = CS %>% select(Primary_Role,Win_Rate,Year_Released)
ggplot(temp,aes(Primary_Role,Win_Rate,fill=Primary_Role))+geom_boxplot(stat="boxplot",position="dodge",outlier.color="red")+theme(axis.text.x = element_text(angle=90,vjust=0.3),plot.title = element_text(hjust=0.5,face='italic'),plot.subtitle = element_text(hjust=0.5,face='bold'),legend.position="bottom")+ggtitle("Trend of Overall Winrate")+labs(x="Primary_Role",y="Mean Winrate",subtitle="Winrate")+scale_y_log10()
#Most Banworthy Champs for s9 so far?
temp = CS %>% select(Name,Ban_Rate,Primary_Role) %>% arrange(desc(Ban_Rate))
ggplot(head(temp,20),aes(factor(Name,levels=Name),Ban_Rate,fill=Primary_Role))+geom_bar(stat="identity")+theme(axis.text.x = element_text(angle=90,vjust=0.3),plot.title = element_text(hjust=0.5,face='italic'),legend.position="bottom")+ggtitle("Top 20 Banworthy Champs")+labs(x="Name",y="Banrate")
#
top3class = CS %>% group_by(Class) %>% summarise(count=n()) %>% arrange(desc(count)) %>% head(3)
temp = CS[CS$Class %in% top3class$Class,]
temp = temp %>% group_by(Class,Year_Released) %>% summarize(count=n())
ggplot(temp,aes(Year_Released,Class,fill=count))+geom_tile(color="white",size=0.4)+theme(axis.text.x = element_text(size=10,hjust=0.5),plot.title=element_text(hjust=0.5,face='italic'),legend.position="bottom",legend.key.width = unit(3, "cm"))+ggtitle("Pick_Rate Trends over the Years")+scale_fill_viridis(
name="count",
option = 'C',
direction = -1,
na.value = "white",
limits = c(0, max(temp$count)))
#Count vs. Pickrate
temp= CS[CS$Class %in% top3class$Class,]
temp = temp %>% group_by(Class) %>% summarise(meanpick=mean(Pick_Rate)) %>% arrange(desc(meanpick))
temp = merge(top3class,temp,by="Class")
ggplot(temp,aes(x=count,y=meanpick,col=factor(Class),size=meanpick))+
geom_point(alpha=0.4)+theme(legend.position="bottom",plot.title = element_text(size=10,hjust=0.5))+labs(title="Mean Pick Rate",col="Class")
|
/LoL.R
|
no_license
|
RyanKirwan/Exploring-LoL-Datasets
|
R
| false | false | 4,102 |
r
|
getwd()
mydata= read.csv('C:\\Users\\benve\\Documents\\university\\year3\\CS3002\\Labs\\Lab 1\\forestfires.csv',sep=',')
plot(mydata)
View(mydata)
#refer by column name
plot(mydata$temp, mydata$wind)
#refer by index
plot(mydata[,9],mydata[,11])
#histogram
hist(mydata$temp)
#line plots
plot(mydata$temp,type="l")
#plot colours
plot(mydata$X, mydata$Y, col=mydata$temp)
#calculate mean
meantemp = mean(mydata$temp)
write.csv(meantemp, file = "C:\\Users\\benve\\Documents\\university\\year3\\CS3002\\Labs\\Lab 1\\Output.csv")
#linear model with regression
plot(mydata$temp,mydata$ISI)
lmfire=lm(mydata$ISI~mydata$temp)
abline(coef(lmfire))
|
/Lab 1/lab1.R
|
no_license
|
Benveer/cs3002-labs
|
R
| false | false | 654 |
r
|
# UI definition
shinyUI(basicPage(
fluidRow(
column(width = 4,
plotOutput("plot", height=300,
click = "plot_click", # Equiv, to click=clickOpts(id="plot_click")
hover = hoverOpts(id = "plot_hover", delayType = "throttle"),
brush = brushOpts(id = "plot_brush")
),
h4("Clicked points"),
tableOutput("plot_clickedpoints"),
h4("Brushed points"),
dataTableOutput("plot_brushedpoints")
),
column(width = 4,
verbatimTextOutput("plot_clickinfo"),
verbatimTextOutput("plot_hoverinfo")
),
column(width = 4,
wellPanel(actionButton("newplot", "New plot")),
verbatimTextOutput("plot_brushinfo")
)
)
) )
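# Reference note (added): the string shorthand click = "plot_click" above is equivalent to the
# explicit click = clickOpts(id = "plot_click"); hover and brush use their *Opts() helpers so that
# extra options such as delayType and the brush id can be set.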
|
/programacion R/Shiny/click/ui.R
|
no_license
|
coalescencia/data-science
|
R
| false | false | 776 |
r
|
context("rbind.mids")
expect_warning(imp1 <<- mice(nhanes[1:13, ], m = 2, maxit = 1, print = FALSE))
test_that("Constant variables are not imputed by default", {
expect_equal(sum(is.na(complete(imp1))), 6L)
})
expect_warning(imp1b <<- mice(nhanes[1:13, ], m = 2, maxit = 1, print = FALSE, remove.constant = FALSE))
test_that("Constant variables are imputed for remove.constant = FALSE", {
expect_equal(sum(is.na(complete(imp1b))), 0L)
})
imp2 <- mice(nhanes[14:25, ], m = 2, maxit = 1, print = FALSE)
imp3 <- mice(nhanes2, m = 2, maxit = 1, print = FALSE)
imp4 <- mice(nhanes2, m = 1, maxit = 1, print = FALSE)
expect_warning(imp5 <<- mice(nhanes[1:13, ], m = 2, maxit = 2, print = FALSE))
expect_error(imp6 <<- mice(nhanes[1:13, 2:3], m = 2, maxit = 2, print = FALSE), "nothing left to impute")
nh3 <- nhanes
colnames(nh3) <- c("AGE", "bmi", "hyp", "chl")
imp7 <- mice(nh3[14:25, ], m = 2, maxit = 2, print = FALSE)
expect_warning(imp8 <<- mice(nhanes[1:13, ], m = 2, maxit = 2, print = FALSE))
mylist <- list(age = NA, bmi = NA, hyp = NA, chl = NA)
nhalf <- nhanes[13:25, ]
test_that("Expands number of rows and imputes", {
expect_equal(nrow(complete(rbind(imp1, imp2))), 25L)
expect_equal(nrow(rbind(imp1, imp2)$imp$bmi), 9L)
})
test_that("throws error", {
expect_error(rbind(imp1, imp3), "datasets have different factor variables")
expect_error(rbind(imp3, imp4), "number of imputations differ")
expect_error(rbind(imp1, imp7), "datasets have different variable names")
})
test_that("throws warning", {
expect_warning(rbind(imp1, imp5),
"iterations differ, so no convergence diagnostics calculated")
})
r1 <- rbind(imp8, imp5)
r2 <- rbind(imp1, mylist)
r3 <- rbind(imp1, nhalf)
r4 <- rbind(imp1, imp2)
test_that("Produces longer imputed data", {
expect_identical(nrow(complete(r1)), 26L)
expect_identical(nrow(complete(r2)), 14L)
})
test_that("Constant variables are not imputed", {
expect_equal(sum(is.na(complete(r3))), 15L)
expect_equal(sum(is.na(complete(r4))), 6L)
})
# r11 <- mice.mids(rbind(imp1, imp5), print = FALSE)
# test_that("plot throws error on convergence diagnostics", {
# expect_error(plot(r11), "no convergence diagnostics found")
# })
r21 <- mice.mids(r2, print = FALSE)
r31 <- mice.mids(r3, print = FALSE)
# issue #59
set.seed(818)
x <- rnorm(10)
D <- data.frame(x=x, y=2*x+rnorm(10))
D[c(2:4, 7), 1] <- NA
expect_error(D_mids <<- mice(D[1:5,], print = FALSE), "nothing left to impute")
expect_warning(D_mids <<- mice(D[1:5,], print = FALSE, remove.collinear = FALSE))
D_rbind <- mice:::rbind.mids(D_mids, D[6:10,])
cmp <- complete(D_rbind, 1)
test_that("Solves issue #59, rbind", expect_identical(cmp[6:10, ], D[6:10, ]))
# calculate chainMean and chainVar
# imp1 <- mice(nhanes[1:13, ], m = 5, maxit = 25, print = FALSE, seed = 123)
# imp2 <- mice(nhanes[14:25, ], m = 5, maxit = 25, print = FALSE, seed = 456)
# z <- rbind(imp1, imp2)
# plot(z)
#
# imp3 <- mice(nhanes, m = 5, maxit = 25, print = FALSE, seed = 123)
# plot(imp3)
#
# An interesting observation is that the SD(hyp, a) < SD(hyp, imp3). This is
# because SD(hyp, imp1) = 0.
|
/tests/testthat/test-rbind.R
|
no_license
|
worldbank/mice
|
R
| false | false | 3,215 |
r
|
context("rbind.mids")
expect_warning(imp1 <<- mice(nhanes[1:13, ], m = 2, maxit = 1, print = FALSE))
test_that("Constant variables are not imputed by default", {
expect_equal(sum(is.na(complete(imp1))), 6L)
})
expect_warning(imp1b <<- mice(nhanes[1:13, ], m = 2, maxit = 1, print = FALSE, remove.constant = FALSE))
test_that("Constant variables are imputed for remove.constant = FALSE", {
expect_equal(sum(is.na(complete(imp1b))), 0L)
})
imp2 <- mice(nhanes[14:25, ], m = 2, maxit = 1, print = FALSE)
imp3 <- mice(nhanes2, m = 2, maxit = 1, print = FALSE)
imp4 <- mice(nhanes2, m = 1, maxit = 1, print = FALSE)
expect_warning(imp5 <<- mice(nhanes[1:13, ], m = 2, maxit = 2, print = FALSE))
expect_error(imp6 <<- mice(nhanes[1:13, 2:3], m = 2, maxit = 2, print = FALSE), "nothing left to impute")
nh3 <- nhanes
colnames(nh3) <- c("AGE", "bmi", "hyp", "chl")
imp7 <- mice(nh3[14:25, ], m = 2, maxit = 2, print = FALSE)
expect_warning(imp8 <<- mice(nhanes[1:13, ], m = 2, maxit = 2, print = FALSE))
mylist <- list(age = NA, bmi = NA, hyp = NA, chl = NA)
nhalf <- nhanes[13:25, ]
test_that("Expands number of rows and imputes", {
expect_equal(nrow(complete(rbind(imp1, imp2))), 25L)
expect_equal(nrow(rbind(imp1, imp2)$imp$bmi), 9L)
})
test_that("throws error", {
expect_error(rbind(imp1, imp3), "datasets have different factor variables")
expect_error(rbind(imp3, imp4), "number of imputations differ")
expect_error(rbind(imp1, imp7), "datasets have different variable names")
})
test_that("throws warning", {
expect_warning(rbind(imp1, imp5),
"iterations differ, so no convergence diagnostics calculated")
})
r1 <- rbind(imp8, imp5)
r2 <- rbind(imp1, mylist)
r3 <- rbind(imp1, nhalf)
r4 <- rbind(imp1, imp2)
test_that("Produces longer imputed data", {
expect_identical(nrow(complete(r1)), 26L)
expect_identical(nrow(complete(r2)), 14L)
})
test_that("Constant variables are not imputed", {
expect_equal(sum(is.na(complete(r3))), 15L)
expect_equal(sum(is.na(complete(r4))), 6L)
})
# r11 <- mice.mids(rbind(imp1, imp5), print = FALSE)
# test_that("plot throws error on convergence diagnostics", {
# expect_error(plot(r11), "no convergence diagnostics found")
# })
r21 <- mice.mids(r2, print = FALSE)
r31 <- mice.mids(r3, print = FALSE)
# issue #59
set.seed <- 818
x <- rnorm(10)
D <- data.frame(x=x, y=2*x+rnorm(10))
D[c(2:4, 7), 1] <- NA
expect_error(D_mids <<- mice(D[1:5,], print = FALSE), "nothing left to impute")
expect_warning(D_mids <<- mice(D[1:5,], print = FALSE, remove.collinear = FALSE))
D_rbind <- mice:::rbind.mids(D_mids, D[6:10,])
cmp <- complete(D_rbind, 1)
test_that("Solves issue #59, rbind", expect_identical(cmp[6:10, ], D[6:10, ]))
# calculate chainMean and chainVar
# imp1 <- mice(nhanes[1:13, ], m = 5, maxit = 25, print = FALSE, seed = 123)
# imp2 <- mice(nhanes[14:25, ], m = 5, maxit = 25, print = FALSE, seed = 456)
# z <- rbind(imp1, imp2)
# plot(z)
#
# imp3 <- mice(nhanes, m = 5, maxit = 25, print = FALSE, seed = 123)
# plot(imp3)
#
# An interesting observation is that the SD(hyp, a) < SD(hyp, imp3). This is
# because SD(hyp, imp1) = 0.
|
# weighting edges
# the cells will be nodes
load("all_cells.rda")
# need the network
load("hybrid_cytokine_network.rda")
# need expression for each PAAD sample
load("paad_expr_cell_decon_and_clin.rda")
library("AnnotationDbi")
library("org.Hs.eg.db")
geneSymbols <- mapIds(org.Hs.eg.db, keys=rownames(paad), column="SYMBOL", keytype="ENTREZID", multiVals="first")
# edge genes
edges <- read.csv("network_data/LRPairs.csv",stringsAsFactors = F)
geneL <- unique(edges$Ligand.ApprovedSymbol)
geneR <- unique(edges$Receptor.ApprovedSymbol)
edgeGenes <- c(geneL, geneR)
# all in order
data.frame(colnames(step1)[-1], colnames(paad)[-1], clin$SampleBarcode)
# not present
presentCells <- allCells[!allCells %in% c("schwann", "delta")]
step1 <- step1[step1$Cell %in% presentCells,]
# create a ECDF for each cell type
library(MASS)
cellECDFS <- lapply(1:nrow(step1), function(a) {
  try( {ecdf(as.numeric(step1[a, -1]))} )  # coerce the one-row data frame to a numeric vector
})
names(cellECDFS) <- step1$Cell
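# Usage sketch (added; "acinar" is only a hypothetical label from step1$Cell):
# cellECDFS[["acinar"]](0.2) would give the fraction of samples whose estimated
# acinar content is <= 0.2, i.e. the empirical CDF evaluated at 0.2.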
# get some smaller matrices
# and give genes a symbol name,
step1x <- step1[,-1]
paadx <- paad[geneSymbols %in% edgeGenes,-1]
rownames(paadx) <- geneSymbols[geneSymbols %in% edgeGenes]
library(hash)
zedf <- function(x){0}
qnames <- function(x) {
n <- c()
for (i in 1: (length(x)-1)) {
n <- c(n, paste(x[i], x[i+1], sep='_'))
}
n
}
# top level list of cells
condECDF <- list()
cellBins <- list()
# for each cell
for (ci in presentCells) {
print(ci)
# bin the cell quantity
ciquant <- quantile(x = step1x[ci,])
# list with an entry for each bin
binList <- list()
cellBins[[ci]] <- ciquant
#for each bin
for (qi in 1:(length(ciquant)-1)) {
print(qi)
idx <- which(as.numeric(step1x[ci,]) > as.numeric(ciquant[qi]) &
as.numeric(step1x[ci,]) <= as.numeric(ciquant[qi+1])
)
# for each gene in edgeGenes
geneList <- list()
for (gi in edgeGenes) {
# fit an ECDF
x <- na.omit(as.numeric(paadx[gi,idx]))
if (length(x) > 3) {
geneList[[gi]] <- ecdf(x)
} else {
geneList[[gi]] <- zedf
}
}
binList[[qi]] <- geneList
} # end bins
names(binList) <- qnames(names(ciquant))
condECDF[[ci]] <- binList
} # end cells
### need to know what bin to start with ###
binMap <- list()
# for each cell
for (ci in presentCells) {
print(ci)
# bin the cell quantity
ciquant <- quantile(x = step1x[ci,])
# list with an entry for cell type.
binMap[[ci]] <- ciquant
}
#> names(condECDF[[ci]])
#[1] "0%_25%" "25%_50%" "50%_75%" "75%_100%"
binfun <- function(x, ci, binMap) {
  # given a cell quantity for a given sample,
  # what bin was it placed into?
  # take the first quantile breakpoint above x; the bin index is one less
  a <- which(binMap[[ci]] > x)[1]
  return(a - 1)
}
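# Example of the intended use (added; the value and cell name are illustrative only):
# binfun(0.12, "fibroblast", binMap) returns the quartile bin (1-4) that a fibroblast
# quantity of 0.12 falls into, matching the bin names in names(condECDF[[ci]]).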
save(binMap, condECDF, cellECDFS, file="p_distr.rda")
|
/Code/archive/Src/Archive/edge_weighting.R
|
no_license
|
IlyaLab/Pan-Cancer-Cell-Cell-Comm-Net
|
R
| false | false | 2,774 |
r
|
# Create a package with state-level quarterly tax data
# 5/2/2018
# stabbr date source ic value level
# Note that historical state totals appear to be available only from around 1977 forward,
# but U.S. totals are available from the early 1960s.
# Package authoring with RStudio:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
#****************************************************************************************************
# Libraries ####
#****************************************************************************************************
library("devtools")
# devtools::check(cleanup = FALSE) # when we want to see check output no matter what
library("magrittr")
library("plyr") # needed for ldply; must be loaded BEFORE dplyr
library("tidyverse")
options(tibble.print_max = 60, tibble.print_min = 60) # if more than 60 rows, print 60 - enough for states
# ggplot2 tibble tidyr readr purrr dplyr stringr forcats
library("scales")
library("hms") # hms, for times.
library("lubridate") # lubridate, for date/times.
library("readxl") # readxl, for .xls and .xlsx files.
library("haven") # haven, for SPSS, SAS and Stata files.
library("vctrs")
library("precis")
library("tibbletime") # https://business-science.github.io/tibbletime/
library("grDevices")
library("knitr")
library("zoo") # for rollapply
library("btools") # library that I created (install from github)
library("bdata")
#****************************************************************************************************
# Get and organize the latest qtax data from the Census API ####
#****************************************************************************************************
# Do this with every quarterly update
# get qtax - EVERYTHING from 1994 forward (that is when the API starts)
# we want to end up with a nice, clean data frame with:
# stabbr (chr), date (date), ic (chr), and value (dbl)
# For data available via api see: https://api.census.gov/data.html
# Here is a Census Bureau example:
# https://api.census.gov/data/timeseries/eits/qtax?get=cell_value,data_type_code,time_slot_id,error_data,category_code,seasonally_adj&for=us:*&time=2012&key=YOUR_KEY_GOES_HERE
censusapikey <- "b27cb41e46ffe3488af186dd80c64dce66bd5e87"
u1 <- "https://api.census.gov/data/timeseries/eits/qtax"
u2 <- "?get=cell_value,data_type_code,time_slot_id,error_data,geo_level_code,category_code,seasonally_adj"
u3 <- "" # to just get US: u3 <- "&for=us:*"
# u4 <- "&time=from+1987+to+2017" # note that qtax from api starts 1994q1 but this starts in 1987 just to be safe
u4 <- "&time=from+1992"
u5 <- paste0("&key=", censusapikey)
url <- paste0(u1, u2, u3, u4, u5)
url
system.time(qtdat <- jsonlite::fromJSON(url)) # returns a matrix
# look at the matrix
dim(qtdat)
qtdat[1:2, ]
qtdat[c(1, nrow(qtdat)), ] # 1992q1 start
# create a cleaned-up data frame
qapi1 <- data.frame(qtdat) %>% as_tibble()
glimpse(qapi1)
names(qapi1) <- t(qapi1[1, ])
qapi1 <- qapi1[-1, ]
glimpse(qapi1)
ht(qapi1)
count(qapi1, geo_level_code)
count(qapi1, geo_level_code, category_code) %>% spread(category_code, n) # we have US in cat3 - good
# clean the data: JUST keep the cat3 data
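# (added note) time values from the API look like "2012-Q3"; str_sub(time, 7, 7) grabs the quarter
# digit and quarter*3 - 2 gives the first month of that quarter, so "2012-Q3" becomes as.Date("2012-7-1").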
qapi2 <- qapi1 %>%
filter(category_code=="QTAXCAT3") %>%
mutate(value=as.numeric(as.character(cell_value)),
date=as.Date(paste(str_sub(time, 1, 4), as.numeric(str_sub(time, 7, 7))*3-2, 1, sep="-")),
stabbr=as.character(geo_level_code),
ic=as.character(data_type_code)) %>%
select(stabbr, date, ic, value) %>%
arrange(stabbr, date, ic)
glimpse(qapi2)
summary(qapi2)
# Save this file before moving on because the Census API is not always working, and good to have
# historical files to fall back on.
fn <- paste0("qtfromapi_", Sys.Date(), ".rds") # format(Sys.Date(), "%d%b%Y")
saveRDS(qapi2, paste0("./data-raw/", fn))
#****************************************************************************************************
# Save the national data ####
#****************************************************************************************************
qapi1 %>% filter()
natl <- qapi1 %>%
filter(category_code == "QTAXCAT1", !str_detect(data_type_code, "4QE")) %>% # we don't want 4-quarter sums
mutate(value=as.numeric(as.character(cell_value)),
date=as.Date(paste(str_sub(time, 1, 4), as.numeric(str_sub(time, 7, 7))*3-2, 1, sep="-")),
stabbr=as.character(geo_level_code),
ic=as.character(data_type_code)) %>%
select(stabbr, date, ic, value) %>%
arrange(stabbr, date, ic)
glimpse(natl)
summary(natl)
icodes <- readRDS("./data-raw/icodes.rds")
glimpse(icodes)
qtax_slgus <- natl %>%
left_join(icodes) %>%
select(stabbr, date, ic, vname, value, vdesc)
glimpse(qtax_slgus)
ht(qtax_slgus)
qtax_slgus %>% select(-value) %>% anyDuplicated()
count(qtax_slgus, ic, vname, vdesc)
comment(qtax_slgus) <- paste0("National state & local government quarterly tax data updated from Census API as of: ", Sys.Date())
comment(qtax_slgus)
use_data(qtax_slgus, overwrite = TRUE)
#****************************************************************************************************
# Combine the new data with the historical data ####
#****************************************************************************************************
# Do this with every quarterly update
# define api file to use (can use a prior file)
fn <- paste0("qtfromapi_", Sys.Date(), ".rds") # format(Sys.Date(), "%d%b%Y")
qapi <- readRDS(paste0("./data-raw/", fn))  # read back from data-raw, where it was saved above
glimpse(qapi)
summary(qapi)
icodes <- readRDS("./data-raw/icodes.rds")
glimpse(icodes)
qth <- readRDS("./data-raw/qtaxhist_useforpackage.rds")
glimpse(qth)
qall <- bind_rows(qth, qapi)
glimpse(qall)
qall %>%
filter(stabbr=="CT", ic=="TOTAL") %>%
ggplot(aes(date, value)) + geom_line()
qtax <- qall %>%
left_join(icodes) %>%
select(stabbr, date, ic, vname, value, vdesc)
glimpse(qtax)
ht(qtax)
qtax %>% select(-value) %>% anyDuplicated()
comment(qtax) <- paste0("Quarterly tax data updated from Census API as of: ", Sys.Date())
comment(qtax)
use_data(qtax, overwrite = TRUE)
|
/data-raw/programs/OLD_get_latest_qtax_from_API.r
|
no_license
|
donboyd5/qtax
|
R
| false | false | 6,315 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shmc.R
\name{sample_tmb_hmc}
\alias{sample_tmb_hmc}
\title{Draw MCMC samples from a model posterior using a static HMC sampler.}
\usage{
sample_tmb_hmc(iter, fn, gr, init, L, eps, warmup = floor(iter/2),
seed = NULL, chain = 1, thin = 1, control = NULL)
}
\arguments{
\item{iter}{The number of samples to draw.}
\item{fn}{A function that returns the log of the posterior density.}
\item{gr}{A function that returns a vector of gradients of the log of
the posterior density (same as \code{fn}).}
\item{init}{A list of lists containing the initial parameter vectors,
one for each chain or a function. It is strongly recommended to
initialize multiple chains from dispersed points. A of NULL signifies
to use the starting values present in the model (i.e., \code{obj$par})
for all chains.}
\item{L}{The number of leapfrog steps to take. The NUTS algorithm does
not require this as an input. If \code{L=1} this function will perform
Langevin sampling. In some contexts \code{L} can roughly be thought of
as a thinning rate.}
\item{eps}{The step size. If a numeric value is passed, it will be used
throughout the entire chain. A \code{NULL} value will initiate
sampler_params of \code{eps} using the dual averaging algorithm during
the first \code{warmup} steps.}
\item{warmup}{The number of warmup iterations.}
\item{seed}{The random seed to use.}
\item{chain}{The chain number, for printing only.}
\item{thin}{The thinning rate to apply to samples. Typically not used
with NUTS.}
\item{control}{A list to control the sampler. See details for further
use.}
}
\value{
A list containing samples ('par') and algorithm details such as
step size adaptation and acceptance probabilities per iteration
('sampler_params').
}
\description{
Draw MCMC samples from a model posterior using a static HMC sampler.
}
\details{
This function implements algorithm 5 of Hoffman and Gelman
(2014), which includes adaptive step sizes (\code{eps}) via an
algorithm called dual averaging.
}
\references{
\itemize{ \item{Neal, R. M. (2011). MCMC using Hamiltonian
dynamics. Handbook of Markov Chain Monte Carlo.} \item{Hoffman and
Gelman (2014). The No-U-Turn sampler: Adaptively setting path lengths
in Hamiltonian Monte Carlo. J. Mach. Learn. Res. 15:1593-1623.} }
Hoffman and Gelman (2014). The No-U-Turn sampler: Adaptively setting
path lengths in Hamiltonian Monte Carlo. J. Mach. Learn. Res.
15:1593-1623.
}
\seealso{
\code{\link{sample_tmb}}
\code{\link{sample_tmb}}
}
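% Added usage sketch, not generated from the package source: a simple 2-d standard normal
% "posterior" standing in for a real TMB model, just to show the documented interface.
\examples{
\dontrun{
fn <- function(x) -0.5 * sum(x^2)  # log-density up to a constant
gr <- function(x) -x               # its gradient
fit <- sample_tmb_hmc(iter = 2000, fn = fn, gr = gr,
                      init = list(c(0, 0)), L = 10, eps = 0.1,
                      warmup = 1000, seed = 1, chain = 1)
str(fit$par)
}
}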
|
/man/sample_tmb_hmc.Rd
|
no_license
|
ChuangWan/adnuts
|
R
| false | true | 2,562 |
rd
|
#Basic Machine Learning on kaggle titanic dataset
nomsg<-suppressMessages
nomsg(library(tidyverse))
nomsg(library(caret))
nomsg(library(mice))
nomsg(library(xgboost))
library(RANN)
library(caretEnsemble)
library(Amelia)
train<-read.csv("train.csv",stringsAsFactors = F)
train<-as.tibble(train)
#Remove cabin,name,Ticket for now as these may not predict survival
#View and deal with nas
newtrain<-train %>%
mutate(PassengerId=as.factor(PassengerId),Pclass=as.factor(Pclass),
Survived=as.factor(Survived),Embarked=as.factor(Embarked),
Sex=as.factor(Sex),
AgeGroup=as.factor(findInterval(Age,c(0,18,35,100)))) %>%
select(-PassengerId,-Name,-Ticket,-Cabin)
#Change levels
levels(newtrain$AgeGroup)<-c("Young","Mid Age","Aged")
levels(newtrain$Sex)<-c("F","M")
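#(added note) how the age bins are formed: findInterval(c(10, 25, 60), c(0, 18, 35, 100))
#returns 1 2 3, i.e. ages 0-17 -> Young, 18-34 -> Mid Age, 35+ -> Aged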
#Impute median
newtrain_1<-preProcess(newtrain,method="medianImpute")
newtrain_imp<-predict(newtrain_1,newtrain)
#checkNAs
anyNA(newtrain_imp)
#View NA counts per column, largest first
newtrain_imp %>%
  map_dbl(~sum(is.na(.x))) %>%
  sort(decreasing = TRUE)
#redo levels
newtrain_imp<-newtrain_imp %>%
mutate(AgeGroup=as.factor(findInterval(Age,c(0,18,35,100))))
levels(newtrain_imp$AgeGroup)<-c("Young","Mid Age","Aged")
anyNA(newtrain_imp)
#Let's visualise survival by Age Group
newtrain_imp %>%
ggplot(aes(Survived,fill=Sex))+geom_histogram(stat="count")+facet_wrap(AgeGroup~Pclass)+
ggtitle("Survival by class,Agegroup and Gender")+
theme(plot.title=element_text(hjust=0.5))+
scale_fill_manual(values=c("orange","steelblue4"))
#The graph does suggest that being of mid Age and embarking in the third class made you more likely to die
#Overall more women than men survived.
#Partition our data into a training and test dataset
#Deep deep water
nomsg(library(h2o))
h2o.init()
#Split our data frame
train_deep<-h2o.splitFrame(as.h2o(newtrain_imp),0.75)
train_set<-train_deep[[1]]
validate_set<-train_deep[[2]]
#Make predictions
y<-"Survived"
x<-setdiff(names(newtrain_imp),y)
gbm_fit<-h2o.gbm(x,y,train_set,nfolds = 10,
ntrees=5970,
learn_rate = 0.7,
max_depth = 50,
seed = 2,
nbins_cats = 156,
keep_cross_validation_predictions = T,
keep_cross_validation_fold_assignment = T,
validation_frame = validate_set
)
h2o.confusionMatrix(gbm_fit)
mod1<-h2o.performance(gbm_fit)
h2o.accuracy(mod1)
#make predictions on Test set
test<-read.csv("test.csv",stringsAsFactors = F)
test<-as.tibble(test)
newtest<-test %>%
mutate(PassengerId=as.factor(PassengerId),Pclass=as.factor(Pclass),
Sex=as.factor(Sex),
Embarked=as.factor(Embarked),
AgeGroup=as.factor(findInterval(Age,c(0,18,35,100)))) %>%
select(-Ticket,-Name,-Cabin)
levels(newtest$Embarked)<-c("","C","Q","S")
levels(newtest$AgeGroup)<-c("Young","Mid Age","Aged")
levels(newtest$Sex)<-c("F","M")
#Find NAs
newtest %>%
map_lgl(~anyNA(.x))
#Preprocess and remove NAs from age and Fare
newtest_1<-preProcess(newtest,method="medianImpute")
newtest_imp<-predict(newtest_1,newtest)
#Recompute AgeGroup now that Age has been imputed
newtest_imp<-newtest_imp %>%
mutate(AgeGroup=as.factor(findInterval(Age,c(0,18,35,100))))
levels(newtest_imp$AgeGroup)<-c("Young","Mid Age","Aged")
levels(newtest_imp$Sex)<-c("F","M")
newtest_imp_h2o<-as.h2o(newtest_imp)
h2o_prediction<-h2o.predict(gbm_fit,newtest_imp_h2o)
mymodel<-gbm_fit@model$cross_validation_holdout_predictions_frame_id$name
as.data.frame(gbm_fit@model$variable_importances) %>%
ggplot(aes(scaled_importance,variable,fill=variable))+geom_col()+
labs(x="Relative Importance",y="Feature")+
ggpubr::theme_cleveland()+
ggtitle("Feature Importance in The GBM Model")
#summary with class predictions= T
predictions<-as.data.frame(h2o_prediction) %>%
select(predict)
predictions_h2o<-cbind(newtest_imp,predictions)
names(predictions_h2o)
test_h2o<-predictions_h2o %>%
rename(Survived=`predict`) %>%
select(PassengerId,Survived)
write.csv(test_h2o,"waterwater.csv",row.names = F)
#Check confusion Matrix
|
/titan with h2o.R
|
no_license
|
Nelson-Gon/Deepdeeph2o
|
R
| false | false | 4,156 |
r
|
###########################################################################################
### Filename: marginal.boxplot.R
### Creation Date: Friday, 13 November 2015 01:00 PM CST
### Last Modified: Thursday, 03 December 2015 08:30 AM CST
###########################################################################################
#' marginal.boxplot
#'
#' Plot boxplots on the margins of a scatterplot in ggplot2. This code is not totally written by
#' Ethan; he adapted it from \url{http://www.r-bloggers.com/scatterplot-with-marginal-boxplots/}.
#'
#' @param data The data.frame to feed into ggplot2
#' @param x A character string of the x-values to use in 'data'. Will be coerced to numeric if it isn't already.
#' @param y A character string of the y-values to use in 'data'. Will be coerced to numeric if it isn't already.
#' @param by A character string of the factor to use in 'data' for color/fill. If missing, defaults to no grouping. Will be coerced to factor if it isn't already.
#' @param xlim The x-limits. If missing, defaults to a good view.
#' @param ylim The y-limits. If missing, defaults to a good view.
#' @param xlab The x-label. Defaults to 'x'. See details.
#' @param ylab The y-label. Defaults to 'y'. See details.
#' @param bylab The label for the by-variable. Defaults to "".
#' @param width The width of jitter. Default 0.5. A warning is issued if > 1.
#' @param counts Display the counts of each 'by' group in the top right corner? If 'by' is missing, displays nrow(data).
#' @param countsize If the counts are displayed, what size should they be? Default 5.
#' @return Nothing is returned; however, the ggplot objects are printed.
#' @details
#' I considered using arguments that don't require character strings, but I think this way might just be safer.
#' The only downside is that I need to fill the xlab and ylab so they're not so dang ugly. That's why the
#' defaults are to 'x' and 'y'. The plus-side is that I have fewer if-else's when specifying '+xlab()+ylab()'
#' to 'ggplot()'. It's also easier (in my opinion) to catch when the variable names aren't in 'data'.
#' @examples
#' marginal.boxplot(pizza.data, x = "height", y = "time_to_fu")
#' marginal.boxplot(pizza.data, x = "height", y = "time_to_fu", by = "sex",
#' xlab = "Height", ylab = "Time to Follow-Up", bylab = "Sex", counts = TRUE)
#' @export
#' @seealso
#' \url{http://www.r-bloggers.com/scatterplot-with-marginal-boxplots/}
#' @import ggplot2
marginal.boxplot <- function(data, x, y, by, xlim, ylim, xlab = "x", ylab = "y", bylab = "", width = 0.5, counts = FALSE, countsize = 5)
{
if(missing(data) || !is.data.frame(data)) {stop("'data' argument must be a data.frame.")}
if(missing(x)) {stop("'x' argument must be non-missing.")}
if(missing(y)) {stop("'y' argument must be non-missing.")}
if(!is.character(x) || length(x) != 1) {stop("'x' argument must be character string.")}
if(!is.character(y) || length(y) != 1) {stop("'y' argument must be character string.")}
if(!(x %in% colnames(data))) {stop("'x' argument must be in colnames(data).")}
if(!(y %in% colnames(data))) {stop("'y' argument must be in colnames(data).")}
# if(!kinda.numeric(data[,x]) || !kinda.numeric(data[,y])) {stop("data[ , x] and data[ , y] must be coercible to numeric.")}
if(!is.numeric(data[,x])) {data[,x] <- as.numeric(data[,x])}
if(!is.numeric(data[,y])) {data[,y] <- as.numeric(data[,y])}
if(!missing(by) && (!is.character(by) || length(by) != 1)) {stop("'by' argument must be character string.")}
if(!missing(by) && !(by %in% colnames(data))) {stop("'by' argument must be in colnames(data).")}
# if(!missing(by) && !kinda.factor(data[,by])) {stop("data[ , by] argument must coercible to a factor.")}
if(!missing(by) && !is.factor(data[,by])) {data[,by] <- as.factor(data[,by])}
if(!missing(xlim) && (!is.vector(xlim) || !is.numeric(xlim) || length(xlim) != 2)) {stop("'xlim' argument must be a vector of length 2.")}
if(!missing(ylim) && (!is.vector(ylim) || !is.numeric(ylim) || length(ylim) != 2)) {stop("'ylim' argument must be a vector of length 2.")}
if(!is.character(xlab) || length(xlab) != 1) {stop("'xlab' argument must be character string.")}
if(!is.character(ylab) || length(ylab) != 1) {stop("'ylab' argument must be character string.")}
if(!is.character(bylab) || length(bylab) != 1) {stop("'bylab' argument must be character string.")}
if(!is.numeric(width) || length(width) != 1 || width < 0) {stop("'width' argument must be numeric constant >= 0.")}
if(!is.logical(counts)){stop("'counts' argument must be logical.")}
if(!is.numeric(countsize) || length(countsize) != 1 || countsize < 0){stop("'size' argument must be a numeric constant >= 0.")}
if(width > 1) {warning("'width' argument is > 1. Results may not be as expected.")}
if(missing(by))
{
p1 <- ggplot(data, aes_string(x = x, y = y)) +
geom_point() +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
expand_limits(x = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(plot.margin = grid::unit(c(0.9, 0.9, 0.5, 0.5), "lines"), legend.position = "none") +
xlab(xlab) +
ylab(ylab)
p2 <- ggplot(data, aes_string(x = "factor(1)", y = x)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(1, 0.9, -0.5, 0.5), "lines"), legend.position = "none") +
coord_flip()
p3 <- ggplot(data, aes_string(x = "factor(1)", y = y)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(0.9, 1, 0.5, -0.5), "lines"), legend.position = "none")
tmp <- data.frame(x = factor(1), y = factor(1), n = paste0("n=",nrow(data)))
p4 <- ggplot(tmp, aes_string(x = "x", y = "y", label = "n")) +
geom_text(size = countsize) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(1, 1, -0.5, -0.5), "lines"), legend.position = "none")
} else
{
p1 <- ggplot(data, aes_string(x = x, y = y, color = by)) +
geom_point() +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
expand_limits(x = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(plot.margin = grid::unit(c(0.9, 0.9, 0.5, 0.5), "lines"), legend.position = "none") +
xlab(xlab) +
ylab(ylab)
print(p1)
p2 <- ggplot(data, aes_string(x = by, y = x)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width), aes_string(color = by)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(axis.text.x = element_blank(), axis.title.x = element_blank(), axis.ticks.x = element_blank(),
plot.margin = grid::unit(c(1, 0.9, -0.5, 0.5), "lines"), legend.position = "none") +
coord_flip() +
xlab(bylab)
p3 <- ggplot(data, aes_string(x = by, y = y)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width), aes_string(color = by)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(0.9, 1, 0.5, -0.5), "lines"), legend.position = "none")
tmp <- data.frame(x = levels(data[,by]), y = levels(data[,by]), n = sapply(levels(data[,by]),
function(lvl, data_){paste0("n=",sum(data_ == lvl))}, data_ = data[,by]))
tmp$x <- factor(tmp$x, levels = levels(data[,by])) # need these to line up the right levels with the right levels
tmp$y <- factor(tmp$y, levels = levels(data[,by])) # need these to line up the right levels with the right levels
p4 <- ggplot(tmp, aes_string(x = "x", y = "y", label = "n", color = "x")) +
geom_text(size = countsize) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(1, 1, -0.5, -0.5), "lines"), legend.position = "none")
}
gt1 <- ggplot_gtable(ggplot_build(p1))
gt2 <- ggplot_gtable(ggplot_build(p2))
gt3 <- ggplot_gtable(ggplot_build(p3))
gt4 <- ggplot_gtable(ggplot_build(p4))
# Get maximum widths and heights for x-axis and y-axis title and text
maxWidth <- grid::unit.pmax(gt1$widths[2:3], gt2$widths[2:3])
maxHeight <- grid::unit.pmax(gt1$heights[4:5], gt3$heights[4:5])
# Set the maximums in the gtables for gt1, gt2 and gt3
gt1$widths[2:3] <- as.list(maxWidth)
gt2$widths[2:3] <- as.list(maxWidth)
gt1$heights[4:5] <- as.list(maxHeight)
gt3$heights[4:5] <- as.list(maxHeight)
# Combine the scatterplot with the two marginal boxplots
# Create a new gtable
gt <- gtable::gtable(widths = grid::unit(c(7, 2), "null"), height = grid::unit(c(2, 7), "null"))
# Instert gt1, gt2 and gt3 into the new gtable
gt <- gtable::gtable_add_grob(gt, gt1, 2, 1)
gt <- gtable::gtable_add_grob(gt, gt2, 1, 1)
gt <- gtable::gtable_add_grob(gt, gt3, 2, 2)
if(counts)
{
gt <- gtable::gtable_add_grob(gt, gt4, 1, 2)
}
# And render the plot
grid::grid.newpage()
grid::grid.draw(gt)
}
|
/R/marginal.boxplot.R
|
no_license
|
eheinzen/Ethan
|
R
| false | false | 10,723 |
r
|
###########################################################################################
### Filename: marginal.boxplot.R
### Creation Date: Friday, 13 November 2015 01:00 PM CST
### Last Modified: Thursday, 03 December 2015 08:30 AM CST
###########################################################################################
#' marginal.boxplot
#'
#' Plot boxplots on the margins of a scatterplot in ggplot2. This code is not totally written by
#' Ethan; he adapted it from \url{http://www.r-bloggers.com/scatterplot-with-marginal-boxplots/}.
#'
#' @param data The data.frame to feed into ggplot2
#' @param x A character string of the x-values to use in 'data'. Will be coerced to numeric if it isn't already.
#' @param y A character string of the y-values to use in 'data'. Will be coerced to numeric if it isn't already.
#' @param by A character string of the factor to use in 'data' for color/fill. If missing, defaults to no grouping. Will be coerced to factor if it isn't already.
#' @param xlim The x-limits. If missing, defaults to a good view.
#' @param ylim The y-limits. If missing, defaults to a good view.
#' @param xlab The x-label. Defaults to 'x'. See details.
#' @param ylab The y-label. Defaults to 'y'. See details.
#' @param bylab The label for the by-variable. Defaults to "".
#' @param width The width of jitter. Default 0.5. A warning is issued if > 1.
#' @param counts Display the counts of each 'by' group in the top right corner? If 'by' is missing, displays nrow(data).
#' @param countsize If the counts are displayed, what size should they be? Default 5.
#' @return Nothing is returned; however, the ggplot objects are printed.
#' @details
#' I considered using arguments that don't require character strings, but I think this way might just be safer.
#' The only downside is that I need to fill the xlab and ylab so they're not so dang ugly. That's why the
#' defaults are to 'x' and 'y'. The plus-side is that I have fewer if-else's when specifying '+xlab()+ylab()'
#' to 'ggplot()'. It's also easier (in my opinion) to catch when the variable names aren't in 'data'.
#' @examples
#' marginal.boxplot(pizza.data, x = "height", y = "time_to_fu")
#' marginal.boxplot(pizza.data, x = "height", y = "time_to_fu", by = "sex",
#' xlab = "Height", ylab = "Time to Follow-Up", bylab = "Sex", counts = TRUE)
#' @export
#' @seealso
#' \url{http://www.r-bloggers.com/scatterplot-with-marginal-boxplots/}
#' @import ggplot2
marginal.boxplot <- function(data, x, y, by, xlim, ylim, xlab = "x", ylab = "y", bylab = "", width = 0.5, counts = FALSE, countsize = 5)
{
if(missing(data) || !is.data.frame(data)) {stop("'data' argument must be a data.frame.")}
if(missing(x)) {stop("'x' argument must be non-missing.")}
if(missing(y)) {stop("'y' argument must be non-missing.")}
if(!is.character(x) || length(x) != 1) {stop("'x' argument must be character string.")}
if(!is.character(y) || length(y) != 1) {stop("'y' argument must be character string.")}
if(!(x %in% colnames(data))) {stop("'x' argument must be in colnames(data).")}
if(!(y %in% colnames(data))) {stop("'y' argument must be in colnames(data).")}
# if(!kinda.numeric(data[,x]) || !kinda.numeric(data[,y])) {stop("data[ , x] and data[ , y] must be coercible to numeric.")}
if(!is.numeric(data[,x])) {data[,x] <- as.numeric(data[,x])}
if(!is.numeric(data[,y])) {data[,y] <- as.numeric(data[,y])}
if(!missing(by) && (!is.character(by) || length(by) != 1)) {stop("'by' argument must be character string.")}
if(!missing(by) && !(by %in% colnames(data))) {stop("'by' argument must be in colnames(data).")}
# if(!missing(by) && !kinda.factor(data[,by])) {stop("data[ , by] argument must coercible to a factor.")}
if(!missing(by) && !is.factor(data[,by])) {data[,by] <- as.factor(data[,by])}
if(!missing(xlim) && (!is.vector(xlim) || !is.numeric(xlim) || length(xlim) != 2)) {stop("'xlim' argument must be a vector of length 2.")}
if(!missing(ylim) && (!is.vector(ylim) || !is.numeric(ylim) || length(ylim) != 2)) {stop("'ylim' argument must be a vector of length 2.")}
if(!is.character(xlab) || length(xlab) != 1) {stop("'xlab' argument must be character string.")}
if(!is.character(ylab) || length(ylab) != 1) {stop("'ylab' argument must be character string.")}
if(!is.character(bylab) || length(bylab) != 1) {stop("'bylab' argument must be character string.")}
if(!is.numeric(width) || length(width) != 1 || width < 0) {stop("'width' argument must be numeric constant >= 0.")}
if(!is.logical(counts)){stop("'counts' argument must be logical.")}
if(!is.numeric(countsize) || length(countsize) != 1 || countsize < 0){stop("'size' argument must be a numeric constant >= 0.")}
if(width > 1) {warning("'width' argument is > 1. Results may not be as expected.")}
if(missing(by))
{
p1 <- ggplot(data, aes_string(x = x, y = y)) +
geom_point() +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
expand_limits(x = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(plot.margin = grid::unit(c(0.9, 0.9, 0.5, 0.5), "lines"), legend.position = "none") +
xlab(xlab) +
ylab(ylab)
p2 <- ggplot(data, aes_string(x = "factor(1)", y = x)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(1, 0.9, -0.5, 0.5), "lines"), legend.position = "none") +
coord_flip()
p3 <- ggplot(data, aes_string(x = "factor(1)", y = y)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(0.9, 1, 0.5, -0.5), "lines"), legend.position = "none")
tmp <- data.frame(x = factor(1), y = factor(1), n = paste0("n=",nrow(data)))
p4 <- ggplot(tmp, aes_string(x = "x", y = "y", label = "n")) +
geom_text(size = countsize) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(1, 1, -0.5, -0.5), "lines"), legend.position = "none")
} else
{
p1 <- ggplot(data, aes_string(x = x, y = y, color = by)) +
geom_point() +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
expand_limits(x = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(plot.margin = grid::unit(c(0.9, 0.9, 0.5, 0.5), "lines"), legend.position = "none") +
xlab(xlab) +
ylab(ylab)
print(p1)
p2 <- ggplot(data, aes_string(x = by, y = x)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width), aes_string(color = by)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(xlim)) c(min(data[,x]) - 0.1 * diff(range(data[,x])), max(data[,x]) + 0.1 * diff(range(data[,x]))) else xlim) +
theme(axis.text.x = element_blank(), axis.title.x = element_blank(), axis.ticks.x = element_blank(),
plot.margin = grid::unit(c(1, 0.9, -0.5, 0.5), "lines"), legend.position = "none") +
coord_flip() +
xlab(bylab)
p3 <- ggplot(data, aes_string(x = by, y = y)) +
geom_boxplot(outlier.colour = NA) +
geom_jitter(position = position_jitter(width = width), aes_string(color = by)) +
scale_y_continuous(expand = c(0, 0)) +
expand_limits(y = if(missing(ylim)) c(min(data[,y]) - 0.1 * diff(range(data[,y])), max(data[,y]) + 0.1 * diff(range(data[,y]))) else ylim) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(0.9, 1, 0.5, -0.5), "lines"), legend.position = "none")
tmp <- data.frame(x = levels(data[,by]), y = levels(data[,by]), n = sapply(levels(data[,by]),
function(lvl, data_){paste0("n=",sum(data_ == lvl))}, data_ = data[,by]))
tmp$x <- factor(tmp$x, levels = levels(data[,by])) # need these to line up the right levels with the right levels
tmp$y <- factor(tmp$y, levels = levels(data[,by])) # need these to line up the right levels with the right levels
p4 <- ggplot(tmp, aes_string(x = "x", y = "y", label = "n", color = "x")) +
geom_text(size = countsize) +
theme(axis.text = element_blank(), axis.title = element_blank(), axis.ticks = element_blank(),
plot.margin = grid::unit(c(1, 1, -0.5, -0.5), "lines"), legend.position = "none")
}
gt1 <- ggplot_gtable(ggplot_build(p1))
gt2 <- ggplot_gtable(ggplot_build(p2))
gt3 <- ggplot_gtable(ggplot_build(p3))
gt4 <- ggplot_gtable(ggplot_build(p4))
# Get maximum widths and heights for x-axis and y-axis title and text
maxWidth <- grid::unit.pmax(gt1$widths[2:3], gt2$widths[2:3])
maxHeight <- grid::unit.pmax(gt1$heights[4:5], gt3$heights[4:5])
# Set the maximums in the gtables for gt1, gt2 and gt3
gt1$widths[2:3] <- as.list(maxWidth)
gt2$widths[2:3] <- as.list(maxWidth)
gt1$heights[4:5] <- as.list(maxHeight)
gt3$heights[4:5] <- as.list(maxHeight)
# Combine the scatterplot with the two marginal boxplots
# Create a new gtable
gt <- gtable::gtable(widths = grid::unit(c(7, 2), "null"), height = grid::unit(c(2, 7), "null"))
# Instert gt1, gt2 and gt3 into the new gtable
gt <- gtable::gtable_add_grob(gt, gt1, 2, 1)
gt <- gtable::gtable_add_grob(gt, gt2, 1, 1)
gt <- gtable::gtable_add_grob(gt, gt3, 2, 2)
if(counts)
{
gt <- gtable::gtable_add_grob(gt, gt4, 1, 2)
}
# And render the plot
grid::grid.newpage()
grid::grid.draw(gt)
}
|
#' The butter function takes in a dataframe with 'words' and 'activation' columns, and
#' calls the spread function for a specific number of times to simulate the spread of
#' activation of a given initial activation space (specified in start_time) over time.
#'
#' Note: butter.decay() is a modified function of butter.retention() and calls the parallelized
#' spread.decay.parallel() function for faster processing. The main difference is that spread.decay()
#' specifies decay rate, d, which is the rate at which activation is lost at each time step. Use the
#' estimate.time() function to figure out the number of time steps required for total activation in
#' the network to decrease to 10% of its value at t = 0.
#'
#' @param start_run A non-empty dataframe with 'words' and 'activation' columns. Must be specified.
#' @param decay Proportion of activation that is lost at each time step. Default is 20%.
#' @param retention Proportion of activation that remains in the node, ranges from 0 to 1. Default is 0.
#' @param suppress Suppress nodes with total final activation of < x units at each time step. Recommended value of x is 0.1% of initial activation of target at t = 0. Default is 0.1.
#' @param network Network where the spreading occurs. Must be specified. Default is gc.net.
#' @param time Number of time steps to run spread function for. Default is 10.
#' @return A compiled dataframe with 'words', 'activation' and 'time' columns showing the spread of activation in the network over time.
#' @examples
#' See Vignette for examples.
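#' # Illustrative sketch only (not run): assumes the default semantic network `gc.net`
#' # is available and uses a made-up target word.
#' # start <- data.frame(words = "lobster", activation = 100, stringsAsFactors = FALSE)
#' # out <- butter.parallel(start_run = start, decay = 0.2, retention = 0,
#' #                        suppress = 0.1, network = gc.net, time = 5)
#' # head(out)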
butter.parallel <- function(start_run, decay = 0.2, retention = 0, suppress = 0.1, network = gc.net, time = 10) {
# start_run = a non-empty dataframe with 'words' and 'activation' columns
# decay = proportion of activation that is lost over time, ranges from 0 to 1
# decay value default = 0.2
# retention = proportion of activation that remains in the node, ranges from 0 to 1
# retention value default = 0
# suppress = nodes with activation less than the suppress value will be suppressed at each time step
# suppress value default = 0.1 (0.1% of 100 units)
# network = network where the spreading occurs
# network value default = gc.net
# time = number of time steps to run spread() for
# time value default = 10
# check if start_run is in the correct format
  if (is.data.frame(start_run) == F || !identical(colnames(start_run), c('words', 'activation'))) {
stop('Input data is not in the correct format. Must be a dataframe with -words-
and -activation- columns.')
}
# check if decay is a number from 0 to 1
if (decay < 0 || decay > 1) {
stop('Decay value is not a number from 0 to 1.')
}
# check if retention is a number from 0 to 1
if (retention < 0 || retention > 1) {
stop('Retention value is not a number from 0 to 1.')
}
# check if time is a non-negative number
if (time < 0 || is.numeric(time) == F) {
stop('Something is off with the time value.')
}
# check if network is an igraph object
if (is.igraph(network) == F) {
stop('Network is not an igraph object.')
}
# create an empty dataframe to store output
output <- data.frame(words = vector(), activation = vector(), time = vector(),
stringsAsFactors=FALSE)
for (t in 1:time) {
updated <- spread.parallel(start_run, decay, retention, suppress, network)
if (nrow(updated) > 0) {
# if updated is not empty, save the updated output
updated$time <- t
output <- rbind(output, updated)
# updated is now the new input (start_run)
start_run <- updated
} else {
print('Spread terminated due to low activations (< 1).')
return(output)
}
}
return(output)
}
|
/R/butter.parallel.R
|
no_license
|
csqsiew/samr
|
R
| false | false | 3,730 |
r
|
dataRead <- "//mizazaycensql01/SCMBIR/Client Dashboard/DataIn"
setwd(dataRead)
tl <- list.files(getwd())
tl <- tl[grep("VoC", tl)]
tl <- tl[order(tl)]
tl <- tl[length(tl)]
VoC <- read.csv(tl)
FUNVOC <- function(PH, Metric){
VoC <- VoC[which(VoC$Product == PH),] ###
VoC$Date <- as.Date(VoC$Date, format = "%Y-%m-%d")
Score <- VoC[which(VoC$SurveyDescription == Metric),]###
Score <- Score[order(Score$Date, Score$Sentiment),]
a <-qplot(factor(format(Score$Date,format = "%Y-%m")), data = Score,
geom = "bar", fill = factor(Score$Sentiment),
y =Score$Count,stat="identity",
xlab = "Months",
ylab= "Service Ratings",
main = paste(PH,"VoC Ratings"))
a <- a + scale_fill_manual(values=c("#747679", "#003366", "#dc291e"),
labels = c("Negative \n 0-6 Rating","Neutral \n 7-8 Rating", "Positive \n 9-10 Rating"))
a <- a + theme(legend.title=element_blank())
return(a)
}
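
# Usage sketch (hypothetical product and metric labels -- substitute values that
# actually occur in VoC$Product and VoC$SurveyDescription):
# FUNVOC(PH = "Product A", Metric = "Overall Satisfaction")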
|
/graphs/VoCGraph.R
|
no_license
|
andrubrown/ClientDashboard
|
R
| false | false | 962 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/patchMatch.R
\name{polarDecomposition}
\alias{polarDecomposition}
\title{polar decomposition with reflection control}
\usage{
polarDecomposition(X)
}
\arguments{
\item{X}{matrix}
}
\value{
decomposition into P Z and approximation of X by \code{P \%*\% Z}
}
\description{
Decomposes X into P and Z (matrices), where Z is a rotation and P is a shearing component.
Z is prevented from containing reflections.
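In matrix terms, the returned factors satisfy (at least approximately)
\deqn{X \approx P Z, \quad Z^T Z = I, \quad \det(Z) = 1,}{X ~ P Z, with Z'Z = I and det(Z) = 1,}
i.e. \code{Z} is a proper rotation containing no reflection.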
}
\examples{
pd = polarDecomposition( matrix( rnorm(9), nrow=3 ) )
}
\author{
Avants BB
}
|
/man/polarDecomposition.Rd
|
no_license
|
stnava/patchMatchR
|
R
| false | true | 563 |
rd
|
/R/NLo_parm.r
|
no_license
|
dpphat/partFilt
|
R
| false | false | 1,492 |
r
| ||
timestamp <- Sys.time()
library(caret)
library(plyr)
library(recipes)
library(dplyr)
model <- "treebag"
#########################################################################
set.seed(2)
training <- twoClassSim(50, linearVars = 2)
testing <- twoClassSim(500, linearVars = 2)
trainX <- training[, -ncol(training)]
trainY <- training$Class
rec_cls <- recipe(Class ~ ., data = training) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
seeds <- vector(mode = "list", length = nrow(training) + 1)
seeds <- lapply(seeds, function(x) 1:20)
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all",
classProbs = TRUE,
summaryFunction = twoClassSummary,
seeds = seeds)
cctrl2 <- trainControl(method = "LOOCV",
classProbs = TRUE, summaryFunction = twoClassSummary,
seeds = seeds)
cctrl3 <- trainControl(method = "oob")
cctrl4 <- trainControl(method = "none",
classProbs = TRUE, summaryFunction = twoClassSummary,
seeds = seeds)
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method = "treebag",
trControl = cctrl1,
metric = "ROC",
preProc = c("center", "scale"),
nbagg = 7)
set.seed(849)
test_class_cv_form <- train(Class ~ ., data = training,
method = "treebag",
trControl = cctrl1,
metric = "ROC",
preProc = c("center", "scale"),
nbagg = 7)
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_prob <- predict(test_class_cv_model, testing[, -ncol(testing)], type = "prob")
test_class_pred_form <- predict(test_class_cv_form, testing[, -ncol(testing)])
test_class_prob_form <- predict(test_class_cv_form, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
method = "treebag",
trControl = cctrl2,
metric = "ROC",
preProc = c("center", "scale"),
nbagg = 7)
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
cat("wrong levels")
set.seed(849)
test_class_oob_model <- train(trainX, trainY,
method = "treebag",
trControl = cctrl3,
nbagg = 7,
keepX = TRUE)
set.seed(849)
test_class_none_model <- train(trainX, trainY,
method = "treebag",
trControl = cctrl4,
tuneGrid = test_class_cv_model$bestTune,
metric = "ROC",
preProc = c("center", "scale"),
nbagg = 7,
keepX = TRUE)
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
test_class_none_prob <- predict(test_class_none_model, testing[, -ncol(testing)], type = "prob")
set.seed(849)
test_class_rec <- train(x = rec_cls,
data = training,
method = "treebag",
trControl = cctrl1,
metric = "ROC",
nbagg = 7,
keepX = TRUE)
if(
!isTRUE(
all.equal(test_class_cv_model$results,
test_class_rec$results))
)
stop("CV weights not giving the same results")
test_class_imp_rec <- varImp(test_class_rec)
test_class_pred_rec <- predict(test_class_rec, testing[, -ncol(testing)])
test_class_prob_rec <- predict(test_class_rec, testing[, -ncol(testing)],
type = "prob")
#########################################################################
library(caret)
library(plyr)
library(recipes)
library(dplyr)
set.seed(1)
training <- SLC14_1(30)
testing <- SLC14_1(100)
trainX <- training[, -ncol(training)]
trainY <- training$y
rec_reg <- recipe(y ~ ., data = training) %>%
step_center(all_predictors()) %>%
step_scale(all_predictors())
testX <- testing[, -ncol(testing)]
testY <- testing$y
rctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all", seeds = seeds)
rctrl2 <- trainControl(method = "LOOCV", seeds = seeds)
rctrl3 <- trainControl(method = "oob", seeds = seeds)
rctrl4 <- trainControl(method = "none", seeds = seeds)
set.seed(849)
test_reg_cv_model <- train(trainX, trainY,
method = "treebag",
trControl = rctrl1,
preProc = c("center", "scale"),
nbagg = 7)
test_reg_pred <- predict(test_reg_cv_model, testX)
set.seed(849)
test_reg_cv_form <- train(y ~ ., data = training,
method = "treebag",
trControl = rctrl1,
preProc = c("center", "scale"),
nbagg = 7)
test_reg_pred_form <- predict(test_reg_cv_form, testX)
set.seed(849)
test_reg_loo_model <- train(trainX, trainY,
method = "treebag",
trControl = rctrl2,
preProc = c("center", "scale"),
nbagg = 7)
set.seed(849)
test_reg_oob_model <- train(trainX, trainY,
method = "treebag",
trControl = rctrl3,
preProc = c("center", "scale"),
nbagg = 7,
keepX = TRUE)
set.seed(849)
test_reg_none_model <- train(trainX, trainY,
method = "treebag",
trControl = rctrl4,
tuneGrid = test_reg_cv_model$bestTune,
preProc = c("center", "scale"),
nbagg = 7,
keepX = TRUE)
test_reg_none_pred <- predict(test_reg_none_model, testX)
set.seed(849)
test_reg_rec <- train(x = rec_reg,
data = training,
method = "treebag",
trControl = rctrl1,
nbagg = 7,
keepX = TRUE)
if(
!isTRUE(
all.equal(test_reg_cv_model$results,
test_reg_rec$results))
)
stop("CV weights not giving the same results")
test_reg_imp_rec <- varImp(test_reg_rec)
test_reg_pred_rec <- predict(test_reg_rec, testing[, -ncol(testing)])
#########################################################################
test_class_predictors1 <- predictors(test_class_cv_model)
test_reg_predictors1 <- predictors(test_reg_cv_model)
test_class_predictors2 <- predictors(test_class_cv_model$finalModel)
test_reg_predictors2 <- predictors(test_reg_cv_model$finalModel)
#########################################################################
test_class_imp <- varImp(test_class_cv_model)
test_reg_imp <- varImp(test_reg_cv_model)
#########################################################################
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
timestamp_end <- Sys.time()
save(list = c(tests, "sInfo", "timestamp", "timestamp_end"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
if(!interactive())
q("no")
|
/RegressionTests/Code/treebag.R
|
no_license
|
topepo/caret
|
R
| false | false | 7,673 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sugsclusterProb.R
\name{sugsclusterProb}
\alias{sugsclusterProb}
\title{A function to compute the marginal probability of belonging to an occupied cluster for the SUGS algorithm}
\usage{
sugsclusterProb(x, K, i, D, n, betaHat, phi, m, nu, S, lambda)
}
\arguments{
\item{x}{The current observation under consideration.}
\item{K}{The number of occupied clusters}
\item{i}{The current iteration of the SUGS algorithm}
\item{D}{The number of variables}
\item{n}{A numeric vector containing the number of observations allocated to each cluster}
\item{betaHat}{The grid of values for the Dirichlet concentration parameter}
\item{phi}{The weights associated with the Dirichlet grid prior}
\item{m}{The current posterior mean}
\item{nu}{The current posterior degrees of freedom}
\item{S}{The current posterior scale vector}
\item{lambda}{The current posterior for the mean variance}
}
\value{
The numeric vector with the unnormalised probability of belonging to each cluster.
}
\description{
A function to compute the marginal probability of belonging to an occupied cluster for the SUGS algorithm
}
|
/man/sugsclusterProb.Rd
|
no_license
|
ococrook/sugsvarsel
|
R
| false | true | 1,173 |
rd
|
library(queueing)
### Name: VT.o_MM1K
### Title: Returns the variance of the time spent in the M/M/1/K queueing
### model
### Aliases: VT.o_MM1K
### Keywords: M/M/1/K
### ** Examples
## create input parameters
i_mm1k <- NewInput.MM1K(lambda=5, mu=5.714, k=15)
## Build the model
o_mm1k <- QueueingModel(i_mm1k)
## Returns the variance
VT(o_mm1k)
|
/data/genthat_extracted_code/queueing/examples/VT.o_MM1K.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 358 |
r
|
library(varSelRF)
### Name: plot.varSelRF
### Title: Plot a varSelRF object
### Aliases: plot.varSelRF
### Keywords: tree classif
### ** Examples
x <- matrix(rnorm(25 * 30), ncol = 30)
x[1:10, 1:2] <- x[1:10, 1:2] + 2
cl <- factor(c(rep("A", 10), rep("B", 15)))
rf.vs1 <- varSelRF(x, cl, ntree = 200, ntreeIterat = 100,
vars.drop.frac = 0.2)
rf.vs1
plot(rf.vs1)
|
/data/genthat_extracted_code/varSelRF/examples/plot.varSelRF.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 391 |
r
|
context("test - getFpcScores")
context("1. test - getFpcScoresIN validate input")
yMat <- matrix(rnorm(36), 6, 6) %>>% ((. + t(.)) / 2)
diag(yMat) <- diag(yMat) + 0.5
eigRes <- eigen(yMat)
splitVar <- rep(1, 6)
allTimePnts <- 1:6
meanFunc <- rnorm(6)
test_that("test - getFpcScoresIN validate input", {
expect_error(rfda:::getFpcScoresIN(c(NA, allTimePnts[1:5]), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(c(NaN, allTimePnts[1:5]), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(c(Inf, allTimePnts[1:5]), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(c(1, allTimePnts), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(NA, splitVar[1:5]), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(NaN, splitVar[1:5]), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(Inf, splitVar[1:5]), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(1, splitVar), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat[1:5, ], NA), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat[1:5, ], NaN), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat[1:5, ], Inf), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat, 1), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors[1:5, ], NA), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors[1:5, ], NaN), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors[1:5, ], Inf), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors, 1), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, NA, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, NaN, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, Inf, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, -1, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 2, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(NA, eigRes$values[1:5]), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(NaN, eigRes$values[1:5]), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(Inf, eigRes$values[1:5]), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(1, eigRes$values), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, 1:2))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, NA))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, NaN))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, Inf))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, -1.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, 0.0))
})
|
/tests/testthat/test-getFpcScores.R
|
permissive
|
alexchang2017/rfda
|
R
| false | false | 3,863 |
r
|
context("test - getFpcScores")
context("1. test - getFpcScoresIN validate input")
yMat <- matrix(rnorm(36), 6, 6) %>>% ((. + t(.)) / 2)
diag(yMat) <- diag(yMat) + 0.5
eigRes <- eigen(yMat)
splitVar <- rep(1, 6)
allTimePnts <- 1:6
meanFunc <- rnorm(6)
test_that("test - getFpcScoresIN validate input", {
expect_error(rfda:::getFpcScoresIN(c(NA, allTimePnts[1:5]), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(c(NaN, allTimePnts[1:5]), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(c(Inf, allTimePnts[1:5]), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(c(1, allTimePnts), splitVar, yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(NA, splitVar[1:5]), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(NaN, splitVar[1:5]), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(Inf, splitVar[1:5]), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, c(1, splitVar), yMat, eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat[1:5, ], NA), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat[1:5, ], NaN), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat[1:5, ], Inf), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, rbind(yMat, 1), eigRes$vectors, 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors[1:5, ], NA), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors[1:5, ], NaN), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors[1:5, ], Inf), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, rbind(eigRes$vectors, 1), 0, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, NA, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, NaN, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, Inf, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, -1, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 2, eigRes$values, 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(NA, eigRes$values[1:5]), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(NaN, eigRes$values[1:5]), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(Inf, eigRes$values[1:5]), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, c(1, eigRes$values), 2.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, 1:2))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, NA))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, NaN))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, Inf))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, -1.0))
expect_error(rfda:::getFpcScoresIN(allTimePnts, splitVar, yMat, eigRes$vectors, 0, eigRes$values, 0.0))
})
|
###################################################################################################
#' Objective Function Evaluation
#'
#' This function handles the evaluation of the objective function in \code{\link{spot}}.
#' This includes handling of the random number generator stream as well as the actual evaluation.
#'
#' @param x matrix of already known solutions, to determine whether RNG seeds for new solutions need to be incremented.
#' @param xnew matrix of new solutions.
#' @param fun objective function to evaluate the solutions in \code{xnew}.
#' @param seedFun initial seed to be used for the random number generator seed. Set to NA to avoid using a fixed seed.
#' @param noise parameter specifying whether the target function is noisy.
#' @param ... parameters passed to \code{fun}.
#'
#' @return the matrix ynew, which are the observations for fun(xnew)
#'
#' @seealso \code{\link{spot}} for more details on the parameters, e.g., \code{fun}
#'
#' @export
#' @keywords internal
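#'
#' @examples
#' \dontrun{
#' ## Minimal sketch on a hypothetical, noise-free toy objective:
#' sphere <- function(x) matrix(rowSums(x^2), ncol = 1)
#' xnew <- matrix(runif(6), nrow = 3)
#' objectiveFunctionEvaluation(x = NULL, xnew = xnew, fun = sphere)
#' }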
###################################################################################################
objectiveFunctionEvaluation <- function(x=NULL,xnew,fun,seedFun=NA,noise=FALSE,...){ # TODO: vectorization
if(!is.null(x))
x <- data.matrix(x)
xnew <- data.matrix(xnew) #TODO: same as in ocba. else, problems with identical() due to names
## if xnew is empty, return.
if(nrow(xnew)==0)
return(numeric(0))
## save seed status (to avoid disturbing the main loops RNG stream)
## note: theoretically only needed in case of noise==TRUE, but always done to avoid mistakes.
if(exists(as.character(substitute(.Random.seed))))
SAVESEED<-.Random.seed
else
SAVESEED=NULL
if(noise & !is.na(seedFun)){
## calculate seeds for each evaluation
seed <- numeric(nrow(xnew))
for(i in 1:nrow(xnew)){
xnewi <- xnew[i,]
x <- rbind(x,xnewi)
repetitions <- sum(apply(x,1,identical,xnewi)) -1
seed[i] <- seedFun + repetitions
}
## either pass seed to objective function fun (if there is a parameter to receive it)
nms <- names(formals(fun))
passSeed <- FALSE
if(length(nms)>1){
passSeed <- names(formals(fun)[2])=="seed"
}
if(passSeed){
ynew <- fun(xnew,seed,...)
}else{ ## or set seed directly here
ynew <- NULL
for(i in 1:nrow(xnew)){
set.seed(seed[i])
ynew <- rbind(ynew,fun(xnew[i,,drop=FALSE]))
}
}
}else{
ynew <- fun(xnew,...)
}
## load seed status (to avoid disturbing the main loops RNG stream), see save at start of function.
if(!is.null(SAVESEED))
assign(".Random.seed", SAVESEED, envir=globalenv())
if(is.numeric(ynew))
ynew <- matrix(ynew,,1) #convert to column matrix
ynew
}
|
/R/objectiveFunctionEvaluation.R
|
no_license
|
bartzbeielstein/SPOT
|
R
| false | false | 2,777 |
r
|
# 1. write a function that take a vector as its argument and then calculate the mean of the vector
mean_calculator <- ... (x){
# calculate the sum of the vector
sum_x <- ...
# calculate the length of the vector
length_x <- ...
# calculate the mean
mean_x <- sum_x / length_x
# return the mean
...
}
# Now, let's create a vector
a_vector <- ... (1,3,5,7, 15,23)
# And, calculate its mean using our function
...
# to make sure that our function is working correctly, compare our result with the result of the mean function in R:
...(a_vector)
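
# One possible completed version of exercise 1 (illustrative sketch; the `_solution`
# names are made up so that the blanks above remain an exercise):
mean_calculator_solution <- function(x) {
  # calculate the sum of the vector
  sum_x <- sum(x)
  # calculate the length of the vector
  length_x <- length(x)
  # calculate the mean
  mean_x <- sum_x / length_x
  # return the mean
  return(mean_x)
}
a_vector <- c(1, 3, 5, 7, 15, 23)
mean_calculator_solution(a_vector)   # 9
mean(a_vector)                       # should also be 9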
# 2. Edit your previous function so that it returns mean, length, and sum
statistics_calculator <- ... (x){
# calculate the sum of the vector
sum_x <- ...
# calculate the length of the vector
length_x <- ...
# calculate the mean
mean_x <- sum_x / length_x
# return the mean
statistics_list <- list...
...
}
# Now, let's create a vector
a_vector <- c (1,3,5,7, 15,23)
# And, calculate its values using our function
statistics_calculator (a_vector)
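
# A possible completed version of exercise 2 (illustrative sketch), returning the
# mean, length, and sum together as a named list:
statistics_calculator_solution <- function(x) {
  sum_x <- sum(x)
  length_x <- length(x)
  mean_x <- sum_x / length_x
  statistics_list <- list(mean = mean_x, length = length_x, sum = sum_x)
  return(statistics_list)
}
statistics_calculator_solution(c(1, 3, 5, 7, 15, 23))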
# 3. Create a dataframe! It can be any kind of dataframe. Be as creative as possible. As an example, look at the dataframe below.
# It is the Premier league table! Now it is your turn. Create your own dataframe.
rank <- c(1:5)
team <- c("liverpool","Tottenham","Man Utd","Man City","Chelsea")
match_played <- c(rep(24, 5))
win <- c(18,17,16,15,14)
draw <- c(2,3,5,6,7)
loss <- match_played - win - draw
pts <- (win * 3) + draw
Premier_league_table <- data.frame(
rank = rank,
team= team,
match_played = match_played,
win= win,
draw= draw,
loss = loss,
pts = pts
)
|
/homeworks/homework1_advanced.R
|
permissive
|
Reihaneh-ahmadi/r_for_data_analysis
|
R
| false | false | 1,618 |
r
|
data(iris)
names(iris)
plot(iris$Petal.Width, iris$Sepal.Width, pch = 19, col = as.numeric(iris$Species))
legend(1, 4.5, legend = unique(iris$Species), col = unique(as.numeric(iris$Species)), pch = 19)
library(tree)
treel <- tree(Species ~ Sepal.Width + Petal.Width, data=iris)
summary(treel)
plot(treel)
text(treel)
plot(iris$Petal.Width, iris$Sepal.Width, pch = 19, col=as.numeric(iris$Species))
partition.tree(treel, label = "Species", add=TRUE)
legend(1.75, 4.5, legend = unique(iris$Species), col = unique(as.numeric(iris$Species)), pch = 19)
set.seed(32313)
newdata <- data.frame(Petal.Width = runif(20,0,2.5), Sepal.Width = runif(20,2,4.5))
pred1 <- predict(treel, newdata)
pred1
pred1 <- predict(treel, newdata, type="class")
plot(newdata$Petal.Width, newdata$Sepal.Width, col=as.numeric(pred1), pch=19)
partition.tree(treel, "Species", add = TRUE)
data(Cars93, package="MASS")
head(Cars93)
treeCars <- tree(DriveTrain ~ MPG.city + MPG.highway + AirBags + EngineSize + Width + Length + Weight + Price + Cylinders + Horsepower + Wheelbase, data = Cars93)
plot(treeCars)
text(treeCars)
par(mfrow=c(1,2))
plot(cv.tree(treeCars, FUN=prune.tree, method = "misclass"))
plot(cv.tree(treeCars))
pruneTree <- prune.tree(treeCars, best=4)
plot(pruneTree)
text(pruneTree)
table(Cars93$DriveTrain, predict(pruneTree, type = "class"))
table(Cars93$DriveTrain, predict(treeCars,type = "class"))
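# Optional extra check (sketch): overall agreement rate between the observed drive
# train and the pruned tree's in-sample predictions.
mean(predict(pruneTree, type = "class") == Cars93$DriveTrain)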
|
/Problem Set 2/Question2Code.R
|
no_license
|
ackivo/RScripts
|
R
| false | false | 1,389 |
r
|
#' Run alignment procedure using Mplus
#'
#' Facilitates running the frequentist alignment procedure in Mplus. It creates Mplus input and data files from an R data.frame, runs the input using Mplus, and provides the summaries back in R.
#'
#' @param model Character. Formula in Mplus format, e.g. "Factor1 BY item1 item2 item3 item4; item4 WITH item3;", see example.
#' @param group Character, name of the grouping variable.
#' @param dat Data frame containing data.
#' @param categorical Character vector of variable names. Indicators that are binary or ordinal. Default is NULL.
#' @param sim.samples Vector of integers. Group sample sizes for simulation; the length of this vector also determines the number of simulation studies. Default is `c(100, 500, 1000)`. May take a substantial amount of time. Use NULL to avoid running simulations.
#' @param sim.reps The number of simulated datasets in each simulation. Default is 500.
#' @param Mplus_com Sometimes you don't have direct access to Mplus, so this argument specifies what to send to the system command line. Default value is "mplus".
#' @param path Where all the .inp, .out, and .dat files should be stored.
#' @param summaries Logical. Whether \code{\link[MIE]{extractAlignment}} and \code{\link[MIE]{extractAlignmentSim}} should be run after all the Mplus work is done. Default is FALSE.
#' @details The function works through the following steps to facilitate setting up and running alignment:
#' \enumerate{
#' \item Converts the data.frame into a dat file compatible with Mplus. Saves it on the disk.
#' \item Creates an Mplus input file for free alignment. Saves it on the disk.
#' \item Runs this input using the saved data, which produces an .out file.
#' \item Reads in the output, selects the appropriate reference group, creates a new input file for fixed alignment, and runs it.
#' \item (Optional) Runs simulations to check the reliability of the fixed alignment results.
#' \item (Optional) Using \code{\link[MIE]{extractAlignment}} and/or \code{\link[MIE]{extractAlignmentSim}}, reads in the output files of the fixed alignment and summarizes them in convenient, publishable tables.
#' }
#' The sequence of free, fixed alignment and simulations follows recommendations of Muthen & Asparouhov (2014).
#'
#' All the files created during these steps stay on the disk and can be used independently for reporting. For a detailed tutorial on alignment in general see \url{https://maksimrudnev.com/2019/05/01/alignment-tutorial/}
#'
#'
#' @examples
#' \dontrun{ align.res = runAlignment(model = "Moral1 BY prostit homosex abortion divorce;
#' Moral2 BY benefits taxes bribes;",
#' group = "country",
#' dat = wvs.s1,
#' Mplus_com = "mplus",
#' summaries = T
#' )
#' }
#' @seealso \code{\link[MIE]{extractAlignment}}, \code{\link[MIE]{extractAlignmentSim}}, \code{\link[MIE]{measurementInvarianceMplus}}
#'
#' @export
runAlignment <- function(
model,
group,
dat,
categorical=NULL,
sim.samples = c(100, 500, 1000),
sim.reps = 500,
Mplus_com = "mplus", #/Applications/Mplus/mplus
path = getwd(),
summaries = FALSE,
estimator="MLR",
listwise = "OFF",
processors=2
) {
# set working dirs ####
oldwd <- getwd()
setwd(path)
estimator <- toupper(estimator)
message("Creating input for free alignment.\n")
#clean the model syntax, derive variables included ####
var.list <- strsplit(model, ";|\n") [[1]]
var.list <- gsub("\\s+", " ", var.list)
var.list <- var.list[!var.list==""]
var.list <- var.list[!var.list==" "]
var.list <- gsub("^.*((by)|(BY))", "", var.list)
#var.list <- unlist(strsplit(var.list, "(?i)(by)", perl=TRUE))
#var.list <- unlist(strsplit(var.list[seq(2, length(var.list), by=2)], " "))
var.list <- unlist(strsplit(var.list, " "))
var.list <- var.list[!var.list=="WITH"]
var.list <- var.list[!var.list==""]
var.list <- unique(var.list)
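  # Illustration (hypothetical input): with model = "F1 BY a b c; b WITH c;"
  # the parsing above should leave var.list == c("a", "b", "c").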
#var.list <- paste(unique(unlist(var.list)), collapse=" ")
#var.list <- strsplit(var.list, " ")[[1]]
#var.list <- var.list[!var.list==""]
# var.list <- paste0("; ", model, " ;")
# var.list<- gsub("\n", ";", var.list)
# var.list <- paste(sapply(var.list, function(i) sub(".*BY *(.*?) *;.*", "\\1", i)), collapse=" ")
# var.list <- strsplit(gsub(" BY | +|;", " ", var.list), " ")[[1]]
# var.list <- var.list[!var.list ==""]
d <- dat[c(group, var.list)]
for(i in colnames(d)) d[,i] <- unclass(d[,i])
rm(i)
if(!is.numeric(d[,group])) {
#d[,group] <- gsub(" ", "_", as.character( d[,group] ) )
message("The group variable must be numeric!")
}
# Writing data file ####
#require(MplusAutomation)
#inp <- capture.output(prepareMplusData(d, "mplus_temp.tab"))
write.table(d, "mplus_temp.tab", quote=F, sep="\t", row.names=F, col.names=F, na=".")
#var.list <- gsub("\\.", "_", var.list)
list.of.groups = unique(as.matrix(d[,1]))
ngroups = length(list.of.groups)
# Making up the syntax for alignment ####
#
inp <- c("DATA:","\n",
" file = 'mplus_temp.tab';", "\n",
" LISTWISE = ", listwise, ";\n",
" VARIABLE:", "\n",
" names =", gsub("\\.", "_", group), " ", paste(gsub("\\.", "_", var.list), collapse="\n"), ";\n",
" missing = .;", "\n",
ifelse(any(is.null(categorical)),
"\n",
paste(" categorical = ", paste(categorical, collapse = "\n"), ";\n")
),
ifelse(estimator == "WLSMV",
paste(
" grouping = ", gsub("\\.", "_", group), "(", paste(list.of.groups, collapse = "\n")),
paste(
" classes = c(", ngroups, ");\n",
" knownclass = c(", paste0(gsub("\\.", "_", group), " = ", list.of.groups, " \n ", collapse=""), collapse="\n")),
");\n\n",
"ANALYSIS:\n",
ifelse(estimator == "WLSMV", "",
" type = mixture;\n"),
" estimator = ", estimator, ";\n",
" alignment =", kind = "", ";\n",
" processors =", processors, ";\n",
ifelse(any(is.null(categorical)) | estimator == "WLSMV",
"\n",
" algorithm = integration;\n\n"),
"MODEL:\n",
ifelse(estimator == "WLSMV", "",
" %OVERALL%\n"),
model,
";\n\n",
"OUTPUT: align tech8 SVALUES;",
"\n\n",
"SAVEDATA: ", "\n",
" RANKING = ranking.dat; "
)
inp["kind"]<-"FREE"
cat(inp, file = "free.inp", sep="")
# Running free alignment ####
message("Running free alignment in Mplus.")
trash <- system(paste(Mplus_com, "free.inp"))
version <- system(paste(Mplus_com, "-version"), intern=T)
version <- sub("^Mplus VERSION *(.*?) *\\s.*", "\\1", version)
# Reading free, making a fixed alignment syntax ####
outFree <- paste(readLines("free.out"), collapse = "\n")
if(grepl("TO AVOID MISSPECIFICATION USE THE GROUP WITH VALUE", outFree)) {
refGroup <- sub(".*TO AVOID MISSPECIFICATION USE THE GROUP WITH VALUE *(.*?) *AS THE BASELINE GROUP.*", "\\1", outFree)
} else {
if(version == "8.8" & estimator != "BAYES") {
free.tab.means <- sub(".*SIGNIFICANCE LEVEL IN DESCENDING ORDER *(.*?) *MODEL COMMAND WITH FINAL ESTIMATES USED AS STARTING VALUES.*", "\\1", outFree)
refGroup <- as.character(read.table(text=sub(".*\n *(.*?) *\n\n\n\n\n.*", "\\1", free.tab.means))[3])
} else {
free.tab.means <- sub(".*FACTOR MEAN COMPARISON AT THE 5% SIGNIFICANCE LEVEL IN DESCENDING ORDER *(.*?) *QUALITY OF NUMERICAL RESULTS.*", "\\1", outFree)
refGroup <- as.character(read.table(text=sub(".*\n *(.*?) *\n\n\n\n\n.*", "\\1", free.tab.means))[3])
}
}
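  # refGroup is the group value Mplus recommends as the baseline; e.g. free.out may
  # contain "TO AVOID MISSPECIFICATION USE THE GROUP WITH VALUE 2 AS THE BASELINE GROUP",
  # in which case refGroup is "2" (the value 2 here is only illustrative).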
inp["kind"]<-paste0("FIXED(", refGroup, ")")
cat(inp, file = "fixed.inp", sep="")
  message("Running fixed alignment in Mplus.")
trash <- system(paste(Mplus_com, "fixed.inp"))
# Creating simulations ####
if(!is.null(sim.samples)) {
    # ..extracting population values from fixed alignment output ####
outFixed <- paste(readLines("fixed.out"), collapse = "\n")
stValues <- sub(".*MODEL COMMAND WITH FINAL ESTIMATES USED AS STARTING VALUES *(.*?) *\n\n\n\n.*", "\\1", outFixed)
stValues <- gsub("%C#", "%g#", stValues)
stValues <- gsub("c#", "g#", stValues)
corrupt.code <- sub(".*%OVERALL% *(.*?) *%g#1%.*", "\\1", stValues)
correction <-strsplit(corrupt.code, "\n")[[1]]
correction <- correction[grep(" BY ", correction)]
correction <- gsub(";", "*1;", correction)
stValues <- paste(paste(correction, collapse="\n"), "\n", substr(stValues, regexpr("%g#1%", stValues), nchar(stValues)))
if(!any(is.null(categorical))) {
g1 <- sub(".*%g#1% *(.*?) *%g#2%.*", "\\1", stValues)
g1 <- strsplit(g1, "\n")[[1]]
g1 <- g1[grep("\\[", g1)]
g1 <- g1[grep("\\$", g1)]
g1 <- sapply(g1 , function(x) sub(" *\\[ *(.*?) *\\$.*", "\\1", x))
gen.cat <- paste0(names(table(g1)), " (", table(g1), ")")
}
#..writing code for simulations####
for(x in sim.samples) {
code <- c("MONTECARLO:", "\n",
" NAMES = ", paste(gsub("\\.", "_", var.list), collapse = " "), ";\n",
" ngroups = ", ngroups, ";\n",
" NOBSERVATIONS =", ngroups, "(", x, ");\n",
" NREPS =", sim.reps, ";\n\n",
ifelse(any(is.null(categorical)),
"\n",
paste(
" CATEGORICAL =", paste(categorical, collapse = " "), ";\n",
" GENERATE = ", paste(gen.cat, collapse = " "),
";\n\n" )),
"ANALYSIS:\n",
ifelse(estimator == "WLSMV", "",
" TYPE = MIXTURE;\n"),
" alignment = ", "FIXED(", refGroup, ");\n",
" estimator = ", estimator, ";\n",
" processors =", processors, ";\n",
ifelse(any(is.null(categorical)) | estimator == "WLSMV",
"\n",
" algorithm = integration;\n\n"),
"MODEL POPULATION:",
ifelse(estimator == "WLSMV", "",
" %OVERALL%\n"),
ifelse(estimator == "WLSMV",
{
gsub(" %", "MODEL ", stValues) %>%
gsub("%|#", "", .) %>%
paste(., collapse="\n")
},
paste(stValues, collapse="\n")
),
"\nMODEL:",
ifelse(estimator == "WLSMV", "",
" %OVERALL%\n"),
ifelse(estimator == "WLSMV",
{
gsub(" %", "MODEL ", stValues) %>%
gsub("%|#", "", .) %>%
paste(collapse="\n")
},
paste(stValues, collapse="\n")
)
)
cat(code, sep="", file = paste0("sim", x , ".inp"))
}
for (x in sim.samples) {
      message("Running simulation ", x, " in Mplus.\n")
trash <- system(paste(Mplus_com, paste0("sim", x, ".inp")))
}
}
# Return summaries
if(summaries) {
if(!is.null(sim.samples)) {
otpt <- list(fixed= extractAlignment("fixed.out", silent = TRUE),
free = extractAlignment("free.out", silent = TRUE),
simulations = extractAlignmentSim(sapply(sim.samples, function(x) paste0("sim", x, ".out")), silent = TRUE)
)
      cat("\n", "⎯⎯⎯⎯⎯⎯⎯⎯⎯ ", "Results of Free alignment", rep("⎯", getOption("width", 80)-20), "\n", sep="")
print(otpt$free$summary)
      cat("\n", "⎯⎯⎯⎯⎯⎯⎯⎯⎯ ", "Results of Fixed alignment", rep("⎯", getOption("width", 80)-20), "\n", sep="")
print(otpt$fixed$summary)
cat("\n", "⎯⎯⎯⎯⎯⎯⎯⎯⎯ ", "Results of simulations", rep("⎯", getOption("width", 80)-20), "\n", sep="")
print(otpt$simulations)
} else {
otpt <- list(fixed = extractAlignment("fixed.out", silent = TRUE),
free = extractAlignment("free.out", silent = TRUE))
      cat("\n", "⎯⎯⎯⎯⎯⎯⎯⎯⎯ ", "Results of Free alignment", rep("⎯", getOption("width", 80)-20), "\n", sep="")
print(otpt$free$summary)
      cat("\n", "⎯⎯⎯⎯⎯⎯⎯⎯⎯ ", "Results of Fixed alignment", rep("⎯", getOption("width", 80)-20), "\n", sep="")
print(otpt$fixed$summary)
}
} else {
message("Done running models. Refer to the free.out, fixed.out, ranking.dat and some sim###.out files.\nConsider using `extractAlignment()` and `extractAlignmentSim()` to extract important parts.")
}
setwd(oldwd)
if(summaries) invisible(otpt)
}
|
/R/runAlignment.R
|
no_license
|
MaksimRudnev/MIE.package
|
R
| false | false | 13,296 |
r
|
# Here is the start of the credits
#
fluidRow(
column(3,
div(
img(src='emilogo.jpg', height=140,width=200),
img(src='https://www.pge.com/pge_global/common/images/pge-spot-full-rgb-pos-lg.png',
height = 70, width = 70)
)),
column(9,
p("This application was developed by ", a("EMI Consulting", href="http://emiconsulting.com"), "in collaboration with PG&E.",em("If you use this application for your evaluation efforts, please give us credit, like so: 'This table/plot was produced with the evalwaterfallr package developed by EMI Consulting and PG&E.'")),
p("For full reference, please see the ", a("evalwaterfallr package on GitHub", href="https://github.com/EMIjess/evalwaterfallr.git")),
p("For more information on the motivation for this package, see Kasman, Robert, Adam Scheer, Rachel Sackman, Rafael Friedmann, and Janice Berman. 2015. 'Development of Order-Independent Waterfall Graphics to Enable Comprehensive Understanding of Impact Evaluation Results.' Proceedings of the 2015 International Energy Program Evaluation Conference", a("at the IEPEC proceedings website.", href="http://www.iepec.org/wp-content/uploads/2015/papers/022.pdf")),
p("A more recent whitepaper includes the additive parameters, see Kasman, Robert and Adam Scheer. 2017.'Whitepaper: Development of Order-Independent Waterfall Graphics to Enable Comprehensive Understanding of Impact Evaluation Results.' ", a("Available for review at CPUC's Energy Data Web.", href="http://www.energydataweb.com/cpuc/deliverableView.aspx?did=1632&uid=0&cid=&tid="))
)
) # end of credits
|
/footer_credits.R
|
no_license
|
EMIConsulting/evalwaterfallr_shiny
|
R
| false | false | 2,115 |
r
|
testlist <- list(A = structure(c(2.32784507357645e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), .Dim = 5:6), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613107002-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 322 |
r
|
library(rgl)
mydir = "/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel"
myfiles = list.files(path=mydir, pattern="*.txt", full.names=TRUE)
tbl <- sapply(myfiles, read.table, comment.char=";",sep=",")
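# Note (assumption): sapply() over read.table() likely simplifies tbl to a 6 x n_files
# matrix whose cells are column vectors, so tbl[[2]] and tbl[[4]] below pick out the
# activity code and x-acceleration columns of the first file via linear indexing.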
exercise = "A"
subset(tbl[[4]], tbl[[2]] == "A")
watch_gyro_df1 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1607_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df2 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1609_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df3 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1611_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df4 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1613_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df5 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1615_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df6 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1619_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df7 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1621_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df8 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1623_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df9 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1625_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_gyro_df10 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/gyro/data_1627_gyro_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df1 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1607_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df2 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1609_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df3 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1611_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df4 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1613_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df5 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1615_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df6 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1619_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df7 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1621_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df8 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1623_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df9 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1625_accel_watch.txt", header = F, comment.char=";", sep=",")
watch_acc_df10 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/watch/accel/data_1627_accel_watch.txt", header = F, comment.char=";", sep=",")
phone_gryo_df1 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1607_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df2 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1609_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df3 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1611_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df4 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1613_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df5 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1615_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df6 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1619_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df7 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1621_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df8 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1623_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df9 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1625_gyro_phone.txt", header = F, comment.char=";", sep=",")
phone_gryo_df10 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/gyro/data_1627_gyro_phone.txt", header = F, comment.char=";", sep=",")
# Phone Acceleration values
phone_acc_df1 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1607_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df2 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1609_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df3 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1611_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df4 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1613_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df5 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1615_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df6 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1619_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df7 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1621_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df8 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1623_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df9 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1625_accel_phone.txt", header = F, comment.char=";", sep=",")
phone_acc_df10 <- read.table("/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw/phone/accel/data_1627_accel_phone.txt", header = F, comment.char=";", sep=",")
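# (Hedged sketch) The repeated read.table() calls above could be collapsed into one
# helper. The directory layout and subject IDs come from the paths used above; the
# helper name and argument names are assumptions made for illustration only.
read_wisdm <- function(device, sensor, id,
                       base = "/Users/shehjarsadhu/Desktop/Spring2020/STA441/Final_Project/wisdm-dataset/raw") {
  f <- file.path(base, device, sensor, sprintf("data_%d_%s_%s.txt", id, sensor, device))
  read.table(f, header = FALSE, comment.char = ";", sep = ",")
}
# Example (not run):
# ids <- c(1607, 1609, 1611, 1613, 1615, 1619, 1621, 1623, 1625, 1627)
# phone_acc_list <- lapply(ids, function(i) read_wisdm("phone", "accel", i))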
# Calculating per-axis means for the selected exercise. NOTE: the data frames df1..df10 used below are assumed to hold the accelerometer data of interest (they are not created under these names above).
exercise = "A"
accX = rbind(subset(df1["V4"], df1["V2"] == exercise) , subset(df2["V4"], df2["V2"] == exercise) , subset(df3["V4"], df3["V2"] == exercise) , subset(df4["V4"], df4["V2"] == exercise) , subset(df5["V4"], df5["V2"] == exercise) , subset(df6["V4"], df6["V2"] == exercise) , subset(df7["V4"], df7["V2"] == exercise) , subset(df8["V4"], df8["V2"] == exercise) , subset(df9["V4"], df9["V2"] == exercise) , subset(df10["V4"], df10["V2"] == exercise) )
accY = rbind(subset(df1["V5"], df1["V2"] == exercise) , subset(df2["V5"], df2["V2"] == exercise) , subset(df3["V5"], df3["V2"] == exercise) , subset(df4["V5"], df4["V2"] == exercise) , subset(df5["V5"], df5["V2"] == exercise) , subset(df6["V5"], df6["V2"] == exercise) , subset(df7["V5"], df7["V2"] == exercise) , subset(df8["V5"], df8["V2"] == exercise) , subset(df9["V5"], df9["V2"] == exercise) , subset(df10["V5"], df10["V2"] == exercise) )
accZ = rbind(subset(df1["V6"], df1["V2"] == exercise) , subset(df2["V6"], df2["V2"] == exercise) , subset(df3["V6"], df3["V2"] == exercise) , subset(df4["V6"], df4["V2"] == exercise) , subset(df5["V6"], df5["V2"] == exercise) , subset(df6["V6"], df6["V2"] == exercise) , subset(df7["V6"], df7["V2"] == exercise) , subset(df8["V6"], df8["V2"] == exercise) , subset(df9["V6"], df9["V2"] == exercise) , subset(df10["V6"], df10["V2"] == exercise) )
meanAccX <- lapply(accX, mean)
meanAccY <- lapply(accY, mean)
meanAccZ <- lapply(accZ, mean)
# Gyroscope means: same pattern as above; here df1..df10 are assumed to point to the gyroscope data frames.
groX = rbind(subset(df1["V4"], df1["V2"] == exercise) , subset(df2["V4"], df2["V2"] == exercise) , subset(df3["V4"], df3["V2"] == exercise) , subset(df4["V4"], df4["V2"] == exercise) , subset(df5["V4"], df5["V2"] == exercise) , subset(df6["V4"], df6["V2"] == exercise) , subset(df7["V4"], df7["V2"] == exercise) , subset(df8["V4"], df8["V2"] == exercise) , subset(df9["V4"], df9["V2"] == exercise) , subset(df10["V4"], df10["V2"] == exercise) )
groY = rbind(subset(df1["V5"], df1["V2"] == exercise) , subset(df2["V5"], df2["V2"] == exercise) , subset(df3["V5"], df3["V2"] == exercise) , subset(df4["V5"], df4["V2"] == exercise) , subset(df5["V5"], df5["V2"] == exercise) , subset(df6["V5"], df6["V2"] == exercise) , subset(df7["V5"], df7["V2"] == exercise) , subset(df8["V5"], df8["V2"] == exercise) , subset(df9["V5"], df9["V2"] == exercise) , subset(df10["V5"], df10["V2"] == exercise) )
groZ = rbind(subset(df1["V6"], df1["V2"] == exercise) , subset(df2["V6"], df2["V2"] == exercise) , subset(df3["V6"], df3["V2"] == exercise) , subset(df4["V6"], df4["V2"] == exercise) , subset(df5["V6"], df5["V2"] == exercise) , subset(df6["V6"], df6["V2"] == exercise) , subset(df7["V6"], df7["V2"] == exercise) , subset(df8["V6"], df8["V2"] == exercise) , subset(df9["V6"], df9["V2"] == exercise) , subset(df10["V6"], df10["V2"] == exercise) )
meanGroX <- lapply(groX, mean)
meanGroY <- lapply(groY, mean)
meanGroZ <- lapply(groZ, mean)
dim(df)
summary(df)
x <- subset(df["V4"], df["V2"] == exercise)
y <- subset(df["V5"], df["V2"] == exercise)
z <- subset(df["V6"], df["V2"] == exercise)
timestamp_data <- subset(df["V3"], df["V2"] == exercise)
numeric_x <- as.numeric(unlist(x))
numeric_y <- as.numeric(unlist(y))
numeric_z <- as.numeric(unlist(z))
numeric_timestamp_data <- as.numeric(unlist(timestamp_data))
# 3D plots -> x,y,z.
plot3d(numeric_x, numeric_y, numeric_z, xlab = " Acc X " , ylab = " Acc Y " , zlab = " Acc Z ")
par(mfrow=c(3,1))
plot(numeric_timestamp_data, numeric_x, type="l", main = paste("Exercise:", exercise) , xlab = "Timestamp", ylab="Acc in X", cex.main=1.75, cex.lab=1.25, cex.axis=1.20)
plot(numeric_timestamp_data, numeric_y, type="l", main = paste("Exercise:", exercise) , xlab = "Timestamp", ylab="Acc in Y", cex.main=1.75, cex.lab=1.25, cex.axis=1.20)
plot(numeric_timestamp_data, numeric_z, type="l", main = paste("Exercise:", exercise) , xlab = "Timestamp", ylab="Acc in Z", cex.main=1.75, cex.lab=1.25, cex.axis=1.20)
|
/group_project_STA441.R
|
no_license
|
sadhushehjar/Multivariate-statistics-in-R
|
R
| false | false | 11,678 |
r
|
mp_setapikey("../manifesto_apikey.txt")
contains_factors <- function(mpds) {
mpds %>%
lapply(class) %>%
magrittr::equals("factor") %>%
any()
}
mpds_large_enough <- function(mpds) {
expect_more_than(nrow(mpds), 3800)
expect_more_than(ncol(mpds), 130)
expect_true(all(c("country", "countryname",
"date", "edate",
"party", "per101", "rile") %in% names(mpds)))
}
test_that("main data set is formatted correctly", {
mpds <- mp_maindataset()
mpds_large_enough(mpds)
expect_false(mpds %>%
contains_factors())
expect_false(mp_southamerica_dataset() %>%
contains_factors())
})
interpolation_as_expected <- function(interpol, mpds, vars) {
expect_true(nrow(interpol) >= nrow(mpds))
expect_true(all(vars %in% names(interpol)))
expect_equal(unique(mpds$party), unique(interpol$party))
sapply(unique(mpds$party), function(the_party) {
sub_mpds <- subset(mpds, party == the_party)
sub_interpol <- subset(interpol, party == the_party)
expect_true(all(between(sub_interpol$edate,
min(sub_mpds$edate),
max(sub_mpds$edate))))
})
}
test_that("interpolation works", {
mpds <- subset(mp_maindataset(), countryname %in% c("Sri Lanka", "Switzerland"))
vars <- grep("(^rile$)|(^per\\d{3}$)", names(mpds), value = TRUE)
## constant interpolation
c_interpol <- mp_interpolate(mpds, by = "month", method = "constant")
interpolation_as_expected(c_interpol, mpds, vars)
expect_true(all(!is.na(c_interpol[,vars])))
all_unique_sd <- function(df) {
df %>%
select(one_of("party", vars)) %>%
group_by(party) %>%
summarise_each(funs(sd(unique(.))))
}
expect_equal(all_unique_sd(mpds), all_unique_sd(c_interpol))
## another zoo function
s_interpol <- mp_interpolate(mpds, approx = na.spline, maxgap = 3)
interpolation_as_expected(s_interpol, mpds, vars)
## custom function with modifiable parameters
test_approx <- function(x, value = 1) {
rep(value, times = length(x))
}
t_interpol <- mp_interpolate(mpds, approx = test_approx, value = 3)
interpolation_as_expected(t_interpol, mpds, vars)
expect_true(all(t_interpol %>%
anti_join(mpds, by = c("party", "edate")) %>%
select(one_of(vars))
== 3))
})
test_that("median voter computations work", {
## extracted data where adjusted makes no difference
expect_equal(median_voter_single(
c(9.6, -37.8, 9.5, 28, 23.81),
c(10.3, 46.5, 12.9, 15.8, 13.6)),
-8.546512, tolerance = 0.01)
expect_equal(median_voter_single(
c(9.6, -37.8, 9.5, 28, 23.81),
c(10.3, 46.5, 12.9, 15.8, 13.6),
adjusted = TRUE),
-8.546512, tolerance = 0.01)
## extracted data where adjusted makes a difference
expect_equal(median_voter_single(
c(-36.111, -9.048, -11.574, 5.91),
c(65.895, 16.661, 7.415, 4.549)),
-45.37972, tolerance = 0.01)
expect_equal(median_voter_single(
c(-36.111, -9.048, -11.574, 5.91),
c(65.895, 16.661, 7.415, 4.549),
adjusted = TRUE),
-30.781635, tolerance = 0.01)
})
median_voter_as_expected <- function(mm, mpds, adjusted = FALSE, scale = "rile", voteshare = "pervote") {
expect_true("median_voter" %in% names(mm))
expect_is(mm$median_voter %>% unlist(), "numeric")
expect_equal(nrow(mm),
nrow(mpds %>% group_by(country, edate) %>% summarise(n = n())))
## median_voter should be NA only if one of the input variables is NA
mpds[,"scale"] <- mpds[,scale]
mpds[,"voteshare"] <- mpds[,voteshare]
mm %>%
subset(is.na(median_voter)) %>%
select(country, edate) %>%
left_join(select(mpds, country, edate, scale, voteshare)) %>%
group_by(country, edate) %>%
summarise(scale_na = any(is.na(scale)),
voteshare_na = any(is.na(voteshare)),
too_few = (n() <= 1 & adjusted)) %>%
mutate(any_problem = scale_na | voteshare_na | too_few) %>%
ungroup() %>%
summarise(all_any_problem = all(any_problem)) %>%
select(all_any_problem) %>%
as.logical() %>%
expect_true()
}
test_that("median voter works on main data set", {
mpds <- mp_maindataset()
median_voter_as_expected(median_voter(mpds), mpds)
median_voter_as_expected(median_voter(mpds, adjusted = TRUE), mpds, adjusted = TRUE)
median_voter_as_expected(median_voter(mpds, scale = "per104"), mpds, scale = "per104")
})
test_that("South America dataset can be downloaded", {
list(mp_maindataset(south_america = TRUE),
mp_southamerica_dataset()) %>% lapply(function(mpdssa) {
expect_true("candidatename" %in% names(mpdssa))
expect_false(all(is.na(mpdssa$candidatename)))
expect_true(all(c("Chile", "Argentina", "Brazil") %in% mpdssa$countryname))
expect_false("Germany" %in% mpdssa$countryname)
})
expect_warning(mp_southamerica_dataset(version = "MPDS2012a"))
})
test_that("Foreign format dataset downloads work", {
require(haven)
mp_maindataset(download_format = "dta") %>%
read_dta() %>%
{ expect_is(.$party, "labelled");
mpds_large_enough(.) }
mp_maindataset(download_format = "sav") %>%
read_sav() %>%
mpds_large_enough()
require(readxl)
mp_maindataset(download_format = "xlsx") %>%
read_excel() %>%
mpds_large_enough()
## Test that cache is not broken
mp_maindataset() %>%
getElement("party") %>%
expect_is("numeric")
})
test_that("mp_cite returns data.frame", {
mp_cite() %>%
expect_is("data.frame")
mp_maindataset()
mp_maindataset("MPDS2014a")
mp_cite() %>%
expect_named(c("data", "source", "version", "citation"))
mp_cite() %>%
subset(data == "dataset") %>%
nrow() %>%
expect_more_than(1)
mp_southamerica_dataset()
mp_cite() %>%
subset(data == "dataset" &
grepl("MPDSSA", version) &
grepl("South America", citation, fixed = TRUE)) %>%
nrow() %>%
expect_more_than(0L)
})
|
/tests/testthat/testmaindataset.R
|
no_license
|
kbenoit/manifestoR
|
R
| false | false | 6,270 |
r
|
context('Readers')
test_that('Test 1: CSV Data file', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_01.csv'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_01.csv')
variable.name <- clean.variable.name('example_01')
csv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.01, envir = .TargetEnv)
})
test_that('Test 2: .csv.bz2', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_02.csv.bz2'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_02.csv.bz2')
variable.name <- clean.variable.name('example_02')
csv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.02, envir = .TargetEnv)
})
test_that('Test 3: csv.zip data', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_03.csv.zip'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_03.csv.zip')
variable.name <- clean.variable.name('example_03')
csv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.03, envir = .TargetEnv)
})
test_that('Example 04: CSV Data File with GZip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_04.csv.gz'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_04.csv.gz')
variable.name <- clean.variable.name('example_04')
csv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.04, envir = .TargetEnv)
})
test_that('Example 05: TSV Data File', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_05.tsv'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_05.tsv')
variable.name <- clean.variable.name('example_05')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.05, envir = .TargetEnv)
})
test_that('Example 06: TSV Data File with BZip2 Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_06.tsv.bz2'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_06.tsv.bz2')
variable.name <- clean.variable.name('example_06')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.06, envir = .TargetEnv)
})
test_that('Example 07: TSV Data File with Zip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_07.tsv.zip'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_07.tsv.zip')
variable.name <- clean.variable.name('example_07')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.07, envir = .TargetEnv)
})
test_that('Example 08: TSV Data File with GZip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_08.tsv.gz'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_08.tsv.gz')
variable.name <- clean.variable.name('example_08')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.08, envir = .TargetEnv)
})
test_that('Example 09: WSV Data File', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_09.wsv'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_09.wsv')
variable.name <- clean.variable.name('example_09')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
expect_false(any(is.na(as.matrix(get(variable.name)))))
rm(example.09, envir = .TargetEnv)
})
test_that('Example 10: WSV Data File with BZip2 Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_10.wsv.bz2'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_10.wsv.bz2')
variable.name <- clean.variable.name('example_10')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.10, envir = .TargetEnv)
})
test_that('Example 11: WSV Data File with Zip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_11.wsv.zip'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_11.wsv.zip')
variable.name <- clean.variable.name('example_11')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.11, envir = .TargetEnv)
})
test_that('Example 12: WSV Data File with GZip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_12.wsv.gz'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_12.wsv.gz')
variable.name <- clean.variable.name('example_12')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.12, envir = .TargetEnv)
})
test_that('Example 13: RData Data File with .RData Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_13.RData'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_13.RData')
variable.name <- clean.variable.name('example_13')
rdata.reader(data.file, filename, variable.name)
expect_that(exists('m'), is_true())
expect_that(names(get('m')), equals(c('N', 'Prime')))
expect_that(nrow(get('m')), equals(5))
expect_that(ncol(get('m')), equals(2))
expect_that(get('m')[5, 2], equals(11))
rm('m', envir = .TargetEnv)
})
test_that('Example 14: RData Data File with .rda Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_14.rda'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_14.rda')
variable.name <- clean.variable.name('example_14')
rdata.reader(data.file, filename, variable.name)
expect_that(exists('n'), is_true())
expect_that(names(get('n')), equals(c('N', 'Prime')))
expect_that(nrow(get('n')), equals(5))
expect_that(ncol(get('n')), equals(2))
expect_that(get('n')[5, 2], equals(11))
rm('n', envir = .TargetEnv)
})
test_that('Example 15: URL File with .url Extension', {
})
test_that('Example 16: TSV File with .tab Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_16.tab'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_16.tab')
variable.name <- clean.variable.name('example_16')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.16, envir = .TargetEnv)
})
test_that('Example 17: TSV File with .tab Extension and BZip2 Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_17.tab.bz2'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_17.tab.bz2')
variable.name <- clean.variable.name('example_17')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.17, envir = .TargetEnv)
})
test_that('Example 18: TSV File with .tab Extension and Zip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_18.tab.zip'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_18.tab.zip')
variable.name <- clean.variable.name('example_18')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.18, envir = .TargetEnv)
})
test_that('Example 19: TSV File with .tab Extension and GZip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_19.tab.gz'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_19.tab.gz')
variable.name <- clean.variable.name('example_19')
tsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.19, envir = .TargetEnv)
})
test_that('Example 20: WSV File with .txt Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_20.txt'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_20.txt')
variable.name <- clean.variable.name('example_20')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.20, envir = .TargetEnv)
})
test_that('Example 21: WSV File with .txt Extension and BZip2 Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_21.txt.bz2'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_21.txt.bz2')
variable.name <- clean.variable.name('example_21')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.21, envir = .TargetEnv)
})
test_that('Example 22: WSV File with .txt Extension and Zip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_22.txt.zip'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_22.txt.zip')
variable.name <- clean.variable.name('example_22')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.22, envir = .TargetEnv)
})
test_that('Example 23: WSV File with .txt Extension and GZip Compression', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_23.txt.gz'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_23.txt.gz')
variable.name <- clean.variable.name('example_23')
wsv.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.23, envir = .TargetEnv)
})
test_that('Example 24: R File with .R Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_24.R'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_24.R')
variable.name <- clean.variable.name('example_24')
r.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.24, envir = .TargetEnv)
})
test_that('Example 25: R File with .r Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_25.r'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_25.r')
variable.name <- clean.variable.name('example_25')
r.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.25, envir = .TargetEnv)
})
test_that('Example 26: Excel 2007 File with .xls Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_26.xls'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_26.xls')
variable.name <- clean.variable.name('example_26')
xls.reader(data.file, filename, variable.name)
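## xls.reader loads each worksheet into its own variable named
## <variable.name>.<sheet name>, hence the '.Sheet1' suffix appended below.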
variable.name <- paste(variable.name, '.Sheet1', sep = '')
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.26.Sheet1, envir = .TargetEnv)
})
test_that('Example 27: Excel 2011 File with .xlsx Extension', {
#data.file <- 'example_27.xlsx'
#filename <- file.path(system.file('example_data',
# package = 'ProjectTemplate'),
# 'example_27.xlsx')
#variable.name <- clean.variable.name('example_27')
#
#xlsx.reader(data.file, filename, variable.name)
#
#variable.name <- paste(variable.name, '.Sheet1', sep = '')
#
#expect_that(exists(variable.name), is_true())
#expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
#expect_that(nrow(get(variable.name)), equals(5))
#expect_that(ncol(get(variable.name)), equals(2))
#expect_that(get(variable.name)[5, 2], equals(11))
#rm(example.27.Sheet1, envir = .TargetEnv)
})
test_that('Example 28: SQLite3 Support with .sql Extension with table = "..."', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
sql.file <- data.frame(type = 'sqlite',
dbname = file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_28.db'),
table = 'example_28')
write.dcf(sql.file, file = 'example_28.sql', width = 1000)
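## write.dcf stores the data frame in DCF ("field: value") form, so the
## generated example_28.sql looks roughly like this (the dbname path depends
## on where ProjectTemplate is installed):
##   type: sqlite
##   dbname: .../ProjectTemplate/example_data/example_28.db
##   table: example_28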
data.file <- 'example_28.sql'
filename <- 'example_28.sql'
variable.name <- clean.variable.name('example_28')
sql.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.28, envir = .TargetEnv)
unlink('example_28.sql')
})
test_that('Example 29: SQLite3 Support with .sql Extension with query = "SELECT * FROM ..."', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
sql.file <- data.frame(type = 'sqlite',
dbname = file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_29.db'),
query = 'SELECT * FROM example_29')
write.dcf(sql.file, file = 'example_29.sql', width = 1000)
data.file <- 'example_29.sql'
filename <- 'example_29.sql'
variable.name <- clean.variable.name('example_29')
sql.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.29, envir = .TargetEnv)
unlink('example_29.sql')
})
test_that('Example 30: SQLite3 Support with .sql Extension and table = "*"', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
sql.file <- data.frame(type = 'sqlite',
dbname = file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_30.db'),
table = '*')
write.dcf(sql.file, file = 'example_30.sql', width = 1000)
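## table = '*' tells sql.reader to load every table in the database, so this
## test expects one variable per table: example.30a and example.30b.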
data.file <- 'example_30.sql'
filename <- 'example_30.sql'
variable.name <- clean.variable.name('example_30')
sql.reader(data.file, filename, variable.name)
variable1.name <- clean.variable.name('example_30a')
variable2.name <- clean.variable.name('example_30b')
expect_that(exists(variable1.name), is_true())
expect_that(names(get(variable1.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable1.name)), equals(5))
expect_that(ncol(get(variable1.name)), equals(2))
expect_that(get(variable1.name)[5, 2], equals(11))
rm(example.30a, envir = .TargetEnv)
expect_that(exists(variable2.name), is_true())
expect_that(names(get(variable2.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable2.name)), equals(5))
expect_that(ncol(get(variable2.name)), equals(2))
expect_that(get(variable2.name)[5, 2], equals(11))
rm(example.30b, envir = .TargetEnv)
rm(example.30, envir = .TargetEnv)
unlink('example_30.sql')
})
test_that('Example 31: SQLite3 Support with .db Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_31.db'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_31.db')
variable.name <- clean.variable.name('example_31')
db.reader(data.file, filename, variable.name)
variable1.name <- clean.variable.name('example_31a')
variable2.name <- clean.variable.name('example_31b')
expect_that(exists(variable1.name), is_true())
expect_that(names(get(variable1.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable1.name)), equals(5))
expect_that(ncol(get(variable1.name)), equals(2))
expect_that(get(variable1.name)[5, 2], equals(11))
rm(example.31a, envir = .TargetEnv)
expect_that(exists(variable2.name), is_true())
expect_that(names(get(variable2.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable2.name)), equals(5))
expect_that(ncol(get(variable2.name)), equals(2))
expect_that(get(variable2.name)[5, 2], equals(11))
rm(example.31b, envir = .TargetEnv)
})
test_that('Example 32: Weka Support with .arff Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_32.arff'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_32.arff')
variable.name <- clean.variable.name('example_32')
arff.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.32, envir = .TargetEnv)
})
test_that('Example 33: Arbitary File Support with .file File Pointing to .db File', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
info.file <- data.frame(path = file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_28.db'),
extension = 'db')
write.dcf(info.file, file = 'example_33.file', width = 1000)
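## example_33.file is a DCF pointer consumed by file.reader: 'path' locates
## the real data file and 'extension' selects which reader to dispatch to
## (here the SQLite .db reader), so the pointed-to database is loaded as if
## it sat in the data directory itself.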
data.file <- 'example_33.file'
filename <- 'example_33.file'
variable.name <- clean.variable.name('example_28')
file.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.28, envir = .TargetEnv)
unlink('example_33.file')
})
test_that('Example 34: MP3 Support with .mp3 Extension', {
})
test_that('Example 35: PPM Support with .ppm Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_35.ppm'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_35.ppm')
variable.name <- clean.variable.name('example_35')
expect_warning(
ppm.reader(data.file, filename, variable.name),
" is NULL so the result will be NULL")
expect_that(exists(variable.name), is_true())
expect_that(as.character(class(get(variable.name))), equals('pixmapRGB'))
rm(example.35, envir = .TargetEnv)
})
test_that('Example 36: dBase Support with .dbf Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_36.dbf'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_36.dbf')
variable.name <- clean.variable.name('example_36')
dbf.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.36, envir = .TargetEnv)
})
test_that('Example 37: SPSS Support with .sav Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_37.sav'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_37.sav')
variable.name <- clean.variable.name('example_37')
expect_warning(
spss.reader(data.file, filename, variable.name),
"Unrecognized record type 7, subtype 18 encountered in system file")
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.37, envir = .TargetEnv)
})
test_that('Example 38: SPSS Support with .sav Extension / Alternative Generation', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_38.sav'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_38.sav')
variable.name <- clean.variable.name('example_38')
expect_warning(
spss.reader(data.file, filename, variable.name),
"Unrecognized record type 7, subtype 18 encountered in system file")
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.38, envir = .TargetEnv)
})
test_that('Example 39: Stata Support with .dta Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_39.dta'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_39.dta')
variable.name <- clean.variable.name('example_39')
stata.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.39, envir = .TargetEnv)
})
test_that('Example 40: Stata Support with .dta Extension / Alternative Generation', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_40.dta'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_40.dta')
variable.name <- clean.variable.name('example_40')
stata.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.40, envir = .TargetEnv)
})
test_that('Example 41: SAS Support with .xport Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_41.xport'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_41.xport')
variable.name <- clean.variable.name('example_41')
xport.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'PRIME')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.41, envir = .TargetEnv)
})
test_that('Example 42: SAS Support with .xpt Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_42.xpt'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_42.xpt')
variable.name <- clean.variable.name('example_42')
xport.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'PRIME')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.42, envir = .TargetEnv)
})
test_that('Example 43: ElasticSearch Support with .es Extension', {
})
| /tests/testthat/test-readers.R | no_license | tomliptrot/ProjectTemplate | R | false | false | 40039 | r |
expect_that(exists(variable2.name), is_true())
expect_that(names(get(variable2.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable2.name)), equals(5))
expect_that(ncol(get(variable2.name)), equals(2))
expect_that(get(variable2.name)[5, 2], equals(11))
rm(example.30b, envir = .TargetEnv)
rm(example.30, envir = .TargetEnv)
unlink('example_30.sql')
})
test_that('Example 31: SQLite3 Support with .db Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_31.db'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_31.db')
variable.name <- clean.variable.name('example_31')
db.reader(data.file, filename, variable.name)
variable1.name <- clean.variable.name('example_31a')
variable2.name <- clean.variable.name('example_31b')
expect_that(exists(variable1.name), is_true())
expect_that(names(get(variable1.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable1.name)), equals(5))
expect_that(ncol(get(variable1.name)), equals(2))
expect_that(get(variable1.name)[5, 2], equals(11))
rm(example.31a, envir = .TargetEnv)
expect_that(exists(variable2.name), is_true())
expect_that(names(get(variable2.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable2.name)), equals(5))
expect_that(ncol(get(variable2.name)), equals(2))
expect_that(get(variable2.name)[5, 2], equals(11))
rm(example.31b, envir = .TargetEnv)
})
test_that('Example 32: Weka Support with .arff Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_32.arff'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_32.arff')
variable.name <- clean.variable.name('example_32')
arff.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.32, envir = .TargetEnv)
})
test_that('Example 33: Arbitrary File Support with .file File Pointing to .db File', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
info.file <- data.frame(path = file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_28.db'),
extension = 'db')
write.dcf(info.file, file = 'example_33.file', width = 1000)
data.file <- 'example_33.file'
filename <- 'example_33.file'
variable.name <- clean.variable.name('example_28')
file.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.28, envir = .TargetEnv)
unlink('example_33.file')
})
test_that('Example 34: MP3 Support with .mp3 Extension', {
})
test_that('Example 35: PPM Support with .ppm Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_35.ppm'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_35.ppm')
variable.name <- clean.variable.name('example_35')
expect_warning(
ppm.reader(data.file, filename, variable.name),
" is NULL so the result will be NULL")
expect_that(exists(variable.name), is_true())
expect_that(as.character(class(get(variable.name))), equals('pixmapRGB'))
rm(example.35, envir = .TargetEnv)
})
test_that('Example 36: dBase Support with .dbf Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_36.dbf'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_36.dbf')
variable.name <- clean.variable.name('example_36')
dbf.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.36, envir = .TargetEnv)
})
test_that('Example 37: SPSS Support with .sav Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_37.sav'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_37.sav')
variable.name <- clean.variable.name('example_37')
expect_warning(
spss.reader(data.file, filename, variable.name),
"Unrecognized record type 7, subtype 18 encountered in system file")
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.37, envir = .TargetEnv)
})
test_that('Example 38: SPSS Support with .sav Extension / Alternative Generation', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_38.sav'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_38.sav')
variable.name <- clean.variable.name('example_38')
expect_warning(
spss.reader(data.file, filename, variable.name),
"Unrecognized record type 7, subtype 18 encountered in system file")
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.38, envir = .TargetEnv)
})
test_that('Example 39: Stata Support with .dta Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_39.dta'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_39.dta')
variable.name <- clean.variable.name('example_39')
stata.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.39, envir = .TargetEnv)
})
test_that('Example 40: Stata Support with .dta Extension / Alternative Generation', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_40.dta'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_40.dta')
variable.name <- clean.variable.name('example_40')
stata.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'Prime')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.40, envir = .TargetEnv)
})
test_that('Example 41: SAS Support with .xport Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_41.xport'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_41.xport')
variable.name <- clean.variable.name('example_41')
xport.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'PRIME')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.41, envir = .TargetEnv)
})
test_that('Example 42: SAS Support with .xpt Extension', {
test_project <- tempfile('test_project')
suppressMessages(create.project(test_project, minimal = FALSE))
on.exit(unlink(test_project, recursive = TRUE), add = TRUE)
oldwd <- setwd(test_project)
on.exit(setwd(oldwd), add = TRUE)
suppressMessages(load.project())
data.file <- 'example_42.xpt'
filename <- file.path(system.file('example_data',
package = 'ProjectTemplate'),
'example_42.xpt')
variable.name <- clean.variable.name('example_42')
xport.reader(data.file, filename, variable.name)
expect_that(exists(variable.name), is_true())
expect_that(names(get(variable.name)), equals(c('N', 'PRIME')))
expect_that(nrow(get(variable.name)), equals(5))
expect_that(ncol(get(variable.name)), equals(2))
expect_that(get(variable.name)[5, 2], equals(11))
rm(example.42, envir = .TargetEnv)
})
test_that('Example 43: ElasticSearch Support with .es Extension', {
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample_binary.R
\name{int_to_bin}
\alias{int_to_bin}
\title{Converts integer to vector of zeros and ones}
\usage{
int_to_bin(x)
}
\arguments{
\item{x}{a numeric vector of positive integers}
}
\value{
a matrix in which each column corresponds to one integer of the x vector
}
\description{
Converts integer to vector of zeros and ones
}
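% Editorial addition: a small usage sketch. The exact bit ordering and width of
% the returned matrix are assumptions, not taken from the package source, so the
% call is wrapped in \dontrun.
\examples{
\dontrun{
# each column of the returned matrix encodes one element of x in binary
int_to_bin(c(5, 12))
}
}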
|
/man/int_to_bin.Rd
|
permissive
|
fboehm/xu2012
|
R
| false | true | 411 |
rd
|
#' summary.freqlist
#'
#' Summarize the \code{freqlist} object.
#'
#' @param object an object of class \code{\link{freqlist}}
#' @param ... For \code{summary.freqlist}, these are passed to \code{\link{as.data.frame.freqlist}} (and hence to
#' \code{\link{freq.control}}). For the print method, these are
#' additional arguments passed to the \code{\link[knitr]{kable}} function.
#' @param x An object of class \code{summary.freqlist}.
#' @inheritParams summary.tableby
#' @return An object of class \code{"summary.freqlist"} (invisibly for the print method).
#' @seealso \code{\link{freqlist}}, \code{\link[base]{table}}, \code{\link[stats]{xtabs}}, \code{\link[knitr]{kable}},
#' \code{\link{freq.control}}, \code{\link{freqlist.internal}}
#'
#' @examples
#' # load mockstudy data
#' data(mockstudy)
#' tab.ex <- table(mockstudy[c("arm", "sex", "mdquality.s")], useNA = "ifany")
#' noby <- freqlist(tab.ex, na.options = "include")
#' summary(noby)
#' withby <- freqlist(tab.ex, strata = c("arm","sex"), na.options = "showexclude")
#' summary(withby)
#' summary(withby, dupLabels = TRUE)
#' @author Tina Gunderson, with major revisions by Ethan Heinzen
#' @name summary.freqlist
NULL
#> NULL
#' @rdname summary.freqlist
#' @export
summary.freqlist <- function(object, ..., labelTranslations = NULL, title = NULL)
{
dat <- as.data.frame(object, ..., labelTranslations = labelTranslations, list.ok = TRUE)
structure(list(
object = set_attr(dat, "control", NULL),
control = attr(dat, "control"),
title = title
), class = c("summary.freqlist", "summary.arsenal_table"))
}
as_data_frame_summary_freqlist <- function(tb, labs, cntrl)
{
fmtdups <- function(x, i)
{
x[i] <- lapply(x[i], as.character)
if(nrow(x) == 0) return(x)
tab <- as.matrix(x[i])
tab[is.na(tab)] <- "NA"
num <- max(stringr::str_count(tab, ","))
for(col in seq_len(ncol(tab)))
{
tmp <- apply(tab[, 1:col, drop = FALSE], 1, paste, collapse = paste0(rep(",", num + 1), collapse = "")) # in R >= 3.3.0, we could use strrep instead
x[c(FALSE, tmp[-1] == tmp[-length(tmp)]), colnames(tab)[col]] <- ""
}
x
}
fmtdigits <- function(x, digits.count, digits.pct)
{
if(nrow(x) == 0) return(x)
if("Freq" %in% names(x)) x$Freq <- formatC(x$Freq, digits = digits.count, format = "f")
if("cumFreq" %in% names(x)) x$cumFreq <- formatC(x$cumFreq, digits = digits.count, format = "f")
if("freqPercent" %in% names(x)) x$freqPercent <- formatC(x$freqPercent, digits = digits.pct, format = "f")
if("cumPercent" %in% names(x)) x$cumPercent <- formatC(x$cumPercent, digits = digits.pct, format = "f")
x
}
idx <- names(tb) %nin% c("Freq", "cumFreq", "freqPercent", "cumPercent")
tb <- fmtdigits(tb, digits.count = cntrl$digits.count, digits.pct = cntrl$digits.pct)
if(!cntrl$dupLabels) tb <- fmtdups(tb, idx)
tb <- stats::setNames(tb, labs[names(tb)])
set_attr(set_attr(tb, "labels", NULL), "align", c("r", "l")[1 + idx])
}
#' @rdname summary.freqlist
#' @export
as.data.frame.summary.freqlist <- function(x, ..., list.ok = FALSE)
{
out <- Map(x$object, lapply(x$object, attr, "labels"), f = as_data_frame_summary_freqlist, MoreArgs = list(cntrl = x$control))
if(!list.ok)
{
if(length(out) == 1) out <- out[[1]] else warning("as.data.frame.summary.freqlist is returning a list of data.frames")
}
out
}
|
/R/summary.freqlist.R
|
no_license
|
umarfaruk9/arsenal
|
R
| false | false | 3,379 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_regression.R
\docType{class}
\name{linreg-class}
\alias{linreg-class}
\alias{linreg}
\title{LinearRegression class}
\value{
class
}
\description{
LinearRegression class containing information about relevant results from the linear regression method.
}
\section{Fields}{
\describe{
\item{\code{formula}}{A formula}
\item{\code{data}}{A data.frame}
\item{\code{regressions_coefficients}}{A vector}
\item{\code{fitted_values}}{A matrix}
\item{\code{residuals}}{A matrix}
\item{\code{degrees_freedom}}{A numeric}
\item{\code{residual_variance}}{A matrix}
\item{\code{variance_regression_coefficients}}{A matrix}
\item{\code{t_values}}{A vector}
}}
\section{Methods}{
\describe{
\item{\code{coef()}}{Returns the regression coefficients as a named vector.}
\item{\code{plot()}}{Plots the residuals ~ fitted values and scale-location.}
\item{\code{pred()}}{Returns the fitted values.}
\item{\code{print()}}{Prints input parameters and regression coefficients.}
\item{\code{resid()}}{Returns the residuals.}
\item{\code{summary()}}{Summarizes values of regression.}
}}
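% Editorial addition: an illustrative sketch only. The constructor call below
% assumes the usual RC-class pattern (linreg$new(formula, data)); it is not
% confirmed by the package source, so it is wrapped in \dontrun.
\examples{
\dontrun{
mod <- linreg$new(formula = Petal.Length ~ Sepal.Length, data = iris)
mod$print()
mod$coef()
mod$plot()
}
}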
|
/man/linreg-class.Rd
|
no_license
|
senseiyukisan/732A94-Lab4
|
R
| false | true | 1,163 |
rd
|
testlist <- list(phi = numeric(0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142, 2.88358101657793e-242, 2.898978379072e+128, 0))
result <- do.call(dcurver:::ddc,testlist)
str(result)
|
/dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609866546-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false | false | 632 |
r
|
#'Function to build a feature panel based on specific genomic regions.
#'
#'\code{buildFeaturePanel} builds panel slots of a TargetExperiment object.
#'Input can be a bam file or a pileup matrix. If the bed file contains a high
#'number of amplicons, the bam file as input is recommended in order to
#'diminish memory requirements. The resulting object is a GRanges instance
#'having panel and counts/coverage information.
#'
#'@param object TargetExperiment class object.
#'@param BPPARAM An optional BiocParallelParam instance defining the parallel
#'back-end to be used during evaluation.
#'
#'@return GRanges object.
#'
#'@include TargetExperiment-pileupCounts.R
#'@exportMethod buildFeaturePanel
#'@docType methods
#'@name buildFeaturePanel
#'@rdname TargetExperiment-buildFeaturePanel
#'@aliases buildFeaturePanel-methods
#'@note see full example in \code{\link{TargetExperiment-class}}
#'@author Gabriela A. Merino \email{gmerino@@bdmg.com.ar}, Cristobal Fresno
#'\email{cfresno@@bdmg.com.ar}, Yanina Murua \email{ymurua@@leloir.org.ar},
#'Andrea S. Llera \email{allera@@leloir.org.ar} and Elmer A. Fernandez
#'\email{efernandez@@bdmg.com.ar}
#'@examples
#'## loading TargetExperiment object
#'data(ampliPanel, package="TarSeqQC")
#'## Defining bam file, bed file and fasta file names and paths
#'setBamFile(ampliPanel)<-system.file("extdata", "mybam.bam",
#' package="TarSeqQC", mustWork=TRUE)
#'setFastaFile(ampliPanel)<-system.file("extdata", "myfasta.fa",
#' package="TarSeqQC", mustWork=TRUE)
#'
#'myFeaturePanel<-buildFeaturePanel(ampliPanel)
setGeneric(name="buildFeaturePanel", def=function(object,
BPPARAM=bpparam()){
standardGeneric("buildFeaturePanel")
})
#'
#'@name buildFeaturePanel
#'@rdname TargetExperiment-buildFeaturePanel
#'@importMethodsFrom BiocParallel bpmapply bplapply bpworkers bpprogressbar<-
#'@import BiocParallel
#'@importFrom Biostrings letterFrequency
#'@aliases buildFeaturePanel,TargetExperiment-method
#'@inheritParams buildFeaturePanel
setMethod(f="buildFeaturePanel", signature="TargetExperiment",
definition=function(object,BPPARAM=bpparam()){
# BPPARAM2<-bpparam()
# bpprogressbar(BPPARAM2)<-TRUE
bed_file<-getBedFile(object)
param <- getScanBamP(object)
#extract GC content from Fasta File
aux<-scanFa(getFastaFile(object), param=bed_file)
gc <- round(rowSums(letterFrequency(aux, letters= c("G","C")))/width(aux),3)
mcols(bed_file)<-cbind(mcols(bed_file),gc=gc)
rm(aux)
# ensure that only those reads overlapping targeted regions are counted
bamWhat(param) <-c("qname", "pos", "qwidth", "rname")
bamFile<-getBamFile(object)
aln <- scanBam(bamFile, param=param)
# create RangedData object
chrs <-as.character(unique(seqnames(bed_file)))
alName<-do.call(rbind,strsplit(names(aln), ":"))
info<-do.call(rbind,lapply(1:length(chrs), function(x){
chr<-chrs[x]
index<-which(alName[,1] == chr)
dfInfo<-do.call(rbind,lapply(1:length(index), function(i){
df<-as.data.frame(aln[[index[i]]])
reads<-GRanges(IRanges(df[,"pos"], width=df[,"qwidth"]),
ID=df[,"qname"], seqnames=df[,"rname"])
# compute counts of all reads
count<-coverage(reads)
#compute statistics
index2<-which(as.character(seqnames(bed_file)) == chr)
            # select chromosome counts and features
chrCounts <- count[[chr]]
featRange <- ranges(bed_file[index2])[i]
aux1 <- lapply(1:length(featRange), function(j){
aux<-start(featRange)[j]:end(featRange)[j]
if(all(aux <= length(
chrCounts))){
return(chrCounts[aux])
}else{ return(c(0,0))}
})
# compute average median SD and IQR for counts per feature
cov <- floor(as.numeric(lapply(aux1, mean)))
sdcov <- floor(as.numeric(lapply(aux1, sd)))
medcount<-floor(as.numeric(lapply(aux1, median)))
iqrcount<-floor(as.numeric(lapply(aux1, IQR)))
return(cbind(names=names(featRange),coverage=cov, sdCoverage=sdcov,
medianCounts= medcount, IQRCounts=iqrcount))}))
return(as.data.frame(dfInfo))
})
)
rownames(info)<-info[,1]
info<-info[,-1]
info[,"coverage"]<-as.numeric(as.character(info[,"coverage"]))
info[,"sdCoverage"]<-as.numeric(as.character(info[,"sdCoverage"]))
info[,"medianCounts"]<-as.numeric(as.character(info[,"medianCounts"]))
info[,"IQRCounts"]<-as.numeric(as.character(info[,"IQRCounts"]))
m<-match(names(bed_file), rownames(info))
mcols(bed_file)<-cbind(mcols(bed_file),info[m,])
return(bed_file)
})
|
/R/TargetExperiment-buildFeaturePanel.R
|
no_license
|
gamerino/TarSeqQC
|
R
| false | false | 4,719 |
r
|
######################################################################################
# CRG
# Hana SUSAK
######################################################################################
# Function to calculate cancer cell fraction (CCF) from allele frequency, ploidy and purity
# @param vaf - Variant allele frequency observed in reads;
# @param ploidy - ploidy in the position of the reported variant (optional, default = 2). In other words, whether this variant co-occurs with a CNV;
# @param ccf_cnv - Cancer Cell Fraction of this ploidy. For germline CNVs it is 1, and for somatic CNVs it can take values in the interval (0,1] (optional, default = 1);
# @param purity - purity of the cancer tissue; a value in the interval (0,1], expected to be high, much closer to 1 than 0 (optional, default = 1)
ccfPloidy <- function (vaf, ploidy = 2, ccf_cnv = 1, purity = 1) {
if (sum(is.na(ploidy))){
ploidy[is.na(ploidy)] <- 2
}
if (sum(is.na(ccf_cnv))){
ccf_cnv[is.na(ccf_cnv)] <- 1
}
if (sum(is.na(purity))){
purity[is.na(purity)] <- 1
}
ccf <- ((2 + (ploidy-2)*ccf_cnv)*vaf)/purity
return(ccf)
}
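# Editorial illustration (not part of the original source): a quick worked
# example of the formula above. A heterozygous diploid SNV (ploidy = 2,
# ccf_cnv = 1) with VAF = 0.25 observed at purity = 0.5 gives
# CCF = ((2 + 0) * 0.25) / 0.5 = 1, i.e. clonal; with the defaults
# (purity = 1) the same VAF gives CCF = 0.5, i.e. subclonal.
# ccfPloidy(vaf = 0.25, purity = 0.5)   # 1
# ccfPloidy(vaf = 0.25)                 # 0.5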
# function to correct CCF above 1
# assumptions considered to correct CCF:
# 1) 1 < ccf <= 1.2 and ploidy = 2 => rough estimation of VAF, which should be 0.5, therefore CCF should be 1
# 2) 1.2 < ccf and ploidy = 2 => missing deletion
# 3) ploidy != 2 and ccf > 1 => CNV and SNV are found in a fraction of the same cells, so the estimate comes out above 1 and should be capped at 1.
# In case there is no ploidy column, it will be assumed to be 2
# @param sample.mutations - Data Frame with columns: 'VAF', 'ploidy', and 'CCF'
ccfCorrection <- function(sample.mutations){
if (!'purity' %in% colnames(sample.mutations)){
# correct BAF between 0.5 and 0.6 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6) & sample.mutations$ploidy == 2
} else {
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6 )
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
# correct BAF between 0.6 and 1 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$vaf > 0.6 & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$vaf > 0.6
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition,]$CCF <- ccfPloidy(sample.mutations[condition ,]$vaf, ploidy=1)
}
# correct ploidy != 2 and ccf >1
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1 & (sample.mutations$ploidy != 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$CCF > 1
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
} else {
if (sum(is.na(sample.mutations$purity))){
sample.mutations[is.na(sample.mutations$purity),'purity'] <- 1
}
# correct BAF between 0.5 and 0.6 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6) & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6 )
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- min( (sample.mutations[condition, ]$vaf*2 / sample.mutations[condition, ]$purity ) , 1)
}
# correct BAF between 0.6 and 1 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1.2 & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$CCF > 1.2
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition,]$CCF <- ccfPloidy(sample.mutations[condition ,]$vaf, ploidy=1, purity=sample.mutations[condition ,]$purity)
}
# correct ploidy != 2 and ccf >1
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1 #& sample.mutations$ploidy != 2
} else {
condition <- sample.mutations$CCF > 1
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
}
sample.mutations
}
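# Editorial illustration (not part of the original source): how the rules above
# act on diploid SNVs when no purity column is present.
# VAF = 0.55 -> raw CCF = 1.1, rule 1 resets it to 1
# VAF = 0.80 -> raw CCF = 1.6 (> 1.2), rule 2 recomputes with ploidy = 1 -> CCF = 0.8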
# function to correct purity
# assumptions considered to correct purity:
# 1) 95% of SNVs are in the interval 0-1.2
# 2) 3 or more SNVs are above 1.2
# In case SNVs after correction for purity (and CNV if provided) violate the 2 conditions above, purity is not used.
purityCorrection <- function(sample.mutations){
if (!'purity' %in% colnames(sample.mutations)){
stop('There need to be purity column for correction by purity')
} else {
## check conditions for each patient, less then 5% and less then 3 SNVs above 1.2 CCF estmated
if (sum(is.na(sample.mutations$purity))){
sample.mutations[is.na(sample.mutations$purity),'purity'] <- 1
}
# correct BAF between 0.5 and 0.6 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6) & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6 )
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- min( (sample.mutations[condition, ]$vaf*2 / sample.mutations[condition, ]$purity ) , 1)
}
# correct BAF between 0.6 and 1 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1.2 & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$CCF > 1.2
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition,]$CCF <- ccfPloidy(sample.mutations[condition ,]$vaf, ploidy=1, purity=sample.mutations[condition ,]$purity)
}
# correct ploidy != 2 and ccf >1
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1 #& sample.mutations$ploidy != 2
} else {
condition <- sample.mutations$CCF > 1
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
}
sample.mutations
}
#' Calculation of Cancer Cell Fraction (CCF) for SNVs from allele frequency (VAF).
#' @description
#' The \code{CCF} function calculates CCF for each variant based on its
#' allele frequency, CNV/ploidy context, cancer cell fraction of reported CNVs within the variant position and purity of the tumor tissue.
#' @param sample.mutations Data Frame which should follow MAF format. Columns (with exactly the same names) which \code{sample.mutations} should have are:
#' \itemize{
#'      \item VAF variant allele frequency for the reported SNV
#'      \item ploidy (optional, default = 2) ploidy within the reported SNV.
#'      For example, if an SNV is reported in the Y chromosome with no CNV in this position, ploidy should be 1.
#'      If gender is not known, the recommendation is to exclude all SNVs on the X chromosome.
#'      \item CCF_CNV (optional, default = 1) cancer cell fraction of the somatic CNV in the region of the reported SNV.
#'      \item purity (optional, default = 1) purity for the sample in which the SNV is reported.
#' }
#' If not provided they need to be specified as parameters of the CCF function.
#' @param VAF (optional) integer/numeric value indicating the column in \code{sample.mutations} representing the variant allele frequency for the reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column)
#' @param ploidy (optional) integer/numeric value indicating the column in \code{sample.mutations} representing the ploidy context of the reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column, or the default value of 2 is taken)
#' @param CCF_CNV (optional) integer/numeric value indicating the column in \code{sample.mutations} representing the CCF of the CNV which is reported in the region of the reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column, or the default value of 1 is taken)
#' @param purity (optional) integer/numeric value indicating the column in \code{sample.mutations} representing the purity of tumor tissue for the sample with the reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column, or the default value of 1 is taken)
#' @param correct (optional, default = TRUE) Correction to perform on SNVs for which CCF is calculated as larger than 1.
#' This is justified by the rough estimation of VAF values, missing CNVs and
#' violation of the mutual exclusivity assumption (two mutations in the same gene/patient are in different cancer fractions).
#' It is recommended to keep this parameter set to TRUE, otherwise unrealistic CCF (> 1) values can be returned for some SNVs.
#' @return a data frame with one additional column, giving CCF values for each SNV in the initial \code{sample.mutations} data frame.
#' @keywords CCF
#' @examples
#' # Simulate some VAF, ploidy and CCF_CNV values
#' df <- data.frame(VAF=runif(100, min=0.05, max=0.75),
#' ploidy=sample(c(1:4), 100, replace=TRUE, prob=c(0.4,0.9,0.5,0.1)),
#' CCF_CNV=runif(100, min=0.1,max=1))
#' df[df$ploidy == 2, 'CCF_CNV'] <- 1
#' # call CCF function
#' df2 <- CCF(df)
#' head(df2)
#' @export
CCF <- function(sample.mutations, VAF = NULL, ploidy = NULL, CCF_CNV = NULL, purity = NULL, correct=TRUE){
if (is.atomic(sample.mutations)) {
sample.mutations <- data.frame(x = sample.mutations)
}
if (!is.null(VAF)){
sample.mutations <- assign.columns(sample.mutations, VAF, "VAF")
}
if (!is.null(ploidy)){
sample.mutations <- assign.columns(sample.mutations, ploidy, "ploidy")
}
if (!is.null(CCF_CNV)){
sample.mutations <- assign.columns(sample.mutations, CCF_CNV, "CCF_CNV")
}
if (!is.null(purity)){
sample.mutations <- assign.columns(sample.mutations, purity, "purity")
}
# make it not sensitive to lower/upper case in column names
original.col.names <- colnames(sample.mutations)
num.col <- ncol(sample.mutations)
colnames(sample.mutations) <- tolower(colnames(sample.mutations))
    # check if VAF column is there
if ( 'vaf' %in% colnames(sample.mutations) ){
if (!is.numeric(sample.mutations$vaf)){
stop("VAF column is not numeric!")
}
} else {
stop("There is no mandatory VAF column!")
}
if ( 'ploidy' %in% colnames(sample.mutations) ){
if (!is.numeric(sample.mutations$ploidy)){
stop("Ploidy column is not numeric!")
}
if ( 'ccf_cnv' %in% colnames(sample.mutations) ){
if (!is.numeric(sample.mutations$ccf_cnv)){
stop("CCF_CNV column is not numeric!")
}
if ('purity' %in% colnames(sample.mutations) ) {
            # calculate CCF using ploidy, CCF of the CNV and purity
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy, sample.mutations$ccf_cnv, purity=sample.mutations$purity)
} else {
            # calculate CCF using ploidy and CCF of the CNV (purity defaults to 1)
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy, sample.mutations$ccf_cnv)
}
} else {
if ('purity' %in% colnames(sample.mutations) ) {
            # calculate CCF using ploidy and purity (no CNV CCF column)
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy, purity=sample.mutations$purity)
} else {
            # calculate CCF using ploidy only
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy)
}
}
} else {
if ('purity' %in% colnames(sample.mutations) ) {
# calculate CCF as ploidy is 2
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, purity=sample.mutations$purity)
} else {
# calculate CCF as ploidy is 2
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf)
}
}
if (correct){
sample.mutations <- ccfCorrection(sample.mutations)
}
colnames(sample.mutations)[1:num.col] <- original.col.names
sample.mutations
}
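# Editorial illustration (not part of the original source): calling CCF() with a
# purity column in addition to the simulated columns shown in the roxygen
# example above.
# df <- data.frame(VAF = c(0.18, 0.42), ploidy = 2, CCF_CNV = 1, purity = 0.8)
# CCF(df)$CCF   # c(0.45, 1): 0.36/0.8 = 0.45 and 0.84/0.8 = 1.05, capped at 1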
|
/R/ccf.R
|
permissive
|
yulab41/cDriver
|
R
| false | false | 13,702 |
r
|
######################################################################################
# CRG
# Hana SUSAK
######################################################################################
# Function to calculate ploidy
# @param VAF - Variant allele frequency observed in reads;
# @param ploidy - ploidy in position of reported variant (optional, default = 2 ). In other words, is this variant together with CNV;
# @param ccf_cnv - Cancer Cell Fraction of this ploidy. For germline CNVs its 1, and for somatic CNVs it can take values in interval (0,1] (optional, default = 1);
# @param purity - purity of cancer tissue and it is value in interval (0,1] but is expected to be high, much closer to 1 then 0. (optional, default = 1)
ccfPloidy <- function (vaf, ploidy = 2, ccf_cnv = 1, purity = 1) {
if (sum(is.na(ploidy))){
ploidy[is.na(ploidy)] <- 2
}
if (sum(is.na(ccf_cnv))){
ccf_cnv[is.na(ccf_cnv)] <- 1
}
if (sum(is.na(purity))){
purity[is.na(purity)] <- 1
}
ccf <- ((2 + (ploidy-2)*ccf_cnv)*vaf)/purity
return(ccf)
}
# function to correct CCF above 1
# asumptions considered to correct CCF:
# 1) 1 < ccf <= 1.2 and ploidy = 2 => rough estimation of baf which should be 0.5 therefore CCF should be 1
# 2) 1.2 < ccf and ploidy = 2 => missing deletion
# 3) ploidy != 2 and ccf > 1 => CNV and SNV are found in fraction of same cells, so estimation is overestimated as above 1, and should be 1.
# In case there is no ploidy column, it will be assumed as 2
# @param sample.mutations - Data Frame with columns: 'VAF', 'ploidy', and 'CCF'
ccfCorrection <- function(sample.mutations){
if (!'purity' %in% colnames(sample.mutations)){
# correct BAF between 0.5 and 0.6 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6) & sample.mutations$ploidy == 2
} else {
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6 )
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
# correct BAF between 0.6 and 1 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$vaf > 0.6 & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$vaf > 0.6
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition,]$CCF <- ccfPloidy(sample.mutations[condition ,]$vaf, ploidy=1)
}
# correct ploidy != 2 and ccf >1
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1 & (sample.mutations$ploidy != 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$CCF > 1
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
} else {
if (sum(is.na(sample.mutations$purity))){
sample.mutations[is.na(sample.mutations$purity),'purity'] <- 1
}
# correct BAF between 0.5 and 0.6 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6) & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6 )
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- min( (sample.mutations[condition, ]$vaf*2 / sample.mutations[condition, ]$purity ) , 1)
}
# correct BAF between 0.6 and 1 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1.2 & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$CCF > 1.2
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition,]$CCF <- ccfPloidy(sample.mutations[condition ,]$vaf, ploidy=1, purity=sample.mutations[condition ,]$purity)
}
# correct ploidy != 2 and ccf >1
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1 #& sample.mutations$ploidy != 2
} else {
condition <- sample.mutations$CCF > 1
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
}
sample.mutations
}
# function to correct purrity
# asumptions considered to correct purity:
# 1) 95% of snps are in interval 0-1.2
# 2) 3 or more snps are above 1.2
# In case SNVs after correction for purity (and CNV if provided) are violating 2 mentioned condicions, purity is not used.
purityCorrection <- function(sample.mutations){
if (!'purity' %in% colnames(sample.mutations)){
stop('There need to be purity column for correction by purity')
} else {
## check conditions for each patient, less then 5% and less then 3 SNVs above 1.2 CCF estmated
if (sum(is.na(sample.mutations$purity))){
sample.mutations[is.na(sample.mutations$purity),'purity'] <- 1
}
# correct BAF between 0.5 and 0.6 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6) & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- (sample.mutations$vaf > 0.5 & sample.mutations$vaf <=0.6 )
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- min( (sample.mutations[condition, ]$vaf*2 / sample.mutations[condition, ]$purity ) , 1)
}
# correct BAF between 0.6 and 1 and diploid
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1.2 & (sample.mutations$ploidy == 2 | is.na(sample.mutations$ploidy ))
} else {
condition <- sample.mutations$CCF > 1.2
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition,]$CCF <- ccfPloidy(sample.mutations[condition ,]$vaf, ploidy=1, purity=sample.mutations[condition ,]$purity)
}
# correct ploidy != 2 and ccf >1
if ( 'ploidy' %in% colnames(sample.mutations) ){
condition <- sample.mutations$CCF > 1 #& sample.mutations$ploidy != 2
} else {
condition <- sample.mutations$CCF > 1
}
if (sum(condition, na.rm = T)) {
condition[is.na(condition)] <- FALSE
sample.mutations[condition, ]$CCF <- 1
}
}
sample.mutations
}
#' Calculation of Cancer Cell Fraction (CCF) for SNVs from allele frequency (VAF).
#' @description
#' \code{CCF} function calculates CCF for each variant based on its
#' allele frequency, CNV/ploidy context, cancer cell fraction of reporeted CNVS within variant position and purity of tumor tissue.
#' @param sample.mutations Data Frame which should follow MAF format. Columns (with exactly same names) which \code{sample.mutations} should have are:
#' \itemize{
#' \item VAF variant allele frequncey for reported SNV
#' \item ploidy (optional, default = 2) ploidy within reoported SNV.
#' For example if SNV is reporeted in Y chromosome and with no CNV in this position, ploidy should be 1.
#' If gender is not known, than recomandation is to to exclude all SNVs with X chromosome.
#' \item CCF_CNV (optional, default = 1) cancer cell fraction of somatic SNV in region with reported SNV.
#' \item purity (optional, default = 1) purity for sample in which SNV is reported.
#' }
#' If not provided they need to be specifed as paramiters of the CCF function.
#' @param VAF (optional) integer/numeric value indicating column in \code{sample.mutations} representing variant allele frequncey for reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column)
#' @param ploidy (optional) integer/numeric value indicating column in \code{sample.mutations} representing ploidy context of reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column, or default value of 2 is taken)
#' @param CCF_CNV (optional) integer/numeric value indicating column in \code{sample.mutations} representing CCF of CNV which is reportedin region of reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column, or default value of 1 is taken)
#' @param purity (optional) integer/numeric value indicating column in \code{sample.mutations} representing purity of tumor tissue for sample with reported SNV.
#' Default is NULL value (in this case \code{sample.mutations} should already have this column, or default value of 1 is taken)
#' @param correct (optional, default = TRUE) Correction to perform on SNVs for which CCF is calculated as larger then 1.
#' This is justifed with rough estimation of VAF values, missing CNVs and
#' violation of mutal exclusivit assumption (two mutatations in same gene/patient are in different cancer frations ).
#' It is recomanted to keep this parameter to TRUE value, othervise unrealistic CCF (> 1) values can be returned for some SNVs.
#' @return a data frame with one additional column, giving CCF vlaues for each SNV in intial \code{sample.mutations} data frame.
#' @keywords CCF
#' @examples
#' # Simulate some VAF, ploidy and CCF_CNV values
#' df <- data.frame(VAF=runif(100, min=0.05, max=0.75),
#' ploidy=sample(c(1:4), 100, replace=TRUE, prob=c(0.4,0.9,0.5,0.1)),
#' CCF_CNV=runif(100, min=0.1,max=1))
#' df[df$ploidy == 2, 'CCF_CNV'] <- 1
#' # call CCF function
#' df2 <- CCF(df)
#' head(df2)
#' @export
CCF <- function(sample.mutations, VAF = NULL, ploidy = NULL, CCF_CNV = NULL, purity = NULL, correct=TRUE){
if (is.atomic(sample.mutations)) {
sample.mutations <- data.frame(x = sample.mutations)
}
if (!is.null(VAF)){
sample.mutations <- assign.columns(sample.mutations, VAF, "VAF")
}
if (!is.null(ploidy)){
sample.mutations <- assign.columns(sample.mutations, ploidy, "ploidy")
}
if (!is.null(CCF_CNV)){
sample.mutations <- assign.columns(sample.mutations, CCF_CNV, "CCF_CNV")
}
if (!is.null(purity)){
sample.mutations <- assign.columns(sample.mutations, purity, "purity")
}
# make it not sensitive to lower/upper case in column names
original.col.names <- colnames(sample.mutations)
num.col <- ncol(sample.mutations)
colnames(sample.mutations) <- tolower(colnames(sample.mutations))
# check if BAF column is there
if ( 'vaf' %in% colnames(sample.mutations) ){
if (!is.numeric(sample.mutations$vaf)){
stop("VAF column is not numeric!")
}
} else {
stop("There is no mandatory VAF column!")
}
if ( 'ploidy' %in% colnames(sample.mutations) ){
if (!is.numeric(sample.mutations$ploidy)){
stop("Ploidy column is not numeric!")
}
if ( 'ccf_cnv' %in% colnames(sample.mutations) ){
if (!is.numeric(sample.mutations$ccf_cnv)){
stop("CCF_CNV column is not numeric!")
}
if ('purity' %in% colnames(sample.mutations) ) {
# calculate CCF as ploidy is 2
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy, sample.mutations$ccf_cnv, purity=sample.mutations$purity)
} else {
# calculate CCF! there is baf, ploidy and ccf of cnv
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy, sample.mutations$ccf_cnv)
}
} else {
if ('purity' %in% colnames(sample.mutations) ) {
# calculate CCF as ploidy is 2
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy, purity=sample.mutations$purity)
} else {
# calculate CCF! there is baf, ploidy and ccf of cnv
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, sample.mutations$ploidy)
}
}
} else {
if ('purity' %in% colnames(sample.mutations) ) {
# calculate CCF as ploidy is 2
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf, purity=sample.mutations$purity)
} else {
# calculate CCF as ploidy is 2
sample.mutations$CCF <- ccfPloidy(sample.mutations$vaf)
}
}
if (correct){
sample.mutations <- ccfCorrection(sample.mutations)
}
colnames(sample.mutations)[1:num.col] <- original.col.names
sample.mutations
}
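# Note: ccfPloidy() and ccfCorrection() are helper functions defined elsewhere in the
# package, not in this file. Purely as an illustration of the kind of calculation
# involved (the formula below is a common purity/ploidy adjustment and is an
# assumption, not the package's actual implementation; CCF_CNV handling is omitted):
ccfPloidy_sketch <- function(vaf, ploidy = 2, purity = 1) {
    # rescale the variant allele fraction by tumor purity and local total copy number
    (vaf / purity) * (purity * ploidy + (1 - purity) * 2)
}
# ccfPloidy_sketch(0.5)   # ~1: clonal heterozygous SNV in a pure diploid sample
# ccfPloidy_sketch(0.25)  # ~0.5: mutation present in about half of the cancer cells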
|
context('All datasets')
library(digest)  # digest() is used for the dataset checksums below
test_that("The correct DropOutRates data is included", {
data(DropOutRates)
expect_that(digest(DropOutRates), equals("ca2667c6176daa1c21a05eb66d23dbaa"))
})
test_that("The correct AbuseRates data is included", {
data(AbuseRates)
expect_that(digest(AbuseRates), equals("1eda094dcea5de1fe160cf55720fae80"))
})
test_that("The correct AbusedDepressionRates data is included", {
data(AbusedDepressionRates)
expect_that(digest(AbusedDepressionRates), equals("01e481864995c27d933d156988e09f30"))
})
test_that("The correct NotAbusedDepressionRates data is included", {
data(NotAbusedDepressionRates)
expect_that(digest(NotAbusedDepressionRates), equals("ec4fa254d7640772e3912db30c830e98"))
})
test_that("The correct AbusedDebutRates data is included", {
data(AbusedDebutRates)
expect_that(digest(AbusedDebutRates), equals("b5c7c51cf2c2b7b1779bfb0b7a9a9b8e"))
})
test_that("The correct NotAbusedDebutRates data is included", {
data(NotAbusedDebutRates)
expect_that(digest(NotAbusedDebutRates), equals("30e0f12cd3d3421e4019b4d705618cc3"))
})
|
/inst/tests/test-datasets.R
|
no_license
|
philliplab/mochModelData
|
R
| false | false | 1,086 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ParallelUnix.R
\name{ParallelUnix}
\alias{ParallelUnix}
\title{Parallelize posterior draws for unix machine}
\usage{
ParallelUnix(TrainX, Times, Event, TestX, NumCores)
}
\arguments{
\item{TrainX}{Explanatory variables for training (in sample) data.
Must be a matrix with rows corresponding to observations and columns to variables}
\item{Times}{The time of event or right-censoring}
\item{Event}{The event indicator: 1 is an event while 0 is censored}
\item{TestX}{Explanatory variables for test (out of sample) data. Must be a matrix and
have the same structure as TrainX}
\item{NumCores}{Number of cores to run on, default is 2}
}
\description{
Function which runs in parallel to obtain posterior draws of the
survival function on a unix based machine
}
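% Illustrative example added for clarity; the data shapes and values below are
% assumptions (not from the package), hence wrapped in \dontrun.
\examples{
\dontrun{
set.seed(1)
TrainX <- matrix(rnorm(100 * 3), ncol = 3)   # 100 observations, 3 covariates
Times  <- rexp(100)                          # event / censoring times
Event  <- rbinom(100, 1, 0.7)                # 1 = event, 0 = censored
TestX  <- matrix(rnorm(20 * 3), ncol = 3)
draws  <- ParallelUnix(TrainX, Times, Event, TestX, NumCores = 2)
}
}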
|
/man/ParallelUnix.Rd
|
no_license
|
nillen0/SurvBART
|
R
| false | true | 843 |
rd
|
# USCRN
library(magrittr)
library(data.table)  # fread() is used below
# air_temp_profile_neutral() (used in grabUSCRN1cm) is assumed to come from the TrenchR package
# Variables
# 1 WBANNO XXXXX
# 2 UTC_DATE YYYYMMDD
# 3 UTC_TIME HHmm
# 4 LST_DATE YYYYMMDD
# 5 LST_TIME HHmm
# 6 CRX_VN XXXXXX
# 7 LONGITUDE Decimal_degrees
# 8 LATITUDE Decimal_degrees
# 9 T_CALC Celsius
# 10 T_HR_AVG Celsius
# 11 T_MAX Celsius Maximum air temperature, in degrees C, during the hour.
# 12 T_MIN Celsius
# 13 P_CALC mm
# 14 SOLARAD W/m^2 Average global solar radiation
# 15 SOLARAD_FLAG X
# 16 SOLARAD_MAX W/m^2
# 17 SOLARAD_MAX_FLAG X
# 18 SOLARAD_MIN W/m^2
# 19 SOLARAD_MIN_FLAG X
# 20 SUR_TEMP_TYPE X
# 21 SUR_TEMP Celsius Average infrared surface temperature
# 22 SUR_TEMP_FLAG X
# 23 SUR_TEMP_MAX Celsius
# 24 SUR_TEMP_MAX_FLAG X
# 25 SUR_TEMP_MIN Celsius
# 26 SUR_TEMP_MIN_FLAG X
# 27 RH_HR_AVG %
# 28 RH_HR_AVG_FLAG X
# 29 SOIL_MOISTURE_5 m^3/m^3
# 30 SOIL_MOISTURE_10 m^3/m^3
# 31 SOIL_MOISTURE_20 m^3/m^3
# 32 SOIL_MOISTURE_50 m^3/m^3
# 33 SOIL_MOISTURE_100 m^3/m^3
# 34 SOIL_TEMP_5 Celsius
# 35 SOIL_TEMP_10 Celsius
# 36 SOIL_TEMP_20 Celsius
# 37 SOIL_TEMP_50 Celsius
# 38 SOIL_TEMP_100 Celsius
# CO: Nunn (-104.76, 40.81)
# WA: Spokane (-117.53, 47.42)
grabUSCRN <- function(var, loc, month) {
fulldf <- read.delim(paste0("Data/CRN/", loc, "_CRN.txt"), sep = "", header = F)
headers <- read.delim("Data/CRN/HEADERS_hourly.txt", sep = "", header = T, skip = 1)
colnames(fulldf) <- colnames(headers)
time <- paste0(floor(fulldf$LST_TIME / 100), ":", fulldf$LST_TIME %% 100)
df <- data.frame("Date" = format(as.POSIXct(paste(fulldf$LST_DATE, time), format = "%Y%m%d %H:%M"), format = "%Y-%m-%d %H:%M"),
"Data" = fulldf[, var]) %>% na.omit()
df <- df[df$Date >= as.Date(paste0("2017-0", month, "-01")) &
df$Date <= as.Date(paste0("2017-0", month, "-31")), ]
return (df)
}
grabUSCRN1cm <- function(var, loc, month) {
fulldf <- read.delim(paste0("Data/CRN/", loc, "_CRN.txt"), sep = "", header = F)
headers <- read.delim("Data/CRN/HEADERS_hourly.txt", sep = "", header = T, skip = 1)
colnames(fulldf) <- colnames(headers)
time <- paste0(floor(fulldf$LST_TIME / 100), ":", fulldf$LST_TIME %% 100)
df <- data.frame("Date" = format(as.POSIXct(paste(fulldf$LST_DATE, time), format = "%Y%m%d %H:%M"), format = "%Y-%m-%d %H:%M"),
"Data" = fulldf[, var]) %>% na.omit()
df <- df[df$Date >= as.Date(paste0("2017-0", month, "-01")) &
df$Date <= as.Date(paste0("2017-0", month, "-31")), ]
if(var=="T_MAX"){
dfsurf <- data.frame("Date" = format(as.POSIXct(paste(fulldf$LST_DATE, time), format = "%Y%m%d %H:%M"), format = "%Y-%m-%d %H:%M"),
"Data" = fulldf[, "SUR_TEMP"]) %>% na.omit()
dfsurf <- dfsurf[dfsurf$Date >= as.Date(paste0("2017-0", month, "-01")) &
dfsurf$Date <= as.Date(paste0("2017-0", month, "-31")), ]
df$Data = mapply(air_temp_profile_neutral, df$Data, zr=2, z0=0.05, z=0.01, dfsurf$Data)
}
return (df)
}
# crn::downloadCRN(url = CRN.HOURLY.URL, directory = HOURLY_DIR, years = 2017)
# crn::collateHourly(HOURLY_DIR)
# df <- read.table("CRN_Hourly_2021-02-17.dat")
# colnames(df) <- headers
# df[df < -9000] <- NA
# # df[df == -99] <- NA
# time <- paste0(floor(df$LST_TIME / 100), ":", df$LST_TIME %% 100)
# df$Date <- format(as.POSIXct(paste(df$LST_DATE, time), format = "%Y%m%d %H:%M"), format = "%Y-%m-%d %H:%M")
#
# df <- df[, c("WBANNO", "Date", "LONGITUDE", "LATITUDE", "T_MAX", "T_MIN", "SOLARAD", "SUR_TEMP")]
# original <- df
# Jan <- df[df$Date >= as.Date("2017-01-01") & df$Date <= as.Date("2017-01-31"), ]
# Jul <- df[df$Date >= as.Date("2017-07-01") & df$Date <= as.Date("2017-07-31"), ]
#
# fwrite(Jan, "USCRNmap_1")
# fwrite(Jul, "USCRNmap_7")
# Variables
# SUR_TEMP
# T_MAX
# SOLARAD
mapUSCRN <- function(var, month) {
#var <- ifelse(var == "SURFACE_TEMPERATURE", "SUR_TEMP", ifelse(var == "AIR_TEMPERATURE", "T_MAX", "SOLARAD"))
stations <- fread("CRN_stations.csv", sep = ",") %>% as.data.frame()
df <- fread(paste0("Data/CRN/USCRNmap_", month, ".csv")) %>% as.data.frame()
days <- c()
for (i in 1:31) {
days <- c(days, paste0("2017-0", month, "-", i))
}
fullDf <- data.frame(Date = rep(days, each = 24),
Hour = 0:23)
fullDf$Date <- format(as.POSIXct(paste0(fullDf$Date, " ", fullDf$Hour, ":00")), format = "%Y-%m-%d %H:%M")
for (i in 1 : nrow(stations)) {
station <- stations$Name[i]
stationData <- df[df$WBANNO == stations$WBANNO[i], c("Date", var)]
stationData[stationData < - 30] <- NA # Montrose, CO has surface temperature values that are completely off
stationData <- stationData %>% set_colnames(c("Date", station)) %>% na.omit()
fullDf <- merge(fullDf, stationData, by = "Date", all = T)
}
return (fullDf)
}
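# Illustrative calls (file paths under Data/CRN/ and the station/variable names are
# assumptions about the expected inputs, matching the comments above):
# nunn_jul <- grabUSCRN("SUR_TEMP", "Nunn", 7)      # hourly July surface temperature, Nunn CO
# spok_jan <- grabUSCRN1cm("T_MAX", "Spokane", 1)   # January air temperature scaled to 1 cm height
# jul_map  <- mapUSCRN("T_MAX", 7)                  # station-by-hour table for mapping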
|
/R/USCRN.R
|
no_license
|
trenchproject/RShiny_Microclim
|
R
| false | false | 5,613 |
r
|
#################################################################################
##
## R package spd by Alexios Ghalanos Copyright (C) 2008-2013
## This file is part of the R package spd.
##
## The R package spd is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## The R package spd is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
#################################################################################
# Developer Note: extendable class and method for different tail distributions.
# perhaps as we add more tailfit models it would make sense to create a class and
# method for creating a distribution design object which would hold the specific details
# which would then be passed on to the fit (i.e. distributionSpec)
spdfit<-function(data, upper = 0.9, lower = 0.1, tailfit="GPD", type = c("mle", "pwm"), kernelfit = c("normal","box","epanech","biweight","triweight"), information = c("observed", "expected"), title = NULL, description = NULL,...)
{
UseMethod("spdfit")
}
.spdfit<-function(data, upper = 0.9, lower = 0.1, tailfit = "GPD", type = c("mle", "pwm"), kernelfit = c("normal","box","epanech","biweight","triweight"), information = c("observed", "expected"), title = NULL, description = NULL,...)
{
ans<-switch(tailfit,
GPD = .gpdtails(data, upper, lower, tailfit, type, kernelfit, information, title, description,...))
#GEV = .gevtails(data, upper, lower, tailfit, type, kernelfit, information, title, description,...))
return(ans)
}
setMethod(f="spdfit", definition=.spdfit)
#-------------------------------------------------------------------------------------------------------------------
.gpdtails<-function(data, upper = 0.9, lower = 0.1, tailfit="GPD", type = c("mle", "pwm"), kernelfit = c("normal","box","epanech","biweight","triweight"), information = c("observed", "expected"), title = NULL, description = NULL,...)
{
# need to add validity checks for type, kernel and information
if(!missing(kernelfit) & length(kernelfit)>1) kernelfit<-"normal"
if(missing(kernelfit)) kernelfit<-"normal"
if(is.null(title)) title=""
if(is.null(description)) description=""
x=data
x = sort(as.matrix(x)[,1])
call = match.call()
type = match.arg(type)
kernel=match.arg(kernelfit)
information = match.arg(information)
# Check Type and Convert:
x = as.vector(x)
N = length(x)
lp = trunc(N*lower)
up = trunc(N*(1-upper))
nUpper = x[N - up]
nLower = x[lp]
#upper tail
upperExceedances = x[x > nUpper]
upperExcess = upperExceedances - nUpper
#lower tail
lowerExceedances = x[x < nLower]
lowerExcess = nLower - lowerExceedances
#estimate GPD Tails and kernel interior
upperfit = gpdfit(x, nUpper, type = type, information = information, title = title , description = description, ...)
lowerfit = gpdfit(-x, -nLower, type = type, information = information, title = title , description = description, ...)
kernelFit = bkde(x, kernelfit, gridsize=as.integer(N),range.x=c(1.5*min(x),1.5*max(x)))
if (is.null(title)) title = "GPD Two-Tail Parameter Estimation"
if (is.null(description)) description = .description()
new("GPDTAILS",
call = call,
method = type,
kernel = kernelfit,
data = as.matrix(data),
threshold = list(upper = nUpper , lower = nLower),
ptails = list(upper = upper, lower = lower),
fit = list(upperFit = upperfit, lowerFit = lowerfit, kernelFit = kernelFit),
title = title,
description = description)
}
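# Minimal usage sketch (illustrative only): fit GPD tails plus a kernel-smoothed interior
# to a vector of returns/residuals. Argument values are examples, not recommendations.
# x   <- rnorm(2000)
# fit <- spdfit(x, upper = 0.9, lower = 0.1, tailfit = "GPD", type = "pwm", kernelfit = "normal")
# fit@fit$upperFit; fit@fit$lowerFit   # the two GPD tail fits stored in the returned object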
|
/spd/R/methods-spdFit.R
|
no_license
|
ingted/R-Examples
|
R
| false | false | 3,981 |
r
|
############################################################
# #
# Level of education #
# #
############################################################
# Load packages
library(tidyverse)
library(magrittr)
# Palettes
pal_za <- c('#9ecae1', '#3182bd')
scales::show_col(pal_za)
# Load data
data <- read_csv('data/education.csv')
# Glimpse
glimpse(data)
# Process data
data %<>%
mutate(education = factor(education),
education = fct_reorder(education,
desc(estimate_chronic)))
# Plot
pp <- ggplot(data = data) +
aes(x = education,
y = estimate_chronic,
ymin = ci.low_chronic,
ymax = ci.high_chronic) +
geom_errorbar(size = 1,
width = 0.25,
colour = '#000000') +
geom_point(size = 8,
shape = 21,
stroke = 1,
fill = pal_za[[2]],
colour = '#000000') +
geom_text(aes(y = 40,
label = paste0('(', unweighted_n, ')')),
size = 6) +
scale_x_discrete(labels = c('None',
'Primary\n(grades: 1-7)',
'Secondary\n(grades: 8-12)',
'Tertiary\n(> grade 12)')) +
labs(title = 'F: Prevalence by level of education',
subtitle = 'Numbers in parentheses show the unweighted sample sizes',
x = 'Level of education',
y = 'Prevalence of chronic pain (%)') +
scale_y_continuous(limits = c(0, 40)) +
theme_bw(base_size = 20) +
theme(panel.grid = element_blank(),
plot.subtitle = element_text(size = 14),
axis.title.y = element_text(margin = margin(r = 1,
unit = 'lines')),
axis.title.x = element_text(margin = margin(t = 1,
unit = 'lines')),
axis.text.y = element_text(colour = '#000000'),
axis.text.x = element_text(colour = '#000000'))
# Save
ggsave(filename = 'figures/supplement-1-F-education.png',
plot = pp,
height = 8,
width = 8)
|
/supplement-plot-F-education.R
|
permissive
|
kamermanpr/za-pain-epidemiology
|
R
| false | false | 2,305 |
r
|
\name{pacotestset}
\alias{pacotestset}
\title{Create and Alter a Pacotest Options List}
\description{ The function creates or updates a list object, which is required for applying the \code{\link{pacotest}} function.}
\arguments{
\item{pacotestOptions}{
A options list for the \code{\link{pacotest}} function generated by the \code{\link{pacotestset}} function.
}
\item{testType}{
A string which specifies the type of the test for testing the simplifying assumption.
Possible values: \code{CCC} | \code{VI}
}
\item{grouping}{
For \code{testType = CCC}:
The grouping method which is used to obtain a partitioning of the support of the conditioning variable W.
Possible values: \code{TreeCCC} | \code{SumMedian} | \code{SumThirdsI} | \code{SumThirdsII} | \code{SumThirdsIII} | \code{SumQuartiles} | \code{ProdMedian} | \code{ProdThirdsI} | \code{ProdThirdsII} | \code{ProdThirdsIII} | \code{ProdQuartiles} | \code{TreeEC} | \code{TreeECOV}
}
\item{expMinSampleSize}{
For \code{testType = CCC} with \code{grouping = TreeCCC | TreeECOV | TreeEC}:
The minimum number of observations which are allocated to a group in the decision tree learning process. The default value is \code{100}.
}
\item{aggInfo}{
For \code{testType = CCC} with \code{grouping = TreeCCC | TreeECOV | TreeEC}:
The method used for aggregating information in the conditioning set. The information in the conditioning set can be aggregated by either taking the mean of all variables or the pairwise mean. The result is added as an additional variable which can be used by the decision tree to partition the support of the conditioning variable W.
Possible values: \code{none} | \code{meanAll} | \code{meanPairwise}
}
\item{withEstUncert}{
For \code{testType = CCC}:
A logical variable indicating whether the asymptotic-variance covariance matrix of the estimated correlations should be corrected for the estimation uncertainty of the probability integral transforms.
}
\item{estUncertWithRanks}{
For \code{testType = CCC}:
A logical variable indicating whether the asymptotic-variance covariance matrix of the estimated correlations should be corrected for the estimation uncertainty induced by using a semiparametric estimator for the vine copula, i.e., empirical cdf's for the univariate margins and parametric copula families as building blocks of the R-vine copula.
}
\item{finalComparison}{
For \code{testType = CCC} with \code{grouping = TreeCCC | TreeECOV | TreeEC}:
A variable specifying whether at the end of the decision tree all subsets being part of the partition are compared against each other or whether only the pair with the highest value of the test statistic is used.
Possible values: \code{pairwiseMax} | \code{all}
}
\item{penaltyParams}{
For \code{testType = CCC} with \code{grouping = TreeCCC | TreeECOV | TreeEC}:
A vector of length two, specifying the functional form of the penalty. The penalty is a function of the sample size n and chosen to be lambda(n) = cn^(-beta). The first entry of the vector specifies the level c of the penalty and needs to be a positive real number. The second entry of the vector specifies the power beta of the penalty and needs to be chosen from the interval (0,1).
}
\item{gamma0Partition}{
For \code{testType = CCC} with \code{grouping = TreeCCC | TreeECOV | TreeEC}:
The gamma0 partition. I.e., the partition which is favoured via the penalty under the H0.
Possible values: \code{SumMedian} | \code{SumThirdsI} | \code{SumThirdsII} | \code{SumThirdsIII} | \code{SumQuartiles} | \code{ProdMedian} | \code{ProdThirdsI} | \code{ProdThirdsII} | \code{ProdThirdsIII} | \code{ProdQuartiles}
}
\item{groupedScatterplots}{
For \code{testType = CCC}:
A logical whether grouped scatterplots should be produced.
}
\item{decisionTreePlot}{
For \code{testType = CCC}:
A logical whether the partition of the support of W should be illustrated as a decision tree plot.
}
\item{numbBoot}{For \code{testType = VI}:
The number of bootstrap replications for computing p-values using the multiplier bootstrap approach.
}
}
\details{
Calling without any arguments prints all possible options.
\preformatted{pacotestset}
Calling with a string, that specifies the test type, gives back a option list with the default values corresponding to each test.
\preformatted{pacotestOptions = pacotestset('CCC')}
\preformatted{pacotestOptions = pacotestset('VI')}
Calling with pairs of parameter names and values creates a \code{pacotestOptions} list in which the named parameters have the specified values.
\preformatted{pacotestOptions = pacotestset('Name1',Value1,'Name2',Value2,...)}
Calling with an existing \code{pacotestOptions} list checks the list for consistency.
\preformatted{pacotestset(pacotestOptions)}
Calling with an existing \code{pacotestOptions} list and pairs of parameter names and values creates a copy of the existing list, where the named parameters are updated with the provided values.
\preformatted{pacotestOptionsNew = pacotestset(pacotestOptions,'Name1',Value1,'Name2',Value2,...)}
}
\value{The function returns a \code{pacotestOptions} list which can be used as input argument for the functions \code{\link{pacotest}}, \code{\link{pacotestRvineSeq}} and \code{\link{pacotestRvineSingleCopula}}.
}
\author{
Malte S. Kurz
}
\seealso{
\code{\link{pacotest-package}}, \code{\link{pacotest}}, \code{\link{pacotestRvineSeq}}, \code{\link{pacotestRvineSingleCopula}}
}
\references{
Kurz, M. S. and F. Spanhel (2017), "Testing the simplifying assumption in high-dimensional vine copulas", ArXiv e-prints \url{https://arxiv.org/abs/1706.02338}.
}
|
/man/pacotestset.Rd
|
no_license
|
SwapanK/pacotest
|
R
| false | false | 5,686 |
rd
|
#' Bird Function
#'
#' Multimodal single-objective test function. The implementation is based on the
#' mathematical formulation
#' \deqn{f(\mathbf{x}) = (\mathbf{x}_1 - \mathbf{x}_2)^2 + \exp((1 - \sin(\mathbf{x}_1))^2)\cos(\mathbf{x}_2) + \exp((1 - \cos(\mathbf{x}_2))^2)\sin(\mathbf{x}_1).}
#' The function is restricted to two dimensions with \eqn{\mathbf{x}_i \in [-2\pi, 2\pi], i = 1, 2.}
#'
#' @references S. K. Mishra, Global Optimization By Differential Evolution and
#' Particle Swarm Methods: Evaluation On Some Benchmark Functions, Munich Research
#' Papers in Economics.
#'
#' @template ret_smoof_single
#' @export
makeBirdFunction = function() {
makeSingleObjectiveFunction(
name = "Bird Function",
fn = function(x) {
a = (x[1] - x[2])^2
b = exp((1 - sin(x[1]))^2) * cos(x[2])
c = exp((1 - cos(x[2]))^2) * sin(x[1])
return(a + b + c)
},
par.set = makeNumericParamSet(
len = 2L,
id = "x",
lower = c(-2 * pi, -2 * pi),
upper = c(2 * pi, 2 * pi),
vector = TRUE
),
tags = attr(makeBirdFunction, "tags"),
global.opt.params = matrix(
c(4.70104, 3.15294,
-1.58214, -3.13024),
ncol = 2, byrow = TRUE),
global.opt.value = -106.764537
)
}
class(makeBirdFunction) = c("function", "smoof_generator")
attr(makeBirdFunction, "name") = c("Bird Function")
attr(makeBirdFunction, "type") = c("single-objective")
attr(makeBirdFunction, "tags") = c("continuous", "differentiable", "non-separable", "non-scalable", "multimodal")
|
/R/sof.bird.R
|
no_license
|
fmumbi/smoof
|
R
| false | false | 1,534 |
r
|
# Climate indices - this gives MEI impact on overlap
library(knitr)
library(readr)
library(tidyverse)
library(R2jags)
library(abind)
library(boot)
library(MCMCvis)
library(reshape2)
library(truncnorm)
library(lattice)
library(knitr)
library(kableExtra)
mei.data <- read.csv("mei_overlap.csv", header=F, stringsAsFactors = F)
colnames(mei.data)<- c("year","island","mei","overlap")
mei.data$island[mei.data$island=="midway"] <- 1
mei.data$island[mei.data$island=="tern"] <- 2
mei.data <- mei.data[c(2,1,3,4,5,7,6,8,9,10),]
data_by_group <- mei.data %>% group_split(island)
year=c(1,2,3,4,5,1,2,3,4,5)
jags.directory="/usr/local/bin"
JAGS<-"climateANOVA.jags"
cat("
model {
# likelihood
for (i in 1:10){
y[i] ~ dbeta(a[i], b[i])
a[i] <- mu[i]*phi
b[i] <- (1-mu[i])*phi
logit(mu[i]) <- alpha + beta_island[island[i]]*mei[i]+beta_year[year[i]]*mei[i]
}
for (j in 1:2){
beta_island[j] ~ dnorm(slope_hyper_island,sd_hyper_island)
}
for (j in 1:10){
beta_year[j] ~ dnorm(slope_hyper_year,sd_hyper_year)
}
# contrasts
for (j in 1:5){
delta_year[j] <- beta_year[j]-beta_year[1]
}
for (j in 6:10){
delta_year[j] <- beta_year[j]-beta_year[6]
}
for (j in 1:2){
delta_ternminusmid[j] <- beta_island[j]-beta_island[1]
}
# priors
alpha ~ dnorm(0,0.001)
slope_hyper_island ~ dnorm(0,0.25)
slope_hyper_year ~ dnorm(0,0.25)
sd_hyper_island ~ dnorm(0,0.25)T(0,)
sd_hyper_year ~ dnorm(0,0.25)T(0,)
  phi ~ dgamma(0.1,0.1) # precision-type parameter of the beta distribution: larger phi means smaller variance
}",fill = TRUE, file=JAGS)
Dat <- list(
y=mei.data$overlap,
mei=mei.data$mei,
island = mei.data$island,
year=year
)
InitStage <- list(list(slope_hyper_year=-2,slope_hyper_island=-2,phi=0.0001, sd_hyper_island= 0.1,sd_hyper_year= 0.1),
list(slope_hyper_year=0,slope_hyper_island=0,phi=1, sd_hyper_island= 1,sd_hyper_year= 1),
list(slope_hyper_year=2,slope_hyper_island=2,phi=10, sd_hyper_island= 5,sd_hyper_year= 5))
# ParsStage <- c("mu","sigma", "beta_island", "slope_hyper", "alpha", "delta_ternminusmid")
ParsStage <- c("delta_year","beta_island","beta_year", "delta_ternminusmid","mu")
ni <- 120000 # number of draws from the posterior
nt <- 1 # thinning rate
nb <- 20000 # number to discard for burn-in
nc <- 3 # number of chains
results = jags(inits=InitStage,
n.chains=nc,
model.file=JAGS,
working.directory=getwd(),
data=Dat,
parameters.to.save=ParsStage,
n.thin=nt,
n.iter=ni,
n.burnin=nb,
DIC=T)
results
pretty<- MCMCsummary(results, params="mu")
MCMCtrace(results, pdf=T)
kable(pretty,
digits=3, booktabs=TRUE,
caption='Summary of Posteriors for model parameters') %>%
kable_styling(latex_options = "hold_position")
MCMCplot(results, params="means")
MCMCtrace(results, pdf=T)
MCMCtrace(results, pdf=F, type="density", params = "delta_ternminusmid[2]")
ternminusmid_slopepparam<- results$BUGSoutput$sims.matrix[,11]
save(ternminusmid_slopepparam, file="ternminusmid_slopeparam")
hist(ternminusmid_slopepparam, breaks=200)
|
/lme_mei_analysis_beta.R
|
no_license
|
dallasjordan/bayesian_project
|
R
| false | false | 3,234 |
r
|
combine_animation_frames_video <- function(out_file, animation_cfg) {
#build video from pngs with ffmpeg
#note that this will use all frames in 6_visualize/tmp
#have to rename files since can't use globbing with ffmpeg on Windows :(
png_frames <- list.files('6_visualize/tmp', full.names = TRUE)
file_name_df <- tibble(origName = png_frames,
countFormatted = zeroPad(1:length(png_frames), padTo = 3),
newName = file.path("6_visualize/tmp", paste0("frame_", countFormatted, ".png")))
file.rename(from = file_name_df$origName, to = file_name_df$newName)
# added ffmpeg better code for reducing video size
# see https://unix.stackexchange.com/questions/28803/how-can-i-reduce-a-videos-size-with-ffmpeg
# and https://slhck.info/video/2017/02/24/crf-guide.html
shell_command <- sprintf(
"ffmpeg -y -framerate %s -i 6_visualize/tmp/frame_%%03d.png -r %s -pix_fmt yuv420p -vcodec libx264 -crf 27 %s",
animation_cfg$frame_rate, animation_cfg$output_frame_rate, "6_visualize/tmp/pre_downsized.mp4")
system(shell_command)
system(sprintf("ffmpeg -i %s -vf scale=iw/2:-1 %s", "6_visualize/tmp/pre_downsized.mp4", out_file))
file.rename(from = file_name_df$newName, to = file_name_df$origName)
}
combine_animation_frames_gif <- function(out_file, animation_cfg) {
#build gif from pngs with magick and simplify with gifsicle
#note that this will use all frames in 6_visualize/tmp
png_files <- paste(list.files('6_visualize/tmp', full.names = TRUE), collapse=' ')
tmp_dir <- '6_visualize/tmp/magick'
if(!dir.exists(tmp_dir)) dir.create(tmp_dir)
# create gif using magick
magick_command <- sprintf(
'convert -define registry:temporary-path=%s -limit memory 24GiB -delay %d -loop 0 %s %s',
tmp_dir, animation_cfg$frame_delay_cs, png_files, out_file)
if(Sys.info()[['sysname']] == "Windows") {
magick_command <- sprintf('magick %s', magick_command)
}
system(magick_command)
# simplify the gif with gifsicle - cuts size by about 2/3
gifsicle_command <- sprintf('gifsicle -b -O3 -d %s --colors 256 %s',
animation_cfg$frame_delay_cs, out_file)
system(gifsicle_command)
}
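# Illustrative calls (the animation_cfg fields referenced above are frame_rate,
# output_frame_rate and frame_delay_cs; the file names and values here are only examples):
# combine_animation_frames_video("6_visualize/out/animation.mp4",
#                                list(frame_rate = 8, output_frame_rate = 30))
# combine_animation_frames_gif("6_visualize/out/animation.gif",
#                              list(frame_delay_cs = 12))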
|
/6_visualize/src/combine_animation_frames.R
|
no_license
|
lindsayplatt/gage-conditions-gif
|
R
| false | false | 2,212 |
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 827
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 827
c
c Input Parameter (command line, file):
c input filename QBFLIB/Letombe/Abduction/aim-50-3_4-yes1-1-00.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 368
c no.of clauses 827
c no.of taut cls 1
c
c Output Parameters:
c remaining no.of clauses 827
c
c QBFLIB/Letombe/Abduction/aim-50-3_4-yes1-1-00.qdimacs 368 827 E1 [] 1 50 318 827 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Letombe/Abduction/aim-50-3_4-yes1-1-00/aim-50-3_4-yes1-1-00.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false | false | 628 |
r
|
## SANDRA COBO OLLERO --- 9 MARCH 2021
## this is an example to check whether I can connect GitHub with RStudio
plot(iris$Sepal.Length)
summary(iris$Sepal.Length)
##https://www.r-bloggers.com/2015/07/rstudio-and-github/
|
/test2.R
|
no_license
|
sandritachinita/test2
|
R
| false | false | 234 |
r
|
testlist <- list(x = structure(c(2.31584307393098e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::doubleCenterBiasCorrected,testlist)
str(result)
|
/multivariance/inst/testfiles/doubleCenterBiasCorrected/AFL_doubleCenterBiasCorrected/doubleCenterBiasCorrected_valgrind_files/1613139770-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false | false | 321 |
r
|
#' lookup_users
#'
#' @description Returns Twitter user data_frame object for
#' specified user_ids or screen_names.
#'
#' @param users User id or screen name of target user.
#' @param token OAuth token (1.0 or 2.0). By default
#' \code{token = NULL} fetches a non-exhausted token from
#' an environment variable @describeIn tokens.
#' @param parse Logical, indicating whether or not to parse
#' return object into data frame(s).
#' @param clean_tweets logical indicating whether to remove non-ASCII
#' characters in text of tweets. defaults to TRUE.
#'
#' @seealso \url{https://dev.twitter.com/overview/documentation}
#' @examples
#' \dontrun{
#' # lookup vector of 1 or more user_id or screen_name
#' users <- c("potus", "hillaryclinton", "realdonaldtrump",
#' "fivethirtyeight", "cnn", "espn", "twitter")
#'
#' usr_df <- lookup_users(users)
#' usr_df
#'
#' # view tweet data for these users via tweets_data()
#' tweets_data(usr_df)
#' }
#'
#' @return json response object (max is 18000 per token)
#' @family users
#' @export
lookup_users <- function(users, token = NULL, parse = TRUE,
clean_tweets = TRUE) {
if (is.list(users)) {
users <- unlist(users, use.names = FALSE)
}
  if (length(users) < 101) {
    usr <- .user_lookup(users, token)
  } else {
    # look up users in batches of 100 ids per request (API cap of 18000 per token)
    if (length(users) > 18000) {
      users <- users[1:18000]
    }
    n.times <- ceiling(length(users) / 100)
    from <- 1
    usr <- vector("list", n.times)
    for (i in seq_len(n.times)) {
      to <- from + 99
      if (to > length(users)) {
        to <- length(users)
      }
      usr[[i]] <- .user_lookup(users[from:to], token)
      from <- to + 1
      if (from > length(users)) break
    }
  }
if (parse) {
usr <- parser(usr, clean_tweets = clean_tweets)
usr <- attr_tweetusers(usr[c("users", "tweets")])
}
usr
}
.user_lookup <- function(users, token = NULL) {
query <- "users/lookup"
if (is.list(users)) {
users <- unlist(users)
}
stopifnot(is.atomic(users))
if (length(users) > 100) {
users <- users[1:100]
}
params <- list(id_type = paste(users, collapse = ","))
names(params)[1] <- .ids_type(users)
url <- make_url(
query = query,
param = params)
token <- check_token(token, query = "users/lookup")
resp <- TWIT(get = TRUE, url, token)
from_js(resp)
}
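# Illustration of the batching above (not part of the package): ids are looked up in
# blocks of at most 100 per request, e.g. 250 ids split into batches of 100/100/50.
# users_demo <- paste0("user", 1:250)
# split(users_demo, ceiling(seq_along(users_demo) / 100))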
|
/R/users.R
|
no_license
|
hucara/rtweet
|
R
| false | false | 2,336 |
r
|
setwd("~/GettingandCleaningData")
if (!file.exists("data")){dir.create("data")}
##read test data
data<-scan("./data/UCI HAR Dataset/test/X_test.txt",strip.white=TRUE)
n=length(data)/561
data<-matrix(data,nrow=n,ncol=561,byrow=T)
subjid<-read.table("./data/UCI HAR Dataset/test/subject_test.txt")
activity<-read.table("./data/UCI HAR Dataset/test/y_test.txt")
data<-cbind(subjid,activity,data)
testdata<-data.frame(data)
#read train data
data<-scan("./UCI HAR Dataset/train/X_train.txt",strip.white=TRUE)
n=length(data)/561
data<-matrix(data,nrow=n,ncol=561,byrow=T)
subjid<-read.table("./data/UCI HAR Dataset/train/subject_train.txt")
activity<-read.table("./data/UCI HAR Dataset/train/y_train.txt")
data<-cbind(subjid,activity,data)
traindata<-data.frame(data)
#Step 1: Merges the training and the test sets to create one data set
data<-rbind(traindata,testdata)
#get feature names from featurex.txt
colnames<-read.table("./data/UCI HAR Dataset/features.txt",sep="")
colnames<-colnames[,2]
colnames<-as.character(colnames)
colnames<-c("subjid","activity",colnames)
names(data)<-colnames
#step 2: Extracts only the measurements on the mean and standard deviation for each measurement.
data<-data[,c(1,2,grep("mean",colnames),grep("std",colnames))]
#Step 3: Uses descriptive activity names to name the activities in the data set
act_label<-read.table("./data/UCI HAR Dataset/activity_labels.txt")
#Step 4: Appropriately labels the data set with descriptive variable names.
names(act_label)<-c("label","activity")
data$activity<-factor(data$activity,labels=act_label$activity,levels=act_label$label)
#create data2 as an independent tidy data set with the average of each variable for each
#activity and each subject.
tmp<-interaction(data$subjid,data$activity)
#length(tmp)
s <- split(data,tmp)
data2<-sapply(s,function(x) colMeans(x[,3:81]))
data2.T <- t(data2[,1:ncol(data2)])
nametmp<-rownames(data2.T)
subjid<-gsub("[^0-9*]","",nametmp)
subjid<-as.numeric(subjid)
activity<-gsub("[0-9*.]","",nametmp)
activity
subjid
data2.T<-data.frame(subjid,activity,data2.T)
write.table(data2.T, file = "tidydataset.txt",row.name=FALSE)
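# --- Added cross-check (illustrative sketch, not part of the original script) ---
# The split()/sapply() block above computes per-(subject, activity) means.
# aggregate() arrives at the same table more directly and can serve as a sanity check:
check <- aggregate(data[, 3:ncol(data)],
                   by = list(subjid = data$subjid, activity = data$activity),
                   FUN = mean)
head(check[, 1:5])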
|
/run_analysis.R
|
no_license
|
RongSu/GettingCleaningDataProject
|
R
| false | false | 2,133 |
r
|
setwd("~/GettingandCleaningData")
if (!file.exists("data")){dir.create("data")}
##read test data
data<-scan("./data/UCI HAR Dataset/test/X_test.txt",strip.white=TRUE)
n=length(data)/561
data<-matrix(data,nrow=n,ncol=561,byrow=T)
subjid<-read.table("./data/UCI HAR Dataset/test/subject_test.txt")
activity<-read.table("./data/UCI HAR Dataset/test/y_test.txt")
data<-cbind(subjid,activity,data)
testdata<-data.frame(data)
#read train data
data<-scan("./UCI HAR Dataset/train/X_train.txt",strip.white=TRUE)
n=length(data)/561
data<-matrix(data,nrow=n,ncol=561,byrow=T)
subjid<-read.table("./data/UCI HAR Dataset/train/subject_train.txt")
activity<-read.table("./data/UCI HAR Dataset/train/y_train.txt")
data<-cbind(subjid,activity,data)
traindata<-data.frame(data)
#Step 1: Merges the training and the test sets to create one data set
data<-rbind(traindata,testdata)
#get feature names from featurex.txt
colnames<-read.table("./data/UCI HAR Dataset/features.txt",sep="")
colnames<-colnames[,2]
colnames<-as.character(colnames)
colnames<-c("subjid","activity",colnames)
names(data)<-colnames
#step 2: Extracts only the measurements on the mean and standard deviation for each measurement.
data<-data[,c(1,2,grep("mean",colnames),grep("std",colnames))]
#Step 3: Uses descriptive activity names to name the activities in the data set
act_label<-read.table("./data/UCI HAR Dataset/activity_labels.txt")
#Step 4: Appropriately labels the data set with descriptive variable names.
names(act_label)<-c("label","activity")
data$activity<-factor(data$activity,labels=act_label$activity,levels=act_label$label)
#creat data2 as independent tidy data set with the average of each variable for each
#activity and each subject.
tmp<-interaction(data$subjid,data$activity)
#length(tmp)
s <- split(data,tmp)
data2<-sapply(s,function(x) colMeans(x[,3:81]))
data2.T <- t(data2[,1:ncol(data2)])
nametmp<-rownames(data2.T)
subjid<-gsub("[^0-9*]","",nametmp)
subjid<-as.numeric(subjid)
activity<-gsub("[0-9*.]","",nametmp)
activity
subjid
data2.T<-data.frame(subjid,activity,data2.T)
write.table(data2.T, file = "tidydataset.txt",row.name=FALSE)
|
library(openxlsx)
library(lubridate)
wb<-read.csv("Book1.csv", header = T)
# Sina fix rates and acquired
wbco<-read.csv("wilshire charge offs cleaned.csv", header=T)
colnames(wbco)<-c("Note.Number","first_co_date","co_amt")
wb<-merge(wb,wbco, by="Note.Number", all.x=T)
rm(wbco)
wb$Y<-0
wb$Y[which(wb$Non.Accrual.Code%in%c(2,4))]<-1
wb$Y[which(wb$co_amt==1)]<-1
# Sina fix NAICS code
temp<-as.data.frame(wb$NAICS.Code)
temp$temp<-as.numeric(as.character(substr(as.character(temp[,1]),1,2)))
wb$temp_number<-temp$temp
rm(temp)
wb$Yr_origination<-year(mdy(wb$originationdate))
wb$Mn_origination<-month(mdy(wb$originationdate))
wb$Q_origination<-1
wb$Q_origination[which(wb$Mn_origination%in%c(4,5,6))]<-2
wb$Q_origination[which(wb$Mn_origination%in%c(7,8,9))]<-3
wb$Q_origination[which(wb$Mn_origination%in%c(10,11,12))]<-4
wb$Yr_maturity<-year(mdy(wb$maturitydate))
wb$Mn_maturity<-month(mdy(wb$maturitydate))
wb$Q_maturity<-1
wb$Q_maturity[which(wb$Mn_maturity%in%c(4,5,6))]<-2
wb$Q_maturity[which(wb$Mn_maturity%in%c(7,8,9))]<-3
wb$Q_maturity[which(wb$Mn_maturity%in%c(10,11,12))]<-4
wb$Yr_file<-year(mdy(wb$filedate))
wb$Mn_file<-month(mdy(wb$filedate))
wb$Q_file<-1
wb$Q_file[which(wb$Mn_file%in%c(4,5,6))]<-2
wb$Q_file[which(wb$Mn_file%in%c(7,8,9))]<-3
wb$Q_file[which(wb$Mn_file%in%c(10,11,12))]<-4
wb$ttm_m<-12*(wb$Yr_maturity-wb$Yr_file)+(wb$Mn_maturity-wb$Mn_file)
wb$ttm_q<-4*(wb$Yr_maturity-wb$Yr_file)+(wb$Q_maturity-wb$Q_file)
wb$loan_age_q<-4*(wb$Yr_file-wb$Yr_origination)+(wb$Q_file-wb$Q_origination)
wb$term_q<-4*(wb$Yr_maturity-wb$Yr_origination)+(wb$Q_maturity-wb$Q_origination)
wb$pob<-100*wb$loan_age_q/wb$term_q
# Sina fix Rate.Over.Split name
unique_accountNo<-unique(wb$Note.Number)
temp<-0
for(i in 1:length(unique_accountNo)){
temp[i]<-as.Date(min(mdy(wb$filedate[which(wb$Note.Number==unique_accountNo[i]&wb$Y==1)])), origin="1970-01-01")
}
temp<-cbind(unique_accountNo,as.Date(temp,origin = "1970-01-01"))
colnames(temp)<-c("Note.Number","min_nonAccDate")
wb<-merge(wb,temp,by="Note.Number")
rm(i)
rm(temp)
rm(unique_accountNo)
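# --- Added alternative (illustrative sketch; not used by the original pipeline) ---
# The for-loop above is O(accounts x rows). The same per-account earliest
# non-accrual date can be computed in one pass; accounts with no Y == 1 rows
# simply drop out instead of triggering min() warnings.
minacc_alt <- aggregate(list(min_nonAccDate_alt = mdy(wb$filedate[wb$Y == 1])),
                        by = list(Note.Number = wb$Note.Number[wb$Y == 1]),
                        FUN = min)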
wb$f_nonAccDate<-0
wb$f_nonAccDate[which(is.na(wb$first_co_date)==T)]<-wb$min_nonAccDate[which(is.na(wb$first_co_date)==T)]
# use pmin (element-wise) rather than min so each loan keeps the earlier of its
# charge-off date and first non-accrual date
wb$f_nonAccDate[which(is.na(wb$first_co_date)==F)]<-pmin(as.numeric(wb$first_co_date[which(is.na(wb$first_co_date)==F)]),as.numeric(wb$min_nonAccDate[which(is.na(wb$first_co_date)==F)]))
wb$deleter<-1
wb$deleter[which(is.na(wb$first_co_date)==T)]<-0
wb$deleter[which(mdy(wb$filedate)<=ymd(wb$first_co_date))]<-0
wb<-wb[which(wb$deleter==0),]
wb$deleter<-1
wb$deleter[which(is.na(wb$min_nonAccDate)==T)]<-0
wb$deleter[which(mdy(wb$filedate)<=as.Date(wb$min_nonAccDate,origin="1970-01-01"))]<-0
wb<-wb[which(wb$deleter==0),]
wb<-wb[which(wb$Yr_maturity>2006),]
wb<-wb[which(wb$ttm_q>0),]
wb$boh_id<-"Wilshere"
wb<-wb[which(wb$Class.Code%in%c(2,3,5,6,10,13,20,21,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,59,60,61,63,99)),]
wb<-wb[which(is.na(wb$NAP...NAIP...NAIP.in.GL)==F&wb$NAP...NAIP...NAIP.in.GL!=0),]
wb<-wb[which(is.na(wb$Rate.Over.Split)==F&wb$Rate.Over.Split!=0),]
correction<-read.csv("property_code_correctio.csv",header=T)
# Sina fix column names
wb<-merge(wb,correction,by="Note.Number", all.x=T)
wb$propertyCodeNew<-wb$Property.Type.Code
wb$propertyCodeNew[which(is.na(wb$New_Code)==F)]<-wb$New_Code[which(is.na(wb$New_Code)==F)]
# Sina fix line 605
wb$boh_rating<-0
wb$boh_rating[which(wb$Loan.Rating.Code1==0)]<-0
wb$boh_rating[which(wb$Loan.Rating.Code1==1000)]<-1
wb$boh_rating[which(wb$Loan.Rating.Code1==2000)]<-2
wb$boh_rating[which(wb$Loan.Rating.Code1==3000)]<-3
wb$boh_rating[which(wb$Loan.Rating.Code1==4000)]<-4
wb$boh_rating[which(wb$Loan.Rating.Code1==5000)]<-4
wb$boh_rating[which(wb$Loan.Rating.Code1==6000)]<-1000
wb$boh_rating[which(wb$Loan.Rating.Code1==7000)]<-2000
wb$boh_rating[which(wb$Loan.Rating.Code1==8000)]<-3000
wb$boh_rating[which(wb$Loan.Rating.Code1==9000)]<-4000
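# --- Added alternative for the rating recode above (illustration only) ---
# The block of which()-assignments maps Loan.Rating.Code1 onto boh_rating.
# A named lookup vector expresses the same mapping in one step:
rating_map <- c("0" = 0, "1000" = 1, "2000" = 2, "3000" = 3, "4000" = 4,
                "5000" = 4, "6000" = 1000, "7000" = 2000, "8000" = 3000, "9000" = 4000)
boh_rating_alt <- unname(rating_map[as.character(wb$Loan.Rating.Code1)])
# all(boh_rating_alt == wb$boh_rating, na.rm = TRUE)  # should agree for the listed codes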
|
/PD/Wilshire.R
|
no_license
|
hyunyouchoi/R-Studio
|
R
| false | false | 4,088 |
r
|
|
#'
#' Read Compact Optical Profiling System (COPS) files in both CSV and TSV formats
#'
#'@param file is the file name without the full path
#'@param number.of.fields.before.date is the number of fields before the date in the file name.
#' For example the file COPS_IML4_150626_1546_C_data_001.csv contains 2 fields before the date.
#'@param instruments.optics is the radiometer ids contained in the file.
#' The default is instruments.optics=c("Ed0", "EdZ", "LuZ")
#'
#'@return
#' A long list is returned. It typically includes matrices for Ed0, EdZ and LuZ or EuZ.
#' For each sensor there is a vector of wavelengths (Ed0.waves, EdZ.waves, etc.).
#' Various data frames are also found:
#' \itemize{
#' \item{Ed0.anc contains two columns for Pitch and Roll;}
#' \item{EdZ.anc contains two columns for Pitch and Roll;}
#' \item{LuZ.anc or EuZ.anc contains two columns for Temperature and Depth;}
#' \item{Others contains other fields for time and BioShade (if available)}
#' }
#' To access the Depth of the profiler, for instance, one needs to use ListName$LuZ.anc$Depth
#'
#'
#' @author Simon Belanger
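#' @examples
#' \dontrun{
#' # Added illustrative example; the file name follows the pattern described
#' # above and is assumed to sit in the working directory.
#' setwd("/path/to/cops/profiles")
#' cops <- read.COPS("COPS_IML4_150626_1546_C_data_001.csv",
#'                   number.of.fields.before.date = 2)
#' cops$LuZ.waves               # wavelengths of the LuZ radiometer
#' summary(cops$LuZ.anc$Depth)  # depth record of the profiler
#' }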
read.COPS <- function(file,number.of.fields.before.date,instruments.optics=c("Ed0", "EdZ", "LuZ")) {
dirdat=getwd()
instruments.others = "NA"
file.parts <- unlist(strsplit(file, "_"))
file.c <- paste(dirdat, file, sep = "/")
dte <- file.parts[number.of.fields.before.date + 1]
tim <- file.parts[number.of.fields.before.date + 2]
# extract information from date found in file name
y <- formatC(substr(dte, 1, 2), format = "d", width = 2, flag = "0")
m <- formatC(substr(dte, 3, 4), format = "d", width = 2, flag = "0")
d <- formatC(substr(dte, 5, 6), format = "d", width = 2, flag = "0")
H <- formatC(substr(tim, 1, 2), format = "d", width = 2, flag = "0")
M <- formatC(substr(tim, 3, 4), format = "d", width = 2, flag = "0")
dte <- as.POSIXct(strptime(paste(y, m, d, H, M, sep = ""), format = "%y%m%d%H%M"))
tim <- as.numeric(H) + as.numeric(M) / 60
# Modified by Simon Belanger on Aug 2016 to process COPS data obtained with uprofile 1.9.10 and after
# These files end by URC.csv or URC.tsv"
if (str_detect(file, "URC.")) {
# print("COPS data acquisition using uProfile 1.9.10 or later")
num <- file.parts[number.of.fields.before.date]
potential.gps.file.without.ext <- paste(c("GPS", file.parts[number.of.fields.before.date + 1]), collapse = "_")
potential.gps.file <- list.files(dirdat, pattern = potential.gps.file.without.ext)
if(length(potential.gps.file) < 0.5) potential.gps.file <- "NO_GPS_FILE"
} else {
num <- file.parts[number.of.fields.before.date + 3]
potential.gps.file.without.ext <- paste(c(file.parts[1:(number.of.fields.before.date + 2)], "gps"), collapse = "_")
potential.gps.file <- list.files(dirdat, pattern = potential.gps.file.without.ext)
if(length(potential.gps.file) < 0.5) potential.gps.file <- "NO_GPS_FILE"
}
# end
# Added by Simon
  ext = tools::file_ext(file.c) # safer than splitting on "." when the path contains other dots
if (ext == "tsv" || ext =="txt") {
x = read.table(file = file.c, header = TRUE, as.is = TRUE, sep = "\t", check.names = FALSE)
} else{ x = read.table(file = file.c, header = TRUE, as.is = TRUE, sep = ",", check.names = FALSE)}
# END
ns <- names(x)
# added by simon to process 2011 data
if (str_detect(ns[1], "]")) {
ns <- sub("[", "", ns, fixed = TRUE)
ns <- sub("]", "", ns, fixed = TRUE)
}
ns=sapply(ns, strsplit, " ", perl=T) # Modified by Simon on June 22 2015
ns = sapply(ns, "[[", 1) # Modified by Simon on June 22 2015
names(x) <- ns
#########################
if(instruments.others == "NA") {
instruments <- instruments.optics
} else {
instruments <- c(instruments.optics, instruments.others)
}
ks <- NULL
for(instr in instruments) {
# print(instr)
k <- grep(paste("^", instr, sep = ""), ns, value = TRUE)
dummy <- x[k]
names(dummy) <- sub(paste("^", instr, sep = ""), "", names(dummy))
names(dummy) <- sub(paste("^", ":", sep = ""), "", names(dummy))
anc <- sapply(
strsplit(names(dummy), NULL),
function(dum) {
!all(dum %in% as.character(0:9))
}
)
waves <- as.numeric(names(dummy[!anc]))
assign(instr, as.matrix(dummy[!anc]))
assign(paste(instr, "anc", sep = "."), dummy[anc])
assign(paste(instr, "waves", sep = "."), waves)
ks <- append(ks, k)
}
k.others <- ns[! ns %in% ks]
Others <- x[k.others]
ret <- list()
for(instr in instruments) {
ret <- c(ret, list(get(instr)))
}
for(instr in instruments) {
ret <- c(ret, list(get(paste(instr, "anc", sep = "."))))
}
for(instr in instruments) {
ret <- c(ret, list(get(paste(instr, "waves", sep = "."))))
}
ret <- c(ret, list(Others))
names(ret) <- c(instruments, paste(instruments, "anc", sep = "."), paste(instruments, "waves", sep = "."), "Others")
ret <- c(ret, list(file = file, potential.gps.file = potential.gps.file))
ret
}
|
/R/read_cops.R
|
no_license
|
raphidoc/Cops
|
R
| false | false | 4,960 |
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/index.annual.cycle.R
\name{index.annual.cycle}
\alias{index.annual.cycle}
\title{Annual cycle statistics}
\usage{
index.annual.cycle(ts, dates, type = c("min", "max", "amp", "relamp"))
}
\arguments{
\item{ts}{A vector containing the data}
\item{dates}{A character (or \code{POSIXct}) vector following the format \dQuote{YYYY-MM-DD}
(i.e., \code{format = "\%Y-\%m-\%d"} as in \code{\link{strptime}}). This is directly passed by the
VALUE objects (either stations or predictions) through the element \code{object$Dates$start}.}
\item{type}{Character string indicating the statistic. Currently implemented options are \code{"min"},
\code{"max"}, \code{"amp"} for amplitude, and \code{"relamp"} for relative amplitude (in \%).}
}
\value{
A float number with the corresponding statistic.
}
\description{
Computes several annual cycle statistics
}
\author{
J. Bedia
}
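% Added illustrative example (not in the original Rd); the input series is synthetic.
\examples{
d <- seq(as.Date("1991-01-01"), as.Date("1993-12-31"), by = "day")
x <- 10 + 8 * sin(2 * pi * as.numeric(format(d, "\%j")) / 365) + rnorm(length(d))
index.annual.cycle(x, as.character(d), type = "amp")
}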
|
/man/index.annual.cycle.Rd
|
no_license
|
Predictia/R_VALUE
|
R
| false | true | 944 |
rd
|
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load RX file and merge with therapeutic class names ('tc1_names')
RX <- read.xport("C:/MEPS/h168a.ssp") %>%
left_join(tc1_names, by = "TC1") %>%
mutate(count = 1)
DRGpers <- RX %>%
filter(RXNDC != "-9" & RXDRGNAM != "-9") %>%
group_by(DUPERSID, VARSTR, VARPSU, PERWT14F, RXDRGNAM) %>%
summarise(n_RX = sum(count), RXXP14X = sum(RXXP14X)) %>%
mutate(count = 1) %>%
ungroup
DRGdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT14F,
data = DRGpers,
nest = TRUE
)
svyby(~count, by = ~RXDRGNAM, FUN = svytotal, design = DRGdsgn)
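# --- Added sketch (not part of the original MEPS table code) ---
# The same design object also supports survey-weighted means, e.g. average
# prescription expenditure per therapeutic drug name:
svyby(~RXXP14X, by = ~RXDRGNAM, FUN = svymean, design = DRGdsgn)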
|
/_check/test_code/pmed/r_codes/totPOP_RXDRGNAM_ind_2014.R
|
permissive
|
HHS-AHRQ/MEPS-summary-tables
|
R
| false | false | 909 |
r
|
|
testlist <- list(A = structure(c(3.21991735724713e-193, 1.17980782037495e-16, 4.14310534109414e-235, 6.76251072361731e-281, 4.76852914066932e+305, 2.87579039425864e+227, 8.44453472300785e-14, 7.03605297312039e-119, 1.23014208619275e+199, 135758725862.383, 1.90368023855323e-163, 3.22669675848698e-206, 9.84523531314516e-285, 8.27330250955826e+125, 3.55272286488137e-212, 6490874891.54328, 6.21471881446177e+76, 3.84073102386631e+43, 4.77272786757479e+213, 9.88883635923669e-294, 1.46185410651695e-210, 2.49380986082252e+40, 2.34415779881272e-114, 3.060911825487e-217, 9.57698196001525e-179, 1.433843140479, 1.06090674398107e-193, 6.19013645171147e-55, 2.04281777793204e-236, 9.68344335507261e-284, 6.80279389052841e+241, 7.62621944834406e-10, 1.7209192687213e-259, 2.12974846571377e-151, 4.39167502548703e+166, 1.58111493414075e+37, 8.63889514769763e-108, 8.62464813508118e-49, 5.67926437158568e+153, 1.29528918278583e-306, 8.76782917980691e+82, 7.2650812584701e+96, 1.30559450891315e+113, 6.49240675428646e-41, 5.04942112164755e-41, 1.89757517484306e+281, 4.34359102800785e+136, 3.50601539753394e-107, 2.74941268315343e+167, 1.43921651142723e+284, 9.5166081650615e+36, 2.28631624962878e+185, 2.20809697527068e-206, 1.98404944024733e+207, 2.12435653693282e+161, 3.85745854479272e-267, 2.65255394908688e-288, 4.56624002337088e-84, 8.00302489104746e-265, 6.14309203518306e-123, 5.70766294799224e-249, 4.64652789365715e+246, 6.44230055914094e+81, 2.79957895240599e-162, 1.12416588721766e-201, 2.04463856486206e-228, 7.77965785189264e-125, 6.3289904307812e-153, 7.52432328136695e-41, 2.12505961521618e-109, 4.64956783063446e+234, 5.27310441501597e-53, 4.1919355844001e+229, 2.13784694513992e-69, 3.09210602547265e-130, 1.52748427369733e+163, 3.02317052006416e-276, 4.24853746667562e+161, 2.01138650315553e+270, 1.472744323039e-166, 3.37620712661777e+280, 338716234844.491, 4.13445393406048e-262, 4.19314771075492e-283, 229351945126719, 7.26310444196829e+131, 1.02719203191576e-107, 3.67671215107312e+157, 7.10580343625079e-106, 3.6550295533692e-107 ), .Dim = 10:9), B = structure(c(3.55944428654874e+63, 5.88861653990707e+227, 7.29972696122358e-182, 1.15694061063689e-174, 3.96431544540977e-305, 1.34524633811746e+271, 2.81898226729946e+220, 3.74805855410615e-74, 3.21006771573613e-86, 3.90404701043125e+157, 1.45195571047368e-264, 4.57802565716036e+271, 3.55559214023013e-152, 1.24598250767768e+143, 4.38804583058248e-36, 2.11445389749214e+167, 1.4987344029081e-30, 1.74501282963535e+142, 3.55757951185258e+297, 1.60785006249443e-212, 1.35482306633123e+120, 3.5077169630152e-119, 2.94917579114907e+20, 1.18449692896935e+148, 3.15803574328943e+43, 6.14741456275161e-157, 0.635825262725393, 6.55550311244084e+128, 1.56380165772758e-288, 7.86479162696159e-109, 4.7408634893347e+138, 1.36631748408693e-70, 9.96928013094796e-113, 2.84729135193153e+252, 2.80038660169727e-08, 3.45930293222387e+103, 5.11349047579308e-252, 1.27844360928017e-289, 1.55307493340479e-64, 4.79562061600702e+78), .Dim = c(5L, 8L)))
result <- do.call(hetGP:::fast_trace,testlist)
str(result)
|
/issuestests/hetGP/inst/testfiles/fast_trace/fast_trace_output/log_457439da60840556f569447e766c0854dd9066bc/fast_trace-test.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false | false | 3,100 |
r
|
|
###################################################################
### Memory-based Collaborative Filtering Algorithm Starter Code ###
###################################################################
### Authors: Cindy Rush
### Project 3
### ADS Spring 2018
########################################################
######## Building the UI matrix for the MS Data ########
########################################################
setwd("/Users/cynthiarush/Dropbox/ADS_Spring_2018/Project3/Class2")
source("functions.R")
setwd("/Users/cynthiarush/Dropbox/ADS_Spring_2018/Project3/Proj3_Data/MS_sample")
# Load the data
MS_train <- read.csv("data_train.csv", as.is = TRUE, header = TRUE)
MS_train <- MS_train[, 2:4]
# Transform from narrow to wide, i.e. user-item matrix
# using MS_data_transform function
# Below takes 2.17 minutes
MS_UI <- MS_data_transform(MS_train)
save(MS_UI, file = "MS_UI.RData")
# Matrix Calculations
visit_nums <- rowSums(MS_UI != 0)
table(visit_nums)
mean(visit_nums)
median(visit_nums)
# Looping instead of rowSums()
long.row.sums <- function(UI) {
vec <- rep(NA, nrow(UI))
for (i in 1:nrow(UI)) {
vec[i] <- sum(UI[i,], na.rm = TRUE)
}
return(vec)
}
system.time(long.row.sums(MS_UI))
system.time(rowSums(MS_UI, na.rm = TRUE))
vec <- long.row.sums(MS_UI)
all(vec == rowSums(MS_UI, na.rm = TRUE))
###############################################################
######## Building the UI matrix for the EachMovie Data ########
###############################################################
setwd("/Users/cynthiarush/Dropbox/ADS_Spring_2018/Project3/Proj3_Data/eachmovie_sample")
# Load the data
movie_train <- read.csv("data_train.csv", as.is = TRUE, header = TRUE)
movie_train <- movie_train[, 2:4]
# How we might fill in the user-item matrix using %in%
# Find sorted lists of users and vroots
users <- sort(unique(movie_train$User))
movies <- sort(unique(movie_train$Movie))
# Initiate the UI matrix
UI <- matrix(NA, nrow = length(users), ncol = length(movies))
row.names(UI) <- users
colnames(UI) <- movies
# We consider just user 1, finding user 1's movies and ratings
movies <- movie_train$Movie[movie_train$User == users[1]]
ratings <- movie_train$Score[movie_train$User == users[1]]
ord <- order(movies)
movies <- movies[ord]
ratings <- ratings[ord]
system.time(UI[1, colnames(UI) %in% movies] <- ratings)
# How we might fill in the user-item matrix using loops
long.in <- function(movies, ratings) {
# Cycle through the ratings, find the corresponding column
for (i in 1:length(ratings)) {
    column <- which(colnames(UI) == movies[i])
    # use `<<-` so the global UI is updated; a plain `<-` would only change a
    # local copy, making the equality check below hold vacuously (all NA)
    UI[2, column] <<- ratings[i]
print(column)
}
}
system.time(long.in(movies, ratings))
all(UI[1, ] == UI[2,], na.rm = TRUE)
# Compute the full matrix
# Below takes about 4 minutes
movie_UI <- movie_data_transform(movie_train)
save(movie_UI, file = "movie_UI.RData")
# Some calculations
total_ratings <- rowSums(movie_UI, na.rm = TRUE)
table(total_ratings)
mean(total_ratings)
median(total_ratings)
#################################################################
######## Calculating the Similarity Weights of the Users ########
#################################################################
# Initiate the similarity weight matrix
movie_UI <- as.matrix(movie_UI)
movie_sim_weight <- matrix(NA, nrow = nrow(movie_UI), ncol = nrow(movie_UI))
# Can calculate Pearson correlation between two rows of UI matrix as:
rowA <- movie_UI[1, ]
rowB <- movie_UI[2, ]
cor(rowA, rowB, method = 'pearson', use = "pairwise.complete.obs")
# Another way:
joint_values <- !is.na(rowA) & !is.na(rowB)
cor(rowA[joint_values], rowB[joint_values], method = 'pearson')
# First fill in row 1 of the similarity matrix using apply
system.time(vec1 <- apply(movie_UI, 1, cor, movie_UI[1, ], method = 'pearson', use = "pairwise.complete.obs"))
# Now fill in row 1 of the similarity matrix looping over the columns and
# calculating pairwise correlations
long.way <- function(row.num) {
for(i in 1:nrow(movie_UI)) {
movie_sim_weight[row.num, i] <- cor(movie_UI[i, ], movie_UI[row.num, ], method = 'pearson', use = "pairwise.complete.obs")
}
}
system.time(long.way(1))
# Calculate the full weights on the movie data
# The below took 87 minutes on my Macbook, 35 on my iMac
movie_sim <- calc_weight(movie_UI)
save(movie_sim, file = "movie_sim.RData")
# Calculate the full weights on the MS data
# The below took 30 minutes on my Macbook and 14 on my iMac
MS_sim <- calc_weight(MS_UI)
save(MS_sim, file = "MS_sim.RData")
###########################################################
######## Calculating the Predictions for the Users ########
###########################################################
# Calculate the predictions for user 1
# Initiate the prediction matrix and find the columns we need to predict
# for user 1.
pred_mat <- MS_UI
cols_to_predict <- which(MS_UI[1, ] == 0)
num_cols <- length(cols_to_predict)
# Transform the UI matrix into a deviation matrix since we want to calculate
# weighted averages of the deviations
neighb_weights <- MS_sim[1, ]
row_avgs <- apply(MS_UI, 1, mean, na.rm = TRUE)
dev_mat <- MS_UI - matrix(rep(row_avgs, ncol(MS_UI)), ncol = ncol(MS_UI))
# We'll calculate the predictions in two ways:
# First by looping over items where we want to make predictions
for (i in 1:num_cols) {
# For each column to predict, first find all deviations for that item
neighb_devs <- dev_mat[ ,cols_to_predict[i]]
# For each column to predict, calculate the prediction as the weighted average
pred_mat[1, cols_to_predict[i]] <- row_avgs[1] + sum(neighb_devs * neighb_weights, na.rm = TRUE)/sum(neighb_weights, na.rm = TRUE)
}
# Now using matrix equations
pred_mat2 <- MS_UI
weight_mat <- matrix(rep(neighb_weights, ncol(MS_UI)), ncol = ncol(MS_UI))
weight_sub <- weight_mat[, cols_to_predict]
neighb_devs <- dev_mat[ ,cols_to_predict]
# Now fill in all of row 1 with matrix equations
pred_mat2[1, cols_to_predict] <- row_avgs[1] + apply(neighb_devs * weight_sub, 2, sum, na.rm = TRUE)/sum(neighb_weights, na.rm = TRUE)
# They're the same
all(pred_mat2[1,] == pred_mat[1, ])
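# --- Added toy illustration (not part of the original starter code) ---
# The prediction rule above is: own mean + weighted average of the neighbours'
# deviations from their own means. On a tiny 3-user example with weights toy_w:
toy_UI  <- rbind(c(5, 4, NA), c(3, 2, 4), c(4, 5, 2))
toy_avg <- rowMeans(toy_UI, na.rm = TRUE)
toy_dev <- toy_UI - toy_avg              # deviations from each user's mean
toy_w   <- c(1, 0.5, 0.25)               # hypothetical similarity weights to user 1
pred_13 <- toy_avg[1] + sum(toy_dev[, 3] * toy_w, na.rm = TRUE) / sum(toy_w, na.rm = TRUE)
pred_13                                   # predicted rating of item 3 for user 1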
# Calculate predictions for MS
# This calculation took me 15 minutes
MS_pred <- pred_matrix(MS_UI, MS_sim)
save(MS_pred, file = "MS_pred.RData")
# Calculate predictions for movies
# This calculation took me 2493 seconds
movie_pred <- pred_matrix(movie_UI, movie_sim)
save(movie_pred, file = "movie_pred.RData")
|
/Project_Starter_Codes/Project3-Algorithms/doc/memory_based_model.R
|
no_license
|
GU4243-ADS/Spring2018
|
R
| false | false | 6,559 |
r
|
|
#__________________________________________________
# DATA GENERATORS
#__________________________________________________
#' Generates matrix of mixing probabilities.
#'
#' \code{genunifp} returns a matrix whose rows are independent
#' random vectors uniformly distributed over the M-simplex.
#'
#' @param n number of subjects (rows).
#' @param M number of mixing components (columns).
#' @return Matrix p of mixing probabilities with n rows and M columns.
#' The names of components (columns) are A,B,C...
#' The names of cases (rows) are 1,2,3,...
#'
#' @examples
#' p <- genunifp(10,2)
#' p
#'
#' p <- genunifp(1000,3)
#' plot(p[,3],p[,1],cex=0.2)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#__________________________________________________
genunifp <- function(n, M) {
p <- matrix(stats::rexp(n * M), nrow = n,dimnames=list(1:n,LETTERS[1:M]))
p <- t(apply(p, 1, function(x) x/sum(x)))
}
#__________________________________________________
#' Sample from mixture of normals with varying concentrations.
#'
#' \code{genormixt} generates a sample from the mixture
#' of normal distributions
#' with mixing probabilities of components given by
#' the matrix p.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns corresponding to the mixture components.
#' @param mean vector of components' means.
#' @param sd vector of components' standard deviations.
#' @return Vector with the sample.
#' The sample size equals the rows number of p.
#' @examples
#' x <- genormixt(genunifp(10,2),c(0,1),c(0.5,0.5))
#' plot(x)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215
#' @export
# __________________________________________________
genormixt <- function(p, mean, sd) {
nc <- 1:ncol(p)
obj <- apply(p, 1, function(x) sample(nc, 1, prob = x))
smp <- stats::rnorm(nrow(p), mean[obj], sd[obj])
}
#_______________________________________________
#' Sample from
#' mixture of gamma distributions with varying concentrations.
#'
#' \code{gengamixt} generates a sample from the mixture
#' of gamma distributions
#' with mixing probabilities of components given by
#' the matrix p.
#'
#' @param p matrix (or data frame) of mixing probabilities.
#' with rows corresponding to subjects.
#' and columns corresponding to the mixture components.
#' @param shape vector of shape parameters for gamma distributions of
#' components.
#' @param rate vector of rate parameters for gamma distributions of
#' components.
#' @return Vector with the sample.
#' The sample size equals the rows number of p.
#' @examples
#' x <- gengamixt(genunifp(10,2),c(2,1),c(0.5,0.5))
#' plot(x)
#' @export
# __________________________________________________
gengamixt <- function(p, shape, rate) {
nc <- 1:ncol(p)
obj <- apply(p, 1, function(x) sample(nc, 1, prob = x))
smp <- stats::rgamma(nrow(p), shape[obj], rate[obj])
}
# __________________________________________________
#' Calculates minimax weights for components' distributions estimation.
#'
#' \code{lsweight} returns a matrix of individual weights
#' which correspond to minimax unbiased estimates for CDFs of
#' mixture components.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns corresponding to the mixture components.
#' @return matrix (or data frame) of minimax weights of the same structure
#' as p
#' @examples
#' set.seed(3)
#' p <- genunifp(10,3)
#' a <- lsweight(p)
#' t(a)%*%p # the result is a unit matrix
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#____________________________________________________
lsweight <- function(p) {
p <- as.matrix(p)
a <- as.data.frame(p %*% MASS::ginv(t(p) %*% p))
names(a)<-colnames(p)
a
}
# ______________________________________________
#' Constructor for class \code{wtsamp}
#'
#' \code{wtsamp} returns an object of S3 class \code{wtsamp}
#' containing a sorted sample and a set of individual
#' and/or cummulative weights representing distributions
#' of different components.
#'
#' @param x numeric vector containing the sample values.
#' @param cumm matrix (or data frame) of cummulative weights
#' of components.
#' @param indiv matrix (or data frame) of individual weights
#' of components.
#' @return object of class \code{wtsamp} which contains the
#' following attributes:
#'
#' \describe{
#' \item{\code{xo}}{vector of sample values sorted
#' in the ascending order with \code{-Inf} as the first element.}
#' \item{\code{cumm}}{matrix of cummulative weights reordered
#' in the same order as xo with 0 at the first row.}
#' \item{\code{indiv}}{matrix of individual weights reordered
#' in the same order as xo.}
#' }
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#_______________________________________________
wtsamp <- function(x, cumm = NULL, indiv = NULL) {
o <- order(x)
xo <- c(-Inf, x[o])
if (!is.null(cumm))
cumm <- rbind(rep(0, ncol(cumm)), cumm[o, ])
if (!is.null(indiv))
indiv <- indiv[o, ]
structure(list(xo = xo, cumm = cumm, indiv = indiv), class = "wtsamp")
}
# ______________________________________________
#' Constructor for class \code{wtcens}
#'
#' \code{wtcens} returns an object of S3 class \code{wtcens}
#' containing the vector of sorted sample values, vector
#' of indicators of non-censoring
#' and a set of individual
#' and/or cummulative weights representing distributions
#' of different components.
#'
#' @param x numeric vector containing the sample values.
#' @param delta logical vector of non-censoring indicators
#' @param cumm matrix (or data frame) of cummulative weights
#' of components.
#' @param indiv matrix (or data frame) of individual weights
#' of components.
#' @return object of class \code{wtcens} which contains the
#' following attributes:
#'
#' \describe{
#' \item{\code{xo}}{vector of sample values sorted
#' in the ascending order with \code{-Inf} as the first element.}
#' \item{\code{deltao}}{vector of non-censoring indicators reordered
#' at the same order as xo.}
#' \item{\code{cumm}}{matrix of cummulative weights reordered
#' in the same order as xo with 0 at the first row.}
#' \item{\code{indiv}}{matrix of individual weights reordered
#' in the same order as xo.}
#' }
#' @export
#_______________________________________________
wtcens <- function(x, delta, cumm = NULL, indiv = NULL) {
o <- order(x)
xo <- c(-Inf, x[o])
deltao <- delta[o]
if (!is.null(cumm))
cumm <- rbind(rep(0, ncol(cumm)), cumm[o, ])
if (!is.null(indiv))
indiv <- indiv[o, ]
structure(list(xo = xo, deltao = deltao, cumm = cumm, indiv = indiv), class = "wtcens")
}
# __________________________________________________
#' Calculates cummulative weights.
#'
#' \code{indiv2cumm} calculates cummulative sums of
#' individual weights and puts them into the cummulative weights
#' attribute.
#'
#' @param xs a \code{wtsamp} or \code{wtcens} object representing
#' a sample with distributions of different components.
#'
#' @return an object of the same class as \code{xs} with recalculated
#' cummulative weights.
#'
#' @examples
#'set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' ys <- indiv2cumm(xs) # create cummulative weights
#' xs1 <- cumm2indiv(ys) #xs1 is the same as xs
#' @export
#____________________________________________________
indiv2cumm <- function(xs) {
xs$cumm <- apply(xs$indiv, 2, cumsum)
xs$cumm <- rbind(rep(0, ncol(xs$cumm)), xs$cumm)
xs
}
# __________________________________________________
#' Calculates individual weights.
#'
#' \code{cumm2indiv} calculates differences with lag 1 from
#' cummulative weights and puts them into the individual weights
#' attribute.
#'
#' @param xs a \code{wtsamp} or \code{wtcens} object representing
#' a sample with distributions of different components.
#' @return an object of the same class as \code{xs} with recalculated
#' individual weights.
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' ys <- indiv2cumm(xs) # create cummulative weights
#' xs1 <- cumm2indiv(ys) #xs1 is the same as xs
#' @export
#____________________________________________________
cumm2indiv <- function(xs) {
xs$indiv <- apply(xs$cumm, 2, diff)
xs
}
# __________________________________________________
#' Generator of weighted empirical CDS.
#'
#' @param xs a \code{wtsamp} or \code{wtcens} object representing
#' a sample with distributions of different components.
#'
#' @param m number of the component whose CDF is estimated.
#'
#' @return a function with the call \code{f(t)} where \code{t} is
#' the vector of points at which the estimate is calculated. \code{f(t)}
#' returns the vector of estimates.
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1<-edfgen(xs,1) # generate the estimate for 1-st component
#' F1(0) # 0.5289866 approximately 0.5
#' plot(F1,-3,3) # plot the estimate (approx. standard normal CDF )
#' @export
#____________________________________________________
edfgen <- function(xs, m) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
a <- xs$cumm[, m]
x <- xs$xo
f <- function(t) {
a[findInterval(t, x)]
}
}
#' Sup-correction of cummulative weights of a sample
#'
#' \code{corsup} calculates cummulative weights for
#' the upper nondecreasing envelope of the
#' CDFs of mixture components (sup-corrected weights). The weights are
#' truncated by 1.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return object from class \code{wtsamp} whose cummulative weights are
#' sup-corrected and individual weights are set to NULL.
#'
#' @details If cummulative weights are NULL they will be calculated
#' from the individual ones by \code{indiv2cum}.
#'
#' @examples
#' p <- genunifp(15,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1 <- edfgen(xs,1) # minimax estimate for the 1-st component
#' xs_sup <- corsup(xs)
#' F1_sup <- edfgen(xs_sup,1) # sup-corrected estimate for 1-st component
#' F1(0)
#' F1_sup(0)
#' plot(F1,-3,3)
#' curve(F1_sup,col="red",,lty="dashed",add=TRUE)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# ________________________________________________________
corsup <- function(xs) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
xs$cumm <- pmin(apply(xs$cumm, 2, cummax), 1)
xs$indiv <- NULL
xs
}
#' Inf-correction of cummulative weights of a sample
#'
#' \code{corinf} calculates cummulative weights for
#' the lower nondecreasing envelope of the
#' CDFs of mixture components (inf-corrected weights). The weights are
#' truncated by 0.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return object from class \code{wtsamp} whose cummulative weights are
#' inf-corrected and individual weights are set to NULL.
#'
#' @details If cummulative weights are NULL they will be calculated
#' from the individual ones by \code{indiv2cum}.
#'
#' @examples
#' p <- genunifp(15,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1 <- edfgen(xs,1) # minimax estimate for the 1-st component
#' xs_inf <- corinf(xs)
#' F1_inf <- edfgen(xs_inf,1) # inf-corrected estimate for 1-st component
#' F1(0)
#' F1_inf(0)
#' plot(F1,-3,3)
#' curve(F1_inf,col="red",lty="dashed",add=TRUE)
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# ________________________________________________________
corinf <- function(xs) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
n <- length(xs$x) - 1
xs$cumm <- xs$cumm[-1, ]
xs$cumm <- rbind(rep(0, ncol(xs$cumm)), pmax(apply(xs$cumm[n:1, ], 2, cummin), 0)[n:1,
])
xs$indiv <- NULL
xs
}
#' Mid-correction of cumulative weights of a sample
#'
#' \code{cormid} calculates cumulative weights as the mean
#' of sup- and inf-corrected weights
#' (mid-corrected weights).
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return object from class \code{wtsamp} whose cumulative weights are
#' mid-corrected and individual weights are set to NULL.
#'
#' @details If cumulative weights are NULL they will be calculated
#' from the individual ones by \code{indiv2cumm}.
#'
#' @examples
#' p <- genunifp(15,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1 <- edfgen(xs,1) # minimax estimate for the 1-st component
#' xs_mid <- cormid(xs)
#' F1_mid <- edfgen(xs_mid,1) # mid-corrected estimate for 1-st component
#' F1(0)
#' F1_mid(0)
#' plot(F1,-3,3)
#' curve(F1_mid,col="red",lty="dashed",add=TRUE)
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# ________________________________________________________
cormid <- function(xs) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
    sup <- pmin(apply(xs$cumm, 2, cummax), 1)  # sup-corrected weights (as in corsup)
    n <- length(xs$xo) - 1
    xs$cumm <- xs$cumm[-1, ]
    inf <- rbind(rep(0, ncol(xs$cumm)),
                 pmax(apply(xs$cumm[n:1, ], 2, cummin), 0)[n:1, ])  # inf-corrected weights (as in corinf)
    xs$cumm <- (sup + inf)/2
xs$indiv <- NULL
xs
}
#_______________________________________
# RESAMPLING FUNCTIONS
#_______________________________________
#__________________________________________________
#' Resample from a mixture with varying concentrations.
#'
#' \code{gensampmixt} generates a sample from the mixture
#' with mixing probabilities \code{p} and distributions of components given by
#' the weighted sample \code{xs}.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns corresponding to the mixture components.
#' @param xs object of class \code{wtsamp} with the weighted
#' sample. Defines the mixture components' distribution.
#'
#' @return a vector with the resampled sample.
#'
#' @details \code{gensampmixt} uses \code{randwtgen} for sampling
#' from components with the default value of the option
#' \code{delta0}.
#' @examples
#' set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' xs<-cormid(xs) # correct the weights
#' x_resampled<-gensampmixt(p,xs) # resampled data
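#' # A further illustrative sketch (added example, not from the original ones):
#' # a tiny bootstrap of the component means from resampled data, with only
#' # 10 replications to keep the example fast.
#' m_boot <- replicate(10, meanw(wtsamp(gensampmixt(p,xs),indiv=lsweight(p))))
#' apply(m_boot, 1, stats::sd) # bootstrap sd estimates of the component means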
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# __________________________________________________
gensampmixt <- function(p, xs) {
randwt <- randwtgen(xs)
crange <- 1:ncol(p)
components <- apply(p, 1, function(pr) sample(crange, 1, replace = TRUE, prob = pr))
sapply(components, randwt, n = 1)
}
#' Creates a random number generator according to a weighted sample distribution
#'
#' \code{randwtgen} returns a function which produces random samples with the
#' distribution of a prescribed mixture component. The distribution is estimated
#' by a weighted sample.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a function \code{f(m,n,delta)}.
#' The function generates a sample of size \code{n} with the distribution
#' of the \code{m}-th component of the mixture. \code{delta} is the blurring
#' parameter.
#'
#' If \code{delta=0} the generated sample contains values from
#' \code{xs$xo} sampled with probabilities given by the \code{m}-th
#' column of \code{xs$indiv}.
#' If \code{delta>0} a random variable uniform on [-\code{delta},\code{delta}]
#' is added to each sampled value.
#'
#' The default value of \code{delta} is half the minimal distance
#' between \code{xs$xo} points.
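#'
#' @examples
#' # A minimal usage sketch (added illustration, assuming the same toy data as
#' # the other examples in this file):
#' set.seed(3)
#' p <- genunifp(100,2) # create mixing probabilities
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=lsweight(p))
#' xs <- cormid(xs) # correct the weights so the individual weights are nonnegative
#' rnd <- randwtgen(xs) # sampler for the components' distributions
#' rnd(m=1,n=5) # five draws approximating component 1
#' rnd(m=2,n=5,delta=0) # draws restricted to the observed sample values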
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#__________________________________________________________________
randwtgen <- function(xs) {
    x <- xs$xo[-1]
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
prob <- xs$indiv
delta0 = min(diff(x))/2
randwt <- function(m, n, delta = delta0) {
r <- sample(x, n, prob = prob[, m], replace = TRUE)
if (delta > 0)
r <- r + stats::runif(n, -delta, delta)
r
}
}
#________________________________________
#' Calculates the means of all components for a weighted sample.
#'
#' \code{meanw} calculates weighted means of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' means
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' meanw(xs)
#' @export
# _______________________________________
meanw <- function(xs) {
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
mx<-as.vector(xs$xo[-1] %*% as.matrix(xs$indiv))
names(mx)<-colnames(xs$indiv)
return(mx)
}
#________________________________________
#' Calculates the variances of all components for a weighted sample.
#'
#' \code{varw} calculates weighted variances of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' variances
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' varw(xs)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# _______________________________________
varw <- function(xs){
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
sx<-as.vector((xs$xo[-1])^2 %*% as.matrix(xs$indiv)) -
(meanw(xs))^2
names(sx)<-colnames(xs$indiv)
return(sx)
}
#________________________________________
#' Calculates the standard deviations of all components for a weighted sample.
#'
#' \code{sdw} calculates weighted standard deviations of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @param corr function used for correction of weights before the sd calculation
#' (no correction if \code{corr=NULL}).
#'
#' @return a vector of components' standard deviations
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' sdw(xs)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# _______________________________________
sdw <- function(xs,corr=cormid){
if(is.null(corr))
{if (is.null(xs$indiv)) xs <- cumm2indiv(xs)}
else
{xs <- cumm2indiv(corr(xs))}
sx<-sqrt(as.vector((xs$xo[-1])^2 %*% as.matrix(xs$indiv)) -
(meanw(xs))^2)
names(sx)<-colnames(xs$indiv)
return(sx)
}
#________________________________________
#' Calculates the quantiles of all components for a weighted sample.
#'
#' \code{quantilew} calculates weighted quantiles of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @param prob the level of the quantiles (one number).
#'
#' @return a vector of components' quantiles.
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' quantilew(xs,1/3)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# _______________________________________
quantilew <- function(xs,prob){
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
n <- nrow(xs$cumm)
M <- ncol(xs$cumm)
q <- numeric(M)
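  # For each component m: q_left is the smallest sample point at which the
  # weighted CDF reaches prob, q_right is the largest point at which it is
  # still at or below prob; their midpoint is returned, which symmetrizes the
  # ambiguity caused by possibly non-monotone (uncorrected) weighted CDFs.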
for( m in 1:M ){
j <- 1
while( xs$cumm[j,m]<prob ) j <- j+1
q_left <- xs$xo[j]
j <- n
while( xs$cumm[j,m]>prob ) j <- j-1
q_right <- xs$xo[j]
q[m] <- ( q_left + q_right )/2
}
names(q)<-colnames(xs$cumm)
return(q)
}
#________________________________________
#' Calculates the medians of all components for a weighted sample.
#'
#' \code{medianw} calculates weighted medians of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' medians
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' medianw(xs)
#' @export
# _______________________________________
medianw <- function(xs)quantilew(xs,0.5)
#________________________________________
#' Calculates the interquartile ranges of all components for a weighted sample.
#'
#' \code{IQRw} calculates weighted interquartile ranges of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' interquartile ranges
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' IQRw(xs)
#' @export
# _______________________________________
IQRw <- function(xs){
quantilew(xs,0.75) - quantilew(xs,0.25)
}
#_________________________________________
#' Epanechnikov kernel calculation.
#'
#' \code{Epanechn} calculates the value of Epanechnikov kernel at a given point.
#'
#' @param x a vector of points at which the kernel value is calculated.
#'
#' @return vector of the kernel values at given points.
#'
#' @examples
#' curve(Epanechn,-2,2)
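#' # (added illustrative check) the kernel integrates to one, as a density should:
#' stats::integrate(Epanechn, -1, 1)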
#' @export
#_________________________________________
Epanechn<-function(x)ifelse(abs(x)<1,0.75*(1-x^2),0)
#_________________________________________
#' Generator of kernel density estimator for mixture components.
#'
#' \code{densgen} generates a function which calculates
#' a kernel density estimate for a prescribed mixture component
#' at a given set of points.
#'
#' @param xs a \code{wtsamp} object representing
#' a sample with distributions of different components.
#'
#' @param m the number of the component whose density is estimated.
#'
#' @param Kern a function which calculates the kernel values
#' at given points (must be vectorized).
#'
#' @return a function
#' \code{f(x,h)} which calculates the estimate at points given in the
#' vector \code{x} with the bandwidth \code{h}.
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' f<-densgen(xs,1) # create the estimator
#' f(c(0,1),1) # calculate the estimate at two points
#' curve(f(x,1),-3,3) # plot the graph (estimates N(0,1) density)
#'
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#_________________________________________
densgen<-function(xs,m,Kern=Epanechn){
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
a <- xs$indiv[, m]
x <- xs$xo[-1]
f <- Vectorize(
function(t,h) {
sum(a*Kern((t-x)/h))/h
},
vectorize.args ="t"
)
}
#_________________________________________
#' Silverman's rule of thumb for bandwidth selection
#'
#' \code{silvbw} calculates quasi-optimal bandwidth
#' for kernel density estimate by weighted sample
#' based on the mixture with varying concentrations approach
#'
#' @param xs a \code{wtsamp} object representing
#' a sample with distributions of different components.
#'
#' @param m the number of the component for which the density is
#' estimated.
#'
#' @param delta the canonical bandwidth of the kernel
#' used in the estimate.
#' @details The default value of \code{delta}=1.7188 corresponds to
#' the Epanechnikov kernel.
#'
#' @return a numeric value of the Silverman's optimal bandwidth.
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' f<-densgen(xs,1) # create the estimator
#' h<-silvbw(xs,1) # calculates the bandwidth by the Silverman's rule
#' curve(f(x,h),-3,3) # plot the graph (estimates N(0,1) density)
#'
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#_________________________________________
silvbw<-function(xs,m,delta=1.7188){
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
const<-1.011354 # (8*sqrt(pi)/3)^(1/5)/(stats::qnorm(0.75)-stats::qnorm(0.25))
const*delta*(sum((xs$indiv[,m])^2))^(1/5)*min(IQRw(xs)[m],sdw(xs)[m])
}
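# Explanatory note (an added sketch of the rationale, not a change to the code):
# (sum((xs$indiv[,m])^2))^(1/5) plays the role of n^(-1/5) for a weighted sample
# (with equal weights 1/n it reduces exactly to n^(-1/5)); min(IQRw, sdw) is a
# robust spread measure, and `const` together with `delta` rescales it to the
# Silverman bandwidth for a kernel with canonical bandwidth `delta`
# (1.7188 for the Epanechnikov kernel).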
#________________________________________
# INFERENCE
#________________________________________
#
#' Estimates for standard deviations and CI
#' for weighted means by observations from the mixture.
#'
#' \code{sdMean} calculates estimates of standard deviations
#' and confidence intervals
#' for weighted means with minimax weights by observations
#' from the mixture with varying concentrations.
#'
#' @param x numeric vector with the observed sample or a
#' \code{wtsamp} object.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns corresponding to the mixture components.
#' @param comp a numeric vector with numbers of components
#' for which the standard deviations are estimated.
#'
#' @param means logical, if \code{TRUE} then the estimates for
#' components' means are included in the function value.
#'
#' @param CI logical, if \code{TRUE} then confidence bounds for
#' components' means are included in the function value.
#'
#' @param alpha confidence level for the confidence interval.
#'
#' @details If \code{CI=TRUE} then the function calculates
#' confidence intervals for the components' means
#' with covering probability \code{1-alpha}.
#'
#' If \code{x} is a vector then the weights for components' means and variances
#' are calculated as \code{lsweight(p)}. If \code{x} is a \code{wtsamp}
#' object then its own weights are used.
#'
#' @return if both \code{CI} and \code{means} are \code{FALSE} the function returns a vector
#' of the estimated standard deviations
#' with NA for the components which were not estimated.
#' Otherwise a data frame is returned which may contain the variables:
#' \code{sd} are standard deviations of estimates;
#' \code{means} are the estimates of means;
#' \code{lower} and \code{upper} are lower and upper bounds
#' of the confidence intervals for means.
#'
#'
#' @examples
#' set.seed(3)
#' M<-3 # number of mixture components
#' p <- genunifp(1000,M) # create mixing probabilities
#' m<-c(0,1,2) # true means of components
#' sd<-c(1,1,0.5) # true sd of components
#' x<-genormixt(p,m,sd) # sample generation
#' # Calculate sd only:
#' sdMean(x,p)
#' # the same:
#' sdMean(wtsamp(x,indiv=lsweight(p)),p)
#' # Calculate confidence intervals:
#' sdMean(x,p,means=TRUE,CI=TRUE)
#' # Plot confidence intervals:
#' CI<-sdMean(x,p,means=TRUE,CI=TRUE)
#' library(plotrix)
#' plotCI(1:M,CI$means,ui=CI$upper,li=CI$lower,
#' xlab=" ",ylab="means",xaxt="n")
#' axis(1,at=1:M,labels=row.names(CI))
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# _______________________________________
sdMean<-function(x,p,comp=1:ncol(p),
means=FALSE,CI=FALSE,alpha=0.05){
M<-ncol(p)
D<-rep(NA,M)
  if(is.vector(x)&&is.numeric(x))
    sx<-wtsamp(x,indiv=lsweight(p))
  else{
    if(inherits(x,"wtsamp")){
      sx<-x
      if(is.null(sx$indiv)) sx <- cumm2indiv(sx) # individual weights are used below
    }
    else{
      warning("x must be a numeric vector or a wtsamp object")
      return(NA)
    }
  }
m<-meanw(sx)
m2<-varw(sx)+m^2
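  # Asymptotic variance of the weighted mean estimate for component k:
  # Var(sum_j a_jk X_j) = sum_j a_jk^2 Var(X_j); expanding Var(X_j) through the
  # mixture moments gives D[k] = sum(ap*m2) - m %*% app %*% m, where
  # app[i,l] = sum_j a_jk^2 p_ji p_jl and ap = rowSums(app).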
for(k in comp){
app<-matrix(ncol=M,nrow=M)
for(i in 1:M){
for(l in 1:i){
app[i,l]<-sum(sx$indiv[,k]^2*p[,i]*p[,l])
app[l,i]<-app[i,l]
}
}
ap<-apply(app,1,sum)
D[k]=sum(ap*m2)-m%*%app%*%m
if(D[k]<0){
warning("Negative estimate of variance is obtained",D[k])
D[k]=NA
}
}
D=sqrt(D)
if(!(means|CI)){
names(D)<-colnames(p)
return(D)
}else{
R<-data.frame(sd=D)
if(means)R$means<-m
if(CI){
lambda=stats::qnorm(1-alpha/2)
R$lower<-m-lambda*D
R$upper<-m+lambda*D
}
row.names(R)<-colnames(p)
return(R)
}
}
#______________________________________________________
#' Estimates for standard deviations and CI
#' for weighted medians by observations from the mixture.
#'
#' \code{sdMedian} calculates estimates of standard deviations
#' and confidence intervals
#' for weighted medians with minimax weights by observations
#' from the mixture with varying concentrations.
#'
#' @param x numeric vector with the observed sample or a
#' \code{wtsamp} object.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns corresponding to the mixture components.
#' @param comp a numeric vector with numbers of components
#' for which the standard deviations are estimated.
#'
#' @param medians logical, if \code{TRUE} then the estimates for
#' components' medians are included in the function value.
#'
#' @param CI logical, if \code{TRUE} then confidence bounds for
#' components' medians are included in the function value.
#'
#' @param alpha confidence level for the confidence interval.
#'
#' @details if \code{CI=TRUE} then the function calculates
#' confidence intervals for the components' medians
#' with covering probability \code{1-alpha}.
#'
#' If \code{x} is a vector then the weights for components' medians and variances
#' are calculated as \code{lsweight(p)}. If \code{x} is a \code{wtsamp}
#' object then its own weights are used.
#'
#' @return if both \code{CI} and \code{medians} are \code{FALSE} the function returns a vector
#' of the estimated standard deviations
#' with NA for the components which were not estimated.
#' Otherwise a data frame is returned which may contain the variables:
#' \code{sd} are standard deviations of estimates;
#' \code{medians} are the estimates of medians;
#' \code{lower} and \code{upper} are lower and upper bounds
#' of the confidence intervals for medians.
#'
#'
#' @examples
#' set.seed(3)
#' M<-3 # number of mixture components
#' p <- genunifp(1000,M) # create mixing probabilities
#' m<-c(0,1,2) # true means of components
#' sd<-c(1,1,0.5) # true sd of components
#' x<-genormixt(p,m,sd) # sample generation
#' # Calculate sd only:
#' sdMedian(x,p)
#' # the same result:
#' sdMedian(wtsamp(x,indiv=lsweight(p)),p)
#' # Calculate confidence intervals:
#' sdMedian(x,p,medians=TRUE,CI=TRUE)
#' # Plot confidence intervals:
#' CI<-sdMedian(x,p,medians=TRUE,CI=TRUE)
#' library(plotrix)
#' plotCI(1:M,CI$medians,ui=CI$upper,li=CI$lower,
#' xlab=" ",ylab="medians",xaxt="n")
#' axis(1,at=1:M,labels=row.names(CI))
#' @export
#________________________________________
sdMedian<-function(x,p,comp=1:ncol(p),
medians=FALSE,CI=FALSE,alpha=0.05){
M<-ncol(p)
D<-rep(NA,M)
  if(is.vector(x)&&is.numeric(x)){
    sx<-wtsamp(x,indiv=lsweight(p))
    sx <- indiv2cumm(sx)
  }
  else{
    if(inherits(x,"wtsamp")){
      sx<-x
      if(is.null(sx$cumm)) sx <- indiv2cumm(sx)   # cumulative weights are used below
      if(is.null(sx$indiv)) sx <- cumm2indiv(sx)  # individual weights are used below
    }
    else{
      warning("x must be a numeric vector or a wtsamp object")
      return(NA)
    }
  }
med<-medianw(sx)
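  # Delta-method step: Var(median estimate for component k) is approximately
  # Var(F_k estimate at med[k]) / f_k(med[k])^2, where the numerator is built
  # from app[i,l] = sum_j a_jk^2 p_ji p_jl exactly as in sdMean and f_k is the
  # kernel density estimate produced by densgen().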
for(k in comp){
Fm<-sx$cumm[findInterval(med[k], sx$xo),]
app<-matrix(ncol=M,nrow=M)
for(i in 1:M){
for(l in 1:i){
app[i,l]<-sum(sx$indiv[,k]^2*p[,i]*p[,l])
app[l,i]<-app[i,l]
}
}
ap<-apply(app,1,sum)
f<-densgen(sx,k)
zz<-sum(ap*Fm)-Fm%*%app%*%Fm
if(zz<0){
warning("Negative estimate of variance is obtained",zz)
zz=NA
}
else
D[k]<-sqrt(zz)/f(med[k],silvbw(sx,k))
}
if(!(medians|CI)){
names(D)<-colnames(p)
return(D)
}else{
R<-data.frame(sd=D)
if(medians)R$medians<-med
if(CI){
lambda=stats::qnorm(1-alpha/2)
R$lower<-med-lambda*D
R$upper<-med+lambda*D
}
row.names(R)<-colnames(p)
return(R)
}
}
# _______________________________________
# CENSORED DATA
# _______________________________________
#' Censored sample from
#' mixture of gamma distributions with varying concentrations.
#'
#' \code{gencensg} generates a censored sample from the mixture
#' of gamma distributions
#' with mixing probabilities of components given by
#' the matrix p.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns corresponding to the mixture components.
#' @param shape vector of shape parameters for gamma distributions of
#' non-censored components.
#' @param rate vector of rate parameters for gamma distributions of
#' non-censored components.
#' @param shapec vector of shape parameters for gamma distributions of
#' components' censors.
#' @param ratec vector of rate parameters for gamma distributions of
#' components' censors.
#' @return a data frame with the censored sample. The column
#' \code{x} contains the sample values, the column \code{delta}
#' contains the indicators of non-censoring.
#'
#' The sample size equals the rows number of p.
#' @examples
#' set.seed(3)
#' cs <- gencensg(genunifp(50,2),shape=c(2,1),rate=c(0.5,0.5),
#' shapec=c(1,1),ratec=c(0.3,0.3))
#' plot(cs$x,col=cs$delta+1)
#' @seealso
#' Maiboroda R. Khizanov V.
#' A modified Kaplan–Meier estimator for a model of mixtures with varying concentrations
#' Theor. Probability and Math. Statist. 92 (2016), 109-116
#' @export
# __________________________________________________
gencensg <- function(p, shape, rate, shapec, ratec) {
nc <- 1:ncol(p)
obj <- apply(p, 1, function(x) sample(nc, 1, prob = x))
x <- stats::rgamma(nrow(p), shape = shape[obj], rate = rate[obj])
c <- stats::rgamma(nrow(p), shape = shapec[obj], rate = ratec[obj])
delta <- x < c
x <- pmin(x, c)
cs <- data.frame(x, delta)
}
# ______________________________________________________
#' Kaplan-Meier estimator for CDFs of mixture components
#'
#' \code{KMcdf} calculates cumulative weights for the Kaplan-Meier
#' estimator of CDFs of mixture components by a censored sample
#' of class \code{wtcens} and returns an object of class
#' \code{wtsamp} with the sample values and the calculated weights.
#'
#' @param cs object of class \code{wtcens} censored sample with
#' weights representing the distributions
#' of the censored components.
#'
#' @return object of class \code{wtsamp} containing the sample values
#' and the cumulative weights for the distributions of the components.
#'
#' @examples
#' set.seed(3)
#' n<-5000
#' p<-genunifp(n,2)
#' xs<-gencensg(p,shape=c(1.5,4),rate=c(1,1),shapec=c(1,1),ratec=c(0.3,0.3))
#' cs<-wtcens(x=xs$x,delta=xs$delta,indiv=lsweight(p))
#' KMest<-KMcdf(cs)
#' FKM<-edfgen(KMest,2)
#' curve(FKM,0,8)
#' curve(pgamma(x,shape=4,rate=1),col="red",add=TRUE)
#' @seealso
#' Maiboroda R. Khizanov V.
#' A modified Kaplan–Meier estimator for a model of mixtures with varying concentrations
#' Theor. Probability and Math. Statist. 92 (2016), 109-116
#' @export
# ______________________________________________________
KMcdf <- function(cs) {
if (is.null(cs$cumm))
cs <- indiv2cumm(cs)
if (is.null(cs$indiv))
cs <- cumm2indiv(cs)
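    # Weighted product-limit (Kaplan-Meier type) step: the survival factor of
    # observation j for a component is 1 - a_j*delta_j/(1 - F(x_j-)), and the
    # estimated CDF is 1 minus their cumulative product; 0/0 terms (NaN) arise
    # where the estimated CDF has already reached 1 and are set to 1.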
wt <- 1 - apply(1 - cs$indiv * cs$deltao/(1 - utils::head(cs$cumm, n = -1)), 2, cumprod)
wt[is.nan(wt)] <- 1
wtsamp(cs$xo[-1], cumm = wt, indiv = NULL)
}
# ___________ The end.
#__________________________________________________
# DATA GENERATORS
#__________________________________________________
#' Generates matrix of mixing probabilities.
#'
#' \code{genunifp} returns a matrix whose rows are independent
#' random vectors uniformly distributed over the M-simplex.
#'
#' @param n number of subjects (rows).
#' @param M number of mixing components (columns).
#' @return Matrix p of mixing probabilities with n rows and M columns.
#' The names of components (columns) are A,B,C...
#' The names of cases (rows) are 1,2,3,...
#'
#' @examples
#' p <- genunifp(10,2)
#' p
#'
#' p <- genunifp(1000,3)
#' plot(p[,3],p[,1],cex=0.2)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#__________________________________________________
genunifp <- function(n, M) {
p <- matrix(stats::rexp(n * M), nrow = n,dimnames=list(1:n,LETTERS[1:M]))
p <- t(apply(p, 1, function(x) x/sum(x)))
}
#__________________________________________________
#' Sample form mixture of normals with warying concentrations.
#'
#' \code{genormixt} generates a sample from the mixture
#' of normal distributions
#' with mixing probabilities of components given by
#' the matrix p.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns coresponding to the mixture components.
#' @param mean vector of components' means.
#' @param sd vector of components' standard deviations.
#' @return Vector with the sample.
#' The sample size equals the rows number of p.
#' @examples
#' x <- genormixt(genunifp(10,2),c(0,1),c(0.5,0.5))
#' plot(x)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215
#' @export
# __________________________________________________
genormixt <- function(p, mean, sd) {
nc <- 1:ncol(p)
obj <- apply(p, 1, function(x) sample(nc, 1, prob = x))
smp <- stats::rnorm(nrow(p), mean[obj], sd[obj])
}
#_______________________________________________
#' Sample from
#' mixture of gamma distributions with varying concentrations.
#'
#' \code{gengamixt} generates a sample from the mixture
#' of gamma distributions
#' with mixing probabilities of components given by
#' the matrix p.
#'
#' @param p matrix (or data frame) of mixing probabilities.
#' with rows corresponding to subjects.
#' and columns coresponding to the mixture components.
#' @param shape vector of shape parameters for gamma distributions of
#' components.
#' @param rate vector of rate parameters for gamma distributions of
#' components.
#' @return Vector with the sample.
#' The sample size equals the rows number of p.
#' @examples
#' x <- genormixt(genunifp(10,2),c(2,1),c(0.5,0.5))
#' plot(x)
#' @export
# __________________________________________________
gengamixt <- function(p, shape, rate) {
nc <- 1:ncol(p)
obj <- apply(p, 1, function(x) sample(nc, 1, prob = x))
smp <- stats::rgamma(nrow(p), shape[obj], rate[obj])
}
# __________________________________________________
#' Calculates minimax weights for components' distributions estimation.
#'
#' \code{lsweight} returns a matrix of individual weights
#' which correspond to minimax unbiased estimates for CDFs of
#' mixture components.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns coresponding to the mixture components.
#' @return matrix (or data frame) of minimax weights of the same structure
#' as p
#' @examples
#' set.seed(3)
#' p <- genunifp(10,3)
#' a <- lsweight(p)
#' t(a)%*%p # the result is a unit matrix
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#____________________________________________________
lsweight <- function(p) {
p <- as.matrix(p)
a <- as.data.frame(p %*% MASS::ginv(t(p) %*% p))
names(a)<-colnames(p)
a
}
# ______________________________________________
#' Constructor for class \code{wtsamp}
#'
#' \code{wtsamp} returns an object of S3 class \code{wtsamp}
#' containing a sorted sample and a set of individual
#' and/or cummulative weights representing distributions
#' of different components.
#'
#' @param x numeric vector containing the sample values.
#' @param cumm matrix (or data frame) of cummulative weights
#' of components.
#' @param indiv matrix (or data frame) of individual weights
#' of components.
#' @return object of class \code{wtsamp} which contains the
#' following attributes:
#'
#' \describe{
#' \item{\code{xo}}{vector of sample values sorted
#' in the ascending order with \code{-Inf} as the first element.}
#' \item{\code{cumm}}{matrix of cummulative weigts reordered
#' at the same order as xo with 0 at the first row.}
#' \item{\code{indiv}}{matrix of individual weigts reordered
#' at the same order as xo.}
#' }
#'
#' set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#_______________________________________________
wtsamp <- function(x, cumm = NULL, indiv = NULL) {
o <- order(x)
xo <- c(-Inf, x[o])
if (!is.null(cumm))
cumm <- rbind(rep(0, ncol(cumm)), cumm[o, ])
if (!is.null(indiv))
indiv <- indiv[o, ]
structure(list(xo = xo, cumm = cumm, indiv = indiv), class = "wtsamp")
}
# ______________________________________________
#' Constructor for class \code{wtcens}
#'
#' \code{wtcens} returns an object of S3 class \code{wtcens}
#' containing the vector of sorted sample values, vector
#' of indicators of non-censoring
#' and a set of individual
#' and/or cummulative weights representing distributions
#' of different components.
#'
#' @param x numeric vector containing the sorted sample values.
#' @param delta logical vector of non-censoring indicators
#' @param cumm matrix (or data frame) of cummulative weights
#' of components.
#' @param indiv matrix (or data frame) of individual weights
#' of components.
#' @return object of class \code{wtcens}which contains the
#' following attributes:
#'
#' \describe{
#' \item{\code{xo}}{vector of sample values sorted
#' in the ascending order with \code{-Inf} as the first element.}
#' \item{\code{deltao}}{vector of non-censoring indicators reordered
#' at the same order as xo.}
#' \item{\code{cumm}}{matrix of cummulative weigts reordered
#' at the same order as xo with 0 at the first row.}
#' \item{\code{indiv}}{matrix of individual weigts reordered
#' at the same order as xo.}
#' }
#' @export
#_______________________________________________
wtcens <- function(x, delta, cumm = NULL, indiv = NULL) {
o <- order(x)
xo <- c(-Inf, x[o])
deltao <- delta[o]
if (!is.null(cumm))
cumm <- rbind(rep(0, ncol(cumm)), cumm[o, ])
if (!is.null(indiv))
indiv <- indiv[o, ]
structure(list(xo = xo, deltao = deltao, cumm = cumm, indiv = indiv), class = "wtcens")
}
# __________________________________________________
#' Calculates cummulative weights.
#'
#' \code{indiv2cumm} calculates cummulative sums of
#' individual weights and put them into cummulative weights
#' attribute.
#'
#' @param xs a \code{wtsamp} or \code{wtcens} object representing
#' a sample with distributions of different components.
#'
#' @return an object of the same class as \code{xs} with recalculated
#' cummulative weights.
#'
#' @examples
#'set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' ys <- indiv2cumm(xs) # create cummulative weights
#' xs1 <- cumm2indiv(ys) #xs1 is the same as xs
#' @export
#____________________________________________________
indiv2cumm <- function(xs) {
xs$cumm <- apply(xs$indiv, 2, cumsum)
xs$cumm <- rbind(rep(0, ncol(xs$cumm)), xs$cumm)
xs
}
# __________________________________________________
#' Calculates individual weights.
#'
#' \code{cumm2indiv} calculates differences with lag 1 from
#' cummulative weights and put them into individual weights
#' attribute.
#'
#' @param xs a \code{wtsamp} or \code{wtcens} object representing
#' a sample with distributions of different components.
#' @return an object of the same class as \code{xs} with recalculated
#' individual weights.
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' ys <- indiv2cumm(xs) # create cummulative weights
#' xs1 <- cumm2indiv(ys) #xs1 is the same as xs
#' @export
#____________________________________________________
cumm2indiv <- function(xs) {
xs$indiv <- apply(xs$cumm, 2, diff)
xs
}
# __________________________________________________
#' Generator of weighted empirical CDS.
#'
#' @param xs a \code{wtsamp} or \code{wtcens} object representing
#' a sample with distributions of different components.
#'
#' @param m number of the component whose CDF is estimated.
#'
#' @return a function with the call \code{f(t)} where \code{t} is
#' the vector of points at which the estimate is calculated. \code{f(t)}
#' returns the vector of estimates.
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1<-edfgen(xs,1) # generate the estimate for 1-st component
#' F1(0) # 0.5289866 approximately 0.5
#' plot(F1,-3,3) # plot the estimate (approx. standard normal CDF )
#' @export
#____________________________________________________
edfgen <- function(xs, m) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
a <- xs$cumm[, m]
x <- xs$xo
f <- function(t) {
a[findInterval(t, x)]
}
}
#' Sup-correction of cummulative weights of a sample
#'
#' \code{corsup} calculates cummulative weights for
#' the upper nondecreasing envelope of the
#' CDFs of mixture components (sup-corrected weights). The weights are
#' truncated by 1.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return object from class \code{wtsamp} whose cummulative weights are
#' sup-corrected and individual weights are set to NULL.
#'
#' @details If cummulative weights are NULL they will be calculated
#' from the individual ones by \code{indiv2cum}.
#'
#' @examples
#' p <- genunifp(15,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1 <- edfgen(xs,1) # minimax the estimate for 1-st component
#' xs_sup <- corsup(xs)
#' F1_sup <- edfgen(xs_sup,1) # sup-corrected estimate for 1-st component
#' F1(0)
#' F1_sup(0)
#' plot(F1,-3,3)
#' curve(F1_sup,col="red",,lty="dashed",add=TRUE)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# ________________________________________________________
corsup <- function(xs) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
xs$cumm <- pmin(apply(xs$cumm, 2, cummax), 1)
xs$indiv <- NULL
xs
}
#' Inf-correction of cummulative weights of a sample
#'
#' \code{corinf} calculates cummulative weights for
#' the lower nondecreasing envelope of the
#' CDFs of mixture components (inf-corrected weights). The weights are
#' truncated by 0.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return object from class \code{wtsamp} whose cummulative weights are
#' inf-corrected and individual weights are set to NULL.
#'
#' @details If cummulative weights are NULL they will be calculated
#' from the individual ones by \code{indiv2cum}.
#'
#' @examples
#' p <- genunifp(15,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1 <- edfgen(xs,1) # minimax the estimate for 1-st component
#' xs_inf <- corinf(xs)
#' F1_inf <- edfgen(xs_inf,1) # inf-corrected estimate for 1-st component
#' F1(0)
#' F1_inf(0)
#' plot(F1,-3,3)
#' curve(F1_inf,col="red",,lty="dashed",add=TRUE)
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# ________________________________________________________
corinf <- function(xs) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
n <- length(xs$x) - 1
xs$cumm <- xs$cumm[-1, ]
xs$cumm <- rbind(rep(0, ncol(xs$cumm)), pmax(apply(xs$cumm[n:1, ], 2, cummin), 0)[n:1,
])
xs$indiv <- NULL
xs
}
#' Mid-correction of cummulative weights of a sample
#'
#' \code{cormid} calculates cummulative weights as the mean
#' of sup- and inf-corrected weights
#' (mid-corrected weights).
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return object from class \code{wtsamp} whose cummulative weights are
#' mid-corrected and individual weights are set to NULL.
#'
#' @details If cummulative weights are NULL they will be calculated
#' from the individual ones by \code{indiv2cum}.
#'
#' @examples
#' p <- genunifp(15,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' F1 <- edfgen(xs,1) # minimax the estimate for 1-st component
#' xs_mid <- cormid(xs)
#' F1_mid <- edfgen(xs_mid,1) # mid-corrected estimate for 1-st component
#' F1(0)
#' F1_mid(0)
#' plot(F1,-3,3)
#' curve(F1_mid,col="red",,lty="dashed",add=TRUE)
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# ________________________________________________________
cormid <- function(xs) {
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
cummin <- pmin(apply(xs$cumm, 2, cummax), 1)
n <- length(xs$x) - 1
xs$cumm <- xs$cumm[-1, ]
xs$cumm <- (rbind(rep(0, ncol(xs$cumm)), pmax(apply(xs$cumm[n:1, ], 2, cummin), 0)[n:1,
]) + cummin)/2
xs$indiv <- NULL
xs
}
#_______________________________________
# RESAMPLING FUNCTIONS
#_______________________________________
#__________________________________________________
#' Resample form mixture with warying concentrations.
#'
#' \code{gensampmixt} generates a sample from the mixture
#' with mixing probabilities \code{p} and distributions of components given by
#' the weighted sample \code{xs}.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns coresponding to the mixture components.
#' @param xs object of class \code{wtsamp} with the weighted
#' sample. Defines the mixture components' distribution.
#'
#' @return a vector with the resampled sample.
#'
#' @details \code{gensampmixt} uses \code{randwtgen} for sampling
#' from components with the default value of the option
#' \code{delta0}.
#' @examples
#' set.seed(3)
#' p <- genunifp(10,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' xs<-cormid(xs) # correct the weights
#' x_resampled<-gensampmixt(p,xs) # resampled data
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# __________________________________________________
gensampmixt <- function(p, xs) {
randwt <- randwtgen(xs)
crange <- 1:ncol(p)
components <- apply(p, 1, function(pr) sample(crange, 1, replace = TRUE, prob = pr))
sapply(components, randwt, n = 1)
}
#' Creates a random number generator according to a weighted sample distribution
#'
#' \code{randwtgen} returns a function wchich produces random samples with the
#' distribution of a prescribed mixture component. The distribution is estimated
#' by a weighted sample.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a function \code{f(m,n,delta)}.
#' The function generates a sample of size \code{n} with the distribution
#' of the \code{m}-th component of the mixture. \code{delta} is the blurring
#' parameter.
#'
#' If \code{delta=0} the generated sample contains values from
#' \code{xs$x0} sampled with probabilities given by the \code{m}-th
#' column of \code{xs$indiv}.
#' If \code{delta>0} a random variable uniform on [-\code{delta},\code{delta}]
#' is added to each sampled value.
#'
#' The default value of \code{delta} is a half of the minimal distance
#' between \code{xs$x0} points.
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#__________________________________________________________________
randwtgen <- function(xs) {
x <- xs$x[-1]
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
prob <- xs$indiv
delta0 = min(diff(x))/2
randwt <- function(m, n, delta = delta0) {
r <- sample(x, n, prob = prob[, m], replace = TRUE)
if (delta > 0)
r <- r + stats::runif(n, -delta, delta)
r
}
}
#________________________________________
#' Calculates the means of all components for a weighted sample.
#'
#' \code{meanw} calculates weighted means of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' means
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' meanw(xs)
#' @export
# _______________________________________
meanw <- function(xs) {
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
mx<-as.vector(xs$xo[-1] %*% as.matrix(xs$indiv))
names(mx)<-colnames(xs$indiv)
return(mx)
}
#________________________________________
#' Calculates the variances of all components for a weighted sample.
#'
#' \code{varw} calculates weighted variances of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' variances
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' varw(xs)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# _______________________________________
varw <- function(xs){
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
sx<-as.vector((xs$xo[-1])^2 %*% as.matrix(xs$indiv)) -
(meanw(xs))^2
names(sx)<-colnames(xs$indiv)
return(sx)
}
#________________________________________
#' Calculates the standard deviations of all components for a weighted sample.
#'
#' \code{sdw} calculates weighted standard deviations of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @param corr function used for correction of weights before sd caclucation.
#' (no correction if code{corr=NULL}).
#'
#' @return a vector of components' standard deviations
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' sdw(xs)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# _______________________________________
sdw <- function(xs,corr=cormid){
if(is.null(corr))
{if (is.null(xs$indiv)) xs <- cumm2indiv(xs)}
else
{xs <- cumm2indiv(corr(xs))}
sx<-sqrt(as.vector((xs$xo[-1])^2 %*% as.matrix(xs$indiv)) -
(meanw(xs))^2)
names(sx)<-colnames(xs$indiv)
return(sx)
}
#________________________________________
#' Calculates the quantiles of all components for a weighted sample.
#'
#' \code{quantilew} calculates weighted quantiles of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @param prob the level of the quantiles (one number).
#'
#' @return a vector of components' quantiles.
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' quantilew(xs,1/3)
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
# _______________________________________
quantilew <- function(xs,prob){
if (is.null(xs$cumm))
xs <- indiv2cumm(xs)
n <- nrow(xs$cumm)
M <- ncol(xs$cumm)
q <- numeric(M)
for( m in 1:M ){
j <- 1
while( xs$cumm[j,m]<prob ) j <- j+1
q_left <- xs$xo[j]
j <- n
while( xs$cumm[j,m]>prob ) j <- j-1
q_right <- xs$xo[j]
q[m] <- ( q_left + q_right )/2
}
names(q)<-colnames(xs$cumm)
return(q)
}
#________________________________________
#' Calculates the medians of all components for a weighted sample.
#'
#' \code{medianw} calculates weighted medians of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' medians
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' medianw(xs)
#' @export
# _______________________________________
medianw <- function(xs)quantilew(xs,0.5)
#________________________________________
#' Calculates the interquartile ranges of all components for a weighted sample.
#'
#' \code{IQRw} calculates weighted interquartile ranges of a sample
#' using the individual weights of all components.
#'
#' @param xs object from class \code{wtsamp} containing the sample
#' and weights for components' distributions.
#'
#' @return a vector of components' interquartile ranges
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' IQRw(xs)
#' @export
# _______________________________________
IQRw <- function(xs){
quantilew(xs,0.75) - quantilew(xs,0.25)
}
#_________________________________________
#' Epanechnikov kernel calculation.
#'
#' \code{Epanechn} calculates the value of Epanechnikov kernel at a given point.
#'
#' @param x a vector of points at which the kernel value is calculated.
#'
#' @return vector of the kernel values at given points.
#'
#' @examples
#' curve(Epanechn,-2,2)
#' @export
#_________________________________________
Epanechn<-function(x)ifelse(abs(x)<1,0.75*(1-x^2),0)
#_________________________________________
#' Generator of kernel density estimator for mixture components.
#'
#' \code{densgen} generates a function which calculates
#' a kernel density estimate for a prescribed mixture component
#' at a given set of points.
#'
#' @param xs a \code{wtsamp} object representing
#' a sample with distributions of different components.
#'
#' @param m a number of component whose density is estimated.
#'
#' @param Kern a function which calculates the kernel values
#' at given points (must be vectorized).
#'
#' @return a function
#' \code{f(x,h)} which calculates the estimate at points given in the
#' vector \code{x} with the bandwidth \code{h}.
#'
#' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' f<-densgen(xs,1) # create the estimator
#' f(c(0,1),1) # calculate the estimate at two points
#' curve(f(x,1),-3,3) # plot the graph (estimates N(0,1) density)
#'
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#_________________________________________
densgen<-function(xs,m,Kern=Epanechn){
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
a <- xs$indiv[, m]
x <- xs$xo[-1]
f <- Vectorize(
function(t,h) {
sum(a*Kern((t-x)/h))/h
},
vectorize.args ="t"
)
}
#_________________________________________
#' Silverman's rule of thumb for bandwidth selection
#'
#' \code{silvbw} calculates quasi-optimal bandwidth
#' for kernel density estimate by weighted sample
#' based on the mixture with varying concentrations approach
#'
#' @param xs a \code{wtsamp} object representing
#' a sample with distributions of different components.
#'
#' @param m a number of component for which the density is
#' estimated.
#'
#' @param delta the canonical bandwidth of the kernel
#' used in the estimate.
#'@details The default value of \code{delta}=1.7188 corresponds to
#' the Epanechnikov kernel.
#'
#' @return a numeric value of the Silverman's optimal bandwidth.
#'
#' @examples
#' #' @examples
#' set.seed(3)
#' p <- genunifp(1000,2) # create mixing probabilities
#' a <- lsweight(p) # calculate minimax weights
#' # create a weighted sample:
#' xs <- wtsamp(genormixt(p,c(0,1),c(1,1)),indiv=a)
#' f<-densgen(xs,1) # create the estimator
#' h<-silvbw(xs,1) # calculates the bandwidth by the Silverman's rule
#' curve(f(x,h),-3,3) # plot the graph (estimates N(0,1) density)
#'
#' @seealso
#' Maiboroda R., Sugakova O. "Statistics of mixtures with varying concentrations
#' with application to DNA microarray data analysis".
#' Nonparametric statistics (2012) v.24:1, p. 201 - 215.
#' @export
#_________________________________________
silvbw<-function(xs,m,delta=1.7188){
if (is.null(xs$indiv))
xs <- cumm2indiv(xs)
const<-1.011354 # (8*sqrt(pi)/3)^(1/5)/(stats::qnorm(0.75)-stats::qnorm(0.25))
const*delta*(sum((xs$indiv[,m])^2))^(1/5)*min(IQRw(xs)[m],sdw(xs)[m])
}
#________________________________________
# INFERENCE
#________________________________________
#
#' Estimates for standard deviations and CI
#' for weighted means by observations from the mixture.
#'
#' \code{sdMean} calculates estimates of standard deviatitions
#' and confidence intervals
#' for weighted means with minimax weights by observations
#' from the mixture with varying concentrations.
#'
#' @param x numeric vector with the observed sample or a
#' \code{wtsamp} object.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns coresponding to the mixture components.
#' @param comp a numeric vector with numbers of components
#' for which the standard deviations are estimated.
#'
#' @param means logical, if \code{TRUE} then the estimates for
#' components' means are included in the function value.
#'
#' @param CI logical, if \code{TRUE} then confidence bounds for
#' components' means are inculded in the function value.
#'
#' @param alpha confidense level for the confidence interval.
#'
#' @details If \code{CI=TRUE} then the function calculates
#' confidence intervals for the components' means
#' with covering probability \code{1-alpha}.
#'
#' If \code{x} is a vector then the weights for components' means and variances
#' are calculated as \code{lsweight(p)}. If \code{x} is a \code{wtsamp}
#' object than its own weights are used.
#'
#' @return if \code{CI & means =FALSE} the function returns a vector
#' of the estimated standard deviations
#' with NA for the components which were not estimated.
#' Else a data frame is returned in which there can be variables:
#' \code{sd} are standard deviations of estimates;
#' \code{means} are the estimates of means;
#' \code{lower} and \code{upper} are lower and upper bounds
#' of the confidence intervals for means.
#'
#'
#' @examples
#' set.seed(3)
#' M<-3 # number of mixture components
#' p <- genunifp(1000,M) # create mixing probabilities
#' m<-c(0,1,2) # true means of components
#' sd<-c(1,1,0.5) # true sd of components
#' x<-genormixt(p,m,sd) # sample generation
#' # Calculate sd only:
#' sdMean(x,p)
#' # the same:
#' sdMean(wtsamp(x,indiv=lsweight(p)),p)
#' # Calculate confidence intervals:
#' sdMean(x,p,means=TRUE,CI=TRUE)
#' # Plot confidence intervals:
#' CI<-sdMean(x,p,means=TRUE,CI=TRUE)
#' library(plotrix)
#' plotCI(1:M,CI$means,ui=CI$upper,li=CI$lower,
#' xlab=" ",ylab="means",xaxt="n")
#' axis(1,at=1:M,labels=row.names(CI))
#' @seealso
#' Maiboroda R. and Kubaichuk O.
#' Asymptotic normality of improved weighted empirical distribution functions.
#' Theor. Probability and Math. Statist. 69 (2004), 95-102
#' @export
# _______________________________________
sdMean<-function(x,p,comp=1:ncol(p),
means=FALSE,CI=FALSE,alpha=0.05){
M<-ncol(p)
D<-rep(NA,M)
if(is.vector(x)&is.numeric(x))
sx<-wtsamp(x,indiv=lsweight(p))
else{
if(class(x)=="wtsamp")
sx<-x
else{
warning("x must be vector or wtsamp")
return(NA)
}
}
m<-meanw(sx)
m2<-varw(sx)+m^2
for(k in comp){
app<-matrix(ncol=M,nrow=M)
for(i in 1:M){
for(l in 1:i){
app[i,l]<-sum(sx$indiv[,k]^2*p[,i]*p[,l])
app[l,i]<-app[i,l]
}
}
ap<-apply(app,1,sum)
D[k]=sum(ap*m2)-m%*%app%*%m
if(D[k]<0){
warning("Negative estimate of variance is obtained",D[k])
D[k]=NA
}
}
D=sqrt(D)
if(!(means|CI)){
names(D)<-colnames(p)
return(D)
}else{
R<-data.frame(sd=D)
if(means)R$means<-m
if(CI){
lambda=stats::qnorm(1-alpha/2)
R$lower<-m-lambda*D
R$upper<-m+lambda*D
}
row.names(R)<-colnames(p)
return(R)
}
}
#______________________________________________________
#' Estimates for standard deviations and CI
#' for weighted medians by observations from the mixture.
#'
#' \code{sdMedian} calculates estimates of standard deviatitions
#' and confidence intervals
#' for weighted medians with minimax weights by observations
#' from the mixture with varying concentrations.
#'
#' @param x numeric vector with the observed sample or a
#' \code{wtsamp} object.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns coresponding to the mixture components.
#' @param comp a numeric vector with numbers of components
#' for which the standard deviations are estimated.
#'
#' @param medians logical, if \code{TRUE} then the estimates for
#' components' medians are included in the function value.
#'
#' @param CI logical, if \code{TRUE} then confidence bounds for
#' components' medians are included in the function value.
#'
#' @param alpha confidence level for the confidence interval.
#'
#' @details If \code{CI=TRUE} then the function calculates
#' confidence intervals for the components' medians
#' with covering probability \code{1-alpha}.
#'
#' If \code{x} is a vector then the weights for components' medians and variances
#' are calculated as \code{lsweight(p)}. If \code{x} is a \code{wtsamp}
#' object then its own weights are used.
#'
#' @return if \code{CI & medians =FALSE} the function returns a vector
#' of the estimated standard deviations
#' with NA for the components which were not estimated.
#' Else a data frame is returned in which there can be variables:
#' \code{sd} are standard deviations of estimates;
#' \code{medians} are the estimates of medians;
#' \code{lower} and \code{upper} are lower and upper bounds
#' of the confidence intervals for medians.
#'
#' @examples
#' set.seed(3)
#' M<-3 # number of mixture components
#' p <- genunifp(1000,M) # create mixing probabilities
#' m<-c(0,1,2) # true means of components
#' sd<-c(1,1,0.5) # true sd of components
#' x<-genormixt(p,m,sd) # sample generation
#' # Calculate sd only:
#' sdMedian(x,p)
#' # the same result:
#' sdMedian(wtsamp(x,indiv=lsweight(p)),p)
#' # Calculate confidence intervals:
#' sdMedian(x,p,medians=TRUE,CI=TRUE)
#' # Plot confidence intervals:
#' CI<-sdMedian(x,p,medians=TRUE,CI=TRUE)
#' library(plotrix)
#' plotCI(1:M,CI$medians,ui=CI$upper,li=CI$lower,
#' xlab=" ",ylab="medians",xaxt="n")
#' axis(1,at=1:M,labels=row.names(CI))
#' @export
#________________________________________
sdMedian<-function(x,p,comp=1:ncol(p),
medians=FALSE,CI=FALSE,alpha=0.05){
M<-ncol(p)
D<-rep(NA,M)
if(is.vector(x)&is.numeric(x)){
sx<-wtsamp(x,indiv=lsweight(p))
sx <- indiv2cumm(sx)
}
  else{
    if(class(x)=="wtsamp"){
      sx<-x
      if(is.null(sx$cumm)) sx <- indiv2cumm(sx)
    }
    else{
      warning("x must be vector or wtsamp")
      return(NA)
    }
  }
med<-medianw(sx)
for(k in comp){
Fm<-sx$cumm[findInterval(med[k], sx$xo),]
app<-matrix(ncol=M,nrow=M)
for(i in 1:M){
for(l in 1:i){
app[i,l]<-sum(sx$indiv[,k]^2*p[,i]*p[,l])
app[l,i]<-app[i,l]
}
}
ap<-apply(app,1,sum)
f<-densgen(sx,k)
zz<-sum(ap*Fm)-Fm%*%app%*%Fm
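    # zz estimates the variance of the weighted empirical CDF evaluated at the
    # median; below it is converted into the standard deviation of the median via
    # the delta-method relation sd(median) ~ sd(F_hat(med)) / f(med), where f is a
    # kernel density estimate with Silverman's bandwidth (silvbw).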
if(zz<0){
warning("Negative estimate of variance is obtained",zz)
zz=NA
}
else
D[k]<-sqrt(zz)/f(med[k],silvbw(sx,k))
}
if(!(medians|CI)){
names(D)<-colnames(p)
return(D)
}else{
R<-data.frame(sd=D)
if(medians)R$medians<-med
if(CI){
lambda=stats::qnorm(1-alpha/2)
R$lower<-med-lambda*D
R$upper<-med+lambda*D
}
row.names(R)<-colnames(p)
return(R)
}
}
# _______________________________________
# CENSORED DATA
# _______________________________________
#' Censored sample from
#' mixture of gamma distributions with varying concentrations.
#'
#' \code{gencensg} generates a censored sample from the mixture
#' of gamma distributions
#' with mixing probabilities of components given by
#' the matrix p.
#'
#' @param p matrix (or data frame) of mixing probabilities
#' with rows corresponding to subjects
#' and columns corresponding to the mixture components.
#' @param shape vector of shape parameters for gamma distributions of
#' non-censored components.
#' @param rate vector of rate parameters for gamma distributions of
#' non-censored components.
#' @param shapec vector of shape parameters for gamma distributions of
#' components' censors.
#' @param ratec vector of rate parameters for gamma distributions of
#' components' censors.
#' @return a data frame with the censored sample. The attribute
#' \code{$x} contains the sample values, the attribute \code{$delta}
#' contains the indicators of non-censoring.
#'
#' The sample size equals the number of rows of p.
#' @examples
#' set.seed(3)
#' cs <- gencensg(genunifp(50,2),shape=c(2,1),rate=c(0.5,0.5),
#' shapec=c(1,1),ratec=c(0.3,0.3))
#' plot(cs$x,col=cs$delta+1)
#' @seealso
#' Maiboroda R. Khizanov V.
#' A modified Kaplan–Meier estimator for a model of mixtures with varying concentrations
#' Theor. Probability and Math. Statist. 92 (2016), 109-116
#' @export
# __________________________________________________
gencensg <- function(p, shape, rate, shapec, ratec) {
nc <- 1:ncol(p)
obj <- apply(p, 1, function(x) sample(nc, 1, prob = x))
x <- stats::rgamma(nrow(p), shape = shape[obj], rate = rate[obj])
c <- stats::rgamma(nrow(p), shape = shapec[obj], rate = ratec[obj])
delta <- x < c
x <- pmin(x, c)
    data.frame(x, delta)
}
# ______________________________________________________
#' Kaplan-Meier estimator for CDFs of mixture components
#'
#' \code{KMcdf} calculates cumulative weights for the Kaplan-Meier
#' estimator of CDFs of mixture components by a censored sample
#' of class \code{wtcens} and returns an object of class
#' \code{wtsamp} with the sample values and the calculated weights.
#'
#' @param cs object of class \code{wtcens} censored sample with
#' weights representing the distributions
#' of the censored components.
#'
#' @return object of class \code{wtsamp} containing the sample values
#' and the cumulative weights for the distributions of the components.
#'
#' @examples
#' set.seed(3)
#' n<-5000
#' p<-genunifp(n,2)
#' xs<-gencensg(p,shape=c(1.5,4),rate=c(1,1),shapec=c(1,1),ratec=c(0.3,0.3))
#' cs<-wtcens(x=xs$x,delta=xs$delta,indiv=lsweight(p))
#' KMest<-KMcdf(cs)
#' FKM<-edfgen(KMest,2)
#' curve(FKM,0,8)
#' curve(pgamma(x,shape=4,rate=1),col="red",add=TRUE)
#' @seealso
#' Maiboroda R. Khizanov V.
#' A modified Kaplan–Meier estimator for a model of mixtures with varying concentrations
#' Theor. Probability and Math. Statist. 92 (2016), 109-116
#' @export
# ______________________________________________________
KMcdf <- function(cs) {
if (is.null(cs$cumm))
cs <- indiv2cumm(cs)
if (is.null(cs$indiv))
cs <- cumm2indiv(cs)
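  # The next line is the product-limit (Kaplan-Meier) update written with the
  # component weights: wt_k = 1 - prod_j (1 - w_jk * delta_j / (1 - F_k(x_j-))),
  # where delta_j flags uncensored observations and F_k(x_j-) is the cumulative
  # weight just before x_j (cf. the Maiboroda & Khizanov reference below).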
wt <- 1 - apply(1 - cs$indiv * cs$deltao/(1 - utils::head(cs$cumm, n = -1)), 2, cumprod)
wt[is.nan(wt)] <- 1
wtsamp(cs$xo[-1], cumm = wt, indiv = NULL)
}
# ___________ The end.
|
#' Ensure Space After Comma
#'
#' Adds a space after a comma as strip and many other functions may consider a
#' comma separated string as one word (i.e., \code{"one,two,three"} becomes
#' \code{"onetwothree"} rather than \code{"one two three"}).
#'
#' @param text.var The text variable.
#' @return Returns a vector of strings with commas that have a space after them.
#' @keywords comma space
#' @export
#' @examples
#' \dontrun{
#' x <- c("the, dog,went", "I,like,it", "where are you", NA, "why", ",", ",f")
#' comma_spacer(x)
#' }
comma_spacer <- function(text.var) {
gsub("(,)([^ ])", "\\1 \\2", text.var)
}
|
/R/comma_spacer.R
|
no_license
|
cran/qdap
|
R
| false | false | 623 |
r
|
#' Ensure Space After Comma
#'
#' Adds a space after a comma as strip and many other functions may consider a
#' comma separated string as one word (i.e., \code{"one,two,three"} becomes
#' \code{"onetwothree"} rather than \code{"one two three"}).
#'
#' @param text.var The text variable.
#' @return Returns a vector of strings with commas that have a space after them.
#' @keywords comma space
#' @export
#' @examples
#' \dontrun{
#' x <- c("the, dog,went", "I,like,it", "where are you", NA, "why", ",", ",f")
#' comma_spacer(x)
#' }
comma_spacer <- function(text.var) {
gsub("(,)([^ ])", "\\1 \\2", text.var)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IPDFilecheck.R
\name{get_contents_cols}
\alias{get_contents_cols}
\title{Function to return the unique contents of the column given the column name}
\usage{
get_contents_cols(data, colname)
}
\arguments{
\item{data}{a data frame}
\item{colname}{name of column corresponding to year of birth}
}
\value{
the unique contents of the column if successful; an error otherwise
}
\description{
Function to return the unique contents of the column given the column name
}
\examples{
get_contents_cols(data.frame(
"yob" = c(1951, 1980),
"Name" = c("John", "Dora")
), "yob")
}
|
/man/get_contents_cols.Rd
|
no_license
|
sheejamk/IPDFileCheck
|
R
| false | true | 639 |
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IPDFilecheck.R
\name{get_contents_cols}
\alias{get_contents_cols}
\title{Function to return the unique contents of the column given the column name}
\usage{
get_contents_cols(data, colname)
}
\arguments{
\item{data}{a data frame}
\item{colname}{name of column corresponding to year of birth}
}
\value{
the unique contents of the column if successful; an error otherwise
}
\description{
Function to return the unique contents of the column given the column name
}
\examples{
get_contents_cols(data.frame(
"yob" = c(1951, 1980),
"Name" = c("John", "Dora")
), "yob")
}
|
library(readr)
library(dplyr)
library(ggplot2)
library(ggrepel)
# S&P data from https://github.com/CNuge/kaggle-code/tree/master/stock_data
all_stocks_5yr <- read_csv("all_stocks_5yr.csv")
# remove duplicate ticker:
all_stocks_5yr <- filter(all_stocks_5yr, Name != "GOOGL")
# convert dates to standard format:
all_stocks_5yr$date <- as.Date(all_stocks_5yr$date, "%d.%m.%y")
# add column for weekly change:
all_stocks_5yr <- group_by(all_stocks_5yr, Name) %>%
mutate(diff_close = close - lag(close, 5),
diff_log_close = log(close) - log(lag(close, 5))) %>%
ungroup()
# assign sets according to initial closing price:
all_stocks_5yr <- group_by(all_stocks_5yr, Name) %>%
mutate(close_set = round(log10(first(close)) / 0.25)) %>%
ungroup()
# filter so each set has no more than 3 elements:
sum_df <- group_by(all_stocks_5yr, Name, close_set) %>%
summarise %>%
group_by(close_set) %>%
slice(1:3)
censored_df <- filter(all_stocks_5yr, Name %in% sum_df$Name)
# assign labels for plotting:
censored_df <- mutate(censored_df, label = if_else(date == max(date) & close_set > 9, Name, NA_character_))
# small_df <- group_by(censored_df, Name) %>%
# filter(row_number() %% 5 == 1) %>%
# ungroup()
##########
# symmetrical random walk with prediction:
p1 <- 0.5
w <- 100
len <- 1e6
short_len <- 1e3
x1 <- 1e-2
y1 <- -1e-2
W1 <- sample(c(y1, x1), len, replace = TRUE, prob = c(1 - p1, p1))
S1 <- cumsum(W1) + w
Time <- seq(1, len, length = short_len)
S1 <- S1[Time]
sdf <- data.frame(price = S1, Time = Time)
# pdf("StockPriceRandomWalk.pdf", width = 4, height = 3)
par(mar = c(4,4,1,1), las = 1)
plot(price ~ Time, data = sdf, type = "l", xlim = c(0, 1.1*len), ylim = c(80, 120),
xlab = "time", ylab = "stock price", col = "red")
t <- seq(len, 1.1*len, length = 0.1*short_len + 1)
w <- S1[length(S1)]
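# exp_rw() and var_rw() are assumed to be helper functions defined elsewhere in the
# course code (returning the mean and variance of the random walk after a given
# number of steps); they are not defined in this script.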
m <- sapply(t - len, exp_rw, p = p1, x = x1, y = y1) + w
v <- 2 * sqrt(sapply(t - len, var_rw, p = p1, x = x1, y = y1))
tdf <- data.frame(m = m, v = v, t = t)
lines(m ~ t, data = tdf, lty = 2)
lines(m + v ~ t, data = tdf, col = "grey")
lines(m - v ~ t, data = tdf, col = "grey")
# dev.off()
##########
# short-term Amazon stock price:
# pdf("StockPriceAmazonShortTerm.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, Name == "AMZN", date < "2015-01-01"), aes(date, close, group = Name)) +
geom_line(col = "red") +
ylab("closing price") +
scale_y_continuous(limits = c(100, 550)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# long-term Amazon stock price:
# pdf("StockPriceAmazonLongTerm.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, Name == "AMZN"), aes(date, close, group = Name)) +
geom_line(col = "red") +
ylab("closing price") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# many stock prices (linear scale):
# pdf("StockPricesLinear.pdf", width = 4*0.85, height = 3*0.85)
ggplot(censored_df, aes(date, close, group = Name)) +
geom_line(col = "red") +
geom_label_repel(aes(label = label),
size = 2,
box.padding = 0.1,
label.padding = 0.1,
nudge_x = 500,
na.rm = TRUE) +
ylab("closing price") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# many stock prices (log scale):
# pdf("StockPricesLog.pdf", width = 4*0.85, height = 3*0.85)
ggplot(censored_df, aes(date, close, group = Name)) +
geom_line(col = "red") +
scale_y_log10() +
geom_label_repel(aes(label = label),
size = 2,
box.padding = 0,
label.padding = 0.1,
nudge_x = 500,
na.rm = TRUE) +
ylab("closing price") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# absolute weekly change in closing price versus closing price (log-log scale):
# pdf("StockPriceChanges.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, close > 10), aes(close, abs(diff_close))) +
geom_point(alpha = 0.01) +
geom_smooth() +
xlab("closing price") +
ylab("absolute change in closing price") +
scale_y_log10() +
scale_x_log10() +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# absolute weekly change in log closing price versus closing price (log-log scale):
# pdf("StockPriceChangesLog.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, close > 10), aes(close, abs(diff_log_close))) +
geom_point(alpha = 0.01) +
geom_smooth() +
xlab("closing price") +
ylab("abs. change in log closing price") +
scale_y_log10() +
scale_x_log10() +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
##########
# geometric random walks:
p1 <- 0.5
w <- 100
u <- 1.001
d <- 2 - 1.001
len <- 1e6
short_len <- 1e3
Time <- seq(1, len, length = short_len)
sdf <- data.frame()
for(i in 1:5) {
W1 <- sample(c(0, 1), len, replace = TRUE, prob = c(1 - p1, p1))
W1 <- cumsum(W1)
S1 <- w * u^W1 * d^(1:len - W1)
S1_alt <- log(w) + log(u) * W1 + log(d) * (1:len - W1)
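  # Note: S1_alt is simply log(S1) -- taking logs turns the geometric walk
  # S_n = w * u^W_n * d^(n - W_n) into an ordinary (arithmetic) random walk,
  # which is why the log-scale plot below looks like the earlier symmetric walk.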
S1 <- S1[Time]
S1_alt <- S1_alt[Time]
W1 <- W1[Time]
sdf <- rbind(sdf, data.frame(Time = Time,
Wn = W1,
price = S1,
price_alt = S1_alt,
index = as.character(i)))
}
# pdf("StockPriceGeoRandomWalkLinearScale.pdf", width = 4*0.85, height = 3*0.85)
ggplot(sdf, aes(Time, price, group = index, col = index)) +
geom_line() +
xlab("time") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none") +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# pdf("StockPriceGeoRandomWalkLogScale.pdf", width = 4*0.85, height = 3*0.85)
ggplot(sdf, aes(Time, price, group = index, col = index)) +
geom_line() +
xlab("time") +
scale_y_log10() +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none") +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# long-term Amazon stock price with forecast:
start_date <- min(filter(censored_df, Name == "AMZN")$date)
end_date <- max(filter(censored_df, Name == "AMZN")$date)
start_price <- filter(censored_df, Name == "AMZN", date == start_date)$close
end_price <- filter(censored_df, Name == "AMZN", date == end_date)$close
rate <- (log(end_price) - log(start_price)) / as.numeric((end_date - start_date))
ft <- seq(start_date, end_date + 100, length = 1e3)
fdf <- data.frame(time = ft, forecast = start_price * exp(rate * (as.numeric(ft - start_date))))
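# fdf holds a simple exponential-growth forecast, price(t) = start_price * exp(rate * t),
# with rate the average continuously-compounded daily growth over the sample; it is
# computed here but not plotted -- it could be overlaid on the long-term plot with,
# e.g., geom_line(data = fdf, aes(time, forecast)) if desired.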
|
/Week3_StockPrices.R
|
no_license
|
robjohnnoble/MathProcFin_R_code
|
R
| false | false | 7,349 |
r
|
library(readr)
library(dplyr)
library(ggplot2)
library(ggrepel)
# S&P data from https://github.com/CNuge/kaggle-code/tree/master/stock_data
all_stocks_5yr <- read_csv("all_stocks_5yr.csv")
# remove duplicate ticker:
all_stocks_5yr <- filter(all_stocks_5yr, Name != "GOOGL")
# convert dates to standard format:
all_stocks_5yr$date <- as.Date(all_stocks_5yr$date, "%d.%m.%y")
# add column for weekly change:
all_stocks_5yr <- group_by(all_stocks_5yr, Name) %>%
mutate(diff_close = close - lag(close, 5),
diff_log_close = log(close) - log(lag(close, 5))) %>%
ungroup()
# assign sets according to initial closing price:
all_stocks_5yr <- group_by(all_stocks_5yr, Name) %>%
mutate(close_set = round(log10(first(close)) / 0.25)) %>%
ungroup()
# filter so each set has no more than 3 elements:
sum_df <- group_by(all_stocks_5yr, Name, close_set) %>%
summarise %>%
group_by(close_set) %>%
slice(1:3)
censored_df <- filter(all_stocks_5yr, Name %in% sum_df$Name)
# assign labels for plotting:
censored_df <- mutate(censored_df, label = if_else(date == max(date) & close_set > 9, Name, NA_character_))
# small_df <- group_by(censored_df, Name) %>%
# filter(row_number() %% 5 == 1) %>%
# ungroup()
##########
# symmetrical random walk with prediction:
p1 <- 0.5
w <- 100
len <- 1e6
short_len <- 1e3
x1 <- 1e-2
y1 <- -1e-2
W1 <- sample(c(y1, x1), len, replace = TRUE, prob = c(1 - p1, p1))
S1 <- cumsum(W1) + w
Time <- seq(1, len, length = short_len)
S1 <- S1[Time]
sdf <- data.frame(price = S1, Time = Time)
# pdf("StockPriceRandomWalk.pdf", width = 4, height = 3)
par(mar = c(4,4,1,1), las = 1)
plot(price ~ Time, data = sdf, type = "l", xlim = c(0, 1.1*len), ylim = c(80, 120),
xlab = "time", ylab = "stock price", col = "red")
t <- seq(len, 1.1*len, length = 0.1*short_len + 1)
w <- S1[length(S1)]
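# exp_rw() and var_rw() are assumed to be helper functions defined elsewhere in the
# course code (returning the mean and variance of the random walk after a given
# number of steps); they are not defined in this script.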
m <- sapply(t - len, exp_rw, p = p1, x = x1, y = y1) + w
v <- 2 * sqrt(sapply(t - len, var_rw, p = p1, x = x1, y = y1))
tdf <- data.frame(m = m, v = v, t = t)
lines(m ~ t, data = tdf, lty = 2)
lines(m + v ~ t, data = tdf, col = "grey")
lines(m - v ~ t, data = tdf, col = "grey")
# dev.off()
##########
# short-term Amazon stock price:
# pdf("StockPriceAmazonShortTerm.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, Name == "AMZN", date < "2015-01-01"), aes(date, close, group = Name)) +
geom_line(col = "red") +
ylab("closing price") +
scale_y_continuous(limits = c(100, 550)) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# long-term Amazon stock price:
# pdf("StockPriceAmazonLongTerm.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, Name == "AMZN"), aes(date, close, group = Name)) +
geom_line(col = "red") +
ylab("closing price") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# many stock prices (linear scale):
# pdf("StockPricesLinear.pdf", width = 4*0.85, height = 3*0.85)
ggplot(censored_df, aes(date, close, group = Name)) +
geom_line(col = "red") +
geom_label_repel(aes(label = label),
size = 2,
box.padding = 0.1,
label.padding = 0.1,
nudge_x = 500,
na.rm = TRUE) +
ylab("closing price") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# many stock prices (log scale):
# pdf("StockPricesLog.pdf", width = 4*0.85, height = 3*0.85)
ggplot(censored_df, aes(date, close, group = Name)) +
geom_line(col = "red") +
scale_y_log10() +
geom_label_repel(aes(label = label),
size = 2,
box.padding = 0,
label.padding = 0.1,
nudge_x = 500,
na.rm = TRUE) +
ylab("closing price") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# absolute weekly change in closing price versus closing price (log-log scale):
# pdf("StockPriceChanges.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, close > 10), aes(close, abs(diff_close))) +
geom_point(alpha = 0.01) +
geom_smooth() +
xlab("closing price") +
ylab("absolute change in closing price") +
scale_y_log10() +
scale_x_log10() +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# absolute weekly change in log closing price versus closing price (log-log scale):
# pdf("StockPriceChangesLog.pdf", width = 4*0.85, height = 3*0.85)
ggplot(filter(censored_df, close > 10), aes(close, abs(diff_log_close))) +
geom_point(alpha = 0.01) +
geom_smooth() +
xlab("closing price") +
ylab("abs. change in log closing price") +
scale_y_log10() +
scale_x_log10() +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
##########
# geometric random walks:
p1 <- 0.5
w <- 100
u <- 1.001
d <- 2 - 1.001
len <- 1e6
short_len <- 1e3
Time <- seq(1, len, length = short_len)
sdf <- data.frame()
for(i in 1:5) {
W1 <- sample(c(0, 1), len, replace = TRUE, prob = c(1 - p1, p1))
W1 <- cumsum(W1)
S1 <- w * u^W1 * d^(1:len - W1)
S1_alt <- log(w) + log(u) * W1 + log(d) * (1:len - W1)
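  # Note: S1_alt is simply log(S1) -- taking logs turns the geometric walk
  # S_n = w * u^W_n * d^(n - W_n) into an ordinary (arithmetic) random walk,
  # which is why the log-scale plot below looks like the earlier symmetric walk.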
S1 <- S1[Time]
S1_alt <- S1_alt[Time]
W1 <- W1[Time]
sdf <- rbind(sdf, data.frame(Time = Time,
Wn = W1,
price = S1,
price_alt = S1_alt,
index = as.character(i)))
}
# pdf("StockPriceGeoRandomWalkLinearScale.pdf", width = 4*0.85, height = 3*0.85)
ggplot(sdf, aes(Time, price, group = index, col = index)) +
geom_line() +
xlab("time") +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none") +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# pdf("StockPriceGeoRandomWalkLogScale.pdf", width = 4*0.85, height = 3*0.85)
ggplot(sdf, aes(Time, price, group = index, col = index)) +
geom_line() +
xlab("time") +
scale_y_log10() +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = "none") +
theme(plot.margin = unit(c(0.5,0.5,0.2,0.2), "cm"))
# dev.off()
# long-term Amazon stock price with forecast:
start_date <- min(filter(censored_df, Name == "AMZN")$date)
end_date <- max(filter(censored_df, Name == "AMZN")$date)
start_price <- filter(censored_df, Name == "AMZN", date == start_date)$close
end_price <- filter(censored_df, Name == "AMZN", date == end_date)$close
rate <- (log(end_price) - log(start_price)) / as.numeric((end_date - start_date))
ft <- seq(start_date, end_date + 100, length = 1e3)
fdf <- data.frame(time = ft, forecast = start_price * exp(rate * (as.numeric(ft - start_date))))
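# fdf holds a simple exponential-growth forecast, price(t) = start_price * exp(rate * t),
# with rate the average continuously-compounded daily growth over the sample; it is
# computed here but not plotted -- it could be overlaid on the long-term plot with,
# e.g., geom_line(data = fdf, aes(time, forecast)) if desired.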
|
#' @title Read the reference gene file
#' @name read_master_gene_file
#'
#' @return a data.frame
#'
#' @export
#'
#' @examples
read_master_gene_file <- function() {
master_gene_file <- system.file("extdata", "master_gene_file.tsv", package = "EcoliGenes")
master_gene_table <- read.delim(master_gene_file, comment.char = "#", header = T, stringsAsFactors = F)
master_gene_table
}
#' #' @title Add info into the master table. Deprecated.
#' #' @name complete_master_gene_file
#' #'
#' #' @import dplyr
#' #' @export
#' #'
#' #' @examples
#' complete_master_gene_file <- function() {
#' master_gene_table <- read_master_gene_file()
#'
#' master_bis <- master_gene_table %>%
#' arrange(Consensus_start) %>%
#' dplyr::mutate(left_bnumber = lag(Consensus_bnumber, default = NA),
#' right_bnumber = lead(Consensus_bnumber, default = NA))
#' write_master_gene_file(master_bis)
#' }
#'
#' #' @title Overwrite the reference gene file. Meant to be used with `complete_master_gene_file()`
#' #' @name write_master_gene_file
#' #'
#' #' @return a data.frame
#' #'
#' #' @export
#' #'
#' #' @examples
#' write_master_gene_file <- function(data) {
#' write.table(data, file = "inst/extdata/master_gene_file_2021-06-08.tsv", col.names = T, row.names = F, quote = F, sep = "\t")
#' }
|
/R/master_gene_file.R
|
no_license
|
PGC-CCG/EcoliGenes
|
R
| false | false | 1,289 |
r
|
#' @title Read the reference gene file
#' @name read_master_gene_file
#'
#' @return a data.frame
#'
#' @export
#'
#' @examples
read_master_gene_file <- function() {
master_gene_file <- system.file("extdata", "master_gene_file.tsv", package = "EcoliGenes")
master_gene_table <- read.delim(master_gene_file, comment.char = "#", header = T, stringsAsFactors = F)
master_gene_table
}
#' #' @title Add info into the master table. Deprecated.
#' #' @name complete_master_gene_file
#' #'
#' #' @import dplyr
#' #' @export
#' #'
#' #' @examples
#' complete_master_gene_file <- function() {
#' master_gene_table <- read_master_gene_file()
#'
#' master_bis <- master_gene_table %>%
#' arrange(Consensus_start) %>%
#' dplyr::mutate(left_bnumber = lag(Consensus_bnumber, default = NA),
#' right_bnumber = lead(Consensus_bnumber, default = NA))
#' write_master_gene_file(master_bis)
#' }
#'
#' #' @title Overwrite the reference gene file. Meant to be used with `complete_master_gene_file()`
#' #' @name write_master_gene_file
#' #'
#' #' @return a data.frame
#' #'
#' #' @export
#' #'
#' #' @examples
#' write_master_gene_file <- function(data) {
#' write.table(data, file = "inst/extdata/master_gene_file_2021-06-08.tsv", col.names = T, row.names = F, quote = F, sep = "\t")
#' }
|
#drastically simplified, March 14, 2009 from two loops to 1 matrix operation
#modified July 2, 2013 to allow not counting the diagonal
"count.pairwise" <-
function (x, y=NULL,diagonal=TRUE)
{
if(is.null(y)) {n <- t(!is.na(x)) %*% (!is.na(x)) } else { n <- t(!is.na(x)) %*% (!is.na(y)) }
if(!diagonal) diag(n) <- NA
return(n) }
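# Small illustration (example data, not from the original source): for
#   x <- matrix(c(1, NA, 3, 4, 5, NA), ncol = 2)
# count.pairwise(x) returns 2 on the diagonal (non-missing values in each column)
# and 1 off the diagonal (rows where both columns are observed), since
# t(!is.na(x)) %*% !is.na(x) counts jointly non-missing pairs of observations.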
pairwiseDescribe <- function(x,diagonal=FALSE,...) {
cp <- count.pairwise(x,diagonal=diagonal)
cp <- as.vector(cp[lower.tri(cp)])
describe(cp,...)
}
|
/R/count.pairwise.R
|
no_license
|
frenchja/psych
|
R
| false | false | 504 |
r
|
#drastically simplified, March 14, 2009 from two loops to 1 matrix operation
#modified July 2, 2013 to allow not counting the diagonal
"count.pairwise" <-
function (x, y=NULL,diagonal=TRUE)
{
if(is.null(y)) {n <- t(!is.na(x)) %*% (!is.na(x)) } else { n <- t(!is.na(x)) %*% (!is.na(y)) }
if(!diagonal) diag(n) <- NA
return(n) }
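# Small illustration (example data, not from the original source): for
#   x <- matrix(c(1, NA, 3, 4, 5, NA), ncol = 2)
# count.pairwise(x) returns 2 on the diagonal (non-missing values in each column)
# and 1 off the diagonal (rows where both columns are observed), since
# t(!is.na(x)) %*% !is.na(x) counts jointly non-missing pairs of observations.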
pairwiseDescribe <- function(x,diagonal=FALSE,...) {
cp <- count.pairwise(x,diagonal=diagonal)
cp <- as.vector(cp[lower.tri(cp)])
describe(cp,...)
}
|
## Step 1. Data preparation
#(i) loading
#load IO data
IOT_b=read.csv(file="IOT_b.csv",header=T, as.is=T)
#load index mapping
sector_ind=read.csv(file="indcode_20161202.csv",header=T, as.is=T)
#(ii)preparing index
row_ind=sector_ind[,1:2]
col_ind=sector_ind[,3:4]
sec_group=sector_ind[,5:8]
va_ind=sector_ind[,9:10]
fd_ind=sector_ind[,12:13]
sec_BR=sector_ind[,c(5,6,15,16)]
#getting rid of NA
row_ind=row_ind[!is.na(row_ind[,1]),]
sec_group=sec_group[!is.na(sec_group[,1]),]
sec_BR=sec_BR[!is.na(sec_BR[,1]),]
va_ind=va_ind[!is.na(va_ind[,1]),]
fd_ind=fd_ind[!is.na(fd_ind[,1]),]
nsector=dim(sec_group)[1]
nBR=length(unique(sec_BR[,3]))
nva=dim(va_ind)[1]
nfd=dim(fd_ind)[1]
#(iii)preparing IO for merging with mapping
#set NA observations in IO as 0
IOT_b[is.na(IOT_b)]=0
#add row index to be used in merging with mapping data to IO
IOT_b$basecode_row=IOT_b$X
#get dimmension of IO
dim_IOT_b=dim(IOT_b)
## Step 2. Rowsum: merge and obtain rowsum using aggregate function
IOT_b_sec=merge(IOT_b,row_ind, by="basecode_row", all=T)
IOT_b_37=aggregate(IOT_b_sec[,4:(dim_IOT_b[2])],list(IOT_b_sec$sector_row),FUN=sum)
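# Tiny illustration of the merge + aggregate pattern used above (toy data only, not
# part of the IO table): rows sharing a sector code are summed into one row.
#   toy <- data.frame(basecode_row = c(1, 2, 3), v = c(10, 20, 30))
#   map <- data.frame(basecode_row = c(1, 2, 3), sector_row = c("A", "A", "B"))
#   aggregate(merge(toy, map)["v"], list(merge(toy, map)$sector_row), FUN = sum)
#   #>   Group.1  v
#   #> 1       A 30
#   #> 2       B 30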
## Step 3. Column sum
#(i) Transpose rowsum
T_IOT_b_37=data.frame(t(IOT_b_37))
#(ii) add column names for transposed data
colnames(T_IOT_b_37)[1:nsector]=sec_group[(1:nsector),2]
colnames(T_IOT_b_37)[(nsector+1):(nsector+nva)]=va_ind[,2]
#(iii) drop Group indicator used in rowsum
T_IOT_b_37=T_IOT_b_37[-1,]
#(iv) add index to be used in column sum
T_IOT_b_37$basecode_col=col_ind[,2]
#(v) take column sum using aggregate function
T_IOT_37_col=aggregate(T_IOT_b_37[,1:(nsector+nva)],list(T_IOT_b_37$basecode_col),FUN=sum)
## Step 4. obtain IO table
#(i)obtain transpose of column sum
IOT_37=data.frame(t(T_IOT_37_col))
#(ii) add column names
colnames(IOT_37)[1:nsector]=sec_group[(1:37),2]
colnames(IOT_37)[(nsector+1):(nsector+nfd)]=fd_ind[,2]
#(iii) drop aggregation indicator
IOT_37=IOT_37[-1,]
#Step 5. checking balance
# total input + Resout = Total Demand
check1=as.numeric(IOT_37["Tinput",(1:nsector)])+IOT_37$Resout[(1:nsector)]+IOT_37$Imp[(1:nsector)]-IOT_37$Dtotal[(1:nsector)]
check2=IOT_37$Qtotal[(1:nsector)]+IOT_37$Qself[(1:nsector)]+IOT_37$Resout[(1:nsector)]+IOT_37$Imp[(1:nsector)]-IOT_37$Dtotal[(1:nsector)]
check1
check2
write.csv(IOT_37, file="IO_model_1202.csv")
###### Group IO #####
#Step.1 Preparing data
#(i)set aside industry data IOT_37
IOT_37_Group=IOT_37
#(ii)preparing index to sort after merging
IOT_37_Group$index=(1:(dim(IOT_37)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_37_Group$sector=rownames(IOT_37)
#(iv) merge with sector-group mapping and sort to original order
IOT_37_Group=merge(IOT_37_Group,sec_group, by.x="sector", by.y="sector_name", all=T)
IOT_37_Group=IOT_37_Group[order(IOT_37_Group$index),]
#(v) give row names
rownames(IOT_37_Group)=IOT_37_Group$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_37_Group$Group_name[is.na(IOT_37_Group$Group_name)]=IOT_37_Group$sector[is.na(IOT_37_Group$Group_name)]
Gimax=max(IOT_37_Group$Group_ind,na.rm=T)
Giblank=length(IOT_37_Group$Group_ind[is.na(IOT_37_Group$Group_ind)])
IOT_37_Group$Group_ind[is.na(IOT_37_Group$Group_ind)]=((Gimax+1):(Gimax+Giblank))
#step 2. rowsum by aggregate function: take row sum
IOT_8=aggregate(IOT_37_Group[,2:(nsector+nfd+1)],list(IOT_37_Group$Group_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare group_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
Group_ind=data.frame(unique(cbind(IOT_37_Group$Group_ind,IOT_37_Group$Group_name)))
colnames(Group_ind)=c("Group_ind","Group_name")
##(i-2) add group_name and change rowname
IOT_8_row=merge(IOT_8, Group_ind, by.x="Group.1", by.y="Group_ind", all=T)
rownames(IOT_8_row)=IOT_8_row$Group_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so that we can apply aggregate function
IOT_8_row=IOT_8_row[,-1*c(1, dim(IOT_8_row)[2])]
##(i-4)Transpose
T_IOT_8_col=data.frame(t(IOT_8_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging variable to merge with sector_group mapping
T_IOT_8_col$sector=rownames(T_IOT_8_col)
##(i-2) prepare index variable to sort after merging
T_IOT_8_col$index=(1:dim(T_IOT_8_col)[1])
##(i-3) merge with sector_group mapping
T_IOT_8_Group=merge(T_IOT_8_col,sec_group, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_8_Group=T_IOT_8_Group[order(T_IOT_8_Group$index),]
##(i-5) give rownames
rownames(T_IOT_8_Group)=T_IOT_8_Group$sector
#(ii) prepare variable for column sum aggregation: prepare Group name and group index to use in aggregate function
## Group name =Group name + Final demand elements
T_IOT_8_Group$Group_name[is.na(T_IOT_8_Group$Group_name)]=T_IOT_8_Group$sector[is.na(T_IOT_8_Group$Group_name)]
Gimax2=max(T_IOT_8_Group$Group_ind,na.rm=T)
Giblank2=length(T_IOT_8_Group$Group_ind[is.na(T_IOT_8_Group$Group_ind)])
T_IOT_8_Group$Group_ind[is.na(T_IOT_8_Group$Group_ind)]=((Gimax2+1):(Gimax2+Giblank2))
ngroup=8
#(iii) column sum by aggregate function
T_IOT_8=aggregate(T_IOT_8_Group[,2:(ngroup+nva+1)],list(T_IOT_8_Group$Group_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =group name + final demand elements
Group_ind_8=data.frame(unique(cbind(T_IOT_8_Group$Group_ind,T_IOT_8_Group$Group_name)))
colnames(Group_ind_8)=c("Group_ind","Group_name")
##(i-2) merge row name data
T_IOT_8=merge(T_IOT_8, Group_ind_8, by.x="Group.1", by.y="Group_ind", all=T)
##(i-3) change row name
rownames(T_IOT_8)=T_IOT_8$Group_name
#(ii) Take transpose again to obtain Group IO data
IOT_8=data.frame(t(T_IOT_8[,-1*c(1,dim(T_IOT_8)[2])]))
#checking balance
# total input + Resout = Total Demand
check3=as.numeric(IOT_8["Tinput",(1:ngroup)])+IOT_8$Resout[(1:ngroup)]+IOT_8$Imp[(1:ngroup)]-IOT_8$Dtotal[(1:ngroup)]
check4=IOT_8$Qtotal[(1:ngroup)]+IOT_8$Qself[(1:ngroup)]+IOT_8$Resout[(1:ngroup)]+IOT_8$Imp[(1:ngroup)]-IOT_8$Dtotal[(1:ngroup)]
check3
check4
write.csv(IOT_8, file="IO_group_1202.csv")
## Boehringer and Rutherford toy model sector index
# Originally, BR has 6 sectors: Elec, OIL, COAL, GAS, X (energy intensive), Y (non energy intensive)
# We separate agriculture to link with the agriculture bottom-up model. That makes ours seven sectors:
# ELEC, OIL, COAL, GASHEAT, EINT, NEINT, AGRI
# GAS and Heat are bundled as GASHEAT; ROIL and OIL are integrated into OIL
# At the Nov. 27th meeting, the agriculture sector definition was adjusted. Fishery and Forestry are separated from Agriculture and moved to NEINT.
#Step.1 Preparing data
#(i)set aside industry data IOT_37
IOT_37_BR=IOT_37
#(ii)preparing index to sort after merging
IOT_37_BR$index=(1:(dim(IOT_37)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_37_BR$sector=rownames(IOT_37_BR)
#(iv) merge with sector-BR index mapping and sort to original order
IOT_37_BR=merge(IOT_37_BR,sec_BR, by.x="sector", by.y="sector_name", all=T)
IOT_37_BR=IOT_37_BR[order(IOT_37_BR$index),]
#(v) give row names
rownames(IOT_37_BR)=IOT_37_BR$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_37_BR$BR_name[is.na(IOT_37_BR$BR_name)]=IOT_37_BR$sector[is.na(IOT_37_BR$BR_name)]
BRimax=max(IOT_37_BR$BR_ind,na.rm=T)
BRiblank=length(IOT_37_BR$BR_ind[is.na(IOT_37_BR$BR_ind)])
IOT_37_BR$BR_ind[is.na(IOT_37_BR$BR_ind)]=((BRimax+1):(BRimax+BRiblank))
#step 2. rowsum by aggregate function: take row sum
IOT_BR_row=aggregate(IOT_37_BR[,2:(nsector+nfd+1)],list(IOT_37_BR$BR_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare BR_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
BR_ind=data.frame(unique(cbind(IOT_37_BR$BR_ind,IOT_37_BR$BR_name)))
colnames(BR_ind)=c("BR_ind","BR_name")
##(i-2) add BR_name and change rowname
IOT_BR_row=merge(IOT_BR_row, BR_ind, by.x="Group.1", by.y="BR_ind", all=T)
rownames(IOT_BR_row)=IOT_BR_row$BR_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so that we can apply aggregate function
IOT_BR_row=IOT_BR_row[,-1*c(1, dim(IOT_BR_row)[2])]
##(i-4)Transpose
T_IOT_BR_row=data.frame(t(IOT_BR_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging variable to merge with sector_group mapping
T_IOT_BR_row$sector=rownames(T_IOT_BR_row)
##(i-2) prepare index variable to sort after merging
T_IOT_BR_row$index=(1:dim(T_IOT_BR_row)[1])
##(i-3) merge with sector_BR mapping
T_IOT_BR_row=merge(T_IOT_BR_row,sec_BR, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_BR_row=T_IOT_BR_row[order(T_IOT_BR_row$index),]
##(i-5) give rownames
rownames(T_IOT_BR_row)=T_IOT_BR_row$sector
#(ii) prepare variable for column sum aggregation: prepare BR name and BR index to use in aggregate function
## Group name =Group name + Final demand elements
T_IOT_BR_row$BR_name[is.na(T_IOT_BR_row$BR_name)]=T_IOT_BR_row$sector[is.na(T_IOT_BR_row$BR_name)]
BRimax2=max(T_IOT_BR_row$BR_ind,na.rm=T)
BRiblank2=length(T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)])
T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)]=((BRimax2+1):(BRimax2+BRiblank2))
#ngroup=8
#(iii) column sum by aggregate function
T_IOT_BR=aggregate(T_IOT_BR_row[,2:(nBR+nva+1)],list(T_IOT_BR_row$BR_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =BR name + final demand elements
BR_ind_name=data.frame(unique(cbind(T_IOT_BR_row$BR_ind,T_IOT_BR_row$BR_name)))
colnames(BR_ind_name)=c("BR_ind","BR_name")
##(i-2) merge row name data
T_IOT_BR=merge(T_IOT_BR, BR_ind_name, by.x="Group.1", by.y="BR_ind", all=T)
##(i-3) change row name
rownames(T_IOT_BR)=T_IOT_BR$BR_name
#(ii) Take transpose again to obtain BR IO data
IOT_BR=data.frame(t(T_IOT_BR[,-1*c(1,dim(T_IOT_BR)[2])]))
#checking balance
# total input + Resout = Total Demand
check5=as.numeric(IOT_BR["Tinput",(1:nBR)])+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check6=IOT_BR$Qtotal[(1:nBR)]+IOT_BR$Qself[(1:nBR)]+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check5
check6
write.csv(IOT_BR, file="IO_B_1202.csv")
|
/SAM/agg_1202.r
|
no_license
|
katto2/GIRprep
|
R
| false | false | 10,442 |
r
|
## Step 1. Data preparation
#(i) loading
#load IO data
IOT_b=read.csv(file="IOT_b.csv",header=T, as.is=T)
#load index mapping
sector_ind=read.csv(file="indcode_20161202.csv",header=T, as.is=T)
#(ii)preparing index
row_ind=sector_ind[,1:2]
col_ind=sector_ind[,3:4]
sec_group=sector_ind[,5:8]
va_ind=sector_ind[,9:10]
fd_ind=sector_ind[,12:13]
sec_BR=sector_ind[,c(5,6,15,16)]
#getting rid of NA
row_ind=row_ind[!is.na(row_ind[,1]),]
sec_group=sec_group[!is.na(sec_group[,1]),]
sec_BR=sec_BR[!is.na(sec_BR[,1]),]
va_ind=va_ind[!is.na(va_ind[,1]),]
fd_ind=fd_ind[!is.na(fd_ind[,1]),]
nsector=dim(sec_group)[1]
nBR=length(unique(sec_BR[,3]))
nva=dim(va_ind)[1]
nfd=dim(fd_ind)[1]
#(iii)preparing IO for merging with mapping
#set NA observations in IO as 0
IOT_b[is.na(IOT_b)]=0
#add row index to be used in merging with mapping data to IO
IOT_b$basecode_row=IOT_b$X
#get dimmension of IO
dim_IOT_b=dim(IOT_b)
## Step 2. Rowsum: merge and obtain rowsum using aggregate function
IOT_b_sec=merge(IOT_b,row_ind, by="basecode_row", all=T)
IOT_b_37=aggregate(IOT_b_sec[,4:(dim_IOT_b[2])],list(IOT_b_sec$sector_row),FUN=sum)
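# Tiny illustration of the merge + aggregate pattern used above (toy data only, not
# part of the IO table): rows sharing a sector code are summed into one row.
#   toy <- data.frame(basecode_row = c(1, 2, 3), v = c(10, 20, 30))
#   map <- data.frame(basecode_row = c(1, 2, 3), sector_row = c("A", "A", "B"))
#   aggregate(merge(toy, map)["v"], list(merge(toy, map)$sector_row), FUN = sum)
#   #>   Group.1  v
#   #> 1       A 30
#   #> 2       B 30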
## Step 3. Column sum
#(i) Transpose rowsum
T_IOT_b_37=data.frame(t(IOT_b_37))
#(ii) add column names for transposed data
colnames(T_IOT_b_37)[1:nsector]=sec_group[(1:nsector),2]
colnames(T_IOT_b_37)[(nsector+1):(nsector+nva)]=va_ind[,2]
#(iii) drop Group indicator used in rowsum
T_IOT_b_37=T_IOT_b_37[-1,]
#(iv) add index to be used in column sum
T_IOT_b_37$basecode_col=col_ind[,2]
#(v) take column sum using aggregate function
T_IOT_37_col=aggregate(T_IOT_b_37[,1:(nsector+nva)],list(T_IOT_b_37$basecode_col),FUN=sum)
## Step 4. obtain IO table
#(i)obtain transpose of column sum
IOT_37=data.frame(t(T_IOT_37_col))
#(ii) add column names
colnames(IOT_37)[1:nsector]=sec_group[(1:37),2]
colnames(IOT_37)[(nsector+1):(nsector+nfd)]=fd_ind[,2]
#(iii) drop aggregation indicator
IOT_37=IOT_37[-1,]
#Step 5. checking balance
# total input + Resout = Total Demand
check1=as.numeric(IOT_37["Tinput",(1:nsector)])+IOT_37$Resout[(1:nsector)]+IOT_37$Imp[(1:nsector)]-IOT_37$Dtotal[(1:nsector)]
check2=IOT_37$Qtotal[(1:nsector)]+IOT_37$Qself[(1:nsector)]+IOT_37$Resout[(1:nsector)]+IOT_37$Imp[(1:nsector)]-IOT_37$Dtotal[(1:nsector)]
check1
check2
write.csv(IOT_37, file="IO_model_1202.csv")
###### Group IO #####
#Step.1 Preparing data
#(i)set aside industry data IOT_37
IOT_37_Group=IOT_37
#(ii)preparing index to sort after merging
IOT_37_Group$index=(1:(dim(IOT_37)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_37_Group$sector=rownames(IOT_37)
#(iv) merge with sector-group mapping and sort to original order
IOT_37_Group=merge(IOT_37_Group,sec_group, by.x="sector", by.y="sector_name", all=T)
IOT_37_Group=IOT_37_Group[order(IOT_37_Group$index),]
#(v) give row names
rownames(IOT_37_Group)=IOT_37_Group$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_37_Group$Group_name[is.na(IOT_37_Group$Group_name)]=IOT_37_Group$sector[is.na(IOT_37_Group$Group_name)]
Gimax=max(IOT_37_Group$Group_ind,na.rm=T)
Giblank=length(IOT_37_Group$Group_ind[is.na(IOT_37_Group$Group_ind)])
IOT_37_Group$Group_ind[is.na(IOT_37_Group$Group_ind)]=((Gimax+1):(Gimax+Giblank))
#step 2. rowsum by aggregate function: take row sum
IOT_8=aggregate(IOT_37_Group[,2:(nsector+nfd+1)],list(IOT_37_Group$Group_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare group_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
Group_ind=data.frame(unique(cbind(IOT_37_Group$Group_ind,IOT_37_Group$Group_name)))
colnames(Group_ind)=c("Group_ind","Group_name")
##(i-2) add group_name and change rowname
IOT_8_row=merge(IOT_8, Group_ind, by.x="Group.1", by.y="Group_ind", all=T)
rownames(IOT_8_row)=IOT_8_row$Group_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so that we can apply aggregate function
IOT_8_row=IOT_8_row[,-1*c(1, dim(IOT_8_row)[2])]
##(i-4)Transpose
T_IOT_8_col=data.frame(t(IOT_8_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging variable to merge with sector_group mapping
T_IOT_8_col$sector=rownames(T_IOT_8_col)
##(i-2) prepare index variable to sort after merging
T_IOT_8_col$index=(1:dim(T_IOT_8_col)[1])
##(i-3) merge with sector_group mapping
T_IOT_8_Group=merge(T_IOT_8_col,sec_group, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_8_Group=T_IOT_8_Group[order(T_IOT_8_Group$index),]
##(i-5) give rownames
rownames(T_IOT_8_Group)=T_IOT_8_Group$sector
#(ii) prepare variable for column sum aggregation: prepare Group name and group index to use in aggregate function
## Group name =Group name + Final demand elements
T_IOT_8_Group$Group_name[is.na(T_IOT_8_Group$Group_name)]=T_IOT_8_Group$sector[is.na(T_IOT_8_Group$Group_name)]
Gimax2=max(T_IOT_8_Group$Group_ind,na.rm=T)
Giblank2=length(T_IOT_8_Group$Group_ind[is.na(T_IOT_8_Group$Group_ind)])
T_IOT_8_Group$Group_ind[is.na(T_IOT_8_Group$Group_ind)]=((Gimax2+1):(Gimax2+Giblank2))
ngroup=8
#(iii) column sum by aggregate function
T_IOT_8=aggregate(T_IOT_8_Group[,2:(ngroup+nva+1)],list(T_IOT_8_Group$Group_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =group name + final demand elements
Group_ind_8=data.frame(unique(cbind(T_IOT_8_Group$Group_ind,T_IOT_8_Group$Group_name)))
colnames(Group_ind_8)=c("Group_ind","Group_name")
##(i-2) merge row name data
T_IOT_8=merge(T_IOT_8, Group_ind_8, by.x="Group.1", by.y="Group_ind", all=T)
##(i-3) change row name
rownames(T_IOT_8)=T_IOT_8$Group_name
#(ii) Take transpose again to obtain Group IO data
IOT_8=data.frame(t(T_IOT_8[,-1*c(1,dim(T_IOT_8)[2])]))
#checking balance
# total input + Resout = Total Demand
check3=as.numeric(IOT_8["Tinput",(1:ngroup)])+IOT_8$Resout[(1:ngroup)]+IOT_8$Imp[(1:ngroup)]-IOT_8$Dtotal[(1:ngroup)]
check4=IOT_8$Qtotal[(1:ngroup)]+IOT_8$Qself[(1:ngroup)]+IOT_8$Resout[(1:ngroup)]+IOT_8$Imp[(1:ngroup)]-IOT_8$Dtotal[(1:ngroup)]
check3
check4
write.csv(IOT_8, file="IO_group_1202.csv")
## Boehringer and Rutherford toy model sector index
# Originally, BR has 6 sectors: Elec, OIL, COAL, GAS, X (energy intensive), Y (non energy intensive)
# We separate agriculture to link with the agriculture bottom-up model. That makes ours seven sectors:
# ELEC, OIL, COAL, GASHEAT, EINT, NEINT, AGRI
# GAS and Heat are bundled as GASHEAT; ROIL and OIL are integrated into OIL
# At the Nov. 27th meeting, the agriculture sector definition was adjusted. Fishery and Forestry are separated from Agriculture and moved to NEINT.
#Step.1 Preparing data
#(i)set aside industry data IOT_37
IOT_37_BR=IOT_37
#(ii)preparing index to sort after merging
IOT_37_BR$index=(1:(dim(IOT_37)[1]))
#(iii)preparing index to merge with sector-group mapping
IOT_37_BR$sector=rownames(IOT_37_BR)
#(iv) merge with sector-BR index mapping and sort to original order
IOT_37_BR=merge(IOT_37_BR,sec_BR, by.x="sector", by.y="sector_name", all=T)
IOT_37_BR=IOT_37_BR[order(IOT_37_BR$index),]
#(v) give row names
rownames(IOT_37_BR)=IOT_37_BR$sector
#(vi)preparing Group name and Group index to be used in aggregation. The VA part
IOT_37_BR$BR_name[is.na(IOT_37_BR$BR_name)]=IOT_37_BR$sector[is.na(IOT_37_BR$BR_name)]
BRimax=max(IOT_37_BR$BR_ind,na.rm=T)
BRiblank=length(IOT_37_BR$BR_ind[is.na(IOT_37_BR$BR_ind)])
IOT_37_BR$BR_ind[is.na(IOT_37_BR$BR_ind)]=((BRimax+1):(BRimax+BRiblank))
#step 2. rowsum by aggregate function: take row sum
IOT_BR_row=aggregate(IOT_37_BR[,2:(nsector+nfd+1)],list(IOT_37_BR$BR_ind),FUN=sum)
#step 3. prepare for column sum
#(i)add group name and index to rowsum data
## (i-1) prepare BR_name and index to add to rowsum data b/c character vector with group name was excluded in aggregation
BR_ind=data.frame(unique(cbind(IOT_37_BR$BR_ind,IOT_37_BR$BR_name)))
colnames(BR_ind)=c("BR_ind","BR_name")
##(i-2) add BR_name and change rowname
IOT_BR_row=merge(IOT_BR_row, BR_ind, by.x="Group.1", by.y="BR_ind", all=T)
rownames(IOT_BR_row)=IOT_BR_row$BR_name
##(i-3) getting rid of aggregate group indicator (1st variable) and Group_name variable, so that we can apply aggregate function
IOT_BR_row=IOT_BR_row[,-1*c(1, dim(IOT_BR_row)[2])]
##(i-4)Transpose
T_IOT_BR_row=data.frame(t(IOT_BR_row))
#step 4. colsum by aggregate function
#(i) prepare to merge with mapping index
##(i-1) prepare merging variable to merge with sector_group mapping
T_IOT_BR_row$sector=rownames(T_IOT_BR_row)
##(i-2) prepare index variable to sort after merging
T_IOT_BR_row$index=(1:dim(T_IOT_BR_row)[1])
##(i-3) merge with sector_BR mapping
T_IOT_BR_row=merge(T_IOT_BR_row,sec_BR, by.x="sector", by.y="sector_name", all=T,sort=F)
##(i-4) sort merged data to 'before-merge' order
T_IOT_BR_row=T_IOT_BR_row[order(T_IOT_BR_row$index),]
##(i-5) give rownames
rownames(T_IOT_BR_row)=T_IOT_BR_row$sector
#(ii) prepare variable for column sum aggregation: prepare BR name and BR index to use in aggregate function
## Group name =Group name + Final demand elements
T_IOT_BR_row$BR_name[is.na(T_IOT_BR_row$BR_name)]=T_IOT_BR_row$sector[is.na(T_IOT_BR_row$BR_name)]
BRimax2=max(T_IOT_BR_row$BR_ind,na.rm=T)
BRiblank2=length(T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)])
T_IOT_BR_row$BR_ind[is.na(T_IOT_BR_row$BR_ind)]=((BRimax2+1):(BRimax2+BRiblank2))
#ngroup=8
#(iii) column sum by aggregate function
T_IOT_BR=aggregate(T_IOT_BR_row[,2:(nBR+nva+1)],list(T_IOT_BR_row$BR_ind),FUN=sum)
#step 5. management after aggregation
#(i) add row names to aggregated data
##(i-1) prepare row names =BR name + final demand elements
BR_ind_name=data.frame(unique(cbind(T_IOT_BR_row$BR_ind,T_IOT_BR_row$BR_name)))
colnames(BR_ind_name)=c("BR_ind","BR_name")
##(i-2) merge row name data
T_IOT_BR=merge(T_IOT_BR, BR_ind_name, by.x="Group.1", by.y="BR_ind", all=T)
##(i-3) change row name
rownames(T_IOT_BR)=T_IOT_BR$BR_name
#(ii) Take transpose again to obtain BR IO data
IOT_BR=data.frame(t(T_IOT_BR[,-1*c(1,dim(T_IOT_BR)[2])]))
#checking balance
# total input + Resout = Total Demand
check5=as.numeric(IOT_BR["Tinput",(1:nBR)])+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check6=IOT_BR$Qtotal[(1:nBR)]+IOT_BR$Qself[(1:nBR)]+IOT_BR$Resout[(1:nBR)]+IOT_BR$Imp[(1:nBR)]-IOT_BR$Dtotal[(1:nBR)]
check5
check6
write.csv(IOT_BR, file="IO_B_1202.csv")
|
#' PCA-based outlier detection
#'
#' Takes a matrix of samples x measurements and looks for outliers in the two
#' first principal components of the data as defined by mahalanobis distance
#' to the center of the data in number of standard deviations
#'
#' @param x a numerical matrix with samples by row, measurements by column
#' @param prob How unlikely should a data point at least be in order to not be
#' considered part of the "center mass" of the data. Translated to k in
#' Chebyshev's inequality P(|Z| >= k) =< 1/k^2 and applied to the two
#' first PCs.
#'
#' @return an object of class \code{prcout}
#'
#' @export
prcout <- function(x, prob=0.01) {
if (is.null(rownames(x))) stop("input x should have rownames")
obj <- list()
obj$prc <- stats::prcomp(x)
# identify inner 100(1-prob)% mass of data in PCs 1 and 2
prc <- obj$prc$x[, 1:2]
alpha <- prob
k <- sqrt(1/alpha)
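  # Worked example of the Chebyshev bound used here: with the default prob = 0.01,
  # k = sqrt(1/0.01) = 10, so only observations within 10 standard deviations of
  # the mean on both PC 1 and PC 2 are kept as the "center mass"
  # (P(|Z| >= k) <= 1/k^2 bounds the excluded fraction by prob for any distribution).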
x95 <- abs((prc[, 1] - mean(prc[, 1]))/stats::sd(prc[, 1])) < k
y95 <- abs((prc[, 2] - mean(prc[, 2]))/stats::sd(prc[, 2])) < k
prc_inner <- prc[x95 & y95, ]
  # define mahalanobis distances as a function of the inner data, apply to all data
obj$mu <- colMeans(prc_inner)
obj$Sigma <- stats::cov(prc_inner)
obj$sd <- stats::sd(stats::mahalanobis(prc_inner, obj$mu, obj$Sigma))
obj$mahalanobis <- stats::mahalanobis(prc, obj$mu, obj$Sigma)
class(obj) <- "prcout"
obj
}
#' Plot method for \code{prcout} objects
#'
#' Plots the two first PCs of the data with contour lines corresponding
#' to the mahalanobis distance to the data center (as defined by the inner mass
#' of the data). Highlights outliers or the samples defined in \code{highlight=}.
#' Alternatively, colors points by batch if the \code{batch} parameter is defined
#'
#' @param x object of class \code{prcout}
#' @param batch optional vector that indicates which batch a sample belongs to.
#'        Points will be colored by batch if this vector is provided. Overrides
#'        \code{highlight=}
#' @param highlight optional character vector with names of samples to highlight in the plot.
#' Overrides the highlighting of outliers
#' @param ... passed to predict(obj, ...) to label outliers
#' @seealso \code{\link{prcout}}, \code{\link{predict.prcout}}
#' @export
plot.prcout <- function(x, batch=NULL, highlight=NULL, ...) {
obj <- x
prc <- obj$prc$x[, 1:2]
# grid for contour lines
r1 <- range(prc[,1])
r2 <- range(prc[,2])
s1 <- seq(r1[1], r1[2], length.out=50)
s2 <- seq(r2[1], r2[2], length.out=50)
grid <- expand.grid(s1,s2)
mha <- matrix(stats::mahalanobis(grid, obj$mu, obj$Sigma), nrow=length(s1))
# plot
graphics::contour(s1, s2, mha/obj$sd, levels=1:6, xlab="PC 1", ylab="PC 2")
if (is.null(highlight)) {
hset <- predict.prcout(obj, ...)
} else {
hset <- highlight
}
if (!is.null(batch)) {
colv <- RColorBrewer::brewer.pal(12, "Set3")
batch <- as.numeric(as.factor(batch))
if (max(batch) > 12) { # if more than 12 colors, recycle
batch <- ((batch - 1) %% 12) + 1
}
colors <- colv[batch]
graphics::points(prc, col=colors, pch=20)
} else {
colors <- rep("black", nrow(prc))
colors[rownames(prc) %in% hset] <- "red"
graphics::points(prc[order(colors), ], col=colors[order(colors)], pch=20)
}
}
#' Predict method for \code{prcout} objects
#'
#' Provides a prediction for whether an observation is a suspected outlier
#'
#' @param object object of class "prcout"
#' @param sdev Number of standard deviations (in mahalanobis distance) that an
#' observation should be from the center to be called an outlier
#' @param type either \code{'sampleid'} (return the row names of the outliers) or
#'        \code{'coef'} (return their mahalanobis distances divided by the reference
#'        standard deviation)
#' @param ... unused
#'
#' @return a character vector of row names for the outliers as defined by the \code{sdev}
#' parameter.
#'
#' @export
#'
predict.prcout <- function(object, sdev=3, type='sampleid',...) {
o <- rownames(object$prc$x)[object$mahalanobis > sdev*object$sd]
if(type == 'sampleid') return(o)
if(type == 'coef') {
    stopifnot(all( o %in% names(object$mahalanobis)))
    return(object$mahalanobis[o]/object$sd)
}
}
|
/R/prcout.R
|
permissive
|
zouw2/nowaclean
|
R
| false | false | 4,013 |
r
|
#' PCA-based outlier detection
#'
#' Takes a matrix of samples x measurements and looks for outliers in the two
#' first principal components of the data as defined by mahalanobis distance
#' to the center of the data in number of standard deviations
#'
#' @param x a numerical matrix with samples by row, measurements by column
#' @param prob How unlikely should a data point at least be in order to not be
#' considered part of the "center mass" of the data. Translated to k in
#' Chebyshev's inequality P(|Z| >= k) =< 1/k^2 and applied to the two
#' first PCs.
#'
#' @return an object of class \code{prcout}
#'
#' @export
prcout <- function(x, prob=0.01) {
if (is.null(rownames(x))) stop("input x should have rownames")
obj <- list()
obj$prc <- stats::prcomp(x)
# identify inner 100(1-prob)% mass of data in PCs 1 and 2
prc <- obj$prc$x[, 1:2]
alpha <- prob
k <- sqrt(1/alpha)
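  # Worked example of the Chebyshev bound used here: with the default prob = 0.01,
  # k = sqrt(1/0.01) = 10, so only observations within 10 standard deviations of
  # the mean on both PC 1 and PC 2 are kept as the "center mass"
  # (P(|Z| >= k) <= 1/k^2 bounds the excluded fraction by prob for any distribution).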
x95 <- abs((prc[, 1] - mean(prc[, 1]))/stats::sd(prc[, 1])) < k
y95 <- abs((prc[, 2] - mean(prc[, 2]))/stats::sd(prc[, 2])) < k
prc_inner <- prc[x95 & y95, ]
  # define mahalanobis distances as a function of the inner data, apply to all data
obj$mu <- colMeans(prc_inner)
obj$Sigma <- stats::cov(prc_inner)
obj$sd <- stats::sd(stats::mahalanobis(prc_inner, obj$mu, obj$Sigma))
obj$mahalanobis <- stats::mahalanobis(prc, obj$mu, obj$Sigma)
class(obj) <- "prcout"
obj
}
#' Plot method for \code{prcout} objects
#'
#' Plots the two first PCs of the data with contour lines corresponding
#' to the mahalanobis distance to the data center (as defined by the inner mass
#' of the data). Highlights outliers or the samples defined in \code{highlight=}.
#' Alternatively, colors points by batch if the \code{batch} parameter is defined
#'
#' @param x object of class \code{prcout}
#' @param batch optional vector that indicates which batch a sample belongs to.
#'        Points will be colored by batch if this vector is provided. Overrides
#'        \code{highlight=}
#' @param highlight optional character vector with names of samples to highlight in the plot.
#' Overrides the highlighting of outliers
#' @param ... passed to predict(obj, ...) to label outliers
#' @seealso \code{\link{prcout}}, \code{\link{predict.prcout}}
#' @export
plot.prcout <- function(x, batch=NULL, highlight=NULL, ...) {
obj <- x
prc <- obj$prc$x[, 1:2]
# grid for contour lines
r1 <- range(prc[,1])
r2 <- range(prc[,2])
s1 <- seq(r1[1], r1[2], length.out=50)
s2 <- seq(r2[1], r2[2], length.out=50)
grid <- expand.grid(s1,s2)
mha <- matrix(stats::mahalanobis(grid, obj$mu, obj$Sigma), nrow=length(s1))
# plot
graphics::contour(s1, s2, mha/obj$sd, levels=1:6, xlab="PC 1", ylab="PC 2")
if (is.null(highlight)) {
hset <- predict.prcout(obj, ...)
} else {
hset <- highlight
}
if (!is.null(batch)) {
colv <- RColorBrewer::brewer.pal(12, "Set3")
batch <- as.numeric(as.factor(batch))
if (max(batch) > 12) { # if more than 12 colors, recycle
batch <- ((batch - 1) %% 12) + 1
}
colors <- colv[batch]
graphics::points(prc, col=colors, pch=20)
} else {
colors <- rep("black", nrow(prc))
colors[rownames(prc) %in% hset] <- "red"
graphics::points(prc[order(colors), ], col=colors[order(colors)], pch=20)
}
}
#' Predict method for \code{prcout} objects
#'
#' Provides a prediction for whether an observation is a suspected outlier
#'
#' @param object object of class "prcout"
#' @param sdev Number of standard deviations (in mahalanobis distance) that an
#' observation should be from the center to be called an outlier
#' @param type either \code{'sampleid'} (return the row names of the outliers) or
#'        \code{'coef'} (return their mahalanobis distances divided by the reference
#'        standard deviation)
#' @param ... unused
#'
#' @return a character vector of row names for the outliers as defined by the \code{sdev}
#' parameter.
#'
#' @export
#'
predict.prcout <- function(object, sdev=3, type='sampleid',...) {
o <- rownames(object$prc$x)[object$mahalanobis > sdev*object$sd]
if(type == 'sampleid') return(o)
if(type == 'coef') {
    stopifnot(all( o %in% names(object$mahalanobis)))
    return(object$mahalanobis[o]/object$sd)
}
}
|
library(ggmap)
### Name: qmplot
### Title: Quick map plot
### Aliases: qmplot
### ** Examples
## Not run:
##D # these are skipped to conserve R check time
##D
##D qmplot(lon, lat, data = crime)
##D
##D
##D # only violent crimes
##D violent_crimes <- subset(crime,
##D offense != "auto theft" &
##D offense != "theft" &
##D offense != "burglary"
##D )
##D
##D # rank violent crimes
##D violent_crimes$offense <- factor(
##D violent_crimes$offense,
##D levels = c("robbery", "aggravated assault", "rape", "murder")
##D )
##D
##D # restrict to downtown
##D violent_crimes <- subset(violent_crimes,
##D -95.39681 <= lon & lon <= -95.34188 &
##D 29.73631 <= lat & lat <= 29.78400
##D )
##D
##D theme_set(theme_bw())
##D
##D qmplot(lon, lat, data = violent_crimes, colour = offense,
##D size = I(3.5), alpha = I(.6), legend = "topleft")
##D
##D qmplot(lon, lat, data = violent_crimes, geom = c("point","density2d"))
##D qmplot(lon, lat, data = violent_crimes) + facet_wrap(~ offense)
##D qmplot(lon, lat, data = violent_crimes, extent = "panel") + facet_wrap(~ offense)
##D qmplot(lon, lat, data = violent_crimes, extent = "panel", colour = offense, darken = .4) +
##D facet_wrap(~ month)
##D
##D
##D
##D
##D qmplot(long, lat, xend = long + delta_long,
##D color = I("red"), yend = lat + delta_lat, data = seals,
##D geom = "segment", zoom = 5)
##D
##D qmplot(long, lat, xend = long + delta_long, maptype = "watercolor",
##D yend = lat + delta_lat, data = seals,
##D geom = "segment", zoom = 6)
##D
##D
##D qmplot(lon, lat, data = wind, size = I(.5), alpha = I(.5)) +
##D ggtitle("NOAA Wind Report Sites")
##D
##D # thin down data set...
##D s <- seq(1, 227, 8)
##D thinwind <- subset(wind,
##D lon %in% unique(wind$lon)[s] &
##D lat %in% unique(wind$lat)[s]
##D )
##D
##D # for some reason adding arrows to the following plot bugs
##D theme_set(theme_bw(18))
##D
##D qmplot(lon, lat, data = thinwind, geom = "tile", fill = spd, alpha = spd,
##D legend = "bottomleft") +
##D geom_leg(aes(xend = lon + delta_lon, yend = lat + delta_lat)) +
##D scale_fill_gradient2("Wind Speed\nand\nDirection",
##D low = "green", mid = scales::muted("green"), high = "red") +
##D scale_alpha("Wind Speed\nand\nDirection", range = c(.1, .75)) +
##D guides(fill = guide_legend(), alpha = guide_legend())
##D
##D
##D
##D
##D ## kriging
##D ############################################################
##D # the below examples show kriging based on undeclared packages
##D # to better comply with CRAN's standards, we remove it from
##D # executing, but leave the code as a kind of case-study
##D # they also require the rgdal library
##D
##D
##D library(lattice)
##D library(sp)
##D library(rgdal)
##D
##D # load in and format the meuse dataset (see bivand, pebesma, and gomez-rubio)
##D data(meuse)
##D coordinates(meuse) <- c("x", "y")
##D proj4string(meuse) <- CRS("+init=epsg:28992")
##D meuse <- spTransform(meuse, CRS("+proj=longlat +datum=WGS84"))
##D
##D # plot
##D plot(meuse)
##D
##D m <- data.frame(slot(meuse, "coords"), slot(meuse, "data"))
##D names(m)[1:2] <- c("lon", "lat")
##D
##D qmplot(lon, lat, data = m)
##D qmplot(lon, lat, data = m, zoom = 14)
##D
##D
##D qmplot(lon, lat, data = m, size = zinc,
##D zoom = 14, source = "google", maptype = "satellite",
##D alpha = I(.75), color = I("green"),
##D legend = "topleft", darken = .2
##D ) + scale_size("Zinc (ppm)")
##D
##D
##D
##D
##D
##D
##D
##D
##D # load in the meuse.grid dataset (looking toward kriging)
##D library(gstat)
##D data(meuse.grid)
##D coordinates(meuse.grid) <- c("x", "y")
##D proj4string(meuse.grid) <- CRS("+init=epsg:28992")
##D meuse.grid <- spTransform(meuse.grid, CRS("+proj=longlat +datum=WGS84"))
##D
##D # plot it
##D plot(meuse.grid)
##D
##D mg <- data.frame(slot(meuse.grid, "coords"), slot(meuse.grid, "data"))
##D names(mg)[1:2] <- c("lon", "lat")
##D
##D qmplot(lon, lat, data = mg, shape = I(15), zoom = 14, legend = "topleft") +
##D geom_point(aes(size = zinc), data = m, color = "green") +
##D scale_size("Zinc (ppm)")
##D
##D
##D
##D # interpolate at unobserved locations (i.e. at meuse.grid points)
##D # pre-define scale for consistency
##D scale <- scale_color_gradient("Predicted\nZinc (ppm)",
##D low = "green", high = "red", lim = c(100, 1850)
##D )
##D
##D
##D
##D # inverse distance weighting
##D idw <- idw(log(zinc) ~ 1, meuse, meuse.grid, idp = 2.5)
##D mg$idw <- exp(slot(idw, "data")$var1.pred)
##D
##D qmplot(lon, lat, data = mg, shape = I(15), color = idw,
##D zoom = 14, legend = "topleft", alpha = I(.75), darken = .4
##D ) + scale
##D
##D
##D
##D # linear regression
##D lin <- krige(log(zinc) ~ 1, meuse, meuse.grid, degree = 1)
##D mg$lin <- exp(slot(lin, "data")$var1.pred)
##D
##D qmplot(lon, lat, data = mg, shape = I(15), color = lin,
##D zoom = 14, legend = "topleft", alpha = I(.75), darken = .4
##D ) + scale
##D
##D
##D
##D # trend surface analysis
##D tsa <- krige(log(zinc) ~ 1, meuse, meuse.grid, degree = 2)
##D mg$tsa <- exp(slot(tsa, "data")$var1.pred)
##D
##D qmplot(lon, lat, data = mg, shape = I(15), color = tsa,
##D zoom = 14, legend = "topleft", alpha = I(.75), darken = .4
##D ) + scale
##D
##D
##D
##D # ordinary kriging
##D vgram <- variogram(log(zinc) ~ 1, meuse) # plot(vgram)
##D vgramFit <- fit.variogram(vgram, vgm(1, "Exp", .2, .1))
##D ordKrige <- krige(log(zinc) ~ 1, meuse, meuse.grid, vgramFit)
##D mg$ordKrige <- exp(slot(ordKrige, "data")$var1.pred)
##D
##D qmplot(lon, lat, data = mg, shape = I(15), color = ordKrige,
##D zoom = 14, legend = "topleft", alpha = I(.75), darken = .4
##D ) + scale
##D
##D
##D
##D # universal kriging
##D vgram <- variogram(log(zinc) ~ 1, meuse) # plot(vgram)
##D vgramFit <- fit.variogram(vgram, vgm(1, "Exp", .2, .1))
##D univKrige <- krige(log(zinc) ~ sqrt(dist), meuse, meuse.grid, vgramFit)
##D mg$univKrige <- exp(slot(univKrige, "data")$var1.pred)
##D
##D qmplot(lon, lat, data = mg, shape = I(15), color = univKrige,
##D zoom = 14, legend = "topleft", alpha = I(.75), darken = .4
##D ) + scale
##D
##D
##D
##D # adding observed data layer
##D qmplot(lon, lat, data = mg, shape = I(15), color = univKrige,
##D zoom = 14, legend = "topleft", alpha = I(.75), darken = .4
##D ) +
##D geom_point(
##D aes(x = lon, y = lat, size = zinc),
##D data = m, shape = 1, color = "black"
##D ) +
##D scale +
##D scale_size("Observed\nLog Zinc")
##D
##D
##D
##D
##D
##D
## End(Not run) # end dontrun
|
/data/genthat_extracted_code/ggmap/examples/qmplot.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false | false | 6,535 |
r
|
leftZones = function()
{
zones=matrix(c(0,2,3,1), ncol=2, byrow=TRUE)
layout(zones, widths=c(1/5,4/5), heights=c(1/5,4/5))
}
# Not implemented; it won't work correctly as-is. The problem is the axis
# limits and layout for the shifted barplot.
rightZones = function()
{
zones=matrix(c(0,2,3,1), ncol=2, byrow=TRUE)
layout(zones, widths=c(1/5,4/5), heights=c(1/5,4/5))
}
funHist = function(x, f, parms=NULL , finv=NULL, means=TRUE, top=NULL)
{
# Make histogram information
y <- f(x, parms)
xhist = hist(x, plot=FALSE)
yhist = hist(y, plot=FALSE)
# Try to make histograms comparable
if(is.null(top)) top = max(c(xhist$counts, yhist$counts))
# Make sequences for main plot
xs <- seq(min(x), max(x), length.out=101)
ys = f(xs, parms)
# Main plot with mean line
par(mar=c(3,3,1,1))
plot(xs, ys , type="l", yaxt="n")
abline(v=mean(x), lwd=2)
# Do we want mean lines for the filtered mean?
if(means){
abline(v= finv(mean(y), parms), col="red", lwd=2)
abline(h=mean(y), col="red", lwd=2)
}
# Bottom histogram
par(mar=c(0,3,1,1))
barplot(xhist$counts, axes=FALSE, ylim=c(0, top), space=0)
# Side histogram
par(mar=c(3,0,1,1))
barplot(yhist$counts
, axes=FALSE
, xlim=c(top, 0)
, space=0, horiz=TRUE
)
}
### Old version; nothing wrong with it I guess
funhist = function(x, f, parms=NULL , finv=NULL, means=TRUE, top=NULL)
{
leftZones()
funHist(x, f, parms, finv, means, top)
}
expfun <- function(x, parms){return(with(parms, exp(-x/Tc)))}
logfun <- function(x, parms){return(with(parms, -Tc*log(x)))}
recip <- function(x, parms){return(1/x)}
idfun <- function(x, parms){return(x)}
sqfun <- function(x, parms){return(x^2)}
rootfun <- function(x, parms){return(sqrt(x))}
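# Illustrative usage sketch (not in the original file): show how the exponential
# filter exp(-x/Tc) reshapes a positive sample; parms = list(Tc = 2) is an
# assumed value chosen only for demonstration.
# x <- rlnorm(1000, meanlog = 0, sdlog = 0.5)
# funhist(x, expfun, parms = list(Tc = 2), finv = logfun, means = TRUE)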
|
/funhist.R
|
no_license
|
parksw3/Generation_distributions
|
R
| false | false | 1,691 |
r
|
###############
# Create a simple scatter plot of cars speed and stopping distances
# Christopher Gandrud
# 6 January 2012
###############
plot(x = cars$speed, y = cars$dist,
xlab = "Speed (mph)",
ylab = "Stopping Distance (ft)",
cex.lab = 1.5)
|
/B_analysts_sources_github/christophergandrud/Rep-Res-Examples/SimpleScatter.R
|
no_license
|
Irbis3/crantasticScrapper
|
R
| false | false | 260 |
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phylo.R
\name{node_labels}
\alias{node_labels}
\alias{named_edges}
\alias{ape_layout_unrooted}
\title{Utilities for phylo class of ape package}
\usage{
node_labels(x)
named_edges(x)
ape_layout_unrooted(phy, centering = TRUE, rotate = 0)
}
\arguments{
\item{x}{phylo object}
\item{phy}{phylo object}
\item{centering}{boolean}
\item{rotate}{angle in radian}
}
\description{
Utilities for phylo class of ape package
}
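\examples{
# Illustrative sketch added editorially; assumes only the signatures documented
# above and a phylo object from the ape package.
\dontrun{
phy <- ape::rtree(6)
node_labels(phy)
named_edges(phy)
ape_layout_unrooted(phy, centering = TRUE, rotate = pi / 4)
}
}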
|
/man/phylo.Rd
|
permissive
|
heavywatal/rwtl
|
R
| false | true | 498 |
rd
|
\name{dccFit}
\alias{dccFit}
\title{Dynamic Cross-Correlation Model Fitting
}
\description{Fits a DCC model using either multivariate Gaussian or
multivariate Student-t innovations. Two types of DCC models are available. The first type is proposed by Engle
and the other is by Tse and Tsui. Both models appear in the Journal of Business and
Economic Statistics, 2002.
}
\usage{
dccFit(rt, type = "TseTsui", theta = c(0.9, 0.02),
ub = c(0.92, 0.079999), lb = c(0.4, 1e-04),
cond.dist = "std", df = 7, m = 0)
}
\arguments{
\item{rt}{The T-by-k data matrix of k-dimensional standardized asset returns. Typically, they are the
standardized residuals of the command dccPre.
}
\item{type}{A character switch to specify the type of DCC model.
Type="TseTsui" for Tse and Tsui's DCC model. Type = "Engle" for Engle's DCC model. Default is Tse-Tsui model.
}
\item{theta}{The initial parameter values for theta1 and theta2
}
\item{ub}{Upper bound of parameters
}
\item{lb}{Lower bound of parameters
}
\item{cond.dist}{Conditional innovation distribution with std for multivariate Student-t innovations.
}
\item{df}{degrees of freedom of the multivariate Student-t innovations.
}
\item{m}{For Tse and Tsui method only, m denotes the number of returns
used in local correlation matrix estimation
}
}
\value{
\item{estimates}{Parameter estimates}
\item{Hessian}{Hessian matrix of the estimates}
\item{rho.t}{Time-varying correlation matrices.
Each row contains elements of a cross-correlation matrix.
}
}
\references{Tsay (2014, Chapter 7). Multivariate Time Series Analysis with R and Financial
Applications. John Wiley. Hoboken, NJ.
}
\author{Ruey S. Tsay
}
\seealso{dccPre
}
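\examples{
# Illustrative sketch added editorially; `rtn` is assumed to be a T-by-k matrix
# of standardized residuals, e.g. produced by dccPre.
\dontrun{
m1 <- dccFit(rtn)
m2 <- dccFit(rtn, type = "Engle", cond.dist = "std", df = 7)
head(m2$rho.t)  # time-varying cross-correlations, one row per time point
}
}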
|
/man/dccFit.Rd
|
no_license
|
chenlu-hung/MTS
|
R
| false | false | 1,698 |
rd
|
#------------------------
# Combine network datasets
#------------------------
library(tidyverse)
#-----------------
# Total Invasives
#-----------------
#---NETN Total Invasives
netn_totinv<-read.csv("./data/NETN/NETN_invasive_total_data.csv")
netn_totinv<-netn_totinv %>% mutate(network='NETN') %>%
select(network,Unit_Code:Y_Coord,Year,cycle:numPlotSpp)
colnames(netn_totinv)<-c("network","park","plot_name","plot_number","x_coord",
"y_coord","year","cycle","plot.freq","avg.quad.r",
"avg.cover",'quad.freq','qpct.freq','num.plot.spp')
# shift cycles for NETN, so 2006 is dropped and 2018 is part of cycle 3
netn_totinv<- netn_totinv %>% mutate(cycle= case_when(year<=2010~1,
between(year,2011,2014)~2,
between(year,2015,2018)~3))
#---MIDN Total Invasives
midn_totinv<-read.csv("./data/MIDN/MIDN_invasive_total_data.csv")
midn_totinv<-midn_totinv %>% mutate(network='MIDN') %>%
select(network,Unit_Code:Y_Coord,Year,cycle:numPlotSpp)
colnames(midn_totinv)<-c("network","park","plot_name","plot_number","x_coord",
"y_coord","year","cycle","plot.freq","avg.quad.r",
"avg.cover",'quad.freq','qpct.freq','num.plot.spp')
# in MIDN shift COLO so cycle 1 is 2 and cycle 2 is 3 so timelines match better
midn_totinv<- midn_totinv %>% mutate(cycle=case_when(park=='COLO' & cycle==2 ~ 3,
park=='COLO' & cycle==1 ~ 2,
park!='COLO' & cycle==1 ~ 1,
park!='COLO' & cycle==2 ~ 2,
park!='COLO' & cycle==3 ~ 3))
#---NCRN Total Invasives
ncrn_totinv<-read.csv("./data/NCRN/NCRN_total_invasives.csv")[,-1]
ncrn_totinv<-ncrn_totinv %>% mutate(plot.freq=ifelse(num.plot.spp>0,1,0),
plot_number=str_sub(plot_name,start=-4)) %>%
select(network,park,plot_name,plot_number,x_coord,y_coord,year,cycle,plot.freq,
avg.quad.r,avg.cover,quad.freq,qpct.freq,num.plot.spp)
#---ERMN Total Invasives
ermn_totinv<-read.csv("./data/ERMN/ERMN_total_Invasives20190528.csv")
names(ermn_totinv)[names(ermn_totinv)=='ave.quad.r']<-'avg.quad.r'
# combine datasets
names(netn_totinv); names(ermn_totinv); names(midn_totinv); names(ncrn_totinv)
comb_totinv<-rbind(netn_totinv, midn_totinv, ermn_totinv, ncrn_totinv)
table(comb_totinv$park,comb_totinv$cycle)
lat.order<-comb_totinv %>% group_by(park) %>%
summarise(mean.lat=mean(y_coord,na.rm=T)) %>% arrange(-mean.lat)
lat.order$lat.rank <- rank(lat.order$mean.lat)
comb_totinv<-comb_totinv %>% left_join(.,lat.order, by='park')
comb_totinv<-comb_totinv %>% arrange(desc(network),plot_name,cycle)
comb_totinv2<-comb_totinv %>% mutate(qpct.freq=ifelse(network!='NCRN',qpct.freq*100,qpct.freq))
write.csv(comb_totinv2,'./data/NETN-MIDN-ERMN-NCRN_total_invasives.csv', row.names=F)
#-----------------
# Invasives by guild
#-----------------
#---NETN Invasives by guild
netn_guild<-read.csv("./data/NETN/NETN_invasive_guild_data.csv")
names(netn_guild)
netn_guild<-netn_guild %>% mutate(network='NETN') %>%
select(network,Unit_Code:Y_Coord,Year,cycle:numPlotSpp)
colnames(netn_guild)<-c("network","park","plot_name","plot_number","x_coord",
"y_coord","year","cycle",'guild',"plot.freq","avg.quad.r",
"avg.cover",'quad.freq','qpct.freq','num.plot.spp')
# shift cycles for NETN, so 2006 is dropped and 2018 is part of cycle 3
netn_guild<- netn_guild %>% mutate(cycle= case_when(year<=2010~1,
between(year,2011,2014)~2,
between(year,2015,2018)~3))
#---MIDN Invasives by guild
midn_guild<-read.csv("./data/MIDN/MIDN_invasive_guild_data.csv")
names(midn_guild)
midn_guild<-midn_guild %>% mutate(network='MIDN') %>%
select(network,Unit_Code:Y_Coord,Year,cycle:numPlotSpp)
colnames(midn_guild)<-c("network","park","plot_name","plot_number","x_coord",
"y_coord","year","cycle",'guild',"plot.freq","avg.quad.r",
"avg.cover",'quad.freq','qpct.freq','num.plot.spp')
# in MIDN shift COLO so cycle 1 is 2 and cycle 2 is 3 so timelines match better
table(midn_guild$park, midn_guild$cycle)
midn_guild<- midn_guild %>% mutate(cycle=case_when(park=='COLO' & cycle==2 ~ 3,
park=='COLO' & cycle==1 ~ 2,
park!='COLO' & cycle==1 ~ 1,
park!='COLO' & cycle==2 ~ 2,
park!='COLO' & cycle==3 ~ 3))
#---ERMN Invasives by guild
ermn_guild<-read.csv("./data/ERMN/ERMN_Guild_Invasives20190528.csv")
names(ermn_guild)[names(ermn_guild)=="ave.quad.r"]<-'avg.quad.r'
#---NCRN Invasives by guild
ncrn_guild<-read.csv("./data/NCRN/NCRN_guild_invasives.csv")[,-1]
# ncrn_guild has two levels for shrubs- one for quads and one for microplots.
# the following steps calc. plot frequency using both shrub guilds, filters
# out the unwanted shrub-micro data, and cleans up factor level names
# and field names, etc to be able to incorporate with rest of networks' data.
ncrn_guild<-ncrn_guild %>% mutate(plot_number=str_sub(plot_name,start=-4)) %>%
select(network,park,plot_name,plot_number,x_coord,y_coord,year,cycle,guild,
avg.quad.r,avg.cover,quad.freq,qpct.freq,num.plot.spp)
#View(ncrn_guild)
# Pull out shrub data- if num.plot.spp>0 for shrubs-quad or shrubs-micro, plot.freq.shrub=1.
# Also combine # species in shrubs-micro and shrubs-quad
ncrn_shrubs<-ncrn_guild %>% filter(guild %in% c('Shrubs-Micro','Shrubs-Quad')) %>% droplevels() %>%
group_by(plot_name,year,cycle) %>%
summarise(plot.freq.shrub=ifelse(sum(num.plot.spp)>0,1,0), num.plot.spp.shrub=sum(num.plot.spp,na.rm=T)) %>%
mutate(guild=as.factor('Shrub')) %>% as.data.frame()
ncrn_guild2<-ncrn_guild %>% filter(guild!="Shrubs-Micro") %>% droplevels() %>%
mutate(guild = as.factor(case_when(guild=='Graminoids'~'Graminoid',
guild=='Herbaceous'~'Herbaceous',
guild=='Shrubs-Quad'~'Shrub',
guild=='Trees'~'Tree')))
ncrn_guild3<-merge(ncrn_guild2,ncrn_shrubs, by=c('plot_name','year','cycle','guild'),all.x=T)
ncrn_guild3$num.plot.spp.shrub[is.na(ncrn_guild3$num.plot.spp.shrub)]<-0
ncrn_guild3<-ncrn_guild3 %>% mutate(num.plot.spp = num.plot.spp+num.plot.spp.shrub) %>% select(-num.plot.spp.shrub)
ncrn_guild4<-ncrn_guild3 %>% mutate(plot.freq=ifelse(guild=='Shrub',plot.freq.shrub,ifelse(num.plot.spp>0,1,0)))
head(ncrn_guild4)
ncrn_guild5<- ncrn_guild4 %>% select(network,park,plot_name,plot_number,x_coord,y_coord,year,cycle,guild,plot.freq,
avg.quad.r,avg.cover,quad.freq,qpct.freq,num.plot.spp)
names(ermn_guild); names(netn_guild); names(midn_guild); names(ncrn_guild5)
comb_guild<-rbind(netn_guild,midn_guild,ermn_guild,ncrn_guild5)
comb_guild<-comb_guild %>% left_join(.,lat.order, by='park')
comb_guild<-comb_guild %>% mutate(qpct.freq=ifelse(network!='NCRN',qpct.freq*100,qpct.freq))
comb_guild2<-comb_guild %>% arrange(desc(network),plot_name,cycle)
#View(comb_guild2)
write.csv(comb_guild2,'./data/NETN-MIDN-ERMN-NCRN_guild_invasives.csv', row.names=F)
#-----------------
# Invasives by species
#-----------------
invlist<-read.csv("./data/Invasive_List.csv")
invlist_MIDN<-invlist %>% filter(MIDN==1) %>% droplevels() # only includes species on indicator list since 2007
invlist_NCRN<-invlist %>% filter(NCRN==1) %>% droplevels() # only includes species on indicator list since 2007
#---NETN Invasives by species
netn_species<-read.csv("./data/NETN/NETN_invasive_species_data.csv")
netn_species<-netn_species %>% mutate(network='NETN') %>%
select(network,Unit_Code:qpct.freq)
colnames(netn_species)<-c("network","park","plot_name","cycle",'species',"plot.freq",
"avg.cover",'quad.freq','qpct.freq')
netn_species_final<-merge(netn_species,comb_totinv[,c('plot_name','cycle','lat.rank')],
by=c('plot_name', 'cycle'), all.x=T)
netn_species_final<-netn_species_final %>%
select(network, park, plot_name, lat.rank, species, everything())
head(netn_species_final)
netn_check<-netn_species_final %>% group_by(plot_name, species) %>%
summarise(numplots=length(cycle)) %>% filter(numplots>3)
nrow(netn_check)
#---MIDN Invasives by species
midn_species<-read.csv("./data/MIDN/MIDN_invasive_species_data.csv")
names(midn_species)
midn_species<-midn_species %>% mutate(network='MIDN') %>%
select(network,Unit_Code:qpct.freq)
colnames(midn_species)<-c("network","park","plot_name","cycle",'species',"plot.freq",
"avg.cover",'quad.freq','qpct.freq')
midn_species_final<-merge(midn_species,comb_totinv[,c('plot_name','cycle','lat.rank')],
by=c('plot_name', 'cycle'), all.x=T)
# species list for 4-year status
midn_species_final<-midn_species_final %>%
select(network, park, plot_name, lat.rank, species, everything())
# species list for trend analysis
midn_species_trends <- midn_species_final %>% filter(species %in% invlist_MIDN$Latin_Name) %>% droplevels()
#View(midn_species_trends)
nrow(midn_species_final)
nrow(midn_species_trends)
#---ERMN Invasives by species
ermn_species<-read.csv('./data/ERMN/ERMN_Sp_Invasives20190528.csv')
head(ermn_species)
ermn_species2<-merge(ermn_species,comb_totinv[,c('plot_name','cycle','lat.rank')], by=c('plot_name','cycle'),all.x=T)
ermn_species2<-ermn_species2 %>% select(network,park,plot_name,lat.rank,Latin_name,cycle,plot.freq,avg.cover,quad.freq,qpct.freq)
names(ermn_species2)[names(ermn_species2)=="Latin_name"]<-'species'
invspp<-invlist$Latin_Name
ermn_unmatch<-ermn_species2 %>% filter(!(species %in% invspp))
sort(unique(ermn_unmatch$species)) #all species accounted for now.
ermn_species3<- merge(ermn_species2, invlist, by.x='species',by.y="Latin_Name",all.x=T)
names(ermn_species3) # Persicaria longiseta is duplicated because two synonyms- need to fix
ermn_species4<- ermn_species3 %>% mutate(species=ifelse(Accepted=='Y', paste0(species), paste0(Accepted.Name))) %>%
select(network, park, plot_name, lat.rank, species, cycle, plot.freq, avg.cover, quad.freq, qpct.freq) # replaced old with new names
ermn_species_final<-ermn_species4 %>% group_by(network, park, plot_name, lat.rank, species, cycle) %>%
summarise(plot.freq=sum(plot.freq), avg.cover=sum(avg.cover),
quad.freq=sum(quad.freq), qpct.freq=sum(qpct.freq)) %>% ungroup()
ermn_check<-ermn_species_final %>% group_by(plot_name, species) %>%
summarise(numplots=length(plot.freq)) %>% filter(numplots>3)
nrow(ermn_check) #0
#---NCRN Invasives by species
ncrn_species<-read.csv("./data/NCRN/NCRN_species_invasives.csv")[,-1]
nrow(ncrn_species) #280140
nrow(unique(ncrn_species)) #56020- each record is duplicated 5 times in the data
ncrn_unmatch<-ncrn_species %>% filter(!(species %in% invspp))
sort(unique(ncrn_unmatch$species)) #all species accounted for.
ncrn_species2<-unique(ncrn_species)
nrow(ncrn_species2) #56020
head(ncrn_species2)
# Combine all datasets
ncrn_species3<-merge(ncrn_species2,comb_totinv[,c('plot_name','cycle','lat.rank')],
by=c('plot_name', 'cycle'), all.x=T)
ncrn_species4<- merge(ncrn_species3, invlist, by.x='species',by.y="Latin_Name",all.x=T)
# species for 4-year status
ncrn_species5<- ncrn_species4 %>% mutate(species=ifelse(Accepted=='Y', paste0(species), paste0(Accepted.Name))) %>%
select(network, park, plot_name, lat.rank, species, cycle, plot.freq, avg.cover, quad.freq, qpct.freq) # replaced old with new names
View(ncrn_species5)
ncrn_species_final<-ncrn_species5 %>% group_by(network, park, plot_name, lat.rank, species, cycle) %>%
summarise(plot.freq=sum(plot.freq), avg.cover=sum(avg.cover),
quad.freq=sum(quad.freq), qpct.freq=sum(qpct.freq)) %>% ungroup()
ncrn_check<-ncrn_species_final %>% group_by(plot_name, species) %>%
summarise(numplots=length(plot.freq)) %>% filter(numplots>3)
nrow(ncrn_check) #0
# species list for trend analysis
ncrn_species_trends <- ncrn_species_final %>% filter(species %in% invlist_NCRN$Latin_Name) %>% droplevels()
#View(ncrn_species_trends)
# Combine network data
names(netn_species_final);names(midn_species_final);names(ermn_species_final);names(ncrn_species_final);
comb_species<-rbind(netn_species_final, midn_species_final, ermn_species_final, ncrn_species_final)
table(comb_species$network)
table(comb_species$park,comb_species$cycle)
comb_species2<-comb_species %>% arrange(desc(network),plot_name,cycle)
write.csv(comb_species2,'./data/NETN-MIDN-ERMN-NCRN_species_invasives_status.csv', row.names=F)
# Combine network data
comb_species_trends<-rbind(netn_species_final, midn_species_trends, ermn_species_final, ncrn_species_trends)
comb_species_check<-comb_species_trends %>% group_by(plot_name, species) %>%
summarise(numplots=length(plot.freq)) %>% filter(numplots>3)
nrow(comb_species_check)#0
write.csv(comb_species_trends,'./data/NETN-MIDN-ERMN-NCRN_species_invasives.csv', row.names=F)
levels(comb_species_trends$species)
|
/scripts/01b_Combine_Network_Data.R
|
permissive
|
KateMMiller/invasive_trend_analysis
|
R
| false | false | 12,737 |
r
|
# convert a plink .frq to files suitable to garfield maftssd
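# Illustrative format sketch (editorial assumption, not from the original script):
#   input  chr<k>_updated_var_ids_maf.frq  (plink --freq output):
#     CHR  SNP        A1  A2  MAF     NCHROBS
#     1    1:729679   G   C   0.1234  1000
#   output <output.dir>/chr<k>  (space-separated, no header), one line per variant:
#     <position> <MAF> <signed distance to nearest TSS>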
library(here)
library(readr)
library(dplyr)
library(magrittr)
library(biomaRt)
library(GenomicRanges)
# OUTPUT FILES #########################################################################################################
output.dir <- here("data/garfield-data/maftssd/hg38-mirQTL-MIXED/")
dir.create(output.dir, recursive = TRUE, showWarnings = FALSE)
# INPUT FILES ##########################################################################################################
input.dir <- "/pine/scr/m/i/mikelaff/garfield-mirQTL-MIXED/"
#input.dir <- "~/Downloads/"
#gencode.gtf <- here("data/genome/hg38_UCSC/gencode.v25.primary_assembly.annotation.gtf")
# GLOBALS ##############################################################################################################
CHROMS <- c(seq(1,22), "X")
# Get TSS ##############################################################################################################
genemart <- useMart(biomart = "ENSEMBL_MART_ENSEMBL", dataset = "hsapiens_gene_ensembl")
df.tss <- as_tibble(getBM(attributes = c("transcription_start_site",
"chromosome_name",
"strand",
"ensembl_transcript_id",
"transcript_gencode_basic"),
filters = "transcript_gencode_basic", values = c(TRUE),
mart = genemart))
df.tss %<>%
dplyr::filter(chromosome_name %in% CHROMS)
df.tss$strand[df.tss$strand == 1] <- "+"
df.tss$strand[df.tss$strand == -1] <- "-"
gr.tss <- makeGRangesFromDataFrame(df.tss,
seqnames.field = "chromosome_name",
start.field = "transcription_start_site",
end.field = "transcription_start_site",
strand.field = "strand")
rm(genemart, df.tss)
# Import/Export ########################################################################################################
# Loop over each chr
for (chrom in CHROMS) {
df.maf <- NULL
gr.nearest <- NULL
gr.snp <- NULL
print(paste("Working on chrom:", chrom))
# import .frq
print("Importing...")
df.maf <- read_table2(paste0(input.dir, "chr", chrom, "_updated_var_ids_maf.frq"), col_types = "ccccdi")
# filter for duplicates remove excess columns
print("Filtering...")
df.maf %>%
dplyr::select(SNP, MAF) %>%
dplyr::filter(!duplicated(SNP)) -> df.maf
# remove chr and :
print("Formatting...")
df.maf$SNP <- as.integer(gsub(paste0(chrom, ":"), "", df.maf$SNP))
print("Getting TSS distance...")
# GRanges of all the SNPs
gr.snp <- GRanges(seqnames = chrom,
ranges = IRanges(start = df.maf$SNP, end = df.maf$SNP),
strand = "*")
# GRanges of nearest tss
gr.nearest <- gr.tss[nearest(gr.snp, gr.tss)]
# Distance to nearest tss (upstream of tss: negative, downstream of tss: positive)
df.maf$TSSD <- start(gr.snp) - start(gr.nearest)
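    # Worked example of the sign convention (editorial note): a SNP at position
    # 1,000 with its nearest TSS at 1,200 gives TSSD = 1000 - 1200 = -200, while
    # a SNP at 1,500 with the same TSS gives TSSD = 1500 - 1200 = +300.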
# export formated tags file
print("Exporting...")
write_delim(df.maf, paste0(output.dir, "chr", chrom), delim = " ", col_names = FALSE)
}
|
/src/garfield/make_maftssd/hg38-mirQTL-MIXED/02_format_maftssd.R
|
no_license
|
mikelaff/mirna-eqtl-manuscript
|
R
| false | false | 3,343 |
r
|
# RDA
# Prepare data ----
rm(list=ls())
source("code/data_processing_code.R")
library(psych)
library(vegan) # metaMDS() and vegdist() below come from vegan
treats_to_plot <- as.character(unique(treats$treat))[c(3,4)]
treats_trimmed <- treats[(treats$treat %in% treats_to_plot), ]
treats_trimmed$codes <- as.character(treats_trimmed$codes)
treats_dummy <- dummy.code(as.character(treats_trimmed$treat))
treats_trimmed <- cbind(treats_trimmed, treats_dummy)
treats_trimmed$sites <- rownames(treats_trimmed)
cp_treats <- treats_trimmed[treats_trimmed$treat %in% c("CONTROL","PREDATOR"),]$sites
csites <- treats_trimmed[treats_trimmed$treat %in% c("CONTROL"),]$sites
psites <- treats_trimmed[treats_trimmed$treat %in% c("PREDATOR"),]$sites
source("code/pdi.R")
at <- 5 # abundance threshold
ins_bio_at <- ins_bio[ins_bio$amount >= at, ]
ins_bio_at <- ins_bio_at[-grep("aran|mant", ins_bio_at$morphotype), ]
contsites <- treats[treats$treat == "CONTROL",]$codes
predsites <- treats[treats$treat == "PREDATOR",]$codes
cbiomat <- contingencyTable2(ins_bio_at[ins_bio_at$plot %in% contsites, ],
"plot","morphotype","totbio")
pbiomat <- contingencyTable2(ins_bio_at[ins_bio_at$plot %in% predsites, ],
"plot","morphotype","totbio")
comparable <- colnames(cbiomat)[colnames(cbiomat) %in% colnames(pbiomat)]
cp_tr <- c(as.character(contsites),
as.character(predsites))
ins_bio_cp <- ins_bio_at[ins_bio_at$plot %in% cp_tr, ]
ins_bio_cp_comparable <- ins_bio_cp[ins_bio_cp$morphotype %in% comparable,]
ibc <- ins_bio_cp_comparable
ibc <- ibc[complete.cases(ibc),]
# Species in the exclosure treatment get "_P" appended to the end of their names
ibc$morphotype <- as.character(ibc$morphotype)
ibc[ibc$plot %in% predsites, ]$morphotype <- paste(ibc[ibc$plot %in% predsites,]$morphotype,"P", sep = "_")
# Only one contingency table for all woody species
compFood <- contingencyTable2(ibc,
"tree",
"morphotype",
"totbio")
dim(compFood)
# envdat <- data.frame(treat = rep(c("predator", "control"),
# c(dim(compFood)[1],
# dim(compFood)[1])))
# rownames(envdat) <- rownames(compFood)
species_point_color <- rep(rgb(255,194,10,150,
maxColorValue = 255),
dim(compFood)[2])
species_point_color[grep("_P", colnames(compFood))] <- rgb(12,123,220,150,
maxColorValue = 255)
par(mfrow=c(1,1))
dietrda <- metaMDS(compFood)
foodDist <- vegdist(t(compFood))
plot(dietrda, type = "n", display = "species")
points(dietrda, display = "species",
col = species_point_color, pch=19, cex = 1.5)
# Shift vs pdi
#diet breadths of comparable species
distspec <- as.matrix(foodDist)
comparable <- comparable[comparable %in% colnames(distspec)]
dbspec <- diet_breadth_ab[comparable]
shiftvals <- diag(distspec[colnames(distspec) %in% comparable,
colnames(distspec) %in% paste(comparable,"P",sep ="_")])
shiftvals[shiftvals == 1] <- 0.999
# Linear regression may be weighted by abundance
cds <- ins_bio_cp[ins_bio_cp$morphotype %in% comparable, ]
cds$morphotype <- as.character(cds$morphotype)
coll_abu <- tapply(cds$amount, cds$morphotype, sum)
shiftvals
dbspec
coll_abu
w_sv <- rep(shiftvals, log(coll_abu))
w_db <- rep(dbspec, log(coll_abu))
betareg_mod <- betareg::betareg(shiftvals~dbspec,
weights = log(coll_abu),
type = "ML")
b_m_test_w <- betareg::betareg(w_sv~w_db,
type = "ML")
betareg_mod_inv <- betareg::betareg(shiftvals~dbspec,
weights = coll_abu,
type = "ML")
summary(betareg_mod)
summary(betareg_mod_inv)
summary(b_m_test_w)
# plot(betareg_mod)
plotdf <- as.data.frame(cbind(shiftvals, dbspec, coll_abu))
preddat <- predict(betareg_mod,
newdata = plotdf,
type = "quantile",
at = c(0.025, 0.975))
plotdf <- cbind(plotdf, preddat)
library(ggplot2)
ggplot(plotdf, aes(x = dbspec, y = shiftvals)) +
geom_point(size = log(coll_abu), alpha = 0.5) +
geom_line(aes(y = predict(betareg_mod, plotdf)), col = "grey20",lwd = 2) +
geom_ribbon(aes(ymin= q_0.025, ymax = q_0.975), alpha=0.2) +
theme_bw()+
xlab("Specialization (Paired Differences Index)")+
ylab("Bray-Curtis diet dissimilarity")
|
/code/pdi_diet_shift.R
|
no_license
|
szefer-piotr/garden_food_webs
|
R
| false | false | 4,478 |
r
|
context("ANOVAs: known bugs")
test_that("aov does not throw 'Error() model is singular' warning for missing values", {
data(md_12.1)
md_12.1b <- md_12.1[-1,]
expect_warning(aov_ez("id", "rt", md_12.1b, within = c("angle", "noise")), "Missing values", all = TRUE)
})
test_that("regex works correctly in aov_car when also having within factors outside the Error term", {
data(obk.long)
expect_is(aov_car(value ~ treatment * gender*phase*hour + Error(id/phase*hour), data = obk.long), "afex_aov")
})
test_that("another label bug (May 2014)", {
data("sk2011.1")
levels(sk2011.1$inference) <- c("A+:D-", "A+:D+", "A-:D+", "A- : D-")
expect_is(aov_ez("id", "response", sk2011.1, between = "instruction", within = c("type", "inference"), return = "Anova", fun_aggregate = mean), "Anova.mlm")
})
test_that("orig label bug", {
data(obk.long)
obk2 <- obk.long
levels(obk2$phase) <- c("fup test", "post-hans", "pre tenetious")
expect_is(aov_car(value ~ treatment * gender + age + Error(id/phase*hour), data = obk2, factorize=FALSE, return = "Anova"), "Anova.mlm")
})
test_that("ANCOVA check bug (reported by Gang Chen), January 2013", {
dat <- read.table(header=TRUE, text = "ID Group Gender ROI Value Propdd00 GAS0 MAD0 CPD0
2016 AE M 05_06 1.581 0.543 1.908 0.439999999999998 -0.5335
2016 AE M 07_08 1.521 0.543 1.908 0.439999999999998 -0.5335
2016 AE M 09_10 1.623 0.543 1.908 0.439999999999998 -0.5335
2016 AE M 03_04 1.569 0.543 1.908 0.439999999999998 -0.5335
2016 AE M 11_12 1.719 0.543 1.908 0.439999999999998 -0.5335
2016 AE M 01_02 1.509 0.543 1.908 0.439999999999998 -0.5335
2031 HC F 09_10 1.739 -0.014 0.0480000000000018 -2.347 1.9665
2031 HC F 01_02 1.763 -0.014 0.0480000000000018 -2.347 1.9665
2031 HC F 03_04 1.8 -0.014 0.0480000000000018 -2.347 1.9665
2031 HC F 11_12 1.793 -0.014 0.0480000000000018 -2.347 1.9665
2031 HC F 05_06 1.765 -0.014 0.0480000000000018 -2.347 1.9665
2031 HC F 07_08 1.654 -0.014 0.0480000000000018 -2.347 1.9665
2063 AE F 11_12 1.742 -0.027 2.348 -8.88 -0.0335000000000001
2063 AE F 01_02 1.634 -0.027 2.348 -8.88 -0.0335000000000001
2063 AE F 03_04 1.638 -0.027 2.348 -8.88 -0.0335000000000001
2063 AE F 07_08 1.604 -0.027 2.348 -8.88 -0.0335000000000001
2063 AE F 09_10 1.654 -0.027 2.348 -8.88 -0.0335000000000001
2063 AE F 05_06 1.625 -0.027 2.348 -8.88 -0.0335000000000001
2042 HC M 05_06 1.649 -0.014 2.058 -3.497 -0.8635
2042 HC M 07_08 1.565 -0.014 2.058 -3.497 -0.8635
2042 HC M 09_10 1.765 -0.014 2.058 -3.497 -0.8635
2042 HC M 03_04 1.677 -0.014 2.058 -3.497 -0.8635
2042 HC M 11_12 1.706 -0.014 2.058 -3.497 -0.8635
2042 HC M 01_02 1.618 -0.014 2.058 -3.497 -0.8635
2071 AE M 05_06 1.712 -0.317 -0.802 6.74 1.9665
2071 AE M 07_08 1.64 -0.317 -0.802 6.74 1.9665
2071 AE M 09_10 1.791 -0.317 -0.802 6.74 1.9665
2071 AE M 03_04 1.725 -0.317 -0.802 6.74 1.9665
2071 AE M 11_12 1.782 -0.317 -0.802 6.74 1.9665
2071 AE M 01_02 1.712 -0.317 -0.802 6.74 1.9665
2134 HC M 05_06 1.672 -0.014 0.347999999999999 -5.807 -2.5335
2134 HC M 07_08 1.657 -0.014 0.347999999999999 -5.807 -2.5335
2134 HC M 09_10 1.791 -0.014 0.347999999999999 -5.807 -2.5335
2134 HC M 03_04 1.633 -0.014 0.347999999999999 -5.807 -2.5335
2134 HC M 11_12 1.859 -0.014 0.347999999999999 -5.807 -2.5335
2134 HC M 01_02 1.653 -0.014 0.347999999999999 -5.807 -2.5335
2009 AE F 09_10 1.672 -0.027 1.058 3.36 11.1365
2009 AE F 03_04 1.723 -0.027 1.058 3.36 11.1365
2009 AE F 05_06 1.676 -0.027 1.058 3.36 11.1365
2009 AE F 07_08 1.622 -0.027 1.058 3.36 11.1365
2009 AE F 01_02 1.633 -0.027 1.058 3.36 11.1365
2009 AE F 11_12 1.853 -0.027 1.058 3.36 11.1365
2132 HC M 05_06 1.758 -0.014 -1.082 -2.857 -0.0335000000000001
2132 HC M 07_08 1.623 -0.014 -1.082 -2.857 -0.0335000000000001
2132 HC M 09_10 1.843 -0.014 -1.082 -2.857 -0.0335000000000001
2132 HC M 03_04 1.773 -0.014 -1.082 -2.857 -0.0335000000000001
2132 HC M 11_12 1.806 -0.014 -1.082 -2.857 -0.0335000000000001
2132 HC M 01_02 1.708 -0.014 -1.082 -2.857 -0.0335000000000001
2127 HC F 11_12 1.824 -0.014 0.628 6.223 -0.5335
2127 HC F 09_10 1.871 -0.014 0.628 6.223 -0.5335
2127 HC F 01_02 1.687 -0.014 0.628 6.223 -0.5335
2127 HC F 03_04 1.699 -0.014 0.628 6.223 -0.5335
2127 HC F 07_08 1.646 -0.014 0.628 6.223 -0.5335
2127 HC F 05_06 1.738 -0.014 0.628 6.223 -0.5335
2081 AE M 09_10 1.807 -0.027 -2.082 2.43 -1.5335
2081 AE M 11_12 1.917 -0.027 -2.082 2.43 -1.5335
2081 AE M 03_04 1.767 -0.027 -2.082 2.43 -1.5335
2081 AE M 05_06 1.776 -0.027 -2.082 2.43 -1.5335
2081 AE M 07_08 1.733 -0.027 -2.082 2.43 -1.5335
2081 AE M 01_02 1.775 -0.027 -2.082 2.43 -1.5335
2086 AE F 11_12 1.768 -0.457 -1.082 -1.76 6.9665
2086 AE F 09_10 1.769 -0.457 -1.082 -1.76 6.9665
2086 AE F 01_02 1.752 -0.457 -1.082 -1.76 6.9665
2086 AE F 03_04 1.769 -0.457 -1.082 -1.76 6.9665
2086 AE F 05_06 1.751 -0.457 -1.082 -1.76 6.9665
2086 AE F 07_08 1.728 -0.457 -1.082 -1.76 6.9665
2033 HC M 05_06 1.804 0.126 2.768 7.083 -2.2035
2033 HC M 07_08 1.784 0.126 2.768 7.083 -2.2035
2033 HC M 09_10 1.948 0.126 2.768 7.083 -2.2035
2033 HC M 03_04 1.821 0.126 2.768 7.083 -2.2035
2033 HC M 11_12 2.143 0.126 2.768 7.083 -2.2035
2033 HC M 01_02 1.824 0.126 2.768 7.083 -2.2035
2007 AE M 07_08 1.554 -0.027 0.488 -6.05 -0.5335
2007 AE M 05_06 1.643 -0.027 0.488 -6.05 -0.5335
2007 AE M 09_10 1.674 -0.027 0.488 -6.05 -0.5335
2007 AE M 03_04 1.593 -0.027 0.488 -6.05 -0.5335
2007 AE M 11_12 1.726 -0.027 0.488 -6.05 -0.5335
2007 AE M 01_02 1.517 -0.027 0.488 -6.05 -0.5335
6062 HC M 05_06 1.911 -0.014 -3.802 4.093 -3.5335
6062 HC M 07_08 1.887 -0.014 -3.802 4.093 -3.5335
6062 HC M 09_10 1.951 -0.014 -3.802 4.093 -3.5335
6062 HC M 03_04 1.798 -0.014 -3.802 4.093 -3.5335
6062 HC M 11_12 1.953 -0.014 -3.802 4.093 -3.5335
6062 HC M 01_02 1.772 -0.014 -3.802 4.093 -3.5335
2072 AE M 05_06 1.667 0.253 1.908 0.289999999999999 -1.0335
2072 AE M 07_08 1.587 0.253 1.908 0.289999999999999 -1.0335
2072 AE M 09_10 1.739 0.253 1.908 0.289999999999999 -1.0335
2072 AE M 03_04 1.638 0.253 1.908 0.289999999999999 -1.0335
2072 AE M 11_12 1.784 0.253 1.908 0.289999999999999 -1.0335
2072 AE M 01_02 1.662 0.253 1.908 0.289999999999999 -1.0335
2008 HC F 05_06 1.623 -0.014 -1.372 -2.317 2.1365
2008 HC F 07_08 1.6 -0.014 -1.372 -2.317 2.1365
2008 HC F 09_10 1.688 -0.014 -1.372 -2.317 2.1365
2008 HC F 03_04 1.624 -0.014 -1.372 -2.317 2.1365
2008 HC F 11_12 1.772 -0.014 -1.372 -2.317 2.1365
2008 HC F 01_02 1.656 -0.014 -1.372 -2.317 2.1365
2070 AE F 05_06 1.657 0.113 -1.372 -0.140000000000001 -5.5335
2070 AE F 07_08 1.579 0.113 -1.372 -0.140000000000001 -5.5335
2070 AE F 09_10 1.75 0.113 -1.372 -0.140000000000001 -5.5335
2070 AE F 03_04 1.808 0.113 -1.372 -0.140000000000001 -5.5335
2070 AE F 11_12 1.777 0.113 -1.372 -0.140000000000001 -5.5335
2070 AE F 01_02 1.702 0.113 -1.372 -0.140000000000001 -5.5335
2064 AE F 11_12 1.781 -0.027 -5.512 3.57 -3.5335
2064 AE F 09_10 1.724 -0.027 -5.512 3.57 -3.5335
2064 AE F 01_02 1.631 -0.027 -5.512 3.57 -3.5335
2064 AE F 03_04 1.607 -0.027 -5.512 3.57 -3.5335
2064 AE F 05_06 1.577 -0.027 -5.512 3.57 -3.5335
2064 AE F 07_08 1.546 -0.027 -5.512 3.57 -3.5335
2039 HC M 09_10 1.879 -0.014 2.628 -1.867 -5.5335
2039 HC M 11_12 1.918 -0.014 2.628 -1.867 -5.5335
2039 HC M 03_04 1.794 -0.014 2.628 -1.867 -5.5335
2039 HC M 05_06 1.787 -0.014 2.628 -1.867 -5.5335
2039 HC M 07_08 1.687 -0.014 2.628 -1.867 -5.5335
2039 HC M 01_02 1.774 -0.014 2.628 -1.867 -5.5335
2117 HC F 09_10 1.712 -0.014 0.917999999999999 1.293 3.7965
2117 HC F 11_12 1.75 -0.014 0.917999999999999 1.293 3.7965
2117 HC F 03_04 1.717 -0.014 0.917999999999999 1.293 3.7965
2117 HC F 07_08 1.587 -0.014 0.917999999999999 1.293 3.7965
2117 HC F 05_06 1.667 -0.014 0.917999999999999 1.293 3.7965
2117 HC F 01_02 1.663 -0.014 0.917999999999999 1.293 3.7965
")
dat$ID <- as.factor(dat$ID)
fm <- aov_car(Value ~ Propdd00 + Group + Gender + GAS0 + MAD0 + CPD0 + Error(ID/ROI), data=dat, factorize=FALSE, return = "Anova")
fm0 <- aov_car(Value ~ MAD0 + CPD0 + Error(ID/ROI), data=dat, factorize=FALSE, return='afex_aov')
expect_is(fm, "Anova.mlm")
expect_is(fm0, "afex_aov")
})
test_that("ANOVA: ids in multiple between.subjects conditions", {
species<- c("a","b","c","c","b","c","b","b","a","b","c","c","a","a","b","b","a","a","b","c")
habitat<- c("x","x","x","y","y","y","x","x","y","z","y","y","z","z","x","x","y","y","z","z")
mvt.rate<-c(6,5,7,8,9,4,3,5,6,9,3,6,6,7,8,9,5,6,7,8)
ind<-as.factor(c(1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4))
data1<-data.frame(species, habitat, mvt.rate, ind)
# should give an error
expect_error(aov_ez("ind", "mvt.rate", data1, within = "habitat", between = "species"), "Following ids are in more than one between subjects condition:")
})
test_that("empty factors are not causing aov.cat to choke", {
data(sleepstudy) #Example data in lme4
sleepstudy$Days<-factor(sleepstudy$Days)
#Works with all factors
expect_is(aov_ez("Subject","Reaction",sleepstudy, within="Days", return = "Anova"), "Anova.mlm")
  #Removing a factor level (leaving it empty) used to make this fail; it should work now
expect_is(aov_ez("Subject","Reaction",sleepstudy[sleepstudy$Days!=9,], within="Days", return = "Anova"), "Anova.mlm")
})
test_that("factors have more than one level", {
data(obk.long)
expect_error(aov_car(value ~ treatment+ Error(id/phase), data = obk.long[ obk.long$treatment == "control",]), "one level only.")
expect_error(aov_car(value ~ treatment+ Error(id/phase), data = obk.long[ obk.long$phase == "pre",]), "one level only.")
})
test_that("variable names longer", {
data(obk.long)
obk.long$gender2 <- obk.long$treatment
orig <- aov_car(value ~ treatment * gender + age + Error(id/phase*hour), data = obk.long, factorize=FALSE, observed = "gender")
v1 <- aov_car(value ~ gender2 * gender + age + Error(id/phase*hour), data = obk.long, factorize=FALSE, observed = "gender")
v2 <- aov_car(value ~ gender2 * gender + age + Error(id/phase*hour), data = obk.long, factorize=FALSE, observed = "gender2")
expect_equivalent(orig$anova_table, v1$anova_table)
expect_identical(nice(orig)[,-1], nice(v1)[,-1])
expect_identical(nice(orig)[,c("df", "MSE", "F", "p.value")], nice(v2)[,c("df", "MSE", "F", "p.value")])
expect_equivalent(orig$anova_table[,c("num Df", "den Df", "MSE", "F", "Pr(>F)")], v2$anova_table[c("num Df", "den Df", "MSE", "F", "Pr(>F)")])
})
test_that("works with dplyr data.frames (see https://github.com/singmann/afex/issues/6):", {
if (getRversion() >= "3.1.2") {
skip_if_not_installed("dplyr")
data(md_12.1)
md2 <- dplyr::as_tibble(md_12.1)
expect_is(aov_ez("id", "rt", md2, within = c("angle", "noise"),
anova_table=list(correction = "none", es = "none")),
"afex_aov")
}
})
test_that("return='nice' works", {
data(md_12.1)
expect_is(aov_ez("id", "rt", md_12.1, within = c("angle", "noise"), return = "nice"), "data.frame")
})
test_that("aov_car works with column names containing spaces: https://github.com/singmann/afex/issues/22", {
data <- list("dependent" = rnorm(100), "RM Factor 1" = factor(rep(c("Level 1", "Level 2"), 50)), "subject" = factor(rep(1:50, each = 2)))
attr(data, 'row.names') <- seq_len(length(data[[1]]))
attr(data, 'class') <- 'data.frame'
expect_is(aov_car(dependent ~ `RM Factor 1` + Error(subject/(`RM Factor 1`)), data), "afex_aov")
expect_is(aov_4(dependent ~ `RM Factor 1` + (`RM Factor 1`|subject), data), "afex_aov")
expect_is(aov_ez("subject", "dependent", data, within = "RM Factor 1"), "afex_aov")
})
test_that("aov_car works with column names containing spaces for between factors", {
data <- list("dependent" = rnorm(100), "RM Factor 1" = factor(rep(c("Level 1", "Level 2"), 50)), "subject" = factor(rep(1:100)))
attr(data, 'row.names') <- seq_len(length(data[[1]]))
attr(data, 'class') <- 'data.frame'
expect_is(aov_car(dependent ~ `RM Factor 1` + Error(subject), data), "afex_aov")
expect_is(aov_4(dependent ~ `RM Factor 1` + (1|subject), data), "afex_aov")
expect_is(aov_ez("subject", "dependent", data, between = "RM Factor 1"), "afex_aov")
})
test_that("aov_ez works with multiple covariates", {
skip_if_not_installed("psychTools")
require(psychTools)
data(msq)
msq2 <- msq[!is.na(msq$Extraversion),]
msq2 <- droplevels(msq2[msq2$ID != "18",])
msq2$TOD <- msq2$TOD-mean(msq2$TOD)
msq2$MSQ_Time <- msq2$MSQ_Time-mean(msq2$MSQ_Time)
msq2$condition <- msq2$condition-mean(msq2$condition) # that is somewhat stupid
mulcov <- aov_ez(data=msq2, dv="Extraversion", id = "ID",
between = "condition",
covariate=c("TOD", "MSQ_Time"),
factorize=FALSE, fun_aggregate = mean)
expect_is(mulcov, "afex_aov")
})
test_that("aov_car works with p.val adjustment == NA for HF as well as GG", {
# see: https://github.com/singmann/afex/issues/36
skip_on_cran() ## takes rather long
load("anova_hf_error.rda")
#load("tests/testthat/anova_hf_error.rda")
expect_is(nice(aov_ez("Snum", "RT", demo, within=c("DistF", "WidthF", "AngleF"))),
"nice_table")
expect_is(nice(aov_ez("Snum", "RT", demo, within=c("DistF", "WidthF", "AngleF"),
anova_table = list(correction = "GG"))),
"nice_table")
expect_is(nice(aov_ez("Snum", "RT", demo, within=c("DistF", "WidthF", "AngleF"),
anova_table = list(correction = "HF"))),
"nice_table")
})
test_that("aov_car: character variables and factorize = FALSE", {
data(obk.long)
obk2 <- obk.long
obk2$treatment <- as.character(obk2$treatment)
a1 <- aov_car(value ~ treatment * gender + Error(id), data = obk.long,
fun_aggregate = mean)
a2 <- aov_car(value ~ treatment * gender + Error(id), data = obk2,
fun_aggregate = mean)
a3 <- aov_car(value ~ treatment * gender + Error(id), data = obk2,
fun_aggregate = mean, factorize = FALSE)
expect_equal(a1$anova_table, a2$anova_table)
expect_equal(a1$anova_table, a3$anova_table)
})
|
/tests/testthat/test-aov_car-bugs.R
|
no_license
|
minghao2016/afex
|
R
| false | false | 13,926 |
r
|
#written by Santiago Velazco & Andre Andrade
# Function for summary of models performance
Validation_Table_TMLA<-function(Eval,
Eval_JS,
N){
#Creates model validation table
summres <- data.frame(matrix(0, N, 6))
for(i in 1:N){
#Calculate Evaluation Metrics
summres[i, ] <- cbind(AUC=Eval[[i]]@auc,
Kappa=max(Eval[[i]]@kappa),
                          TSS=max(Eval[[i]]@TPR + Eval[[i]]@TNR) - 1,
Jaccard=max(Eval_JS[[i]]$Jaccard),
Sorensen=max(Eval_JS[[i]]$Sorensen),
Fpb=max(Eval_JS[[i]]$Fpb))
colnames(summres) <- c( "AUC","Kappa", "TSS","Jaccard","Sorensen","Fpb")
}
if(N!=1){
resSD <- data.frame(matrix(round(apply(summres,2,sd), 3),nrow=1,ncol=6))
res <- data.frame(matrix(round(colMeans(summres), 3),nrow=1,ncol=6))
colnames(res) <- colnames(summres)
colnames(resSD) <- paste0(colnames(res),"_SD")
res <- cbind(res,resSD)
}else{
res <- data.frame(matrix(round(colMeans(summres), 3),nrow=1,ncol=6))
colnames(res) <- colnames(summres)
}
return(res)
}
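# Hypothetical usage sketch (not part of the original file): 'eval_list' is assumed to be
# a list of dismo::evaluate() ModelEvaluation objects (slots @auc, @kappa, @TPR, @TNR) and
# 'eval_js_list' a parallel list holding Jaccard, Sorensen and Fpb values, one element per
# cross-validation replicate.
if (FALSE) {
  perf <- Validation_Table_TMLA(Eval = eval_list, Eval_JS = eval_js_list,
                                N = length(eval_list))
  perf # one row with mean AUC/Kappa/TSS/Jaccard/Sorensen/Fpb (plus *_SD columns when N > 1)
}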
|
/R/ValidationTable_TMLA.R
|
no_license
|
neiljun/ENMTML
|
R
| false | false | 1,228 |
r
|
brkpt.accuracy=function(model.brkpts, true.brkpts, acc.tol, dup.tol, miss.tol) {
model.brkpts<- as.numeric(model.brkpts) %>% na.omit() %>% as.vector()
true.brkpts<- as.numeric(true.brkpts) %>% na.omit() %>% as.vector()
all.brkpts<- data.frame(brks = c(true.brkpts, model.brkpts),
type = rep(c("True","Model"),c(length(true.brkpts),
length(model.brkpts))))
accuracy<- matrix(NA, length(model.brkpts), 1)
for (i in 1:length(model.brkpts)) { #assign brkpts as accurate or inaccurate
tmp<- c(model.brkpts[i] - (acc.tol:0), model.brkpts[i] + (1:acc.tol)) %in% true.brkpts %>%
sum()
if (tmp == 0) {
accuracy[i]<- "Inaccurate"
} else {
accuracy[i]<- "Accurate"
}
}
if (sum(abs(diff(model.brkpts)) <= dup.tol) >= 0) { #identify duplicate brkpts
ind<- which(abs(diff(model.brkpts)) <= dup.tol)
ind<- sort(c(ind, ind+1))
}
ind.acc<- ind[which(accuracy[ind] == "Accurate")]
ind.inacc<- ind[which(accuracy[ind] == "Inaccurate")]
accuracy[ind.acc]<- "Accurate Duplicate"
accuracy[ind.inacc]<- "Inaccurate Duplicate"
accuracy<- c(rep("True",length(true.brkpts)), accuracy)
#identify missing breakpoints from model
status<- matrix(NA,length(true.brkpts),1)
for (i in 1:length(true.brkpts)) {
tmp<- c(true.brkpts[i] - (miss.tol:0), true.brkpts[i] + (1:miss.tol)) %in% model.brkpts %>%
sum()
if (tmp == 0) {
status[i]<- "Missing"
} else {
status[i]<- "Present"
}
}
miss.ind<- which(status =="Missing")
all.brkpts$acc<- accuracy
all.brkpts[miss.ind, "acc"]<- "Missing"
all.brkpts
}
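# Hypothetical usage sketch (not part of the original file): compares model-estimated
# breakpoints against known (simulated) breakpoints; the pipe operator must be available
# (e.g. via library(dplyr)) because brkpt.accuracy() uses %>% internally.
if (FALSE) {
  library(dplyr)
  brkpt.accuracy(model.brkpts = c(10, 24, 52, 53, 80),
                 true.brkpts = c(10, 25, 50, 75),
                 acc.tol = 2, dup.tol = 1, miss.tol = 5)
}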
#--------------------------------------
extract.behav.props=function(params, lims, behav.names){
#only for gamma and wrapped Cauchy distributions
#only defined for step lengths ('dist') and turning angles ('rel.angle')
  #params must be a list of data frames storing 2 cols for the params of the gamma and wrapped Cauchy distributions
#lims must be a list of numeric vectors
#number of bins for both params are already defined as 5 (SL) and 8 (TA)
#order of states is set as encamped, ARS, transit
#Extract bin proportions for step lengths
props.SL<- list() #create list to store results per behavior
SL<- params[[1]] #extract SL params from argument; must be first
dist.bin.lims<- lims[[1]] #extract limits for SL from argument; must be first
for (j in 1:nrow(SL)) {
shape1=SL[j,1]
rate1=SL[j,2]
bins.estim=rep(NA,(length(dist.bin.lims) - 1))
for (i in 2:length(dist.bin.lims)) {
if (i-1 == 1) {
bins.estim[i-1]=pgamma(dist.bin.lims[i],shape=shape1,rate=rate1)
} else {
bins.estim[i-1]=pgamma(dist.bin.lims[i],shape=shape1,rate=rate1)-
pgamma(dist.bin.lims[i-1],shape=shape1,rate=rate1)
}
}
props.SL[[j]]<- bins.estim
}
names(props.SL)<- behav.names
props.SL1<- props.SL %>%
bind_rows() %>%
pivot_longer(cols = names(.), names_to = "behav", values_to = "prop") %>%
arrange(behav) %>%
mutate(var = "Step Length", bin = rep(1:5, length(behav.names)))
#Extract bin proportions for turning angles
props.TA<- list() #create list to store results per behavior
TA<- params[[2]] #extract TA params from argument; must be second
angle.bin.lims<- lims[[2]] #extract limits for TA from argument; must be second
for (j in 1:nrow(TA)) {
mu1=circular(TA[j,1],type='angles',units='radians')
rho1=TA[j,2]
bins.estim=rep(NA,(length(angle.bin.lims) - 1))
for (i in 2:length(angle.bin.lims)) {
pwrappedcauchy=integrate(dwrappedcauchy,
angle.bin.lims[i-1],
angle.bin.lims[i],
mu=mu1,
rho=rho1)
bins.estim[i-1]=pwrappedcauchy$value
}
props.TA[[j]]<- bins.estim
}
names(props.TA)<- behav.names
props.TA1<- props.TA %>%
bind_rows() %>%
pivot_longer(cols = names(.), names_to = "behav", values_to = "prop") %>%
arrange(behav) %>%
mutate(var = "Turning Angle", bin = rep(1:8, length(behav.names)))
#Combine proportions for SL and TA
props<- rbind(props.SL1, props.TA1)
props$behav<- factor(props$behav, levels = behav.names)
props<- props %>%
arrange(behav, var)
props
}
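# Hypothetical usage sketch (not part of the original file): gamma (shape, rate) and
# wrapped-Cauchy (mu, rho) parameters for three states, with 6 step-length and 9
# turning-angle bin limits; requires the circular, dplyr and tidyr packages.
if (FALSE) {
  library(circular); library(dplyr); library(tidyr)
  params <- list(data.frame(shape = c(0.8, 2, 5), rate = c(4, 2, 1)),
                 data.frame(mu = c(pi, pi, 0), rho = c(0.8, 0.2, 0.7)))
  lims <- list(c(0, 0.25, 0.5, 1, 2, 5),
               seq(-pi, pi, length.out = 9))
  extract.behav.props(params, lims, behav.names = c("Encamped", "ARS", "Transit"))
}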
#--------------------------------------
extract.behav.props_weird=function(params, lims, behav.names){
  #only for truncated normal and mixed distributions (beta, uniform, truncated normal)
  #only defined for step lengths ('dist') and turning angles ('rel.angle')
  #params must be a list of data frames storing 2 cols for the params of each distribution
  #lims must be a list of numeric vectors
#number of bins for both params are already defined as 5 (SL) and 8 (TA)
#order of states is set as encamped, ARS, transit
#Extract bin proportions for step lengths
props.SL<- list() #create list to store results per behavior
SL<- params[[1]] #extract SL params from argument; must be first
dist.bin.lims<- lims[[1]] #extract limits for SL from argument; must be first
for (j in 1:nrow(SL)) {
mean1=SL[j,1]
sd1=SL[j,2]
bins.estim=rep(NA,(length(dist.bin.lims) - 1))
for (i in 2:length(dist.bin.lims)) {
if (i-1 == 1) {
bins.estim[i-1]=pnorm(dist.bin.lims[i],mean=mean1,sd=sd1) -
pnorm(0,mean=mean1,sd=sd1)
} else {
bins.estim[i-1]=pnorm(dist.bin.lims[i],mean=mean1,sd=sd1)-
pnorm(dist.bin.lims[i-1],mean=mean1,sd=sd1)
}
}
props.SL[[j]]<- bins.estim / sum(bins.estim)
}
names(props.SL)<- behav.names
props.SL1<- props.SL %>%
bind_rows() %>%
pivot_longer(cols = names(.), names_to = "behav", values_to = "prop") %>%
arrange(behav) %>%
mutate(var = "Step Length", bin = rep(1:5, length(behav.names)))
#Extract bin proportions for turning angles
props.TA<- list() #create list to store results per behavior
TA<- params[[2]] #extract TA params from argument; must be second
angle.bin.lims<- lims[[2]] #extract limits for TA from argument; must be second
#Encamped state (beta distrib)
a=TA[1,1]
b=TA[1,2]
bins.estim=rep(NA,(length(angle.bin.lims) - 1))
for (i in 2:length(angle.bin.lims)) {
if (i-1 == 1) {
bins.estim[i-1]=pbeta((angle.bin.lims[i]+pi)/(2*pi),shape1=a,shape2=b) -
pbeta(0,shape1=a,shape2=b)
} else {
bins.estim[i-1]=pbeta((angle.bin.lims[i]+pi)/(2*pi),shape1=a,shape2=b)-
pbeta((angle.bin.lims[i-1]+pi)/(2*pi),shape1=a,shape2=b)
}
}
props.TA[[1]]<- bins.estim
#ARS state (uniform distrib)
lo=TA[2,1]
up=TA[2,2]
bins.estim=rep(NA,(length(angle.bin.lims) - 1))
for (i in 2:length(angle.bin.lims)) {
if (i-1 == 1) {
bins.estim[i-1]=punif(angle.bin.lims[i],min=lo,max=up) -
punif(-pi,min=lo,max=up)
} else {
bins.estim[i-1]=punif(angle.bin.lims[i],min=lo,max=up)-
punif(angle.bin.lims[i-1],min=lo,max=up)
}
}
props.TA[[2]]<- bins.estim
#Transit state (truncated normal distrib)
mean1=TA[3,1]
sd1=TA[3,2]
bins.estim=rep(NA,(length(angle.bin.lims) - 1))
for (i in 2:length(angle.bin.lims)) {
if (i-1 == 1) {
bins.estim[i-1]=pnorm(angle.bin.lims[i],mean=mean1,sd=sd1) -
pnorm(-pi,mean=mean1,sd=sd1)
} else {
bins.estim[i-1]=pnorm(angle.bin.lims[i],mean=mean1,sd=sd1)-
pnorm(angle.bin.lims[i-1],mean=mean1,sd=sd1)
}
}
props.TA[[3]]<- bins.estim
names(props.TA)<- behav.names
props.TA1<- props.TA %>%
bind_rows() %>%
pivot_longer(cols = names(.), names_to = "behav", values_to = "prop") %>%
arrange(behav) %>%
mutate(var = "Turning Angle", bin = rep(1:8, length(behav.names)))
#Combine proportions for SL and TA
props<- rbind(props.SL1, props.TA1)
props$behav<- factor(props$behav, levels = behav.names)
props<- props %>%
arrange(behav, var)
props
}
#--------------------------------------
rtnorm=function(n,lo,hi,mu,sig, log = FALSE){ #generates truncated normal variates based on cumulative normal distribution
#normal truncated lo and hi
if (log == TRUE) {
mu<- exp(mu)
sig<- exp(sig)
}
if(length(lo) == 1 & length(mu) > 1)lo <- rep(lo,length(mu))
if(length(hi) == 1 & length(mu) > 1)hi <- rep(hi,length(mu))
q1 <- pnorm(lo,mu,sig) #cumulative distribution
q2 <- pnorm(hi,mu,sig) #cumulative distribution
z <- runif(n,q1,q2)
z <- qnorm(z,mu,sig)
z[z == -Inf] <- lo[z == -Inf]
z[z == Inf] <- hi[z == Inf]
z
}
#--------------------------------------
#density of truncated normal distribution
dtnorm=function(x,mean1,sd1,lo,hi){
denom=pnorm(hi,mean=mean1,sd=sd1)-pnorm(lo,mean=mean1,sd=sd1)
dnorm(x,mean=mean1,sd=sd1)/denom
}
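# Hypothetical quick check (not part of the original file): rtnorm() draws should stay
# inside [lo, hi], and dtnorm() should integrate to ~1 over the truncation range.
if (FALSE) {
  set.seed(1)
  z <- rtnorm(1000, lo = 0, hi = 5, mu = 2, sig = 3)
  range(z) # all draws within [0, 5]
  integrate(dtnorm, 0, 5, mean1 = 2, sd1 = 3, lo = 0, hi = 5)$value # ~1
}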
|
/helper functions.R
|
no_license
|
ValleLabUF/method_comparison
|
R
| false | false | 8,998 |
r
|
#######################################################################
# Auxiliary functions for the hyperPheno R package.
#
# The hyperPheno is developed and maintained by Bijan Seyednasrollah.
# The main initial development was done in November, 2017.
#
# Most recent release: https://github.com/bnasr/hyperPheno
#######################################################################
library(jpeg)
library(abind)
library(raster)
library(lubridate)
library(R.utils)    # gunzip() used in loadCube()
library(caTools)    # read.ENVI() used in loadCube() and loadDark()
library(data.table) # data.table()/as.data.table() used in getDT()
plotCLArray <- function(clArray, bands=1:3){
tmp <- tempfile()
if(length(dim(clArray))==2)
writeJPEG(clArray, target = tmp)
else
writeJPEG(clArray[,,bands], target = tmp)
plotJPEG(tmp)
}
plotJPEG <- function(path, add=FALSE)
{
jpgNative <- readJPEG(path, native=T) # read the file
res <- dim(jpgNative)[1:2] # get the resolution
if (!add) # initialize an empty plot area if add==FALSE
plot(NA,xlim=c(1,res[2]),ylim=c(1,res[1]), type='n',
xaxs='i',yaxs='i',
# xaxt='n',yaxt='n',
xlab='',ylab='',bty='o')
rasterImage(jpgNative,1,1,res[2],res[1])
}
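# Hypothetical usage sketch (not part of the original file): plots the sample JPEG that
# ships with the 'jpeg' package (path assumed from that package's own examples).
if (FALSE) {
  plotJPEG(system.file("img", "Rlogo.jpg", package = "jpeg"))
}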
loadCube <- function(zipfile, header = 'headers/cube.hdr'){
tmp <- '.tmp.cube'
# if(file.exists(tmp)) file.remove(tmp)
gunzip(filename = zipfile, destname = tmp, remove = FALSE, overwrite = TRUE)
a <- try(read.ENVI(tmp, headerfile = header), silent = T)
file.remove(tmp)
# attributes(a)$name <- basename(zipfile)
a
}
loadDark <- function(darkfile, header = 'headers/dark.hdr'){
d <- try(read.ENVI(darkfile, headerfile = header), silent = T)
if(class(d)=="try-error") return(NULL)
m <- apply(d, 3, mean)
m
}
saveJPEG <- function(hp, jpgFile, outDir = 'jpeg/'){
r <- rotate(rotate(hp[,,56]))
g <- rotate(rotate(hp[,,33]))
b <- rotate(rotate(hp[,,22]))
r <- r/(quantile(r, .99)*.99)
g <- g/(quantile(g, .99)*.99)
b <- b/(quantile(b, .99)*.99)
rgb <- abind(r, g, b, along = 3)
rgb[rgb>1] <- 1
rgb[rgb<0] <- 0
rgb <- aperm(rgb, perm = c(2,1,3))
writeJPEG(rgb, target = paste0(outDir, jpgFile))
invisible(rgb)
}
rotate <- function(x) t(apply(x, 2, rev))
parseROI <- function(roifilename, roipath = '', downloadDir = tempdir()){
fname <- paste0(roipath, roifilename)
#if(!file.exists(fname)) return(NULL)
roilines <- readLines(fname)
wEmptyLine <- roilines%in%c('', ' ', ' ')
wCommented <- as.vector(sapply(roilines, grepl, pattern = '^#'))
wNotSkip <- !(wEmptyLine|wCommented)
parseroiline <- function(roilines, property){
wProp <- grepl(roilines, pattern = property)
gsub(roilines[wProp], pattern = paste0('# ', property, ': '), replacement = '')
}
ROIList <- list(siteName = parseroiline(roilines[wCommented], 'Site'),
vegType = parseroiline(roilines[wCommented], 'Veg Type'),
ID = as.numeric(parseroiline(roilines[wCommented], 'ROI ID Number')),
Owner = parseroiline(roilines[wCommented], 'Owner'),
createDate = parseroiline(roilines[wCommented], 'Creation Date'),
createTime = parseroiline(roilines[wCommented], 'Creation Time'),
updateDate = parseroiline(roilines[wCommented], 'Update Date'),
updateTime = parseroiline(roilines[wCommented], 'Update Time'),
Description = parseroiline(roilines[wCommented], 'Description'),
masks = NULL)
parsedMasks <- read.table(textConnection(roilines[which(wNotSkip)]), sep = ',', header = T)
masksList <- list()
for(i in 1:nrow(parsedMasks)){
maskpath <- paste0(roipath, parsedMasks$maskfile[i])
maskpointspath <- gsub(maskpath, pattern = '.tif', replacement = '_vector.csv')
if(file.exists(maskpointspath)) {
dummy=0
maskpoints <- as.matrix(read.csv(maskpointspath, header = F, skip = 1))
}else{
maskpoints <- NULL
}
# maskpath <- tryDownload(maskpath, downloadDir = downloadDir, showLoad = T, Update = F)
tmpMask <- list(maskpoints = maskpoints,
startdate = as.character(parsedMasks$start_date[i]),
starttime = as.character(parsedMasks$start_time[i]),
enddate = as.character(parsedMasks$end_date[i]),
endtime = as.character(parsedMasks$end_time[i]),
sampleImage = as.character(parsedMasks$sample_image[i]),
rasteredMask = as.matrix(raster(maskpath)))
tmpMask$rasteredMask[(!is.na(tmpMask$rasteredMask))&tmpMask$rasteredMask!=0] <- 1
masksList[[length(masksList)+1]] <- tmpMask
}
names(masksList) <- gsub(parsedMasks$maskfile, pattern = '.tif', replacement = '')
ROIList$masks <- masksList
ROIList
}
getDNSpectra <- function(hp, hptime, ROI, prob = 0.5){
m <- findMask(ROI, hptime)
if(is.null(m)) return(NA)
if(length(m)==0) return(NA)
# par(mfrow=c(2,1))
# plotCLArray(hp[,,50]/6000)
# plotCLArray(m)
dm <- dim(hp)
sp <- rep(NA, dm[3])
for(s in 1:dm[3]) {
roi <- hp[,,s][m==0]
roi <- roi[(roi<4095)]
sp[s] <- quantile(roi, probs=prob, na.rm = T)
}
dummy <- 0
sp
}
findMask <- function(ROI, hptime){
hptime <- as.POSIXct(hptime, format='%Y-%m-%d %H:%M:%S')
nm <- length(ROI$masks)
mtimes <- data.frame(start= as.POSIXlt(rep(NA, nm)),
end= as.POSIXlt(rep(NA, nm)))
for(j in 1:nm){
m <- ROI$masks[[j]]
mtimes$start[j] <- as.POSIXlt(paste(m$startdate, m$starttime), format='%Y-%m-%d %H:%M:%S')
mtimes$end[j] <- as.POSIXlt(paste(m$enddate, m$endtime), format='%Y-%m-%d %H:%M:%S')
}
w <- apply(mtimes, MARGIN = 1, FUN = function(x){hptime>=x[1]&hptime<=x[2]})
if(!any(w)) return(NULL)
mi <- min(which(w))
m <- ROI$masks[[mi]]$rasteredMask
m <- rotate(rotate(t(m)))
m
}
getDT <- function(datadir= '/mnt/monsoon/data/archive/harvardbarnsoc',
format='.cube.gz' ){
cubes <- dir(path = datadir, pattern = paste0('*', format), full.names = T, recursive = T, all.files = T, no.. = T)
info <- as.data.table(file.info(cubes))
DT <- data.table(path=cubes, info[, .(size, mtime, ctime, atime)])
splt <- strsplit(gsub(basename(DT$path), pattern = format, replacement = ''), '_')
# all(unlist(lapply(splt, length))==5)
DT[, DateTime:=as.POSIXct(paste(apply(matrix(sprintf(as.numeric(unlist(strsplit(sapply(splt, function(x)(x[4])), split = '-'))), fmt = '%02d'), ncol=3, byrow = T), MARGIN = 1, paste, collapse='-'),
unlist(lapply(strsplit(sapply(splt, function(x)(x[5])) , split = '.', fixed = T), FUN = function(x)({paste(sprintf('%02d',as.numeric(x)), collapse = ':')})))),
"%d-%m-%Y %H:%M:%S", tz = "")]
DT[,Hour:=as.numeric(strftime(DateTime, format = '%H'))]
DT[,HM:=format(DateTime, format = '%H:%M')]
DT[,YMDHM:=format(DateTime, format = '%Y-%m-%d-%H-%M')]
DT
}
plotSp <- function(w,
scc,
col = '#80808020',
g = 1:ncol(scc),
fun = median,
sleep = NULL,
add = FALSE,
ylim=c(0, 0.035)){
if(length(g)!=ncol(scc)) stop('group length does not match with spectral array')
gu <- unique(g)
ngu <- length(gu)
  if(length(col)==1) col <- rep(col, ngu)
if(!add)plot(NA, type='n', xlim = range(w), ylim = ylim, col = col[1],
xlab = 'w (nm)', ylab = 'Spectral coefficient (-)')
for(i in 1:ngu) {
ss <- scc[,g==gu[i]]
if(!is.null(dim(ss))) ss <- apply(ss, MARGIN = 1, FUN = fun, na.rm=T)
lines(w, ss, col = col[i])
if(!is.null(sleep))Sys.sleep(sleep)
}
}
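# Hypothetical end-to-end sketch (not part of the original file): the ROI file, cube file
# and wavelength vector 'w' are placeholders for site-specific inputs.
if (FALSE) {
  ROI <- parseROI('roi_0001_roi.csv', roipath = 'ROI/')
  hp <- loadCube('harvardbarn_2017_08_15-12.05.cube.gz')
  sp <- getDNSpectra(hp, hptime = '2017-08-15 12:05:00', ROI = ROI, prob = 0.5)
  plot(w, sp, type = 'l', xlab = 'Wavelength (nm)', ylab = 'DN')
}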
|
/harvardsoc/f.R
|
permissive
|
GRSEB9S/hyperPheno
|
R
| false | false | 7,665 |
r
|